blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
75b2262d0926d42582eb07bc231e896130a4c287 | Python | Eason97/Spatio-temporal-Video-Super-Resolution | /dataset.py | UTF-8 | 3,855 | 2.5625 | 3 | [] | no_license | import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
import torch
import torchvision.transforms.functional as TF
import random
import torchvision
#data augmentation for image rotate
def augment(frame0, frame1, frame2, mask):
    """Randomly apply one spatial augmentation to all three frames and the mask.

    One of six equally likely methods is drawn: rotate (by 0/90/180/270
    degrees), vertical flip, horizontal flip, or (with probability 3/6)
    no change.  The same transform is applied to every input so the
    frames and the mask stay spatially aligned.

    Args:
        frame0, frame1, frame2: PIL images (the video triplet).
        mask: PIL image aligned with the frames.

    Returns:
        Tuple (frame0, frame1, frame2, mask) after augmentation.
    """
    # 0: rotate, 1: vertical flip, 2: horizontal flip, 3-5: keep original.
    # (The original code duplicated the per-frame calls in every branch and
    # carried a comment that only mentioned four choices.)
    augmentation_method = random.choice([0, 1, 2, 3, 4, 5])
    frames = (frame0, frame1, frame2, mask)
    if augmentation_method == 0:
        rotate_degree = random.choice([0, 90, 180, 270])
        frames = tuple(transforms.functional.rotate(f, rotate_degree) for f in frames)
    elif augmentation_method == 1:
        vertical_flip = torchvision.transforms.RandomVerticalFlip(p=1)
        frames = tuple(vertical_flip(f) for f in frames)
    elif augmentation_method == 2:
        horizontal_flip = torchvision.transforms.RandomHorizontalFlip(p=1)
        frames = tuple(horizontal_flip(f) for f in frames)
    return frames
class vimeo_dataset(Dataset):
    """Vimeo triplet dataset yielding low/high-resolution frame pairs.

    Each item is a randomly cropped, randomly augmented 256x256 patch of a
    three-frame Vimeo sequence, returned both at full resolution and
    bicubically downsampled by 4x.
    """
    def __init__(self, viemo_dir, mask_dir):
        # ToTensor converts HWC uint8 PIL images to CHW float tensors in [0, 1].
        self.transform = transforms.Compose([transforms.ToTensor()])
        self.list_train=[]
        # NOTE(review): the train-list path is hard-coded to one machine;
        # consider deriving it from viemo_dir instead ("viemo" looks like a
        # typo of "vimeo").
        for line in open('/ssd2/minghan/vimeo_triplet/'+'tri_trainlist.txt'):
            line = line.strip('\n')
            if line!='':
                self.list_train.append(line)
        self.root=viemo_dir
        # Example mask location: /home/fum16/context_aware_mask/mask
        self.mask_dir = mask_dir
        self.file_len = len(self.list_train)

    def __getitem__(self, index, is_train = True):
        """Return (lr0, hr0, lr1, hr1, lr2, hr2) tensors for triplet *index*.

        NOTE(review): *is_train* is accepted but never used in this method,
        and the loaded mask only participates in augmentation (it is cropped
        and augmented but not returned).
        """
        frame0 = Image.open(self.root + self.list_train[index] + '/' + "im1.png")
        frame1 = Image.open(self.root + self.list_train[index] + '/' + "im2.png")
        frame2 = Image.open(self.root + self.list_train[index] + '/' + "im3.png")
        mask_image = Image.open(self.mask_dir+ self.list_train[index] + '.png')
        # Crop the same random 256x256 patch from every frame and the mask.
        i,j,h,w = transforms.RandomCrop.get_params(frame0, output_size = (256, 256))
        frame0_ = TF.crop(frame0, i, j, h, w)
        frame1_ = TF.crop(frame1, i, j, h, w)
        frame2_ = TF.crop(frame2, i, j, h, w)
        mask_=TF.crop(mask_image, i, j, h, w)
        # Data augmentation (one random rotate/flip/no-op applied to all).
        frame0_arg, frame1_arg,frame2_arg,mask_arg = augment(frame0_, frame1_,frame2_,mask_)
        # BICUBIC 4x down-sampling produces the low-resolution inputs.
        frame0_arg_down = frame0_arg.resize((int(frame0_arg.size[0]//4), int(frame0_arg.size[1]//4)), Image.BICUBIC)
        frame2_arg_down = frame2_arg.resize((int(frame2_arg.size[0]//4), int(frame2_arg.size[1]//4)), Image.BICUBIC)
        gt_arg_down= frame1_arg.resize((int(frame1_arg.size[0]//4), int(frame1_arg.size[1]//4)), Image.BICUBIC)
        # PIL images -> float tensors.
        frame0_high = self.transform(frame0_arg)   # torch.Size([3, 256, 256])
        frame0_low = self.transform(frame0_arg_down)
        frame1_high = self.transform(frame1_arg)   # torch.Size([3, 256, 256])
        frame1_low = self.transform(gt_arg_down)
        frame2_high = self.transform(frame2_arg)   # torch.Size([3, 256, 256])
        frame2_low = self.transform(frame2_arg_down)
        return frame0_low,frame0_high,frame1_low,frame1_high,frame2_low,frame2_high
def __len__(self):
return self.file_len | true |
cbde4ed3773f315ebf9f2d05fe1a05083b1c7647 | Python | Escaity/Library | /python/atcoder/KH_seisen/1.3/a108.py | UTF-8 | 150 | 2.78125 | 3 | [] | no_license | # https://atcoder.jp/contests/abc108/tasks/abc108_a
def count_even_odd_products(n):
    """Return the number of (even, odd) pairs drawn from 1..n.

    There are n // 2 even and (n + 1) // 2 odd integers in 1..n, so the
    answer is their product.  This single formula replaces the original
    duplicated odd/even branches and agrees with both of them.
    """
    return (n // 2) * ((n + 1) // 2)


if __name__ == "__main__":
    # Guarding the I/O lets the function be imported and tested without
    # blocking on stdin (the original read input at module level).
    n = int(input())
    print(count_even_odd_products(n))
| true |
098945a2fecf6ff0d0e45b33f4026dc016367553 | Python | Aasthaengg/IBMdataset | /Python_codes/p03047/s549085166.py | UTF-8 | 298 | 2.546875 | 3 | [] | no_license | import sys
sys.setrecursionlimit(10 ** 8)
# Small stdin helpers (kept for compatibility with the original script).
ni = lambda: int(sys.stdin.readline())
nm = lambda: map(int, sys.stdin.readline().split())
nl = lambda: list(nm())
ns = lambda: sys.stdin.readline().rstrip()


def solve(n, k):
    """Return the number of starting positions of a length-k window in 1..n.

    Parameterized (the original read module-level globals N and K, and the
    stdin read at import time made the module unusable as a library).
    """
    return n - k + 1


if __name__ == "__main__":
    n, k = nm()
    print(solve(n, k))
| true |
93d29e07c4168ff730c97fd809a2b3cc61055ddf | Python | penguinmenac3/keras-starterpack | /utils/plot_losses.py | UTF-8 | 4,814 | 2.84375 | 3 | [] | no_license | import keras
import matplotlib.pyplot as plt
import numpy as np
def f1_score(true, pred, f1_score_class, tresh=0.5):
    """Compute (F1, TPR, FPR) for one class of one-hot style labels.

    Args:
        true: iterable of label vectors; true[i][f1_score_class] > 0.5 marks
            a positive example.
        pred: iterable of score vectors, same indexing as *true*.
        f1_score_class: index of the class being scored.
        tresh: decision threshold applied to the predicted score.

    Returns:
        (f1, tpr, fpr); (0, 0, 0) when both precision and recall are zero.
    """
    correct_positives = 0
    pred_positives = 0
    true_positives = 0
    for t, p in zip(true, pred):
        if t[f1_score_class] > 0.5 and p[f1_score_class] > tresh:
            correct_positives += 1
        if t[f1_score_class] > 0.5:
            true_positives += 1
        if p[f1_score_class] > tresh:
            pred_positives += 1

    precision = correct_positives / pred_positives if pred_positives > 0 else 0
    recall = correct_positives / true_positives if true_positives > 0 else 0
    if precision == 0 and recall == 0:
        return 0, 0, 0

    false_positive = pred_positives - correct_positives
    true_negative = len(true) - true_positives
    # Fixed: when every example is a positive, true_negative is 0 and the
    # original division raised ZeroDivisionError; FPR is 0 in that case.
    fpr = false_positive / true_negative if true_negative > 0 else 0
    tpr = correct_positives / true_positives
    return 2 * precision * recall / (precision + recall), tpr, fpr
class PlotLosses(keras.callbacks.Callback):
    # Keras callback that records loss/accuracy per epoch and, every 10
    # epochs, writes loss/accuracy curves (and optionally F1 and ROC curves
    # computed on held-out validation data) to image files.
    def __init__(self, loss_image_path, accuracy_image_path, f1_image_path=None, precision_recall_image_path=None, validation_data=None, f1_score_class=0):
        super().__init__()
        self.loss_image_path = loss_image_path
        self.accuracy_image_path = accuracy_image_path
        self.f1_image_path = f1_image_path
        self.precision_recall_image_path = precision_recall_image_path
        self.validation_data = validation_data  # (inputs, targets) tuple or None
        self.f1_score_class = f1_score_class    # class index passed to f1_score()
        self.i = 0        # epoch counter
        self.x = []       # x axis (epoch indices)
        self.losses = []
        self.val_losses = []
        self.acc = []
        self.val_acc = []
        self.fig = plt.figure()
        self.logs = []
        self.val_f1s = []
        self.tpr = []
        self.fpr = []

    def on_train_begin(self, logs={}):
        # Reset all histories so one callback instance can span several fit()
        # calls.  NOTE(review): the mutable default `logs={}` mirrors the
        # Keras signature and is never mutated here.
        self.i = 0
        self.x = []
        self.losses = []
        self.val_losses = []
        self.acc = []
        self.val_acc = []
        self.fig = plt.figure()
        self.logs = []
        self.val_f1s = []
        self.val_recalls = []
        self.val_precisions = []

    def on_epoch_end(self, epoch, logs={}):
        # Record this epoch's metrics from the Keras logs dict.
        self.logs.append(logs)
        self.x.append(self.i)
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.acc.append(logs.get('acc'))
        self.val_acc.append(logs.get('val_acc'))
        self.i += 1
        print("Iter %04d: loss=%02.2f acc=%02.2f val_loss=%02.2f val_acc=%02.2f" % (self.i, self.losses[-1], self.acc[-1], self.val_losses[-1], self.val_acc[-1]))
        # Plots (and the F1/ROC evaluation below) refresh only every 10 epochs.
        if self.i % 10 != 0:
            return
        # NOTE(review): always true after the early return above.
        if self.i % 10 == 0:
            plt.xlabel("iter")
            plt.ylabel("loss")
            plt.plot(self.x, self.losses, label="loss")
            plt.plot(self.x, self.val_losses, label="val_loss")
            plt.legend()
            plt.savefig(self.loss_image_path)
            plt.clf()
            plt.xlabel("iter")
            plt.ylabel("acc")
            plt.plot(self.x, self.acc, label="acc")
            plt.plot(self.x, self.val_acc, label="val_acc")
            plt.legend()
            plt.savefig(self.accuracy_image_path)
            plt.clf()
            print("Loss & acc plots updated.")
        if self.f1_image_path is not None and self.validation_data is not None:
            # NOTE(review): predictions are rounded to 0/1 *before* the
            # threshold sweep below, so all 100 thresholds see the same
            # binarized scores — confirm this is intended.
            val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
            val_targ = self.validation_data[1]
            self.tpr = [0]
            self.fpr = [0]
            best_f1 = 0.0
            # Sweep thresholds 0.00..0.99 to trace a ROC curve and keep the
            # best F1 found along the way.
            for idx in range(100):
                tresh = idx / 100.0
                _val_f1, tpr, fpr = f1_score(val_targ, val_predict, self.f1_score_class, tresh=tresh)
                self.tpr.append(tpr)
                self.fpr.append(fpr)
                best_f1 = max(best_f1, _val_f1)
            self.val_f1s.append(best_f1)
            # Close the ROC curve at (1, 1).
            self.tpr.append(1)
            self.fpr.append(1)
            if self.i % 10 == 0:
                # val_f1s gains one entry per 10-epoch refresh, hence the
                # [0::10] stride on the x axis.
                plt.plot(self.x[0::10], self.val_f1s, label="f1_validation")
                plt.xlabel("iter")
                plt.ylabel("f1")
                plt.legend()
                plt.savefig(self.f1_image_path)
                plt.clf()
                plt.plot(self.fpr, self.tpr, label="roc_validation")
                plt.xlabel("fpr")
                plt.ylabel("tpr")
                plt.legend()
                plt.savefig(self.precision_recall_image_path)
                plt.clf()
                print("f1 & roc plots updated.")
        else:
            # Keep val_f1s aligned with the refresh cadence even when no
            # validation data was supplied.
            self.val_f1s.append(0)
        print("Iter %04d: loss=%02.2f acc=%02.2f val_loss=%02.2f val_acc=%02.2f f1=%02.2f" % (self.i, self.losses[-1], self.acc[-1], self.val_losses[-1], self.val_acc[-1], self.val_f1s[-1]))
| true |
8c51584da1550b697334bee30814b84641be9be2 | Python | mickael-grima/petrinetX | /src/tests/tokenTest.py | UTF-8 | 686 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 01 21:38:37 2015
@author: Mickael Grima
"""
import sys
sys.path.append("/home/mickael/Documents/projects/petrinetX/src/")
import unittest
from Token import Token
class TokenTest(unittest.TestCase):
    """ test everything about Token class
    """
    def setUp(self):
        # Fresh Token for every test case.
        self.token = Token(name='name_castle_city', show=True, fire=True)

    def testCopy(self):
        """ does token.copy() copy everything?
        """
        tok = self.token.copy()
        # NOTE(review): iterkeys() is Python-2-only (use keys() on Python 3).
        # The `or ''` / `or {}` fallbacks force falsy attribute values to
        # compare unequal ('' != {}); presumably the intent is to assert the
        # copy holds distinct objects rather than shared references —
        # confirm against Token.copy() before changing this.
        for key in tok.__dict__.iterkeys():
            self.assertNotEqual(tok.__dict__[key] or '', self.token.__dict__[key] or {})
if __name__ == '__main__':
    unittest.main()  # discover and run the tests in this module
| true |
23383b86b80a36ff36b6c36b7b201e97d6c15089 | Python | layely/YOLO_pytorch | /metrics.py | UTF-8 | 785 | 2.953125 | 3 | [] | no_license | import torch
def IOU(x1, y1, w1, h1, x2, y2, w2, h2):
    # TODO: stub — always returns 0.  Intended to compute the
    # intersection-over-union of two boxes (x, y, w, h); whether (x, y) is a
    # corner or the box center is not established anywhere in this file.
    return 0
def mAP():
    # TODO: stub — mean average precision is not implemented yet.
    return 0
def confidence(x, y, w, h, ground_truths):
    """
    Given a bbox defined by x,y,w,h and the coordinates
    of all objects in an image, compute the confidence.

    ground_truths should be a tensor of bboxes, one box (x, y, w, h) per row.
    Returns the max IOU over all ground-truth boxes; if there is no object,
    the confidence is 0.
    """
    max_iou = 0
    # Fixed: iterate over row *indices*.  The original looped over the bare
    # int ground_truths.shape[0] ("for i in ground_truths.shape[0]"), which
    # raises TypeError: 'int' object is not iterable.
    for i in range(ground_truths.shape[0]):
        actual_bbox = ground_truths[i]
        gt_x = actual_bbox[0].item()
        gt_y = actual_bbox[1].item()
        gt_w = actual_bbox[2].item()
        gt_h = actual_bbox[3].item()
        iou = IOU(x, y, w, h, gt_x, gt_y, gt_w, gt_h)
        max_iou = max(max_iou, iou)
    return max_iou
| true |
51acb9a9bf443aea8f555b618e39a88b0fb559be | Python | DaltonBorges/Curso-de-Python | /pythonDesafios/des057.py | UTF-8 | 455 | 4.09375 | 4 | [] | no_license | # Faça um programa que leia o sexo de uma pessoa, mas só aceite os valores 'M' ou 'F'. Caso esteja errado, peça a digitação novamente, até ter um valor correto.
def normalize_sexo(text):
    """Return the first character of *text* upper-cased, or '' when blank.

    Slicing with [:1] instead of indexing [0] fixes the IndexError the
    original raised on empty input.
    """
    return text.strip().upper()[:1]


if __name__ == "__main__":
    sexo = normalize_sexo(input('''Informe o sexo
---------------
[ M ] Masculino
[ F ] Feminino
---------------
'''))
    # Membership in a tuple (not the string 'MmFf') also fixes the pitfall
    # that '' is a substring of every string and would pass validation.
    while sexo not in ('M', 'F'):
        sexo = normalize_sexo(input('Dados inválidos. Digite novamente'))
    print('Sexo {} registrado com sucesso'.format(sexo))
| true |
b438361b78bafb005e221e444f9d7900d07453a4 | Python | ygberg/labbar | /listlabb 1-4/list2.py | UTF-8 | 140 | 3.515625 | 4 | [] | no_license | n = int(input('type first number:'))
def multiples(base, count):
    """Return [base*1, base*2, ..., base*count] (a comprehension replaces the
    original manual while-loop with an index counter)."""
    return [base * i for i in range(1, count + 1)]


if __name__ == "__main__":
    # *n* was read from stdin just above this block.
    nf = int(input('type second number:'))
    print(multiples(n, nf))
| true |
d203a7b74f67e106987c5595de3e581c47ad8d6a | Python | biocad/bcd_mcts | /tic_tac_toe/player/HumanPlayer.py | UTF-8 | 498 | 3.078125 | 3 | [] | no_license | import numpy as np
from .TicTacToePlayer import *
class HumanPlayer(TicTacToePlayer):
    """
    This class encapsulates input-output related to taking turns by human
    """
    def turn(self, node):
        # Read the human's move as "row col" from stdin.
        r, c = [int(i) for i in input().split()]
        # Candidate successor states reachable from the current node.
        turns = [t[0] for t in node.get_available_child_nodes()]
        # NOTE(review): np.where(...) returns a *tuple of index arrays*, so
        # comparing it to the plain tuple (r, c) with == is almost certainly
        # not the intended "move differs exactly at (r, c)" test — verify
        # against the node/field API before relying on this filter.
        diff_fields = [turn for turn in turns if np.where(turn.field != node.field) == (r, c)]
        assert len(diff_fields) == 1, "Illegal move!"
        return diff_fields[0]
| true |
a79e3f53017be5d95975db7cd99110558de086a4 | Python | hua372494277/SVM | /np_random_multivariate_normal.py | UTF-8 | 435 | 3.5 | 4 | [] | no_license | # Draw random samples from a multivariate normal distribution
import numpy as np
import matplotlib.pyplot as plt
def formatOutputMultivariateNormal(mean=(0, 0), cov=((1, 0), (0, 1)), size=10):
    """Draw *size* samples from a multivariate normal and print them.

    The distribution parameters are now explicit arguments with the same
    defaults the script used (the original read module-level globals that
    only exist when run as a script, so importing the function broke it).

    Returns:
        The (size, len(mean)) sample matrix, so callers can inspect it.
    """
    matrix = np.random.multivariate_normal(mean, cov, size)
    print(str(matrix))
    print("\n")
    # .T is to transpose the matrix
    print(str(matrix.T))
    return matrix


if __name__ == "__main__":
    mean = [0, 0]
    cov = [[1, 0], [0, 1]]
    formatOutputMultivariateNormal(mean, cov)
| true |
3410861e3463ba95f2b45086e2e302757235d601 | Python | IsaacAnthonyHu/Checkio | /1_Home/12_The_Warriors_Clear_Solution.py | UTF-8 | 503 | 3.859375 | 4 | [] | no_license | class Warrior:
def __init__(self):
self.health = 50
self.attack = 5
@property # 通过@property装饰器将方法变成属性
def is_alive(self) -> bool:
return self.health > 0
class Knight(Warrior):
    """A Warrior variant with the same health but a stronger attack (7)."""

    def __init__(self):
        # Inherit the base stats first, then raise the attack value.
        super().__init__()
        self.attack = 7
def fight(unit1, unit2):
    """Run turn-based combat with *unit1* striking first.

    Each round unit1 hits unit2; if unit2 survives, it hits back.
    Returns True exactly when unit1 is still alive at the end.
    """
    while unit1.is_alive and unit2.is_alive:
        unit2.health -= unit1.attack
        if not unit2.is_alive:
            break
        unit1.health -= unit2.attack
    return unit1.is_alive
| true |
4c0280c6106ae901007b504ee20080ae2ae43ddf | Python | cindykimxp/in-toto | /in_toto/models/metadata.py | UTF-8 | 8,603 | 2.828125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-dco-1.1"
] | permissive | """
<Program Name>
metadata.py
<Author>
Lukas Puehringer <lukas.puehringer@nyu.edu>
Santiago Torres <santiago@nyu.edu>
<Started>
Oct 23, 2017
<Copyright>
See LICENSE for licensing information.
<Purpose>
Provides a container class `Metablock` for signed metadata and
functions for signing, signature verification, de-serialization and
serialization from and to JSON.
"""
import attr
import json
import securesystemslib.keys
import securesystemslib.formats
import securesystemslib.exceptions
import securesystemslib.gpg.functions
from in_toto.models.common import ValidationMixin
from in_toto.models.link import Link
from in_toto.models.layout import Layout
from in_toto.exceptions import SignatureVerificationError
@attr.s(repr=False, init=False)
class Metablock(ValidationMixin):
  """A container for signed in-toto metadata.

  Provides methods for metadata JSON (de-)serialization, reading from and
  writing to disk, creating and verifying signatures, and self-validation.

  Attributes:
    signed: A subclass of Signable which has the actual metadata payload,
        usually a Link or Layout object.
    signatures: A list of signatures over the canonical JSON representation
        of the value of the signed attribute.
    compact_json: A boolean indicating if the dump method should write a
        compact JSON string representation of the metadata.

  """
  signatures = attr.ib()
  signed = attr.ib()


  def __init__(self, **kwargs):
    # attr's generated __init__ is disabled (init=False above) so that the
    # fields can default sensibly and validation runs on construction.
    self.signatures = kwargs.get("signatures", [])
    self.signed = kwargs.get("signed")
    self.compact_json = kwargs.get("compact_json", False)

    self.validate()


  def __repr__(self):
    """Returns the JSON string representation. """
    indent = None if self.compact_json else 1
    separators = (',', ':') if self.compact_json else (',', ': ')

    # sort_keys makes the serialization deterministic.
    return json.dumps(
        {
          "signatures": self.signatures,
          "signed": attr.asdict(self.signed)
        },
        indent=indent,
        separators=separators,
        sort_keys=True
      )


  def dump(self, path):
    """Writes the JSON string representation of the instance to disk.

    Arguments:
      path: The path to write the file to.

    Raises:
      IOError: File cannot be written.

    """
    # Written as UTF-8 bytes so the canonical representation round-trips.
    with open(path, "wb") as fp:
      fp.write("{}".format(self).encode("utf-8"))


  @staticmethod
  def load(path):
    """Loads the JSON string representation of in-toto metadata from disk.

    Arguments:
      path: The path to read the file from.

    Raises:
      IOError: The file cannot be read.
      securesystemslib.exceptions.FormatError: Metadata format is invalid.

    Returns:
      A Metablock object whose signable attribute is either a Link or a Layout
      object.

    """
    with open(path, "r") as fp:
      data = json.load(fp)

    signatures = data.get("signatures", [])
    signed_data = data.get("signed", {})
    # The "_type" field selects the concrete payload class.
    signed_type = signed_data.get("_type")

    if signed_type == "link":
      signed = Link.read(signed_data)

    elif signed_type == "layout":
      signed = Layout.read(signed_data)

    else:
      raise securesystemslib.exceptions.FormatError("Invalid Metadata format")

    return Metablock(signatures=signatures, signed=signed)


  @property
  def type_(self):
    """A shortcut to the `type_` attribute of the object on the signable
    attribute (should be one of "link" or "layout"). """
    # NOTE: Trailing underscore is used by convention (pep8) to avoid conflict
    # with Python's type keyword.
    return self.signed.type_


  def sign(self, key):
    """Creates signature over signable with key and adds it to signatures.

    Uses the UTF-8 encoded canonical JSON byte representation of the signable
    attribute to create signatures deterministically.

    Attributes:
      key: A signing key. The format is securesystemslib.formats.KEY_SCHEMA.

    Raises:
      securesystemslib.exceptions.FormatError: Key argument is malformed.
      securesystemslib.exceptions.CryptoError, \
              securesystemslib.exceptions.UnsupportedAlgorithmError:
          Signing errors.

    Returns:
      The signature. Format is securesystemslib.formats.SIGNATURE_SCHEMA.

    """
    securesystemslib.formats.KEY_SCHEMA.check_match(key)

    signature = securesystemslib.keys.create_signature(key,
        self.signed.signable_bytes)

    self.signatures.append(signature)

    return signature


  def sign_gpg(self, gpg_keyid=None, gpg_home=None):
    """Creates signature over signable with gpg and adds it to signatures.

    Uses the UTF-8 encoded canonical JSON byte representation of the signable
    attribute to create signatures deterministically.

    Arguments:
      gpg_keyid (optional): A keyid used to identify a local gpg signing key.
          If omitted the default signing key is used.

      gpg_home (optional): A path to the gpg home directory. If not set the
          default gpg home directory is used.

    Raises:
      ValueError, OSError, securesystemslib.gpg.exceptions.CommandError, \
              securesystemslib.gpg.exceptions.KeyNotFoundError:
          gpg signing errors.

    Side Effects:
      Calls system gpg command in a subprocess.

    Returns:
      The signature. Format is securesystemslib.formats.GPG_SIGNATURE_SCHEMA.

    """
    signature = securesystemslib.gpg.functions.create_signature(
        self.signed.signable_bytes, gpg_keyid, gpg_home)

    self.signatures.append(signature)

    return signature


  def verify_signature(self, verification_key):
    """Verifies a signature over signable in signatures with verification_key.

    Uses the UTF-8 encoded canonical JSON byte representation of the signable
    attribute to verify the signature deterministically.

    NOTE: Only the first signature in the signatures attribute, whose keyid
    matches the verification_key keyid, is verified. If the verification_key
    format is securesystemslib.formats.GPG_PUBKEY_SCHEMA, subkey keyids are
    considered too.

    Arguments:
      verification_key: A verification key. The format is
          securesystemslib.formats.ANY_VERIFICATION_KEY_SCHEMA.

    Raises:
      securesystemslib.exceptions.FormatError: The passed key is malformed.

      SignatureVerificationError: No signature keyid matches the verification
          key keyid, or the matching signature is malformed, or the matching
          signature is invalid.

      securesystemslib.gpg.exceptions.KeyExpirationError: Passed verification
          key is an expired gpg key.

    """
    securesystemslib.formats.ANY_VERIFICATION_KEY_SCHEMA.check_match(
        verification_key)
    verification_keyid = verification_key["keyid"]

    # Find a signature that corresponds to the keyid of the passed
    # verification key or one of its subkeys
    signature = None
    for signature in self.signatures:
      if signature["keyid"] == verification_keyid:
        break

      if signature["keyid"] in list(
          verification_key.get("subkeys", {}).keys()):
        break

    # for-else: runs when the loop completed without a break (including the
    # empty-signatures case), i.e. no signature matched the key.
    else:
      raise SignatureVerificationError("No signature found for key '{}'"
          .format(verification_keyid))

    # Dispatch on the signature format: gpg signatures and regular
    # securesystemslib signatures are verified by different backends.
    if securesystemslib.formats.GPG_SIGNATURE_SCHEMA.matches(signature):
      valid = securesystemslib.gpg.functions.verify_signature(signature,
          verification_key, self.signed.signable_bytes)

    elif securesystemslib.formats.SIGNATURE_SCHEMA.matches(signature):
      valid = securesystemslib.keys.verify_signature(
          verification_key, signature, self.signed.signable_bytes)

    else:
      valid = False

    if not valid:
      raise SignatureVerificationError("Invalid signature for keyid '{}'"
          .format(verification_keyid))


  def _validate_signed(self):
    """Private method to check if the 'signed' attribute contains a valid
    Layout or Link object. """

    if not (isinstance(self.signed, Layout) or isinstance(self.signed, Link)):
      raise securesystemslib.exceptions.FormatError("The Metblock's 'signed'"
        " property has has to be of type 'Link' or 'Layout'.")

    # If the signed object is a Link or Layout object validate it.
    self.signed.validate()


  def _validate_signatures(self):
    """Private method to check that the 'signatures' attribute is a list of
    signatures in the format 'securesystemslib.formats.ANY_SIGNATURE_SCHEMA'.
    """

    if not isinstance(self.signatures, list):
      raise securesystemslib.exceptions.FormatError("The Metablock's"
        " 'signatures' property has to be of type 'list'.")

    for signature in self.signatures:
      securesystemslib.formats.ANY_SIGNATURE_SCHEMA.check_match(signature)
| true |
c2238b2390ebbc5ecafcd03f1f1ab4f3d8cbb5e2 | Python | gene63/BaekJoonStep | /step10/1.py | UTF-8 | 315 | 3.453125 | 3 | [] | no_license | n = int(input())
def is_prime(num):
    """Return True when *num* is prime.

    Trial division runs only up to sqrt(num) (the original tested every
    divisor below num, which is quadratic over the whole input).
    """
    if num < 2:
        return False
    for divisor in range(2, int(num ** 0.5) + 1):
        if num % divisor == 0:
            return False
    return True


if __name__ == "__main__":
    # The element count was already consumed from stdin on the line above
    # (and, as in the original, is not otherwise used).
    nums = input().split()
    # Count primes directly instead of building a list just to take len().
    print(sum(1 for token in nums if is_prime(int(token))))
| true |
d3077f266d3257e54ba28db9dae7f88b109ed5e2 | Python | mingijeon/tic-tac-toe | /tic-tac-toe.py | UTF-8 | 1,641 | 2.9375 | 3 | [] | no_license | from tkinter import *
def checked(i) :
    # Handle a click on cell i (0..8): place the current player's mark,
    # flip the turn, and test for a win or a draw.
    global player
    global count
    button = list[i]   # NOTE(review): *list* is the module-level button list (shadows the builtin)
    if button["text"] != " " :
        return  # cell already taken: ignore the click
    button["text"] = player
    button["bg"] = "yellow"
    if player == "X" :
        player = "O"
        button["bg"] = "yellow"   # NOTE(review): redundant — already set above
    else :
        player = "X"
        button["bg"] = "lightgreen"
    count = count + 1
    # A win is only possible once 5 marks are on the board (3 by the first
    # player); at 9 marks with no winner the game is a draw.
    if count >= 5 and checkedwinner(i//3, i%3) == 1 :
        print_whowin()
        window.destroy()
    elif count == 9 and checkedwinner(i//3, i%3) == 0 :
        print_draw()
        window.destroy()
# cheked Who Win
def checkedwinner(row, column):
    """Return 1 when the move at (row, column) completed a winning line.

    Checks the move's row and column plus both diagonals.  Fixed: each line
    now also has to hold a *played* mark (!= " ").  The original returned 1
    for three matching blank cells, so e.g. an empty diagonal after the 5th
    move was reported as a win.
    """
    def same_mark(a, b, c):
        # All three cells equal AND actually marked.
        return list[a]["text"] == list[b]["text"] == list[c]["text"] != " "

    if same_mark(row * 3, row * 3 + 1, row * 3 + 2):
        return 1
    if same_mark(column, column + 3, column + 6):
        return 1
    if same_mark(0, 4, 8):
        return 1
    if same_mark(2, 4, 6):
        return 1
    return 0
# print who win
def print_whowin():
    """Announce the winner.

    By the time this is called, *player* has already been flipped to the
    NEXT player, so the winner is the opposite mark.
    """
    if player == "X":
        print("O Win!")
    elif player == "O":
        print("X Win!")
# print draw
def print_draw():
    """Report that the game finished with no winner."""
    print("Draw!")
window = Tk()
player = "X"   # "X" always moves first
count = 0      # number of marks placed so far
list= []       # NOTE(review): shadows the built-in *list*; holds the 9 cell buttons
for i in range(9) :
    # k=i binds the loop value as a default, so each button reports its own
    # index instead of the final value of i.
    b = Button(window, text=" ", command=lambda k=i: checked(k))
    b.grid(row=i//3, column=i%3)
    list.append(b)
window.mainloop()
| true |
def mean_squared_error(prediction, test_set, verbose=True):
    """
    Calculates the mean squared error between the test_set and the prediction
    :params prediction: dict | input of form: {(i,u): value}
    :params test_set: dict | input of form: {(i,u): value}
    """
    squared_total = sum((prediction[key] - test_set[key]) ** 2 for key in test_set)
    error = squared_total / len(test_set)
    if verbose:
        print("MSE: " + str(error))
    return error
def absolute_mean_error(prediction, test_set, verbose=True):
    """
    Calculates the absolute mean error between the test_set and the prediction
    :params prediction: dict | input of form: {(i,u): value}
    :params test_set: dict | input of form: {(i,u): value}
    """
    absolute_total = sum(abs(prediction[key] - test_set[key]) for key in test_set)
    error = absolute_total / len(test_set)
    if verbose:
        print("AME: " + str(error))
    return error
def precision(prediction, test_set, threshhold=0.5, verbose=True):
    """
    Calculates the precision between the test_set and the prediction
    :param prediction: dict | input of form: {(i,u): value}
    :param test_set: input of form: {(i,u): value}
    :param threshhold: Probability threshhold for prediction
    :return: value of precision; 0.0 when nothing is predicted positive
        (the original raised ZeroDivisionError in that case)
    """
    # Binarize the predicted scores once instead of mutating a copy in place.
    binarized = {key: 1 if value >= threshhold else 0
                 for key, value in prediction.items()}
    tp = 0
    p = 0
    for key in test_set:
        predicted = binarized[key]
        if predicted == 1:
            p += 1
        if predicted == 1 and test_set[key] == 1:
            tp += 1
    prec = tp / p if p else 0.0
    if verbose:
        print("Precision: " + str(prec))
    return prec
def recall(prediction, test_set, threshhold=0.5, verbose=True):
    """
    Calculates the recall between the test_set and the prediction
    :param prediction: dict | input of form: {(i,u): value}
    :param test_set: input of form: {(i,u): value}
    :param threshhold: Probability threshhold for prediction
    :return: value of recall; 0.0 when the test set holds no positives
        (the original raised ZeroDivisionError in that case)
    """
    # Binarize the predicted scores once instead of mutating a copy in place.
    binarized = {key: 1 if value >= threshhold else 0
                 for key, value in prediction.items()}
    tp = 0
    fn = 0
    for key in test_set:
        predicted = binarized[key]
        if predicted == 1 and test_set[key] == 1:
            tp += 1
        if predicted == 0 and test_set[key] == 1:
            fn += 1
    rec = tp / (tp + fn) if (tp + fn) else 0.0
    if verbose:
        print("Recall: " + str(rec))
    return rec
| true |
be9f0655207c3f07c649485153a40da9be48a4a0 | Python | rkrishnanal/Python | /number.py | UTF-8 | 98 | 2.515625 | 3 | [] | no_license | x = 1 # int
y = 2.3     # float literal
z = 3 + 1j  # complex literal: real part 3, imaginary part 1 (j suffix)
a = 99e3    # scientific notation: 99 * 10**3 -> 99000.0 (always a float)
print(type(z))  # -> <class 'complex'>
| true |
97f42f4a581ae8d64e2b7302f38ba5f26481c6a1 | Python | paulhankin/aoc2019 | /day22.py | UTF-8 | 1,571 | 3.625 | 4 | [] | no_license | # computes r * a^n, where a*b is mul(a, b)
def exp(r, a, n, mul):
    """Return r * a**n under the binary operation *mul*.

    Square-and-multiply: the exponent is consumed bit by bit, squaring the
    base each step and folding it into the accumulator on set bits.
    """
    acc, base, remaining = r, a, n
    while remaining:
        if remaining & 1:
            acc = mul(acc, base)
        base = mul(base, base)
        remaining >>= 1
    return acc
# idea: represent a shuffle as a pair (a, b) such that the card at
# position i goes to (i*a+b) (modulo deck size).
# This representation is good for cuts, deals, and deck reversals.
# It's also closed under composition.
# do shuffle s1, then shuffle s2 on a deck of the given size.
def compose(s1, s2, DS):
    """Compose two affine shuffles of a deck of size DS.

    A shuffle (a, b) sends the card at position x to (a*x + b) mod DS.
    Applying s1 first and then s2 gives x -> a2*(a1*x + b1) + b2, i.e.
    the pair (a1*a2, a2*b1 + b2) reduced mod DS.
    """
    a1, b1 = s1
    a2, b2 = s2
    return (a1 * a2) % DS, (a2 * b1 + b2) % DS
def read_cmds():
    """Yield each shuffle instruction in day22.txt as an affine pair (a, b).

    'cut N'                 -> (1, -N)
    'deal into new stack'   -> (-1, -1)
    'deal with increment N' -> (N, 0)

    Raises Exception on any unrecognized instruction.  Blank lines (e.g.
    the empty entry produced by a trailing newline) are skipped — the
    original indexed parts[0] on them and raised IndexError.
    """
    with open('day22.txt') as f:
        lines = f.read().split('\n')
    for line in lines:
        parts = line.strip().split()
        if not parts:
            continue  # skip blank lines
        if parts[0] == 'cut':
            yield (1, -int(parts[-1]))
        elif parts[0] == 'deal' and parts[-1] == 'stack':
            yield (-1, -1)
        elif parts[0] == 'deal':
            yield (int(parts[-1]), 0)
        else:
            raise Exception('bad instruction %r' % parts)
cmds = list(read_cmds())

print('part 1')
DS1 = 10007  # deck size for part 1 (prime)
shuf = (1, 0)  # identity shuffle: x -> 1*x + 0
for cmd in cmds:
    shuf = compose(shuf, cmd, DS1)
# Position of card 2019 after one pass of the whole shuffle.
print((shuf[0] * 2019 + shuf[1]) % DS1)

print('part 2')
DS2 = 119315717514047   # deck size for part 2 (prime)
ITERS = 101741582076661  # number of repetitions of the full shuffle
shuf = (1, 0)
for cmd in cmds:
    shuf = compose(shuf, cmd, DS2)
# Repeat the one-pass shuffle ITERS times via fast exponentiation over
# shuffle composition.
shuf = exp((1, 0), shuf, ITERS, lambda a, b: compose(a, b, DS2))
TARGET = 2020
# DS2 is prime, so the modular inverse of x mod DS2 is pow(x, DS2-2, DS2).
# find loc such that shuf[0] * loc + shuf[1] is TARGET.
loc = (pow(shuf[0], DS2-2, DS2) * (TARGET - shuf[1])) % DS2
print(loc)
assert (shuf[0] * loc + shuf[1]) % DS2 == TARGET
| true |
0e8082317c82934fadc3e4920ee870d50bafdf7f | Python | PerfXWeb/python-workshop | /sample_code/project_covid_happiness/project_covid.py | UTF-8 | 5,775 | 3.65625 | 4 | [] | no_license |
import os
import json
import glob
# need to pip-install these first
import requests # python3 -m pip install requests
import openpyxl # python3 -m pip install openpyxl
########## APPROACH ##########
#
# 1. Get the data out of every employee file
# - To do so, we need to temporarily store the data in variables
# - In the end, all we need is JUST the AVERAGE of the employees, not every single data point
# - Technically, we could go through all 150 employee files 52 times and calculate the average of a single week every single time
# - Instead, we will do the following (more efficient):
# 1. We will go though all employee files only ONCE and ADD UP the data points of each week
# 2. In the end, we have only 52 values, each one stores the SUM of all employees ratings of a single week
# 3. We can then divide this sum by the number of employee files to get the average
#
# 2. Calculate the average by using the SUM of the employee ratings divided by the number of employee files
# 3. Write the new averages into our main Excel sheet
# 4. Get COVID data from an API and insert into our Excel sheet
# - We have to look at the API and check what data is actually being served
# - The data from the API we are using provides us with TOTAL COVID cases of every single DAY
# - Since we only need the data for every WEEK, we need to extract only every 7th data point
# - We also need to make sure that the COVID data aligns with our week numbers.
# - We are only provided with data starting at the end of February, so we need to adjust for that.
# Set up our list in which we will store all sums of our employee happiness for each week
# Running sum of all employees' happiness ratings, one slot per week.
sums = [0 for i in range(52)]

# Step 1: accumulate the weekly ratings from every employee workbook.
# The asterisk "*" is a wildcard: anything may appear between "2 happiness"
# and ".xlsx".  The file list is computed ONCE and reused below — the
# original re-ran glob.glob() on every iteration of the step-2 loop,
# hitting the filesystem 52 extra times.
employee_files = glob.glob(os.path.join('sheets', '2 happiness*.xlsx'))
for filename in employee_files:
    # Progress output (fixed: the original printed the literal "(unknown)"
    # instead of the file being opened).
    print(f"Opening: {filename}")
    # data_only=True returns cached cell VALUES rather than formulas.
    wb = openpyxl.load_workbook(filename=filename, data_only=True)
    ws = wb.active  # the first/active worksheet
    for i in range(52):
        # Weekly ratings live in column B starting at row 6.
        sums[i] = sums[i] + ws[f'B{i+6}'].value
        print(f"Week {i+1}: {ws[f'B{i+6}'].value}")

# Step 2: turn each weekly sum into the average over all employee files.
for e, i in enumerate(sums):
    sums[e] = i / len(employee_files)
print("Step 2 done")

# Step 3: write the weekly averages into the main correlation sheet.
wb = openpyxl.load_workbook(filename=os.path.join("sheets", "0 correlation_sheet.xlsx"), data_only=True)
ws = wb.active
for e, i in enumerate(sums):
    print(f"Average for week {e+1}: {i}")
    ws[f'B{4+e}'].value = i  # averages start at row 4
wb.save(filename=os.path.join("sheets", "0 correlation_sheet.xlsx"))

# Step 4: fetch COVID data from the API and add it to the sheet.
data = requests.get("https://api.covid19api.com/dayone/country/austria/status/confirmed")
data = json.loads(data.content)  # JSON -> list of daily records

wb = openpyxl.load_workbook(filename=os.path.join("sheets", "0 correlation_sheet.xlsx"), data_only=True)
ws = wb.active
# The API serves DAILY totals starting in late February (8 weeks into the
# year), so only every 7th record is taken (offset 5 to align with our week
# boundaries) and the first 8 week rows are skipped.
for i in range(52 - 8):
    ws[f'C{i+12}'].value = data[i*7+5]['Cases']  # cumulative case total
    ws[f'D{i+12}'].value = data[i*7+5]['Date']   # date, to verify alignment
wb.save(filename=os.path.join("sheets", "0 correlation_sheet.xlsx"))
| true |
91188c62beeab2e2040b958416e054af8f79f50b | Python | jguerra7/python-for-pentesters | /03-network-security/01-multiprocessing-echo.py | UTF-8 | 934 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
#
# Echo server using multiprocessing, can accept more than one client
# Up to 5 clients simultaneously
import signal
import socket
from multiprocessing import Process
# Shared listening socket; every forked worker inherits this descriptor and
# competes to accept() on it.
tcpSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow quick restart after TIME_WAIT
HOST = "0.0.0.0"  # listen on every interface
PORT = 8000
tcpSocket.bind((HOST, PORT))
tcpSocket.listen(1)  # backlog of one pending connection
class WorkerProcess(Process):
    """Worker process that accepts clients on the shared socket and echoes bytes back (Python 2)."""
    def __init__(self):
        Process.__init__(self)
    def run(self):
        # Each worker blocks in accept(); the kernel hands any new connection
        # to exactly one of the competing processes.
        while True:
            (client, addr) = tcpSocket.accept()
            print "Client connected! From ", addr
            client.send("Connection established!\n")
            data = client.recv(2048)
            # Echo until the peer closes the connection (recv returns '').
            # NOTE(review): the client socket is never closed afterwards.
            while data:
                client.send(data)
                data = client.recv(2048)
# Spawn five workers, all competing to accept() on the same listening socket
# (this is what allows up to five simultaneous clients).
for i in range(5):
    worker = WorkerProcess()
    worker.daemon = True  # daemon workers die with the parent
    worker.start()
def exit_handler(signum, frm):
    """SIGINT handler: announce shutdown and terminate the parent (daemons follow)."""
    print "Terminating..."
    exit()
signal.signal(signal.SIGINT, exit_handler)
# Keep the parent alive so the signal handler can run.
# NOTE(review): this busy-wait spins a CPU core -- signal.pause() would be cheaper.
while True:
    pass
| true |
c425999f786a56b435d55891e4c2936ee73f32a7 | Python | meorkamir/lazada-automation | /pages/homepage.py | UTF-8 | 964 | 3.015625 | 3 | [] | no_license | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait as Wait
from selenium.webdriver.support import expected_conditions as EC
class HomePage():
    """Selenium page object for the Lazada home page."""
    def __init__(self, driver):
        self.driver = driver
        # define locators here
        self.search_textbox_id = "q"  # id of the search input box
        self.search_button_class = "search-box__button--1oH7"  # class of the search (magnifier) button
    def wait_homepage_load(self):
        """Block up to 10 s until the search box is present in the DOM."""
        Wait(self.driver, 10).until(EC.presence_of_element_located((By.ID, self.search_textbox_id)))
    def search_item(self, item):
        """Clear the search box and type *item* into it."""
        self.driver.find_element_by_id(self.search_textbox_id).clear()
        self.driver.find_element_by_id(self.search_textbox_id).send_keys(item)
    def click_search_icon(self):
        """Click the search button to submit the query."""
        self.driver.find_element_by_class_name(self.search_button_class).click()
    def user_input_and_click_search(self, item):
        """Wait for the page, enter *item*, and submit the search (convenience wrapper)."""
        self.wait_homepage_load()
        self.search_item(item)
        self.click_search_icon()
| true |
a01d9dc28cd1899fc322e2f09d9c9804adc699f9 | Python | djoo1028/AlgoritmProblems | /Codeup-100problem_challenge/Q1075.py | UTF-8 | 1,421 | 4 | 4 | [] | no_license | '''
print alphabet by order
if user type f then print a ~ f
chr -> int -> letter
ord -> letter -> integer
a -> 97
z -> 122
'''
def order(arg1):
    """Print every lowercase letter from 'a' up to and including *arg1*,
    space-separated on a single line (e.g. 'f' -> 'a b c d e f ').
    """
    last = ord(arg1)  # ord('a') == 97, ord('z') == 122
    # Removed the dead statements of the original: an unused `loc = 0` and a
    # pointless `i = i + 1` that reassigned the for-loop variable to no effect.
    for code in range(97, last + 1):
        print(chr(code), end=' ')
'''
print number util get the number
for example, user type 3 then print 0 1 2 3 in vertical
'''
def orderNum(arg1):
    """Print the integers 0..arg1 inclusive, one per line.

    *arg1* may be an int or a numeric string (it is passed through int()).
    """
    limit = int(arg1)
    # Removed the dead `i = i + 1` that reassigned the loop variable to no effect.
    for value in range(limit + 1):
        print(value)
'''
add even number in range
Q1078
'''
def sumEven(arg1):
    """Print the sum of all even numbers in 0..arg1 inclusive (Codeup Q1078).

    *arg1* may be an int or a numeric string.
    """
    limit = int(arg1)
    # range with step 2 visits exactly the even numbers, so the original's
    # per-element parity test (and its dead `else: sum` branch) is unnecessary.
    print(sum(range(0, limit + 1, 2)))
'''
Q1079
keep printing until u get 'q'
'''
def findQ(arg1):
    """Print each whitespace-separated word of *arg1* on its own line,
    stopping after (and including) the first 'q' (Codeup Q1079).
    """
    # The original printed the word in both branches of an if/else; printing
    # once and then testing for the stop word removes the duplication.
    for word in arg1.split():
        print(word)
        if word == 'q':
            break
'''
Q1080
keep adding num until the number is greater than typed number
for example if user type 55 then it should print 10
'''
def summation(arg1):
    """Print the smallest n for which 0 + 1 + ... + n >= int(arg1) (Codeup Q1080).

    The search only covers n in 0..1000; larger targets print nothing.
    """
    target = int(arg1)
    total = 0
    for n in range(1001):
        total += n
        if total >= target:
            print(n)
            break
x = input()  # single value shared by all the exercises above
# Earlier exercises, kept for reference -- uncomment one to run it instead:
#orderNum(x)
#sumEven(x)
#findQ(x)
summation(x)
36e2104e5026628435762dbcb3e94c57e8fd6228 | Python | Jrobertzz/password-safe | /PassSys.py | UTF-8 | 4,926 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Jacob Wilson
"""
import base64
import os
from cryptography.fernet import Fernet
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
class PassSys:
    """File-backed password safe.

    Usernames live one-per-line in ``self.ufile``; each user additionally owns
    two files next to it:

    * ``<name>_s.dat`` -- a 16-byte random salt, written once at creation;
    * ``<name>_d.txt`` -- alternating Fernet-encrypted lines: name, password,
      name, password, ...

    The Fernet key is derived from the user's master password and salt with
    PBKDF2-HMAC-SHA256, so a wrong master password makes every decrypt fail.
    """

    # KDF parameters shared by every key derivation.
    _KDF_ITERATIONS = 100000
    _KDF_LENGTH = 32

    def __init__(self, ufile):
        # Path of the newline-separated list of registered usernames.
        self.ufile = ufile

    # TODO: loginVerification(username, mPassword) was planned (see original
    # comment) but never implemented; decryption failure currently serves as
    # the implicit password check.

    def _derive_fernet(self, username, mPassword):
        """Build the Fernet cipher for *username* from their salt file and *mPassword*.

        Extracted helper: the original repeated this KDF boilerplate in five methods.
        """
        with open(username + '_s.dat', 'rb') as s:
            salt = s.read(16)  # each salt is exactly 16 bytes (see addUser)
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=self._KDF_LENGTH,
            salt=salt,
            iterations=self._KDF_ITERATIONS,
            backend=default_backend()
        )
        key = base64.urlsafe_b64encode(kdf.derive(mPassword.encode()))
        return Fernet(key)

    def getUsers(self):
        """Return the list of registered usernames (blank lines skipped)."""
        with open(self.ufile, 'r') as user:
            return [line.rstrip() for line in user if line.rstrip()]

    def addUser(self, username, mPassword):
        """Register *username*: append it to the user list and create a fresh salt.

        Prints a message and does nothing if the name is already taken.
        NOTE(review): *mPassword* is unused here -- no verifier is stored; the
        master password only ever matters when deriving the encryption key.
        """
        with open(self.ufile, 'r') as user:
            taken = any(u.strip() == username.strip() for u in user)
        if taken:
            print("username already used")
            return
        with open(self.ufile, 'a') as user:
            user.write(username + '\n')
        # 16-byte salt; _derive_fernet reads back exactly this many bytes.
        with open(username + '_s.dat', 'wb') as salt:
            salt.write(os.urandom(16))

    def deleteUser(self, username):
        """Remove *username* from the user list and delete their salt and data files."""
        with open(self.ufile, 'r') as user:
            users = user.readlines()
        with open(self.ufile, 'w') as user:
            for u in users:
                if u != (username + '\n'):
                    user.write(u)
            user.truncate()
        try:
            os.remove(username + '_s.dat')
        except FileNotFoundError:
            print("must delete a valid user")
        try:
            os.remove(username + '_d.txt')
        except FileNotFoundError:
            print("user had no passwords stored")

    def getNames(self, username, mPassword):
        """Return the decrypted entry names (every odd line of the data file)."""
        f = self._derive_fernet(username, mPassword)
        namesList = []
        try:
            with open(username + '_d.txt', 'r') as user:
                for count, line in enumerate(user, start=1):
                    if count % 2 == 1:  # odd lines hold names, even lines passwords
                        namesList.append(f.decrypt(line.encode()).decode())
        except FileNotFoundError:
            print("user information dosent exist")  # message kept verbatim
        return namesList

    def addPassword(self, username, mPassword, name, password):
        """Append an encrypted (name, password) pair to the user's data file."""
        f = self._derive_fernet(username, mPassword)
        # 'a' mode creates the file if missing, so the original's
        # FileNotFoundError fallback was dead code.
        with open(username + '_d.txt', 'a') as d:
            d.write(f.encrypt(name.encode()).decode() + '\n')
            d.write(f.encrypt(password.encode()).decode() + '\n')

    def getPassword(self, username, mPassword, name):
        """Return the password stored under *name*, or None if *name* is absent.

        The with-block also fixes the original's leaked file handle on the
        early return.
        """
        f = self._derive_fernet(username, mPassword)
        with open(username + '_d.txt', 'r') as d:
            found = False
            for line in d:
                decrypted = f.decrypt(line.encode()).decode().strip()
                if found:
                    # Previous line was the matching name; this one is its password.
                    return decrypted
                if decrypted == name.strip():
                    found = True
        return None

    def deletePassword(self, username, mPassword, name):
        """Remove the (name, password) pair stored under *name* from the data file."""
        f = self._derive_fernet(username, mPassword)
        dfile = username + '_d.txt'
        with open(dfile, 'r') as d:
            lines = d.readlines()
        # found counts 0 -> 1 on the matching name line, 1 -> 2 on its password
        # line; lines are only dropped while the pair is being consumed, so
        # exactly one (name, password) pair is removed.
        found = 0
        with open(dfile, 'wb') as d:
            for line in lines:
                decrypted = f.decrypt(line.encode()).decode().strip()
                if decrypted != name and found != 1:
                    d.write(line.encode())
                else:
                    found += 1
            d.truncate()

    def editPassword(self, username, mPassword, name, new_password):
        """Replace the password stored under *name* with *new_password*."""
        self.deletePassword(username, mPassword, name)
        self.addPassword(username, mPassword, name, new_password)
| true |
a0868ee6eca3ee522f1662b7d781b74a6fdaf438 | Python | hanekensama/AOJ | /0180/0180.py | UTF-8 | 511 | 3 | 3 | [] | no_license | import heapq
def createGraph(n, m):
    """Read *m* undirected weighted edges ("a b cost" per stdin line) and
    return an adjacency list for vertices 0..n-1; each edge is stored in
    both directions.
    """
    adjacency = [[] for i in range(n)]
    for i in range(m):
        a, b, cost = map(int, input().split())
        adjacency[a].append((b, cost))
        adjacency[b].append((a, cost))
    return adjacency
def solve(n, m):
    """Intended to compute the answer over the graph, but unfinished.

    NOTE(review): the while loop body never pops from the heap, so with the
    element pushed below it spins forever and the function returns None --
    TODO finish the algorithm (the heap + accumulated `cost` skeleton
    suggests a Prim-style minimum spanning tree was intended; confirm
    against the AOJ 0180 problem statement).
    """
    adjacency = createGraph(n, m)
    cost = 0
    h = []
    heapq.heappush(h, 0)
    while h:
        pass
def main():
    """Read "n m" pairs from stdin and print solve(n, m) for each.

    NOTE(review): there is no termination condition, so this raises EOFError
    once the input runs out.
    """
    while True:
        n, m = map(int, input().split())
        print(solve(n, m))
if __name__ == '__main__':
    main()
| true |
ad2fd9f9750e29379c04e2caf9fd9c473097ed78 | Python | quangntran/cs162-continuous-integration | /tests/integrate.py | UTF-8 | 2,024 | 3.328125 | 3 | [] | no_license | """
This file is taken from Weiting's PCW for the last session.
1. POST an HTTP request with a valid expression to the server.
Examine the response and confirm that the correct answer is returned.
2. Establish a connection to the database directly and verify
that the string you sent has been correctly stored in the database.
For this step, you can use SQLAlchemy, or write the SQL directly if you prefer,
however note that this is a postgres database which does have subtly different
syntax from sqlite. (For simple queries this shouldn't be a big issue.)
3. POST an HTTP request with an invalid expression to the server.
Examine the response and confirm that an error is raised.
4. Confirm that no more rows have been added to the database since the last
valid expression was sent to the server. (For the purposes of this class, you
can assume that no-one else is accessing the database while the tests are
running.)
5. If any of the tests fail, then your program should raise an exception, and
stop running. Your program should only complete successfully if all tests pass.
"""
import unittest
import requests
from app import Expression
import app
class ComputationServerTest(unittest.TestCase):
    """Integration tests against the running computation service and its database."""

    def make_request(self, url, data):
        """POST *data* to *url*, then return the value of the newest stored expression.

        NOTE(review): `value` is not defined in this module -- it is presumably
        the Expression column to select; confirm against the app's model.
        """
        requests.post(url, data=data)
        return Expression.query(value).last()

    def test_HTTPreq(self):
        """A valid expression is evaluated and its result stored/returned."""
        # NOTE(review): get_docker_host() is expected to come from a base/mixin
        # class -- it is not defined here.
        url = "http://{}:8000/add/".format(self.get_docker_host())
        # Fixed: the original dict literal used '=' instead of ':' (SyntaxError),
        # and make_request was called without `self.`.
        data = {'expression': '5*5'}
        self.assertEqual(self.make_request(url, data), 25)

    def test_invalidHTTPreq(self):
        """An invalid expression makes the server raise InvalidExpressionError."""
        url = "http://{}:8000/add/".format(self.get_docker_host())
        data = {'expression': '5-'}
        # Fixed: assertRaises takes the callable plus its arguments; the
        # original passed the (already evaluated) call's result instead.
        self.assertRaises(app.InvalidExpressionError, self.make_request, url, data)

    def test_dbRows(self):
        """No extra rows were stored beyond the last valid expression."""
        self.assertEqual(Expression.query(id).count(), 1)
| true |
d7349b520bc3d240567e2191c123cb334af175cd | Python | GodamSwapna/Leetcode-question | /Add two nums.py | UTF-8 | 416 | 3.40625 | 3 | [] | no_license | def addTwoNumbers(l1, l2):
i=0
str1=""
str2=""
sum=0
while i<len(l1):
str1+=str(l1[i])
i+=1
k=0
while k<len(l2):
str2+=str(l2[k])
k+=1
sum=str(int(str1)+int(str2))
# return type(sum)
j=0
l3=[]
while j<len(sum):
l3.append(sum[j])
j+=1
return l3
# Quick demo: 243 + 564 = 807, printed as ['8', '0', '7'].
l1 = [2,4,3]
l2 = [5,6,4]
print(addTwoNumbers(l1,l2))
| true |
# Decode the obfuscated flag (Python 2 script): the template keeps the
# "flag{" prefix and "_O1}" suffix as-is, while characters 5..12 are
# XOR-masked with key 7 and characters 13..20 with key 8.
src = "flag{PbkD7j4X|8Wz;~;z_O1}"
flag = "flag{"
for i in xrange(5, 12 + 1):
    flag += chr(ord(src[i]) ^ 7)
for i in xrange(13, 20 + 1):
    flag += chr(ord(src[i]) ^ 8)
flag += "_O1}"
print flag
32a46bd88251b12c6872b6562d6a81b7733b445e | Python | homeworkprod/better-bomb-defusal-manual | /bombdefusalmanual/ui/tk.py | UTF-8 | 3,823 | 2.96875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Graphical user interface to ask questions and collect answers.
Based on Tk/Tkinter.
:Copyright: 2015 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
import sys
import tkinter as tk
from tkinter import ttk
from tkinter import E, N, S, W
from .console import ConsoleUI
class TkGUI(ConsoleUI):
    """ConsoleUI variant that pops up Tk windows to ask questions and collect answers."""

    def ask_for_text(self, question_label):
        """Show a single-line text prompt and return what the user typed."""
        frame = self.run_frame(lambda parent: TextFrame(parent, question_label))
        return frame.text.get()

    def ask_for_choice(self, question_label, choices, *, color_map=None):
        """Show one button per choice and return the value of the one clicked."""
        frame = self.run_frame(
            lambda parent: ChoiceFrame(parent, question_label, choices, color_map))
        return frame.selected_choice_value

    def run_frame(self, create_frame):
        """Build a window around the frame produced by *create_frame*, run its
        event loop until the window closes, and return the frame."""
        window = BaseGUI()
        frame = create_frame(window)
        window.set_frame(frame)
        window.mainloop()
        return frame
class BaseGUI(tk.Tk):
    """Top-level window that hosts a single frame and quits on <Escape>."""
    def __init__(self):
        super().__init__()
        self.bind('<Escape>', self.exit)
    def exit(self, event):
        """<Escape> handler: close the window and terminate the whole process."""
        self.destroy()
        sys.exit()
    def set_frame(self, frame):
        """Install *frame* as the window's only child, stretched to fill it."""
        self.frame = frame
        self.frame.grid(row=0, sticky=(N, W, E, S))
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
class TextFrame(ttk.Frame):
    """Frame with a question label above a single-line text entry; pressing
    <Return> closes the hosting window (the caller then reads self.text)."""
    def __init__(self, parent, question_label):
        super().__init__(parent)
        self.parent = parent
        self.add_question_label(question_label)
        self.add_text_entry()
    def add_question_label(self, question_label):
        """Place the question text in row 0, stretching with the window."""
        label = ttk.Label(self, text=question_label)
        label.grid(row=0, sticky=(N, W, E, S))
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
    def add_text_entry(self):
        """Create the entry in row 1; its contents are mirrored into self.text."""
        self.text = tk.StringVar()
        entry = tk.Entry(self, textvariable=self.text)
        entry.grid(row=1, sticky=(N, W, E, S))
        entry.bind('<Return>', self.submit)
        entry.focus()
        self.rowconfigure(1, weight=1)
    def submit(self, event):
        """<Return> handler: close the hosting window."""
        self.parent.destroy()
class ChoiceFrame(ttk.Frame):
    """Frame with a question label above one button per choice; clicking a
    button records its value in self.selected_choice_value and closes the window."""
    def __init__(self, parent, question_label, choices, color_map):
        super().__init__(parent)
        self.parent = parent
        # Filled in by set_selected_choice_and_close when the user clicks a button.
        self.selected_choice_value = None
        self.add_question_label(question_label)
        self.add_choice_buttons(choices, color_map)
    def add_question_label(self, question_label):
        """Place the question text in row 0, stretching with the window."""
        label = ttk.Label(self, text=question_label)
        label.grid(row=0, sticky=(N, W, E, S))
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
    def add_choice_buttons(self, choices, color_map):
        """Add one ChoiceButton per choice in rows 1..n, optionally styled
        with the background color from color_map[choice.value]."""
        if color_map is None:
            color_map = {}
        for row, choice in enumerate(choices, start=1):
            button = ChoiceButton(self, text=choice.label, value=choice.value)
            button.grid(row=row, sticky=(N, W, E, S))
            self.rowconfigure(row, weight=1)
            color = color_map.get(choice.value)
            if color:
                button.configure(style=create_style(color))
    def set_selected_choice_and_close(self, selected_choice_value):
        """Record the clicked choice's value and close the hosting window."""
        self.selected_choice_value = selected_choice_value
        self.parent.destroy()
def create_style(color_name):
    """Configure a ttk button style whose background is *color_name* and
    return the style's name (e.g. 'red.TButton')."""
    style_name = '{}.TButton'.format(color_name)
    style = ttk.Style()
    style.configure(style_name, background=color_name)
    return style_name
class ChoiceButton(ttk.Button):
    """Button that carries a choice value and reports it to its ChoiceFrame when clicked."""
    def __init__(self, *args, **kwargs):
        # 'value' is ours, not ttk's: pop it before delegating to the Button constructor.
        self.value = kwargs.pop('value')
        super().__init__(*args, **kwargs)
        self.configure(command=self.set_selected_choice)
    def set_selected_choice(self):
        # self.master is the ChoiceFrame that created this button.
        self.master.set_selected_choice_and_close(self.value)
| true |
14272cf483a111c949b9a934ef7079797e95dd2f | Python | EEEGUI/Dengue | /code/main.py | UTF-8 | 1,276 | 2.65625 | 3 | [] | no_license | import utils
from model import Model
# Where the final submission file is written.
# NOTE(review): backslashes make this Windows-only; os.path.join would be portable.
SUBMISSION_PATH = '..\output\submission.csv'

# Gradient-boosting parameters passed through to Model (LightGBM-style keys:
# MAE-metric regression, shallow trees with feature/row subsampling -- confirm
# the backend in model.py).
params = {
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': 'mae',
    'num_leaves': 15,
    'learning_rate': 0.05,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.8,
    'bagging_freq': 5,
    'verbose': -1
}
def main_v1():
    """
    Train one model on both cities together and write the submission file.
    (Translated from the original Chinese docstring.)
    :return: None
    """
    df_train, df_train_label, df_test, df_test_pred = utils.load_data()
    # params = utils.load_param()
    model = Model(df_train, df_train_label, df_test, params)
    y_pred = model.train()
    utils.generate_submission(df_test_pred, y_pred, SUBMISSION_PATH)
def main_v2():
    """
    Train a separate model per city and concatenate the per-city prediction
    frames into a single submission file.
    (Translated from the original Chinese docstring.)
    :return: None
    """
    data = utils.load_data_respectively()
    prediction = []
    for each in data:
        # params = utils.load_param(each)
        model = Model(data[each][0], data[each][1], data[each][2], params)
        y_pred = model.train(city=each)
        prediction.append(utils.generate_submission(data[each][3], y_pred, None))
    # NOTE(review): DataFrame.append is deprecated/removed in recent pandas;
    # pd.concat(prediction) is the modern equivalent.
    df_submission = prediction[0].append(prediction[1])
    df_submission.to_csv(SUBMISSION_PATH, index=False)
if __name__ == '__main__':
    main_v2()
| true |
27a26e8e865ed29f8b1c1fea37969cb7c4fc371e | Python | swipswaps/scan-manager | /pysideline/core.py | UTF-8 | 7,339 | 2.703125 | 3 | [] | no_license | """
PySideLine is a simple wrapper to allow semi-declarative programming of PySide (Qt4) GUIs in python
The basic idea is that instead of laying out widgets in a tree structure procedurally, it should be possible to lay them out as a set of nested classes.
Similarly signals, instead of needing to be connected procedurally using .connect calls, should be connected automatically to methods on the widget based on method names.
"""
from PySide import QtCore
from PySide import QtGui
import itertools
import sys
__all__ = ['NOTSET','signalsCache','BaseInstantiable','BaseWidget','BaseLayout','BaseDialog','BaseRootInstantiable','Application']
class __NOTSET(object):
    """Falsy sentinel distinct from None; compares equal only to other __NOTSET instances."""
    def __nonzero__(self):
        # Python 2 truth protocol: NOTSET always evaluates as False.
        return False
    def __eq__(self,other):
        if isinstance(other,self.__class__):
            return True
        else:
            return False
NOTSET = __NOTSET()  #: shared "no value supplied" sentinel
signalsCache = {} #: a local cache of class->(dict of signal id->QtCore.Signal)
classDefinitionOrderCounter = itertools.count() #: a (thread-safe) counter used to number classes so we can sort them in order of class definition later
class CounterMetaclass(type(QtCore.QObject)):
    """
    Metaclass that allows us to track the order in which sub-classes are defined in the source files
    This lets us auto-instantiate them in the right order (vital for automatic layouts)
    """
    def __new__(cls, name, bases, dct):
        global classDefinitionOrderCounter
        # Stamp every new class with the next serial number; _autoInstantiate
        # later sorts nested classes by _psl_serial to recover definition order.
        ### 3.0
        #dct['_psl_serial'] = classDefinitionOrderCounter.__next__()
        dct['_psl_serial'] = classDefinitionOrderCounter.next()  # Python 2 iterator protocol
        return super(CounterMetaclass, cls).__new__(cls, name, bases, dct)
### 3.0
#class BaseInstantiable(metaclass=CounterMetaclass):
class Acquisition(object):
    """Attribute proxy that resolves missing lookups by walking the ``_up``
    parent chain of *base*, raising AttributeError when the chain is exhausted."""

    def __init__(self, base):
        self.base = base

    def __getattr__(self, k):
        current = self.base
        while True:
            try:
                return getattr(current, k)
            except AttributeError:
                pass
            # Not found here: climb to the parent, if there is one.
            parent = getattr(current, '_up', None)
            if parent is None:
                raise AttributeError(k)
            current = parent
class BaseInstantiable(object):
    """
    Base class for all objects that should be auto-instantiated
    Also, if not overridden acts as a top-level namespace for all objects registered under it with M{registerObject} (see below)
    """
    __metaclass__ = CounterMetaclass
    def __init__(self,*args,**kargs):
        # aq resolves attribute lookups by falling back up the parent (_up) chain
        self.aq = Acquisition(self)
        # noAutoInstantiate is consumed here so it never reaches the Qt constructor
        if 'noAutoInstantiate' in kargs:
            noAutoInstantiate = kargs['noAutoInstantiate']
            del(kargs['noAutoInstantiate'])
        else:
            noAutoInstantiate = False
        super(BaseInstantiable,self).__init__(*args,**kargs)
        if not noAutoInstantiate:
            # By Qt convention the parent is the first positional arg or the 'parent' keyword
            parent = (args and args[0]) or kargs.get('parent',None)
            self._up = parent
            toInit = self._autoInstantiate()
            # init() runs children-first, finishing with this object itself
            for o in toInit:
                o.init()
            self.init()
    def _autoInstantiate(self,recurse=True):
        """
        Swap attributes of this class that are classes that subclass BaseInstantiable into instances of themselves
        If these classes are also subclasses of BaseWidget then self is set as their parent
        """
        if hasattr(self,'_findSignals') and not hasattr(self,'_signals'):
            self._findSignals()
        toInstantiate = []
        for k in dir(self):
            # ignore anything starting with '_' -- can't be a subobject or a signal handler method
            if k.startswith('_') or k == 'app':
                continue
            # check if this attribute is a class that needs instantiation
            v = getattr(self,k)
            if isinstance(v,type) and issubclass(v,BaseInstantiable):
                toInstantiate.append((k,v))
            # connect signals to methods named on<signalName> automatically
            if k.startswith('on') and k[2:] in self._signals:
                signal = getattr(self,k[2:])
                signal.connect(v)
        # instantiate and initialise sub-objects in the order in which they were defined
        toInstantiate.sort(key=lambda a:a[1]._psl_serial)
        done = []
        for k,v in toInstantiate:
            # an optional class attribute 'args' supplies constructor arguments
            if hasattr(v,'args'):
                o = v(*v.args,noAutoInstantiate=True)
            else:
                o = v(noAutoInstantiate=True)
            self._registerSubObject(k,o)
            if isinstance(self,QtGui.QWidget) and isinstance(o,QtGui.QWidget):
                o.setParent(self)
            o._up = self
            done.append(o)
            if recurse:
                # splice grandchildren in before their parent so init() runs bottom-up
                done = done[:-1] + o._autoInstantiate() + done[-1:]
            setattr(self,k,o)
        return done
    def _registerSubObject(self,k,v):
        """
        This version of _registerSubObject just passes the call up to the parent
        """
        if self._up:
            self._up._registerSubObject(k,v)
    def _findSignals(self):
        """
        Dynamically find any signals exposed by the class and keep a list of them in _signals
        Used for quickly auto-connecting signals to named C{on...} event handlers. Signals found are cached in C{signalsCache}
        """
        self._signals = {}
        for parent in self.__class__.__bases__:
            if parent not in signalsCache:
                signals = {}
                for k in dir(parent):
                    v = getattr(parent,k)
                    if isinstance(v,QtCore.Signal):
                        signals[k] = v
                signalsCache[parent] = signals
            self._signals.update(signalsCache[parent])
    @property
    def app(self):
        # walk up the parent chain to the QApplication at the root of the tree
        if isinstance(self,QtGui.QApplication):
            return self
        else:
            return self._up.app
    def init(self):
        """ Placeholder for user-defined initialisation functions """
        pass
class BaseWidget(BaseInstantiable):
    """
    Mixin class for all PySide widgets to be used with PySideLine
    (combine with a QWidget subclass; auto-instantiation then reparents nested widgets)
    """
    pass
class BaseLayout(BaseInstantiable):
    """
    Mixin class for all PySide layouts to be used with PySideLine
    """
    def init(self,*args,**kargs):
        # a layout installs itself on its parent widget once instantiation finishes
        self._up.setLayout(self)
class BaseDialog(BaseWidget):
    """Base class for PySideLine dialogs; currently just forwards construction to BaseWidget."""
    def __init__(self,*args,**kargs):
        super(BaseDialog,self).__init__(*args,**kargs)
class BaseRootInstantiable(BaseInstantiable):
    """Instantiable that also acts as a flat namespace for every object registered beneath it."""
    def __init__(self,*args,**kargs):
        # _subObjects must exist before auto-instantiation starts registering children
        self._subObjects = {}
        super(BaseRootInstantiable,self).__init__(*args,**kargs)
    def _registerSubObject(self,k,v):
        """
        Register a child object of this object so that it appears in the app-level namespace (used as a shortcut for accessing deeply nested widgets)
        """
        self._subObjects[k] = v
        parent = self.parent()
        if parent is not None:
            parent._registerSubObject(k,v)
    def __getattr__(self,k):
        """ This maps all objects defined under the application to the applications namespace
        It's designed to do away with long chains of attribute access like C{app.mywindow.mytabs.mytab.mygroup.mycontrol}
        """
        if k in self._subObjects:
            return self._subObjects[k]
        else:
            raise AttributeError(k)
class Application(BaseRootInstantiable,QtGui.QApplication):
    """
    A main application
    The Application class does a bit of contortion to ensure that the init methods of all the classes get executed
    within the main GUI thread and after the QApplication has been initialised. This makes it easier to do startup tasks that
    e.g. need to popup dialogue boxes etc. on failure.
    """
    def __init__(self,*args,**kargs):
        self._subObjects = {}
        self._up = None  # the application is the root of the _up chain
        QtGui.QApplication.__init__(self,*args,**kargs)
        # Defer sub-object instantiation until the event loop starts, so the
        # init() methods run in the GUI thread with a live QApplication.
        QtCore.QTimer.singleShot(0,self._on_startup)
    def _on_startup(self):
        # Build and initialise the declarative widget tree (children first, then self).
        toInit = self._autoInstantiate()
        for o in toInit:
            o.init()
        self.init()
560f2a1c8fb69a080bcf660355f592de6bcfa0fa | Python | rtSblnkv/StudHack | /database_select.py | UTF-8 | 4,135 | 2.671875 | 3 | [] | no_license | import database
def select_from(table):
    """Print every row of *table*.

    NOTE(review): *table* is interpolated into the SQL text (identifiers
    cannot be bound as parameters), so it must come from trusted code only.
    """
    sqlite_connection = None  # fixed: was unbound in finally when create_connection() raised
    try:
        sqlite_connection = database.create_connection()
        cursor = sqlite_connection.cursor()
        cursor.execute('''SELECT * FROM {0}'''.format(table))
        with sqlite_connection:
            print(cursor.fetchall())
    except Exception as exp:
        print(exp)
    finally:
        if sqlite_connection is not None:
            database.close_connection(sqlite_connection)
def select_all():
    """Dump the contents of every table in the database."""
    for table_name in ('cathedras', 'students', 'teachers', 'science_works'):
        select_from(table_name)
def get_cathedras():
    """Return every row of the ``cathedras`` table (None if the query failed)."""
    data = None  # fixed: was unbound at the return when the query raised
    sqlite_connection = None  # fixed: was unbound in finally when create_connection() raised
    try:
        print('Get cathedras')
        sqlite_connection = database.create_connection()
        cursor = sqlite_connection.cursor()
        cursor.execute(''' SELECT * FROM cathedras''')
        with sqlite_connection:
            data = cursor.fetchall()
    except Exception as exp:
        print(exp)
    finally:
        if sqlite_connection is not None:
            database.close_connection(sqlite_connection)
    return data
def get_themes(cathedra_name):
    """Return (work_name, work_id, teacher_id) rows for every science work
    supervised by a teacher of the named cathedra (None if the query failed)."""
    print('Get Themes')
    data = None  # fixed: was unbound at the return when the query raised
    sqlite_connection = None  # fixed: was unbound in finally when create_connection() raised
    try:
        sqlite_connection = database.create_connection()
        cursor = sqlite_connection.cursor()
        cursor.execute('''SELECT
                       work_name,work_id,teachers.id
                       FROM science_works INNER JOIN teachers
                       ON science_works.teacher_id = teachers.id
                       WHERE teachers.cathedra_id = ?''', (get_cathedra_id(cathedra_name),))
        with sqlite_connection:
            data = cursor.fetchall()
            print(data)
    except Exception as exp:
        print(exp)
    finally:
        if sqlite_connection is not None:
            database.close_connection(sqlite_connection)
    return data
def get_cathedra_id(cathedra_name):
    """Return the id of the cathedra called *cathedra_name*.

    NOTE(review): if the name is unknown or the query fails, fetchone() leaves
    ``data`` as None and ``data[0]`` raises TypeError -- callers such as
    get_themes rely on passing a valid name.
    """
    data = None  # fixed: was unbound at the return when the query raised
    sqlite_connection = None  # fixed: was unbound in finally when create_connection() raised
    try:
        sqlite_connection = database.create_connection()
        cursor = sqlite_connection.cursor()
        cursor.execute('''SELECT cathedra_id FROM cathedras where name = ? ''', (cathedra_name,))
        with sqlite_connection:
            data = cursor.fetchone()
    except Exception as exp:
        print(exp)
    finally:
        if sqlite_connection is not None:
            database.close_connection(sqlite_connection)
    return data[0]
def get_teacher_themes(teacherId):
    """Return the first science-work row belonging to *teacherId*, or None.

    NOTE(review): despite the plural name this uses fetchone(), so at most one
    work is returned -- confirm whether fetchall() was intended.
    """
    print('Getting teacher Themes')
    data = None  # fixed: was unbound at the return when the query raised
    sqlite_connection = None  # fixed: was unbound in finally when create_connection() raised
    try:
        sqlite_connection = database.create_connection()
        cursor = sqlite_connection.cursor()
        cursor.execute('''SELECT * FROM science_works WHERE teacher_id = ?''', (teacherId,))
        with sqlite_connection:
            data = cursor.fetchone()
    except Exception as exp:
        print(exp)
    finally:
        if sqlite_connection is not None:
            database.close_connection(sqlite_connection)
    return data
def get_teacher(teacher_id):
    """Return the teacher row with the given id, or None."""
    print('Getting teacher')
    data = None  # fixed: was unbound at the return when the query raised
    sqlite_connection = None  # fixed: was unbound in finally when create_connection() raised
    try:
        sqlite_connection = database.create_connection()
        cursor = sqlite_connection.cursor()
        cursor.execute(''' SELECT * FROM teachers WHERE id = ?''', (teacher_id, ))
        with sqlite_connection:
            data = cursor.fetchone()
            print(data)
    except Exception as exp:
        print(exp)
    finally:
        if sqlite_connection is not None:
            database.close_connection(sqlite_connection)
    return data
def get_teachers_ids():
    """Return a list of all teacher ids (empty on failure)."""
    print('Getting teacher_ids')
    result = []
    sqlite_connection = None  # fixed: was unbound in finally when create_connection() raised
    try:
        sqlite_connection = database.create_connection()
        cursor = sqlite_connection.cursor()
        cursor.execute(''' SELECT id FROM teachers ''')
        with sqlite_connection:
            # each row is a 1-tuple; keep only the id column
            result = [row[0] for row in cursor.fetchall()]
    except Exception as exp:
        print(exp)
    finally:
        if sqlite_connection is not None:
            database.close_connection(sqlite_connection)
    return result
'''science_works
(work_id INTEGER PRIMARY KEY NOT NULL,
work_name TEXT,
teacher_id INTEGER NOT NULL,
FOREIGN KEY (teacher_id) REFERENCES teachers(teacher_id)
)'''
def get_teacher_id(work_id):
    """Return the teacher id owning the science work *work_id*.

    NOTE(review): if the work is unknown or the query fails, ``data`` stays
    None and ``data[0]`` raises TypeError, matching the original contract.
    """
    data = None  # fixed: was unbound at the return when the query raised
    sqlite_connection = None  # fixed: was unbound in finally when create_connection() raised
    try:
        sqlite_connection = database.create_connection()
        cursor = sqlite_connection.cursor()
        cursor.execute(''' SELECT teacher_id FROM science_works WHERE work_id = ? ''', (work_id,))
        with sqlite_connection:
            data = cursor.fetchone()
    except Exception as exp:
        print(exp)
    finally:
        if sqlite_connection is not None:
            database.close_connection(sqlite_connection)
    return data[0]
| true |
class MachineRotation:
    """Tracks which way a machine is facing and the unit step for moving forward."""

    # Direction codes in clockwise order starting at TOP.
    TOP = 0
    RIGHT = 1
    BOTTOM = 2
    LEFT = 3

    # Unit (dx, dy) step for each direction, indexed by the codes above.
    MOVES = (
        (0, 1),
        (1, 0),
        (0, -1),
        (-1, 0),
    )

    def __init__(self):
        # Machines start out facing right.
        self.rotation = self.RIGHT

    def rotate_clockwise(self):
        """Turn 90 degrees clockwise (LEFT wraps around to TOP)."""
        self.rotation = (self.rotation + 1) % len(self.MOVES)

    def rotate_counter_clockwise(self):
        """Turn 90 degrees counter-clockwise (TOP wraps around to LEFT)."""
        self.rotation = (self.rotation - 1) % len(self.MOVES)

    def forward_operator(self):
        """Return the (dx, dy) step for one move in the current direction."""
        return self.MOVES[self.rotation]
2521bd70059bdcd1e6b58b635ecb37ebdb26f52f | Python | prp-e/persian_normalizer | /normalizer.py | UTF-8 | 1,653 | 3.640625 | 4 | [] | no_license | import re
def fix_prefix(input_text):
    '''Fixes Persian prefixes می/نمی/بی by joining them to the following word.

    (The original docstring mentioned adding a ZWNJ, but the replacement
    pattern only removes the separating space -- confirm intended behaviour.)
    '''
    pattern = r'\s*(ن?می)\s+'
    pattern_bi = r'\s*(بی)\s+'
    output_text = re.sub(pattern, ' \\1', input_text)
    # BUG FIX: the second substitution previously ran on input_text, silently
    # discarding the می/نمی fix above; it must chain on output_text.
    output_text = re.sub(pattern_bi, ' \\1', output_text)
    return output_text
def fix_suffix(input_text):
    '''Fixes Persian suffixes تر/ترین/ها/های by adding a ZWNJ'''
    # TODO: not implemented yet; currently returns None instead of the fixed text.
    pass
def fix_english_quote(input_text):
    '''Replaces english quotation marks with Persian Gioumeh'''
    # TODO: not implemented yet; currently returns None instead of the fixed text.
    pass
def fix_numbers(input_text):
    '''Replaces Arabic and English numbers with Persian ones'''
    # str.maketrans builds a per-character mapping; translate applies it in one pass.
    to_persian = str.maketrans("0123456789", "۰۱۲۳۴۵۶۷۸۹")
    arabic_to_persian = str.maketrans("١٢٣٤٥٦٧٨٩٠", "۱۲۳۴۵۶۷۸۹۰")
    return input_text.translate(to_persian).translate(arabic_to_persian)
def fix_en_numbers(input_text):
    '''Replaces Persian numbers with English ones in an English phrase'''
    # Tokens where digits are glued to a Latin word (e.g. "python۳" / "۲۱word").
    pattern = r'[a-zA-Z\-_]{2,}[۰-۹]+|[۰-۹]+[a-zA-Z\-_]{2,}'
    to_english = str.maketrans("۰۱۲۳۴۵۶۷۸۹", "0123456789")
    # BUG FIX: the original defined `pattern` but never used it and translated
    # every whitespace token, converting ALL Persian digits back to English --
    # which completely undid fix_numbers() inside fix_whole_numbers(). Now only
    # digits attached to English words are converted.
    def _to_en(match):
        return match.group(0).translate(to_english)
    return re.sub(pattern, _to_en, input_text)
def fix_whole_numbers(input_text):
    '''Normalize digits: convert everything to Persian first, then restore
    English digits where they are attached to English words.'''
    return fix_en_numbers(fix_numbers(input_text))
def fix_badchars(input_text):
    '''Replaces the Arabic letters ي and ك with their Persian forms ی and ک.'''
    # A single translate() pass swaps both characters everywhere in the text.
    return input_text.translate(str.maketrans('يك', 'یک'))
| true |
c119066523edd5729362975318476e53051e3a9d | Python | grimapatroy/Python_NETACAD | /Modulo5/resumen5.1.9.11/miParte5.1.9.11_5.1.11.11/find.py | UTF-8 | 839 | 2.859375 | 3 | [] | no_license | # txt = """A variation of the ordinary lorem ipsum
# (Continuation of the commented-out str.find() experiments started above.)
# .find(sub)             -> index of the first occurrence, or -1 if absent
# .find(sub, start)      -> the search begins at index `start`
# .find(sub, start, end) -> index `end` is excluded from the search
# str.rfind() searches from the right but still reports the left-based index:
print("tau tau tau".rfind("ta"))        # -> 8  (last "ta" starts at index 8)
print("tau tau tau".rfind("ta", 9))     # -> -1 (no "ta" begins at index >= 9)
print("tau 3tau tau".rfind("ta", 3, 9)) # -> 5  (only the middle "ta" lies in [3, 9))
621c11a9ea0e9ae6f63649cc18283a32eb65c630 | Python | South-Paw/Python-UDP | /channel.py | UTF-8 | 5,411 | 2.9375 | 3 | [
"MIT"
] | permissive | ''' channel.py
Usage: python3 channel.py <channel_sender_in> <channel_sender_out> <channel_receiver_in> <channel_receiver_out> <sender_in> <receiver_out> <drop_rate> <hash>
Example: python3 channel.py 5006 5005 5007 5008 5004 5001 0.01 somehash
Authors: James Paterson & Alex Gabites
Date: August 2015
'''
import socket
import select
import random
import sys
UDP_IP = "127.0.0.1"
class channel:
    def __init__(self):
        """Parse CLI arguments, bind all four UDP sockets, and run the relay loop forever."""
        # Random number generator; grab_args() may re-seed it from the CLI hash
        # so packet drops are reproducible.
        self.random_num_gen = random
        # Get required arguments
        self.grab_args()
        # Create and bind the ports
        self.try_bind()
        # Status report
        print("Channel ONLINE")
        # Stay alive
        self.channel_alive()
    def grab_args(self):
        """Validate sys.argv and store the six ports, drop rate, and optional RNG seed.

        Exits the process with status 1 on any invalid argument.
        """
        # Checking port numbers: argv[1..6] must be integers in 1025-64000
        for arg in sys.argv[1:7]:
            if not arg.isdigit():
                print("Incorrect arguments!")
                print("Try: 'python3 channel.py channel_sender_in channel_sender_out channel_receiver_in channel_receiver_out sender_in receiver_in drop_rate hash'")
                exit(1)
            elif not(1024 < int(arg) < 64001):
                print("Invalid port number!")
                print("'{}' should be within the range of 1025 - 64000".format(arg))
                exit(1)
        self.channel_sender_in = int(sys.argv[1])
        self.channel_sender_out = int(sys.argv[2])
        self.channel_receiver_in = int(sys.argv[3])
        self.channel_receiver_out = int(sys.argv[4])
        self.sender_in = int(sys.argv[5])
        self.receiver_in = int(sys.argv[6])
        # Check drop rate is within range [0, 1)
        try:
            self.drop_rate = float(sys.argv[7])
            if not 0 <= self.drop_rate < 1:
                raise ValueError("Drop rate value out of bounds!")
        # NOTE(review): bare except also swallows an IndexError when argv[7]
        # is missing, reporting it as a bad drop rate.
        except:
            print("Drop rate must be a value in the range 0 - 1")
            exit(1)
        # Check theres no port conflicts: every port must be unique
        self.port_list = [self.channel_sender_in, self.channel_sender_out, self.channel_receiver_in, self.channel_receiver_out, self.sender_in, self.receiver_in]
        for i in range(5):
            if self.port_list[i] in self.port_list[i+1:]:
                print("channel.py Error!")
                print("Port collision: {} is used twice.".format(self.port_list[i]))
                exit(1)
        # Create our pseudo random number seed using the hash given.
        # NOTE(review): the seed joins argv[7:], which also includes the drop
        # rate, not just the hash -- confirm this is intended.
        if len(sys.argv) > 7:
            self.random_seed = "".join(sys.argv[7:])
            self.random_num_gen.seed(self.random_seed)
def try_bind(self):
# Try channel_sender_in socket
try:
self.sock_sender_in = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock_sender_in.bind((UDP_IP, self.channel_sender_in))
self.sock_sender_in_id = self.sock_sender_in.fileno()
except OSError:
print("channel.py Error!")
print("Port: {} is already in use.".format(self.channel_sender_in))
print("Exiting...")
exit(1)
# Try channel_sender_out socket
try:
self.sock_sender_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock_sender_out.bind((UDP_IP, self.channel_sender_out))
self.sock_sender_out_id = self.sock_sender_out.fileno()
except OSError:
print("channel.py Error!")
print("Port: {} is already in use.".format(self.channel_sender_out))
print("Exiting...")
exit(1)
# Try channel_receiver_in socket
try:
self.sock_receiver_in = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock_receiver_in.bind((UDP_IP, self.channel_receiver_in))
self.sock_receiver_in_id = self.sock_receiver_in.fileno()
except OSError:
print("channel.py Error!")
print("Port: {} is already in use.".format(self.channel_receiver_in))
print("Exiting...")
exit(1)
# Try channel_receiver_out socket
try:
self.sock_receiver_out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock_receiver_out.bind((UDP_IP, self.channel_receiver_out))
self.sock_receiver_out_id = self.sock_receiver_out.fileno()
except OSError:
print("channel.py Error!")
print("Port: {} is already in use.".format(self.channel_receiver_out))
print("Exiting...")
exit(1)
def channel_alive(self):
self.to_sender = (UDP_IP, self.sender_in)
self.to_receiver = (UDP_IP, self.receiver_in)
self.selection = select
while True:
(self.s_read, self.s_write, self.s_except) = self.selection.select([self.sock_sender_in_id, self.sock_receiver_in_id], [], [])
(self.s_read1, self.s_write, self.s_except1) = self.selection.select([], [self.sock_sender_out_id, self.sock_receiver_out_id], [], 0)
for fd in self.s_read:
if (fd == self.sock_sender_in_id):
self.data, self.source_address = self.sock_sender_in.recvfrom(1024)
if (self.random_num_gen.random() > self.drop_rate and self.sock_receiver_out_id in self.s_write):
self.sock_receiver_out.sendto(self.data, self.to_receiver)
# Operations debug
print("Message received from {}:{}".format(self.source_address[0], self.source_address[1]))
print("Passed on to {}:{}\n".format(self.to_receiver[0], self.to_receiver[1]))
else:
print("Dropped packet from: {}\n".format(self.source_address))
if (fd == self.sock_receiver_in_id and self.sock_sender_out_id in self.s_write):
self.data, self.source_address = self.sock_receiver_in.recvfrom(1024)
if (self.random_num_gen.random() > self.drop_rate):
self.sock_sender_out.sendto(self.data,self.to_sender)
# Operations debug
print("Message received from {}:{}".format(self.source_address[0], self.source_address[1]))
print("Passed on to {}:{}\n".format(self.to_sender[0], self.to_sender[1]))
else:
print("Dropped packet from: {}\n".format(self.source_address))
# Run!  Constructing channel() starts the relay loop immediately via __init__.
if __name__ == "__main__":
    channel()
| true |
b17fcf6a7eff2185603cec48984d6f9a8d4de85b | Python | PankajMehar/senior-data-science | /3rd-place/code/src/feature_extraction_v17.py | UTF-8 | 4,822 | 2.6875 | 3 | [] | no_license | from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from visualise_data import Sequence
import numpy as np
import os
import pandas as pd
import scipy.stats.stats as st
import warnings
# Silence all library warnings so the progress output below stays readable.
warnings.filterwarnings('ignore')
def energy(arr):
    """Energy measure: the sum of squared values divided by the element count.

    :param arr: sequence of numeric values
    :return: float
    """
    total_of_squares = np.sum(np.square(arr))
    return total_of_squares / len(arr)
def mad(arr):
    """Median Absolute Deviation: https://en.wikipedia.org/wiki/Median_absolute_deviation

    Masked entries are discarded before the statistic is computed.
    :param arr: sequence of numeric values (possibly a masked array)
    :return: float
    """
    values = np.ma.array(arr).compressed()  # drop masked entries, keep the rest
    center = np.median(values)
    deviations = np.abs(values - center)
    return np.median(deviations)
def iqr(arr):
    """Interquartile range: the 75th percentile minus the 25th percentile.

    :param arr: sequence of numeric values
    :return: float
    """
    q25, q75 = np.percentile(arr, [25, 75])
    return q75 - q25
# We will want to keep track of the feature names for later, so we will collect these in the following list:
column_names = []
# These are the modalities that are available in the dataset, and the .iterate() function returns the data
# in this order
modality_names = ['acceleration', 'rssi', 'pir', 'video_living_room', 'video_kitchen', 'video_hallway']
#acceleration feature
components = 6
feature_pca = ['pca_%d' % i for i in range(components)]
#column_names = feature_pca
# Per-column summary statistics applied to every resampled modality window.
feature_functions = [np.mean, np.std, np.min, np.median, np.max, np.var, st.skew, st.kurtosis,
                     st.sem,st.moment,iqr, mad, energy,np.linalg.norm]
feature_names = ['mean', 'std', 'min', 'median', 'max', 'var', 'skew', 'kur',
                 'sem', 'moment','iqr','mad','energy','mag']
# list() so the names survive repeated iteration: Python 2's map returned a
# list, but Python 3's map is a one-shot iterator, which would break the
# len() below and the re-iteration inside the extraction loops later.
feature_names = list(map(lambda x: "resample_%s" % x, feature_names))
num_ff = len(feature_names)
"""
Iterate over all training directories
"""
# Fix: compare strings with == rather than `is` — identity comparison with a
# string literal only works because of CPython interning and raises a
# SyntaxWarning on modern interpreters.  Behaviour is otherwise unchanged.
for train_test in ('train', 'test',):
    if train_test == 'train':
        print ('Extracting features from training data.\n')
    else:
        print ('\n\n\nExtracting features from testing data.\n')
    for fi, file_id in enumerate(os.listdir('../input/public_data/{}/'.format(train_test))):
        stub_name = str(file_id).zfill(5)
        column_names = []
        if train_test == 'train' or np.mod(fi, 50) == 0:
            print ("Starting feature extraction for {}/{}".format(train_test, stub_name))
        # Pre-computed v5 feature matrix for this sequence; NaNs become a sentinel.
        features = pd.read_csv(os.path.join('../input/public_data/{}/{}'.format(train_test, stub_name), 'columns_v5.csv'))
        features = features.fillna(-9999)
        #print data.shape
        # NOTE(review): data_normalized is computed but never used below — the
        # PCA is fit on the raw (un-normalised) features.  Confirm intent.
        data_normalized = normalize(features.values)
        pca = PCA(n_components=components)
        data_x_projected_pca = pca.fit_transform(features)
        data = Sequence('../input/public_data', '../input/public_data/{}/{}'.format(train_test, stub_name))
        data.load()
        # Build the output column names once, from the first window only.
        if len(column_names) == 0:
            for lu, modalities in data.iterate():
                for i, (modality, modality_name) in enumerate(zip(modalities, modality_names)):
                    for column_name, column_data in modality.transpose().iterrows():
                        for feature_name in feature_names:
                            column_names.append('{0}_{1}_{2}'.format(modality_name, column_name, feature_name))
                # Break here
                break
            column_names.extend(feature_pca)
        rows = []
        for ri, (lu, modalities) in enumerate(data.iterate()):
            row = []
            for i, modality in enumerate(modalities):
                modality = modality[0:components]
                for name, column_data in modality.transpose().iterrows():
                    if len(column_data) > 3:
                        row.extend(map(lambda ff: ff(column_data), feature_functions))
                    else:
                        # Too few samples for the statistics: pad with NaNs.
                        row.extend([np.nan] * num_ff)
            rows.append(row)
            # Report progress (Python 2 print statements: trailing comma
            # suppresses the newline; bare `print` emits one).
            if train_test == 'train':
                if np.mod(ri + 1, 50) == 0:
                    print ("{:5}".format(str(ri + 1))),
                if np.mod(ri + 1, 500) == 0:
                    print
        data = np.hstack((np.array(rows), data_x_projected_pca))
        df = pd.DataFrame(data)
        #print df.head(4)
        assert df.shape[1] == len(column_names)
        df.columns = column_names
        df.to_csv('../input/public_data/{}/{}/columns_v17.csv'.format(train_test, stub_name),
                  index=False)  # if train_test is 'train' or np.mod(fi, 50) == 0:
        if train_test == 'train': print
        print ("Finished feature extraction for {}/{}\n".format(train_test, stub_name))
        #break
| true |
73e7a39de5c837425a92db19fa1a39e1641bde2b | Python | WOWOStudio/Python_test | /Xin/spider/frameTest.py | UTF-8 | 948 | 3.1875 | 3 | [] | no_license | import scrapy
# Define a class called TitleSpider that inherits from scrapy.Spider
class TitleSpider(scrapy.Spider):
    """Scrapy spider that collects article titles/links from appinn.com and
    follows the "next page" link until pagination runs out."""
    name = 'title-spider'
    # The page the crawl starts from
    start_urls = ['https://www.appinn.com/category/windows/']
    def parse(self, response):
        # Find every <article> tag
        for article in response.css('article'):
            # Extract the link and title from the <a> tag under the article
            a = article.css('h2.title a')
            if a:
                result = {
                    'title': a.attrib['title'],
                    'url': a.attrib['href'],
                }
                # Emit the scraped item
                yield result
        # Extract the link to the next page
        next_page = response.css('a.next::attr(href)').get()
        if next_page is not None:
            # Crawl the next page, parsing it with this same method
            yield response.follow(next_page, self.parse)
| true |
fa39553a20c708c8ce9cfa67db2def1cc07b30ec | Python | cameronkelahan/AstroResearch | /kerasDatasetSelection.py | UTF-8 | 16,799 | 2.6875 | 3 | [] | no_license | import numpy as np
import random
import sys
from keras.callbacks import ModelCheckpoint, CSVLogger, LambdaCallback
from numpy import genfromtxt
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import load_model
from sklearn import metrics
from sklearn.model_selection import train_test_split
import time
from playsound import playsound
# from multiDataPreprocessing import processTestData
import argparse
# This program uses the old strategy of running through 1000 randomly subsampled data sets and picks the model which
# performs best on its validation set
# def parseArguments():
# parser = argparse.ArgumentParser(
# description='Build a Keras model for Image classification')
#
# parser.add_argument('--training_x', action='store',
# dest='XFile', default="", required=True,
# help='matrix of training images in npy')
# parser.add_argument('--training_y', action='store',
# dest='yFile', default="", required=True,
# help='labels for training set')
#
# parser.add_argument('--outModelFile', action='store',
# dest='outModelFile', default="", required=True,
# help='model name for your Keras model')
#
# return parser.parse_args()
def print_stats(epoch, logs):
    """Keras LambdaCallback hook: print validation accuracy every 100 epochs."""
    epoch_number = epoch + 1
    if epoch_number % 100 != 0:
        return
    print(epoch_number, logs['val_acc'])
def main():
    """Experiment driver: undersample the non-maser galaxies, train a small NN
    on 1000 random balanced subsamples, keep the model with the best
    validation F1, evaluate it once on its held-out test set, and save the
    model, data splits and summary stats under ./DataSetNN_3Layer/."""
    ############################################ Read In Data From File ################################################
    # Read in column 0 from Table1 for the name of the galaxy
    # NOTE(review): galaxyName is loaded but never used below — confirm it is still wanted.
    galaxyName = genfromtxt('Paper2Table1.csv', delimiter=',', skip_header=1, dtype=None, encoding="utf8", usecols=0)
    # Read in Column 6 from Table1 (Maser Classification)
    maserType = genfromtxt('Paper2Table1.csv', delimiter=',', skip_header=1, dtype=None, encoding="utf8", usecols=6)
    # Read in L12 from Table1
    L12 = genfromtxt('Paper2Table1.csv', delimiter=',', skip_header=1, dtype=None, encoding="utf8", usecols=7)
    # Read in Lobs from Table2
    Lobs = genfromtxt('Paper2Table2.csv', delimiter=',', skip_header=1, dtype=None, encoding="utf8", usecols=4)
    ########################################## Normalize the Data ######################################################
    # Normalize L12  (min-max scaling into [0, 1], in place)
    maxValL12 = np.amax(L12)
    minValL12 = np.amin(L12)
    countL12 = 0
    for value in L12:
        L12[countL12] = (value - minValL12) / (maxValL12 - minValL12)
        countL12 += 1
    # Normalize Lobs
    maxValLobs = np.amax(Lobs)
    minValLobs = np.amin(Lobs)
    countLobs = 0
    for value in Lobs:
        Lobs[countLobs] = (value - minValLobs) / (maxValLobs - minValLobs)
        countLobs += 1
    ########################################## Reshape the Data Matrix #################################################
    # Currently, the shape of the data matrix is flipped
    # Reshape the data matrix to have 2 columns, one for each attribute
    # and as many rows as there are examples (galaxies)
    data = []
    count = 0
    for value in L12:
        data.append([L12[count], Lobs[count]])
        count += 1
    if len(data) == 642 and len(data[0]) == 2 and len(maserType) == 642:
        print("Data loaded properly")
    else:
        exit("Data loaded improperly")
    print("Length of data = ", len(data))
    print("Length of Data[0]", len(data[0]))
    print("Length of MaserType[] = ", len(maserType))
    ########################################## Sort the Masers from the Non-Masers #####################################
    # Sort out the masers and non masers for selection of training data
    # Change all non-zero values of maser classification to 1 for easy binary classification
    # Create a list of all non-masers and masers
    masers = []
    nonMasers = []
    count = 0
    # This is the number of masers; will be used to know how many non-masers to choose for the training data
    maserCount = 0
    for value in maserType:
        if value > 0:
            maserType[count] = 1
            maserCount += 1
            masers.append(data[count])
            count += 1
        else:
            nonMasers.append(data[count])
            count += 1
    if len(masers) == 68 and len(nonMasers) == 574:
        print("Total Masers and NonMasers Separated Correctly")
        print("Number of Total Maser Galaxies = ", len(masers))
        print("Number of Total NonMaser Galaxies = ", len(nonMasers))
    else:
        exit("Maser and NonMaser Separation Error")
    ####################################################################################################################
    ######################################## Perform Undersampling of NonMaser Data ####################################
    # Create a random sampling of training data from the nonMaser list
    # Creates a data range the size of the nonMaser dataset for undersampling purposes
    upperBound = len(nonMasers)
    dataRange = range(0, upperBound)
    ######################################## Outer Loop: Choosing Random Data ##########################################
    # Chooses random data numOfIterations times to perform KNN analysis and Stratified Validation
    # kAverages = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # # Used to graph accuracy of each k value; k = 1-15
    # f1Averages = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # # Used to graph f1 score of each k value; k = 1-15
    # kAverages = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # # Used to graph accuracy of each k value; k = 1-30
    # f1Averages = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # # Used to graph f1 score of each k value; k = 1-30
    numOfIterations = 1000
    print("Number of Iterations This Run = ", numOfIterations)
    dataIterations = range(0, numOfIterations)
    bestAcc = 0
    bestF1Score = 0
    bestTrainSetX = []
    bestTrainSetY = []
    # NOTE(review): bestValidationSetX/Y are initialised but never assigned in
    # the loop below; the X_val/y_val saved after the loop come from the LAST
    # iteration, not the best model's split — confirm this is intended.
    bestValidationSetX = []
    bestValidationSetY = []
    bestTestSetX = []
    bestTestSetY = []
    # bestKVal = 0
    bestXDataSet = []
    for num in dataIterations:
        if (num % (numOfIterations / 10)) == 0:
            print("Iteration Number ", num)
        # Choose k number of random nonMaser galaxies where k = number of Maser galaxies
        chosen = random.sample(dataRange, k=maserCount)
        # Build the X dataset for use in the neural network training based on the randomly selected nonMaser galaxies
        # ALTERNATE adding maser, non-maser to X data set
        X = []
        # Create the class value list to go with the data set for accuracy testing
        Class = []
        count = 0
        for value in chosen:
            X.append(nonMasers[value])
            Class.append(0)
            X.append((masers[count]))
            Class.append(1)
            count += 1
        # print(X)
        # print(Class)
        #################################### Testing the Neural network Model ##########################################
        # Implements Stratified Test Set Validation to test accuracy of KNN model
        # Creates a random selection of Train and Test data
        # Test data is 20%, Train data is 80%
        randNum = random.randint(0, 100)
        X_train, X_test, y_train, y_test = train_test_split(X, Class, test_size=0.2, random_state=randNum)
        # Creates a random selection of Train and Validation data
        # Validation is 25% of Train data, which is 20% of the total data
        X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, random_state=randNum)
        # Create lists used to store all the masers and nonmasers from the training and validation set
        # Used for graphing the training and validation set for visualization
        trainMaserListX = []
        trainMaserListY = []
        trainNonMaserListX = []
        trainNonMaserListY = []
        valMaserListX = []
        valMaserListY = []
        valNonMaserListX = []
        valNonMaserListY = []
        # Add masers from training set to the training maser list
        # Add nonmasers from the training set to the training nonmaser list
        count = 0
        for value in y_train:
            if value == 0:
                for val in X_train[count]:
                    trainNonMaserListX.append(X_train[count][0])
                    trainNonMaserListY.append((X_train[count][1]))
            else:
                for val in X_train[count]:
                    trainMaserListX.append(X_train[count][0])
                    trainMaserListY.append((X_train[count][1]))
            count += 1
        # Add masers from validation set to the validation maser list
        # Add nonmasers from the validation set to the validation nonmaser list
        count = 0
        for value in y_val:
            if value == 0:
                for val in X_val[count]:
                    valNonMaserListX.append(X_val[count][0])
                    valNonMaserListY.append(X_val[count][1])
            else:
                for val in X_val[count]:
                    valMaserListX.append(X_val[count][0])
                    valMaserListY.append(X_val[count][1])
            count += 1
        # ######## Create the Large 6 Layer NN model to train
        # num_epochs = 550
        #
        # model = Sequential()
        # model.add(Dense(10, input_dim=2, activation='relu'))
        # model.add(Dense(10, activation='relu'))
        # model.add(Dense(20, activation='relu'))
        # model.add(Dense(50, activation='relu'))
        # model.add(Dense(17, activation='relu'))
        # model.add(Dense(1, activation='sigmoid'))
        # model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        ######## Create the 3 Layer NN model to train
        num_epochs = 1000
        model = Sequential()
        model.add(Dense(5, input_dim=2, activation='relu'))
        model.add(Dense(4, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        # ######## Create the 4 Layer NN model to train
        # num_epochs = 1000
        #
        # model = Sequential()
        # model.add(Dense(6, input_dim=2, activation='relu'))
        # model.add(Dense(4, activation='relu'))
        # model.add(Dense(2, activation='relu'))
        # model.add(Dense(1, activation='sigmoid'))
        # model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        # ######## Create the Small 6 Layer NN model to train
        # num_epochs = 550
        #
        # model = Sequential()
        # model.add(Dense(6, input_dim=2, activation='relu'))
        # model.add(Dense(5, activation='relu'))
        # model.add(Dense(4, activation='relu'))
        # model.add(Dense(3, activation='relu'))
        # model.add(Dense(2, activation='relu'))
        # model.add(Dense(1, activation='sigmoid'))
        # model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        # Fit the model on the training set and predict the labels of the X-validation set
        model.fit(np.array(X_train), y_train, epochs=num_epochs, batch_size=68,
                  validation_data=(np.array(X_val), y_val), verbose=0)
        yValPred = model.predict_classes(np.array(X_val))
        # print("yValPred: ", yValPred)
        # print("Type of yValPred: ", type(yValPred))
        # Compute the accuracy of the predicted values
        sklearn_acc = metrics.accuracy_score(y_val, yValPred)
        # print('accuracy from sklearn is:', sklearn_acc)
        # # Compute the f1 score of the predicted values
        f1 = metrics.f1_score(y_val, yValPred)
        # print('f1 is:', f1)
        # If the F1 score of this k value and training set is better than any other previous f1 value,
        # store the accuracy, f1 score, training set, test set, and k value for use later
        # Will be used to rebuild this exact model to test the test data on
        if f1 > bestF1Score:
            bestF1Score = f1
            bestAcc = sklearn_acc
            bestTrainSetX = X_train
            bestTrainSetY = y_train
            bestTestSetX = X_test
            bestTestSetY = y_test
            # model.save('bestNNModel.h5')
            model.save('./DataSetNN_3Layer/bestNNModel.h5')
            bestXDataSet = X
    # print("bestTrainSetX = ", bestTrainSetX)
    # testLarge6LayerModel = load_model('./DatasetNN_6LayerLarge/bestNNModel.h5')
    # testSmall6LayerModel = load_model('./DatasetNN_6LayerSmall/bestNNModel.h5')
    # test4LayerModel = load_model('./DataSetNN_4Layer/bestNNModel.h5')
    # Reload the best-scoring model and evaluate it once on its held-out test set.
    test3LayerModel = load_model('./DataSetNN_3Layer/bestNNModel.h5')
    testYPred = test3LayerModel.predict_classes(np.array(bestTestSetX))
    testAcc = metrics.accuracy_score(bestTestSetY, testYPred)
    testF1 = metrics.f1_score(bestTestSetY, testYPred)
    f = open('./DataSetNN_3Layer/bestNNDataStats.txt', 'w')
    # f = open('./DataSetNN_4Layer/bestNNDataStats.txt', 'w')
    # f = open('./DataSetNN_6LayerLarge/bestNNDataStats.txt', 'w')
    # f = open('./DataSetNN_6LayerSmall/bestNNDataStats.txt', 'w')
    f.write("Best NN Training Accuracy = %.3f\n" % bestAcc)
    f.write("Best NN Training F1 Score = %.3f\n" % bestF1Score)
    f.write("Best Unweighted Test Accuracy = %.3f\n" % testAcc)
    f.write("Best Unweighted Test F1 Score = %.3f" % testF1)
    # f.write("Number of Iterations of Undersampled NonMaser Datasets = " % numOfIterations)
    f.close()
    # Constant names for the long string names of the saved .npy files for the 3 layer model
    xNNDataTrain = './DataSetNN_3Layer/DataTrainingXNN'
    yNNDataTrain = './DataSetNN_3Layer/DataTrainingYNN'
    xNNDataVal = './DataSetNN_3Layer/DataValidationXNN'
    yNNDataVal = './DataSetNN_3Layer/DataValidationYNN'
    xNNDataTest = './DataSetNN_3Layer/DataTestXNN'
    yNNDataTest = './DataSetNN_3Layer/DataTestYNN'
    bestNNDataStats = './DataSetNN_3Layer/BestNNDataSetStats'
    bestXDataSetStr = './DataSetNN_3Layer/BestXDataSet'
    # # Constant names for the long string names of the saved .npy files for the 4 layer model
    # xNNDataTrain = './DataSetNN_4Layer/DataTrainingXNN'
    # yNNDataTrain = './DataSetNN_4Layer/DataTrainingYNN'
    # xNNDataVal = './DataSetNN_4Layer/DataValidationXNN'
    # yNNDataVal = './DataSetNN_4Layer/DataValidationYNN'
    # xNNDataTest = './DataSetNN_4Layer/DataTestXNN'
    # yNNDataTest = './DataSetNN_4Layer/DataTestYNN'
    # bestNNDataStats = './DataSetNN_4Layer/BestNNDataSetStats'
    # bestXDataSetStr = './DataSetNN_4Layer/BestXDataSet'
    # # Constant names for the long string names of the saved .npy files for the 6 layer large model
    # xNNDataTrain = './DataSetNN_6LayerLarge/DataTrainingXNN'
    # yNNDataTrain = './DataSetNN_6LayerLarge/DataTrainingYNN'
    # xNNDataVal = './DataSetNN_6LayerLarge/DataValidationXNN'
    # yNNDataVal = './DataSetNN_6LayerLarge/DataValidationYNN'
    # xNNDataTest = './DataSetNN_6LayerLarge/DataTestXNN'
    # yNNDataTest = './DataSetNN_6LayerLarge/DataTestYNN'
    # bestNNDataStats = './DataSetNN_6LayerLarge/BestNNDataSetStats'
    # bestXDataSetStr = './DataSetNN_6LayerLarge/BestXDataSet'
    # # Constant names for the long string names of the saved .npy files for the 6 layer small model
    # xNNDataTrain = './DataSetNN_6LayerSmall/DataTrainingXNN'
    # yNNDataTrain = './DataSetNN_6LayerSmall/DataTrainingYNN'
    # xNNDataVal = './DataSetNN_6LayerSmall/DataValidationXNN'
    # yNNDataVal = './DataSetNN_6LayerSmall/DataValidationYNN'
    # xNNDataTest = './DataSetNN_6LayerSmall/DataTestXNN'
    # yNNDataTest = './DataSetNN_6LayerSmall/DataTestYNN'
    # bestNNDataStats = './DataSetNN_6LayerSmall/BestNNDataSetStats'
    # bestXDataSetStr = './DataSetNN_6LayerSmall/BestXDataSet'
    np.save(xNNDataTrain, bestTrainSetX)
    np.save(yNNDataTrain, bestTrainSetY)
    np.save(xNNDataVal, X_val)
    np.save(yNNDataVal, y_val)
    np.save(xNNDataTest, bestTestSetX)
    np.save(yNNDataTest, bestTestSetY)
    np.save(bestXDataSetStr, bestXDataSet)
    print("Number of Iterations = ", numOfIterations)
    print("Best Training Accuracy = ", bestAcc)
    print("Best Training F1 Score = ", bestF1Score)
    print("Test Accuracy = ", testAcc)
    print("Test F1 Score = ", testF1)
    # #### Play a fun sound to alert me to it being done
    # playsound('collect_coin_8bit.mp3')
    # playsound('collect_coin_8bit.mp3')
    # playsound('collect_coin_8bit.mp3')
# Script entry point.
if __name__ == '__main__':
    main()
| true |
37a1f863334b84d0dfd1cb53e101384195e85183 | Python | mtouzot/PapibotPi | /papibot.py | UTF-8 | 1,125 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from database import *
from twitterBot import *
import random
import csv
import time
import sys
# Python 2 only: reload(sys) re-exposes setdefaultencoding, and forcing UTF-8
# keeps non-ASCII text from raising UnicodeEncodeError when printed/tweeted.
reload(sys)
sys.setdefaultencoding('utf8')
def papibotPi():
    """Main program of the PapiBot Revillon Twitter bot: connect to the quote
    database and Twitter, pick a random quote, format it with its author,
    tweet it, then close the database connection."""
    print("Connecting to the database")
    dbInfo = dict()
    with open('connexionBDD.json') as db:
        dbInfo = json.load(db)
    connexion = connect(dbInfo)
    print("Database connected")
    print("Connecting to Twitter")
    with open('twitterKeys.json') as userKeys:
        keys = json.load(userKeys)
    print("Connected to @PapibotRevillon")
    # NOTE(review): authorCount is never used below — confirm it is still needed.
    authorCount = getAuthorsCount(connexion)
    quoteCount = getQuotesCount(connexion)
    # Pick a random quote id in [1, quoteCount) and load it plus its author.
    quoteID = random.randrange(1,quoteCount)
    quote = getQuoteInfosFromID(connexion, quoteID)
    author = getAuthorInfosFromID(connexion, quote["AUTH_ID"])
    print("The following message will be tweeted :")
    message = formatAuthor(author) + "\n\"" + quote["QUOTE"] + "\""
    print(message)
    tweet(keys,message)
    connexion.close()
# Script entry point: send one quote tweet.
if __name__ == "__main__":
    papibotPi()
| true |
75b326cff820938b4a6b0abed76810957f2c093a | Python | MJK0211/bit_seoul | /keras/keras35_CNN1_maxpooling.py | UTF-8 | 2,655 | 2.890625 | 3 | [] | no_license | #Maxpooling & Flatten
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten #MaxPooling2D 추가, Flatten 추가
#1. Data (this example only builds and inspects the model; no dataset is loaded)
#2. Model definition — the inline comments track the output shape per layer
model = Sequential()
model.add(Conv2D(10, (2,2), input_shape=(10,10,1))) #(9,9,10)
model.add(Conv2D(5, (2,2), padding='same')) #(9,9,5) - 'same' keeps the spatial size
model.add(Conv2D(3, (3,3), padding='valid')) #(7,7,3)
model.add(Conv2D(7, (2,2))) #(6,6,7)
model.add(MaxPooling2D()) # pool_size defaults to 2 - (3,3,7)
model.add(Flatten()) # flattens the feature maps to one axis, (3*3*7,) = (63,), so the next Dense layer can attach
model.add(Dense(1))
model.summary()
# Model: "sequential"
# _________________________________________________________________
# Layer (type) Output Shape Param # -> (input값 * 커널사이즈 + bias) * 다음 출력 갯수
# =================================================================
# conv2d (Conv2D) (None, 9, 9, 10) 50 -> (2*2)커널 * 1(흑백채널) * 1(장)(최초입력) * 10(장) + 10(bias) = 4*1*10 + 10
# _________________________________________________________________
# conv2d_1 (Conv2D) (None, 9, 9, 5) 205 -> (2*2)커널 * 1(흑백채널) * 10(장)(conv2d_입력) * 5(장)(출력) + 5(bias) = 4*1*10*5 + 5
# _________________________________________________________________
# conv2d_2 (Conv2D) (None, 7, 7, 3) 138 -> (3*3)커널 * 1(흑백채널) * 5(장)(conv2d_1_입력) * 3(장)(출력) + 3(bias) = 9*1*5*3 + 3
# _________________________________________________________________
# conv2d_3 (Conv2D) (None, 6, 6, 7) 91 -> (2*2)커널 * 1(흑백채널) * 3(장)(conv2d_2_입력) * 7(장)(출력) + 7(bias) = 4*1*3*7 + 7
# _________________________________________________________________
# max_pooling2d (MaxPooling2D) (None, 3, 3, 7) 0 -> Default 2 (2개씩 나눔)
# _________________________________________________________________
# flatten (Flatten) (None, 63) 0 -> 이차원으로 변경 (3*3*7 = 63) - (63,)
# _________________________________________________________________
# dense (Dense) (None, 1) 64 -> (63+1)*1
# =================================================================
# Total params: 548
# Trainable params: 548
# Non-trainable params: 0
# _________________________________________________________________
#3. Compile / train (not implemented in this example)
#4. Evaluate / predict (not implemented in this example)
c176eff6381392a8c3de1bf8e7bc372fdd19315f | Python | RaviAB/advent_of_code_2020 | /solutions/day12/solution.py | UTF-8 | 6,636 | 2.953125 | 3 | [] | no_license | from __future__ import annotations
import re
from dataclasses import dataclass
from enum import Enum, unique
from functools import reduce, singledispatch
from typing import Dict, Iterable, Tuple, Union
INSTRUCTION_PATTERN = r"([A-Z])([0-9]+)"
TEST_INPUT = """\
F10
N3
F7
R90
F11
"""
class Direction(Enum):
    """Compass directions keyed by their single-letter instruction code.

    NONE is a sentinel for states (e.g. the waypoint) that have no heading.
    """
    NORTH = "N"
    SOUTH = "S"
    EAST = "E"
    WEST = "W"
    NONE = None
@unique
class Action(Enum):
    """Relative ship actions (turns and forward movement)."""
    LEFT = "L"
    RIGHT = "R"
    FORWARD = "F"
# An instruction's operation is either a relative Action or a compass Direction.
Operation = Union[Direction, Action]
@dataclass
class Instruction:
    """A parsed navigation instruction: an operation plus its magnitude."""
    operation: Operation
    magnitude: int
@dataclass
class State:
    """Heading plus (east, north) position of one entity (ship or waypoint)."""
    direction: Direction
    position: Tuple[int, int]
    def __str__(self):
        return f"{self.direction} {self.position}"
# Compass directions in clockwise order: a 90° right turn advances one step
# through this list (modulo its length).
MOVEMENT_ORDERING = [
    Direction.NORTH,
    Direction.EAST,
    Direction.SOUTH,
    Direction.WEST,
]
# Unit displacement (east, north) for a single step in each direction.
MOVEMENT_MAPPING: Dict[Direction, Tuple[int, int]] = {
    Direction.EAST: (1, 0),
    Direction.SOUTH: (0, -1),
    Direction.WEST: (-1, 0),
    Direction.NORTH: (0, 1),
}
def scale_tuple(initial_tuple: Tuple[int, int], scale: int) -> Tuple[int, int]:
    """Return *initial_tuple* with both components multiplied by *scale*."""
    x, y = initial_tuple
    return (x * scale, y * scale)
def add_tuples(tuple1: Tuple[int, int], tuple2: Tuple[int, int]) -> Tuple[int, int]:
    """Component-wise sum of two 2-tuples."""
    (ax, ay), (bx, by) = tuple1, tuple2
    return (ax + bx, ay + by)
def get_input() -> Iterable[str]:
    """Yield the lines of input.txt with line endings stripped."""
    with open("input.txt") as file_handle:
        yield from file_handle.read().splitlines()
def get_test_input() -> Iterable[str]:
    """Return an iterator over the sample lines, mirroring get_input()."""
    sample_lines = TEST_INPUT.splitlines()
    return iter(sample_lines)
def get_operation(operation_str: str) -> Operation:
    """Map a one-letter code to an Action, falling back to a Direction."""
    try:
        return Action(operation_str)
    except ValueError:
        pass
    # Not an Action letter: let Direction() resolve it (or raise ValueError).
    return Direction(operation_str)
def get_instructions(input_lines: Iterable[str]) -> Iterable[Instruction]:
    """Parse each text line into an Instruction, failing fast on bad input."""
    for line in input_lines:
        parsed = re.match(INSTRUCTION_PATTERN, line)
        if parsed is None:
            raise AssertionError(f"Cannot match line: '{line}'")
        operation_str, magnitude = parsed.groups()
        yield Instruction(get_operation(operation_str), int(magnitude))
@singledispatch
def do_operation(
    _operation: Operation, _magnitude: int, _initial_state: State
) -> State:
    """Apply one operation to a Part 1 State.

    Concrete behaviour is registered per operation type (Direction / Action).
    The base implementation is only reached for operation types with no
    registered handler; it previously returned None silently, which merely
    deferred the failure to the caller — fail loudly instead.
    """
    raise NotImplementedError(
        f"no do_operation handler registered for {type(_operation).__name__}"
    )
@do_operation.register
def do_direction(direction: Direction, magnitude: int, initial_state: State) -> State:
    """Translate the ship *magnitude* units toward *direction*; heading unchanged."""
    unit_x, unit_y = MOVEMENT_MAPPING[direction]
    pos_x, pos_y = initial_state.position
    return State(
        initial_state.direction,
        (pos_x + unit_x * magnitude, pos_y + unit_y * magnitude),
    )
@do_operation.register
def do_action(action: Action, magnitude: int, initial_state: State) -> State:
    """FORWARD: move along the current heading; LEFT/RIGHT: turn in 90° steps."""
    if action == Action.FORWARD:
        step_x, step_y = MOVEMENT_MAPPING[initial_state.direction]
        pos_x, pos_y = initial_state.position
        return State(
            initial_state.direction,
            (pos_x + step_x * magnitude, pos_y + step_y * magnitude),
        )
    if action in (Action.LEFT, Action.RIGHT):
        # Right turns advance through MOVEMENT_ORDERING; left turns go backwards.
        quarter_turns = magnitude // 90
        if action == Action.LEFT:
            quarter_turns = -quarter_turns
        heading_index = MOVEMENT_ORDERING.index(initial_state.direction)
        new_heading = MOVEMENT_ORDERING[
            (heading_index + quarter_turns) % len(MOVEMENT_ORDERING)
        ]
        return State(new_heading, initial_state.position)
    raise AssertionError
def _do_operation(initial_state: State, instruction: Instruction) -> State:
    """reduce()-compatible wrapper: unpack the instruction for do_operation."""
    return do_operation(instruction.operation, instruction.magnitude, initial_state)
def get_final_state(initial_state: State, instructions: Iterable[Instruction]) -> State:
    """Fold every instruction into the state, starting from *initial_state*."""
    state = initial_state
    for instruction in instructions:
        state = _do_operation(state, instruction)
    return state
# Part 1: run the real input from an east-facing ship at the origin, then
# report the final state and its Manhattan distance from the start.
INITIAL_STATE = State(Direction.EAST, (0, 0))
INSTRUCTIONS = get_instructions(get_input())
FINAL_STATE = get_final_state(INITIAL_STATE, INSTRUCTIONS)
print(FINAL_STATE)
print(abs(FINAL_STATE.position[0]) + abs(FINAL_STATE.position[1]))
print()
@dataclass
class States:
    """Part 2 world state: the boat plus its waypoint."""
    boat_state: State
    wp_state: State
@singledispatch
def do_operation_wp(_operation: Operation, _magnitude: int, _states: States) -> States:
    """Apply one operation to the Part 2 States; handlers are registered per type.

    Previously the unregistered base returned None silently — fail loudly instead.
    """
    raise NotImplementedError(
        f"no do_operation_wp handler registered for {type(_operation).__name__}"
    )
@do_operation_wp.register
def do_direction_wp(direction: Direction, magnitude: int, states: States) -> States:
    """Shift the waypoint *magnitude* units toward *direction*; boat unchanged."""
    offset = scale_tuple(MOVEMENT_MAPPING[direction], magnitude)
    moved_waypoint = State(
        states.wp_state.direction,
        add_tuples(states.wp_state.position, offset),
    )
    return States(states.boat_state, moved_waypoint)
@do_operation_wp.register
def do_action_wp(action: Action, magnitude: int, states: States) -> States:
    """FORWARD: move the boat toward the waypoint *magnitude* times;
    LEFT/RIGHT: rotate the waypoint about the boat in 90° steps."""
    if action == Action.FORWARD:
        travelled = scale_tuple(states.wp_state.position, magnitude)
        moved_boat = State(
            states.boat_state.direction,
            add_tuples(states.boat_state.position, travelled),
        )
        return States(moved_boat, states.wp_state)
    if action in (Action.LEFT, Action.RIGHT):
        # One entry per net clockwise quarter-turn: 0°, 90°, 180°, 270°.
        quarter_rotations = (
            lambda x, y: (x, y),
            lambda x, y: (y, -x),
            lambda x, y: (-x, -y),
            lambda x, y: (-y, x),
        )
        turns = magnitude // 90
        if action == Action.LEFT:
            turns = -turns
        rotate = quarter_rotations[turns % len(quarter_rotations)]
        rotated_waypoint = State(
            states.wp_state.direction,
            rotate(*states.wp_state.position),
        )
        return States(states.boat_state, rotated_waypoint)
    raise AssertionError
def _do_operation_wp(states: States, instruction: Instruction) -> States:
    """reduce()-compatible wrapper: unpack the instruction for do_operation_wp."""
    return do_operation_wp(instruction.operation, instruction.magnitude, states)
def get_final_state_wp(
    initial_states: States, instructions: Iterable[Instruction]
) -> States:
    """Fold every instruction into the combined boat/waypoint state."""
    current = initial_states
    for instruction in instructions:
        current = _do_operation_wp(current, instruction)
    return current
# Part 2: the same instructions now steer a waypoint that starts 10 east and
# 1 north of the boat; report the boat's final Manhattan distance.
INSTRUCTIONS = get_instructions(get_input())
INITIAL_STATE = State(Direction.NONE, (0, 0))
INITIAL_WP_STATE = State(Direction.NONE, (10, 1))
FINAL_BOAT_STATES = get_final_state_wp(
    States(INITIAL_STATE, INITIAL_WP_STATE), INSTRUCTIONS
)
print(FINAL_BOAT_STATES)
print(
    abs(FINAL_BOAT_STATES.boat_state.position[0])
    + abs(FINAL_BOAT_STATES.boat_state.position[1])
)
| true |
8baa8dde548305574ef7595eeb81165b275a631e | Python | chomskim/OSS | /pgzero-timer.py | UTF-8 | 391 | 3 | 3 | [] | no_license | import pgzrun
# Accumulated play time in seconds, and the number of update() calls so far.
timer = 0
frame_count = 0
def update(dt):
    """Pygame Zero per-frame hook: accumulate elapsed time and count frames."""
    global timer, frame_count
    frame_count += 1
    timer += dt
def draw():
screen.clear()
screen.draw.text(f"Time passed: {timer} frame count:{frame_count}", (0, 0))
if timer > 5:
print("FPS: ", frame_count/timer)
screen.draw.textbox("Time's up!", Rect(50, 50, 200, 200))
pgzrun.go() | true |
812b9d2621deaa4292cd3e6744b21287da5ad4f2 | Python | STR-HK/BaekJoon | /2884.py | UTF-8 | 133 | 3.28125 | 3 | [] | no_license | inp = input().split(' ')
# Parse the alarm time (hour and minute) read on the line above.
H = int(inp[0])
M = int(inp[1])
# Wind the clock back 45 minutes.  Modular arithmetic on total minutes
# covers both the hour borrow and the midnight wrap that the original
# if-chain handled explicitly.
total_minutes = (H * 60 + M - 45) % (24 * 60)
H, M = divmod(total_minutes, 60)
print(H, M)
69235d5645f960882426c95f5c1ba04c84296feb | Python | ahmad-dci/python_TDD | /2-exercise1/tests/test_string/test_tools.py | UTF-8 | 349 | 2.546875 | 3 | [] | no_license | from text.tools import upper_case, revers
def test_upper_case1():
    assert upper_case("python") == "PYTHON"
# why this test will fail (teaching exercise)
def test_upper_case2():
    # Deliberately wrong expectation: "LOIN" is a misspelling of "LION".
    assert upper_case("lion") == "LOIN"
def test_revers1():
    assert revers("lion") == "noil"
# why this test will fail (teaching exercise)
def test_revers2():
    # Presumably fails because revers() preserves case ("NOIL") — confirm
    # against the implementation in text.tools.
    assert revers("LION") == "noil"
ded5f37b5b6ce8e7022bd4b3784fcb45ac93803e | Python | Kelta-King/OpenCV-learning | /set_properties_on_camera.py | UTF-8 | 661 | 3.171875 | 3 | [] | no_license | import cv2 as c
# Open the default camera (device index 0).
cap = c.VideoCapture(0)
# With cap.get methods, different values of camera can be accessed
print(cap.get(c.CAP_PROP_FRAME_WIDTH))
print(cap.get(c.CAP_PROP_FRAME_HEIGHT))
# Setting values using set methods
# This values will be set to the possible values. If values are very high then it will set maximum value
# NOTE(review): 1208 looks like a typo for the common 1280 width — confirm.
cap.set(c.CAP_PROP_FRAME_WIDTH, 1208)
cap.set(c.CAP_PROP_FRAME_HEIGHT, 720)
# Video reading and showing
# Display frames until the stream ends or the user presses 'q'.
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret:
        c.imshow('image', frame)
        if c.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
cap.release()
c.destroyAllWindows()
| true |
cfafeefc0e2aef0b6d64eb45dabc46b28bd21dc1 | Python | thanhchauns2/Python_PTIT | /MaHoa3.py | UTF-8 | 755 | 2.9375 | 3 | [] | no_license | N = int(input())
def solution():
    """Encode one uppercase line split into two equal halves.

    Each half is Caesar-shifted by the sum of its own letter values
    (A=0..Z=25, mod 26); the two shifted halves are then added together
    letter-wise (mod 26) and the combined string is printed.
    Assumes the input is uppercase A-Z of even length — TODO confirm.
    """
    s = input()
    a = ""
    b = ""
    # Midpoint: length of each half.
    c = (int)((int)(len(s)) / 2)
    ttl1 = 0
    ttl2 = 0
    a1 = ""
    b1 = ""
    a2 = ""
    # Split into halves and accumulate each half's letter-value total.
    for i in range(0, (int)(c)):
        a += s[i]
        b += s[i + c]
        ttl1 += (int)(ord(s[i])) - 65
        ttl2 += (int)(ord(s[i + c])) - 65
    for i in range(0, c):
        # Shift the first half by its own total.
        x = (int)(ord(a[i]) - ord('A'))
        x += ttl1
        x %= 26
        a1 += chr(x + ord('A'))
        # Shift the second half by its own total.
        x = (int)(ord(b[i]) - ord('A'))
        x += ttl2
        x %= 26
        b1 += chr(x + ord('A'))
        # Combine: shifted-first-half letter + shifted-second-half value.
        x = ord(a1[i]) - ord('A') + x
        x %= 26
        a2 += chr(x + ord('A'))
    print(a2)
def main():
    # Run N test cases (N was read from stdin at module import time).
    for i in range(0, N):
        solution()
if __name__ == "__main__":
    main()
| true |
23c8574834aba6ab9150364c359214c8ac41e1fb | Python | hernan-alperin/segmentacion | /sandbox/conjuntos_factibles.py | UTF-8 | 810 | 3.21875 | 3 | [
"MIT"
] | permissive | """
título: conjuntos_factibles.py
descripción: calcula la suma de conjuntos generados por adyacencias y la intersecta con las particiones
quedan todas las particiones de los componentes que respetan las secuencias de adyacencias
autor: -h
fecha: 2019-06
"""
import particiones
import conjuntos_adyacentes
# Components and their adjacency pairs for this example instance.
componentes = [1, 2, 3, 4, 5]
adyacencias = [(5,4), (1,2), (2,3), (3,4)]
# Partitions of `componentes` whose every cell is an adjacency-generated set.
factibles = []
c_adys = conjuntos_adyacentes.conjuntos_adyacentes(componentes, adyacencias)
# Sort each generated set so membership tests against partition cells match.
for c in c_adys:
    c.sort()
# Keep only the partitions where every cell appears among the adjacent sets.
for p in particiones.partition(componentes):
    incluida = True
    for c_p in p:
        if c_p not in c_adys:
            incluida = False
            break
    if incluida:
        factibles.append(p)
# Report the adjacency-generated sets, then the feasible partitions.
for c in c_adys:
    print(c)
print('---------------------')
for p in factibles:
    print(p)
| true |
26bbc348b38c927eca4c67c78a90716baa81a1f2 | Python | benwizen/scratches | /pass_generator/pass_generator.py | UTF-8 | 780 | 3.1875 | 3 | [] | no_license | import random
def rand_pass():
    """Build a three-word passphrase ("<who> <verb> <noun>") from word files,
    then apply leet substitutions (e->3, s->$, a->&, o->0).
    """
    with open('./rand_verbs') as verbs:
        # [:-1] strips the trailing newline; NOTE(review): if the file's
        # last line has no newline this drops that word's final character.
        verb_list = [verb[:-1] for verb in verbs]
    with open('./rand_nouns') as nouns:
        noun_list = [noun[:-1] for noun in nouns]
    who_list = ['I', 'She', 'He', 'We', 'You', 'They']
    # random.sample(..., 1) returns a one-element list; index [0] below.
    who = random.sample(who_list, 1)
    verb = random.sample(verb_list, 1)
    noun = random.sample(noun_list, 1)
    if who[0] in ['She','He']:
        # Naive third-person-singular conjugation of the verb.
        if verb[0][-1] == 'e':
            verb[0] += 's'
        else:
            verb[0] += 'es'
    passwd = ' '.join( who + verb + noun)
    replacement_dict = {'e': '3', 's': '$', 'a': '&', 'o': '0'}
    for old, new in replacement_dict.items():
        passwd = passwd.replace(old, new)
    return passwd
if __name__ == "__main__":
    print(rand_pass())
| true |
c2aeea2518c988fc8fbb436e5c248bb3fc6b1755 | Python | Zt-1021/ztpython | /study/test_1/test_10.py | UTF-8 | 620 | 4.375 | 4 | [] | no_license | # 问题:编写一个程序,接受一系列空格分隔的单词作为输入,并在删除所有重复的单词并按字母数字排序后打印这些单词。
# 假设向程序提供以下输入:
# hello world and practice makes perfect and hello world again
# 则输出为:
# again and hello makes perfect practice world
# Read one line of space-separated words, drop duplicates, and print the
# remaining words sorted, space-separated — matching the expected output in
# the problem statement above ("again and hello makes perfect practice world").
str01 = input("请输入一段字符:")
list01 = str01.split(' ')
# set() removes duplicates; sorted() gives the required alphanumeric order.
list02 = sorted(set(list01))
# Bug fix: join with spaces — printing the raw list showed brackets/quotes
# instead of the space-separated words the exercise asks for.
print(' '.join(list02))
# x = [4, 6, 2, 1, 7, 9]
# print(x.sort())
# x = [4, 6, 2, 1, 7, 9]
# x.sort()
# print(x)
# x = [4, 6, 2, 1, 7, 9]
# print(sorted(x))
| true |
e61839de768e10a35910452acd6d9ad788f4bde5 | Python | ashwinvidiyala/Python-Fundamentals | /fun_with_functions.py | UTF-8 | 812 | 4.46875 | 4 | [] | no_license | # Odd_Even Function
def odd_even(end):
    # Classify every integer from 1 to *end* as odd or even and print a
    # line for each.  (Python 2: print statement syntax.)
    odd_or_even = ""
    for x in range(1,end+1):
        if x % 2 == 0:
            odd_or_even = 'even'
            print "Number is {}. This is an {} number.".format(x, odd_or_even)
        else:
            odd_or_even = 'odd'
            print "Number is {}. This is an {} number.".format(x, odd_or_even)
odd_even(2000)
# Multiply Function
def multiply(list, value):
    # Scale every element of *list* by *value* in place and return the list.
    # NOTE: shadows the built-in name `list` and mutates the caller's list.
    for x in range(len(list)):
        list[x] = value * list[x]
    return list
a = [2,4,10,16]
print multiply(a, 5)
# Hacker challenge: Layered Multiples Function
def layered_multiples(array):
    # For each value n in *array*, append a sub-list of n ones.
    new_array = []
    new_sub_array = []
    for x in array:
        new_sub_array = x * [1]
        new_array.append(new_sub_array)
    return new_array
print layered_multiples(multiply([1,2,3], 5))
| true |
41e9c1f116478963a71d56f5cdee6ac5e7c41a82 | Python | luohaha66/MyCode | /python/PyGame/music/music.py | UTF-8 | 1,701 | 3.0625 | 3 | [] | no_license | '''
Created on Aug 18, 2018
@author: Administrator
'''
import pygame
import sys
from pygame.constants import K_SPACE
def music():
    """Small pygame jukebox: streams fade.ogg, toggles pause with SPACE, and
    plays a win/lose sound on left/right mouse click.

    Bug fixed: the unpaused branch previously called
    ``pygame.mixer.music.play()`` on every frame, which restarts the track
    from the beginning ~30 times per second.  Resuming must use
    ``pygame.mixer.music.unpause()``.
    """
    pygame.init()
    pygame.mixer.init()
    size = (width, height) = (300, 200)
    bg = (255, 255, 255)
    clock = pygame.time.Clock()
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption('music')
    pause = False
    play = pygame.image.load('play.png').convert_alpha()
    stop = pygame.image.load('stop.png').convert_alpha()
    play_r = play.get_rect()
    # Centre the play/stop icon in the window.
    play_r.left, play_r.top = (width - play_r.width) // 2, (height - play_r.height) // 2
    pygame.mixer.music.load('fade.ogg')
    pygame.mixer.music.set_volume(0.2)
    pygame.mixer.music.play()
    win_sound = pygame.mixer.Sound('winner.wav')
    win_sound.set_volume(0.2)
    loser_sound = pygame.mixer.Sound('loser.wav')
    loser_sound.set_volume(0.2)
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    win_sound.play()
                if event.button == 3:
                    loser_sound.play()
            if event.type == pygame.KEYDOWN:
                if event.key == K_SPACE:
                    pause = not pause
        screen.fill(bg)
        if pause:
            # pause() is idempotent, so calling it each frame is harmless.
            screen.blit(stop, play_r)
            pygame.mixer.music.pause()
        else:
            screen.blit(play, play_r)
            # Resume playback (no-op while already playing); play() here
            # would restart the track every frame.
            pygame.mixer.music.unpause()
        pygame.display.flip()
        clock.tick(30)
if __name__ == '__main__':
    # Run the demo only when executed directly.
    music()
| true |
edb5b6cf5400702dcc8c6bee31d8f6e4d4cda7c4 | Python | FilipLe/DailyInterviewPro-Unsolved | /Merge Sorted Array (SOLVED)/mergeArr.py | UTF-8 | 859 | 3.625 | 4 | [] | no_license | class Solution:
def merge(self, nums1, m, nums2, n):
"""
Do not return anything, modify nums1 in-place instead.
"""
#Initialize counter to iterate through 2nd array
count = 0
#Iterate 2nd array and append those elements into 1st arr
while count < n:
nums1.append(nums2[count])
count += 1
#Sort the list into ascending order
nums1.sort()
#Since list sorted, all 0's at the beginning
#nums1 has enough space to hold extra elements from nums2
#--> Amount of 0 at the beginning = n (size of nums2)
counter = 0
while counter < n:
#Remove the 0's
nums1.remove(0)
counter += 1
return nums1
# Quick sanity check: expected output is [1, 2, 2, 3, 5, 6].
print(Solution().merge([1,2,3,0,0,0],3,[2,5,6],3))
| true |
e75acd7268fcbba356a2d6635d8b207eafad78e9 | Python | KSrinuvas/ALL | /PYTHON/task5.py | UTF-8 | 564 | 4.3125 | 4 | [] | no_license | #!/usr/bin/python3
'''
Define a class which has at least two methods:
getString: to get a string from console input
printString: to print the string in upper case.
Also please include simple test function to test the class methods.
'''
#Hints:
#Use __init__ method to construct some parameters
#Solution:
class InputOutString(object):
    # Stores one line read from the console and can echo it in upper case.
    def __init__(self):
        self.s = ''
    def getString(self):
        # Read a single line from standard input and remember it.
        self.s = input()
    def printString(self):
        # Print the remembered string converted to upper case.
        print (self.s.upper())
# Simple test: read one line from the console and echo it upper-cased.
strObj1 = InputOutString()
strObj1.getString()
strObj1.printString()
| true |
4d1732121cb84b383d78a00d269b3eed9c0b1fde | Python | spiianzin/training | /algorithms_book/folder_1/ex1.py | UTF-8 | 685 | 4.03125 | 4 | [] | no_license |
class Stack:
    """LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        self.arr = []

    def pop(self):
        """Remove and return the top element, or None when the stack is empty.

        Bug fixed: the previous implementation did ``self.arr.remove(head)``,
        which deletes the first element *equal* to the head and therefore
        corrupts the order whenever the stack holds duplicates
        (push 1, 2, 1 used to pop 1, 1, 2).  ``list.pop()`` removes the
        actual last element in O(1).
        """
        if self.empty():
            return
        return self.arr.pop()

    def push(self, elem):
        """Place *elem* on top of the stack."""
        self.arr.append(elem)

    def top(self):
        """Return the top element without removing it, or None when empty."""
        if self.empty():
            return
        return self.arr[-1]

    def empty(self):
        """Return True when the stack holds no elements."""
        return len(self.arr) == 0
#
#
# s = Stack()
#
# s.push(1)
# s.push(1)
# s.push(2)
# s.push(3)
# s.push(5)
#
# print(s.pop())
# print(s.pop())
# print(s.pop())
# print(s.pop())
# print(s.pop())
# print(s.pop())
# print(s.pop())
| true |
2b009ecc016059fdea3383670832daf760f6967a | Python | htl1126/leetcode | /766.py | UTF-8 | 554 | 3.578125 | 4 | [] | no_license | # Ref: https://leetcode.com/problems/toeplitz-matrix/discuss/113385/Python-Easy-and-Concise-Solution
class Solution(object):
    def isToeplitzMatrix(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: bool

        A matrix is Toeplitz iff every top-left-to-bottom-right diagonal is
        constant, which holds exactly when each cell equals its neighbour at
        (i + 1, j + 1).  Fixed the Python-2-only ``xrange`` (NameError on
        Python 3); ``range`` behaves identically here on both versions.
        """
        for i in range(len(matrix) - 1):
            for j in range(len(matrix[0]) - 1):
                if matrix[i][j] != matrix[i + 1][j + 1]:
                    return False
        return True
if __name__ == "__main__":
    # Python 2 style smoke test (print statement).  Expected output: False,
    # because matrix[0][0] == 1 differs from matrix[1][1] == 2.
    sol = Solution()
    print sol.isToeplitzMatrix([
    [1,2],
    [2,2]
    ])
| true |
365f01653f5386b3ee4de62a2b5a0db721158b2e | Python | rhdedgar/toolsets | /python/sockets/udp/udp_client_working.py | UTF-8 | 424 | 2.953125 | 3 | [] | no_license | import socket
# Destination for the test datagrams (localhost).
UDP_IP = b"127.0.0.1"
UDP_PORT = 5005
MESSAGE = b"Hello, World!"
print ("UDP target IP:", UDP_IP)
print ("UDP target port:", UDP_PORT)
print ("message:", MESSAGE)
sock = socket.socket(socket.AF_INET, # Internet
                     socket.SOCK_DGRAM) # UDP
total_sent = 0
# NOTE(review): range(1, 100) sends 99 datagrams, not 100 — confirm intent.
for i in range(1, 100):
    sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
    total_sent += 1
print("done")
print("total sent", total_sent)
| true |
57b5868e81785a5d2d9932981ed0c41b06bd57d3 | Python | dave-89/meta-security-checker | /src/tag_checker.py | UTF-8 | 384 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | import requests
import re
def get_http_equiv_tags(url):
    """Fetch *url* and return ``[[http_equiv, content], ...]`` pairs scraped
    from its ``<meta http-equiv=...>`` tags, with double quotes stripped.

    NOTE(review): the greedy ``(.*)`` groups assume one meta tag per line in
    exactly this attribute order; minified HTML or reordered attributes
    would over- or under-match — consider a real HTML parser.
    """
    resp = requests.get(url)
    _meta_tags = re.findall(r'<meta http-equiv=(.*) content=(.*)>', resp.text)
    meta_tags = []
    for tag in _meta_tags:
        meta_tags.append(
            [
                tag[0].replace('"', ''),
                tag[1].replace('"', '')
            ]
        )
    return meta_tags
82700b55eb3957d19c1720bfa9b8832315ee8e38 | Python | yzhuan/pat | /pat-b-practise/1014.py | UTF-8 | 791 | 3.46875 | 3 | [] | no_license | week = {"A":"MON","B":"TUE","C":"WED","D":"THU","E":"FRI","F":"SAT","G":"SUN"}
# Digits then letters map a character to an hour value 0-23 (index in string).
time = "0123456789ABCDEFGHIJKLMN"
# Read the four input strings (Python 2: raw_input).
line = [None] * 4
for i in range(4):
    line[i] = raw_input()
flag = 0
rlt = ""
# First common character of lines 0 and 1 in 'A'..'G' gives the weekday;
# the next common character (digit or 'A'..'N') after it gives the hour.
cnt = min(len(line[0]),len(line[1]))
for i in range(cnt):
    if flag == 0:
        if line[0][i] >= 'A' and line[0][i] <= 'G' and line[0][i] == line[1][i]:
            rlt += week[line[0][i]]
            flag = 1
    else:
        if ((line[0][i] >= 'A' and line[0][i] <= 'N') or (line[0][i] >= '0' and line[0][i] <= '9')) and line[0][i] == line[1][i]:
            rlt += " %02d:" % (time.index(line[0][i]))
            break
# Position of the first common letter of lines 2 and 3 is the minute.
cnt = min(len(line[2]),len(line[3]))
for i in range(cnt):
    if ((line[2][i] >= 'A' and line[2][i] <= 'Z') or (line[2][i] >= 'a' and line[2][i] <= 'z')) and line[2][i] == line[3][i]:
        rlt += "%02d" % (i)
        break
print rlt
| true |
14ad0ccdaab32b04334eb2a1a40d3c0f24f025fd | Python | llamicron/junc | /junc/tests/test_junc.py | UTF-8 | 4,491 | 2.578125 | 3 | [
"MIT"
] | permissive | import unittest
import json
import os
from docopt import docopt
from terminaltables import AsciiTable
from .. import Junc, __doc__ as doc
from .. server import ValidationError
class TestJunc(unittest.TestCase):
    """End-to-end tests for the Junc CLI: each test parses argv with docopt
    and feeds the result to ``Junc.what_to_do_with``.

    ``tearDown`` removes the storage file and both backup variants so every
    test starts from a clean slate.
    """
    def setUp(self):
        self.junc = Junc(testing = True)
    def tearDown(self):
        files = [
            self.junc.st.file_path,
            self.junc.st.file_path + '.bak',
            self.junc.st.file_path + '.custom_backup'
        ]
        for fi in files:
            if os.path.isfile(fi):
                os.remove(fi)
    def seed(self):
        """
        Adds 2 servers to the server list
        """
        servers = [
            {
                'name': 'a_valid_name',
                'username': 'a_valid_username',
                'ip': '123.456.789',
                'location': 'Pytest :)'
            },
            {
                'name': 'another_valid_name',
                'username': 'a_not_short_username',
                'ip': '321.654.987',
                'location': 'Pytest :)'
            }
        ]
        for server in servers:
            self.junc.sl.add(server)
        assert len(self.junc.sl.servers) == 2
    def test_list(self):
        # 'list' renders a table; 'list --json' returns parseable JSON.
        self.seed()
        args = docopt(doc, ['list'])
        results = self.junc.what_to_do_with(args)
        assert type(results) is AsciiTable
        args = docopt(doc, ['list', '--json'])
        results = self.junc.what_to_do_with(args)
        assert type(results) is str
        assert json.loads(results)
    def test_add(self):
        # Valid 'add' grows the list; invalid names/usernames must raise.
        self.seed()
        old_size = len(self.junc.sl.servers)
        args = docopt(doc, ['add', 'server_name', 'username', '123.456', 'Pytest :)'])
        results = self.junc.what_to_do_with(args)
        assert type(results) is AsciiTable
        assert len(self.junc.sl.servers) == old_size + 1
        with self.assertRaises(ValidationError):
            args = docopt(doc, ['add', 'valid_name', 'not@a%valid&username', '123', ''])
            self.junc.what_to_do_with(args)
            args = docopt(doc, ['add', '', 'valid_username', '', ''])
            self.junc.what_to_do_with(args)
    def test_remove(self):
        # Removing an existing server shrinks the list; unknown names raise.
        self.seed()
        old_size = len(self.junc.sl.servers)
        in_use_server_name = self.junc.sl.servers[0].name
        args = docopt(doc, ['remove', in_use_server_name])
        self.junc.what_to_do_with(args)
        assert len(self.junc.sl.servers) == old_size - 1
        with self.assertRaises(ValueError):
            args = docopt(doc, ['remove', 'not_a_server'])
            self.junc.what_to_do_with(args)
    def test_bacup(self):
        # 'backup' creates <file>.bak next to the storage file.
        # (Method name keeps its original "bacup" typo; renaming would be a
        # code change outside a documentation pass.)
        base_file = self.junc.st.file_path
        backup_file = base_file + '.bak'
        assert os.path.isfile(base_file)
        assert not os.path.isfile(backup_file)
        args = docopt(doc, ['backup'])
        self.junc.what_to_do_with(args)
        assert os.path.isfile(base_file)
        assert os.path.isfile(backup_file)
    def test_backup_to_custom_location(self):
        # 'backup <path>' writes the backup to the given path instead.
        base_file = self.junc.st.file_path
        backup_file = base_file + '.custom_backup'
        assert os.path.isfile(base_file)
        assert not os.path.isfile(backup_file)
        args = docopt(doc, ['backup', backup_file])
        self.junc.what_to_do_with(args)
        assert os.path.isfile(base_file)
        assert os.path.isfile(backup_file)
    def test_restore(self):
        # Backup, delete the original, then 'restore' recreates it.
        base_file = self.junc.st.file_path
        backup_file = base_file + '.bak'
        assert os.path.isfile(base_file)
        assert not os.path.isfile(backup_file)
        args = docopt(doc, ['backup'])
        self.junc.what_to_do_with(args)
        os.remove(base_file)
        assert not os.path.isfile(base_file)
        assert os.path.isfile(backup_file)
        args = docopt(doc, ['restore'])
        self.junc.what_to_do_with(args)
        assert os.path.isfile(base_file)
    def test_restore_from_custom_location(self):
        # Same round-trip, but through an explicitly named backup file.
        base_file = self.junc.st.file_path
        backup_file = base_file + '.custom_backup'
        assert os.path.isfile(base_file)
        assert not os.path.isfile(backup_file)
        args = docopt(doc, ['backup', backup_file])
        self.junc.what_to_do_with(args)
        os.remove(base_file)
        assert not os.path.isfile(base_file)
        assert os.path.isfile(backup_file)
        args = docopt(doc, ['restore', backup_file])
        self.junc.what_to_do_with(args)
        assert os.path.isfile(base_file)
| true |
20baa59df6c95aa4fef600239f4923fcfe07743c | Python | sholloway/queueing-sims | /queueing_sims/linear-examples/nurse-example.py | UTF-8 | 2,893 | 3.75 | 4 | [] | no_license | #!/usr/bin/env python3
###############################################################################
# This is a simple example based on this video:
# https://www.youtube.com/watch?v=jXDjrWKcu6w
#
# The simulation is simply a group of patients waiting to see a nurse.
# There is only 1 nurse.
# The nurse can only see a single patient at a time.
# Patients wait in a proverbial waiting room until their turn.
# The nurse sees patients in the order they arrived (FIFO).
# Patients arrive following an exponential (posson) distribution.
# The amount of time an appointment takes also follows an exponential distribution.
###############################################################################
import simpy
import random
# Patient arrival builder function.
# Responsible for creating new patients.
def patient_builder(env, patient_arrival_time, mean_consult, nurse):
    """simpy process: spawn a consultation activity per arriving patient.

    Inter-arrival times are exponential with mean *patient_arrival_time*,
    i.e. arrivals form a Poisson process.
    """
    p_id = 1
    # Create patients until the program ends.
    while True:
        # Create an instance of an activity generator function.
        ca = consultation_activity(env, mean_consult, nurse, p_id)
        # Run the activity for this patient.
        env.process(ca)
        # Determine the sample time until the next patient arrives at the office.
        # Exponential inter-arrival times are exactly what makes the
        # arrival *process* Poisson (answering the question below).
        # Using exponential distribution.
        # Is this the same as Poisson
        time_until_next_patient = random.expovariate(1.0 / patient_arrival_time)
        # Wait until the time has passed.
        yield env.timeout(time_until_next_patient)
        p_id += 1
def consultation_activity(env, mean_consult, nurse, p_id):
    """simpy process for one patient: queue for the nurse, then consult.

    Logs when the patient joins and leaves the queue and how long they
    waited; the consultation itself lasts an exponential time with mean
    *mean_consult*.
    """
    time_entered_queue_for_nurse = env.now
    print(f"Patient: {p_id} entered queue at {time_entered_queue_for_nurse}.")
    # Request a consultation with the nurse.
    with nurse.request() as req:
        # Wait until the request can be met.
        # The nurse resource will automatically start
        # the generator function back up when the resource capacity
        # is available.
        yield req
        # Calculate the time the patient was waiting.
        time_left_queue_for_nurse = env.now
        print(f"Patient: {p_id} left queue at {time_left_queue_for_nurse}.")
        time_in_queue = time_left_queue_for_nurse - time_entered_queue_for_nurse
        print(f"   Patient: {p_id} waited for {time_in_queue}")
        # Determine how long the consultation takes.
        consultation_time = random.expovariate(1.0 / mean_consult)
        # Wait until the consultation is over.
        yield env.timeout(consultation_time)
# Setup the simulation environment.
env = simpy.Environment()
# Setup the resources.
# In this example, there is only one nurse.
# The nurse can only consult with one person at a time,
# so the resource capacity is set to 1.
nurse = simpy.Resource(env, capacity=1)
# Mean inter-arrival time and mean consultation length (simulation minutes).
patient_arrival_time = 5
mean_consult_time = 6
# Register the creation of the patient arrivals
env.process(patient_builder(env, patient_arrival_time, mean_consult_time, nurse))
# Run the simulation
env.run(until=120)
20cca20fd0d90494f08805097e35c0ab87516a7a | Python | lizhengbio/transfer_tools | /transfertools_customer.py | UTF-8 | 1,360 | 3.015625 | 3 | [] | no_license | #coding:utf-8
import socket
import time
# Address of the transfer server to connect to.
ip = '192.168.0.117'
port = 8000
# Create the TCP socket
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
def sendFile(filename):
    # Stream *filename* to the server in 1 KiB chunks, then send the
    # literal marker 'EOF' so the receiver knows the transfer is complete.
    # The sleep gives the server time to drain before the marker arrives.
    with open(filename,'rb') as f:
        while True:
            data = f.read(1024)
            if not data:
                break
            sock.send(data)
    time.sleep(1)
    sock.send('EOF')
def recvFile(filename):
    # Receive chunks into *filename* until a chunk equal to 'EOF' arrives.
    # NOTE(review): a chunk that merely *contains* 'EOF' at a boundary is
    # not detected — this relies on the marker arriving as its own packet.
    with open(filename,'wb') as f:
        while True:
            recvData = sock.recv(1024)
            if recvData == 'EOF':
                break
            f.write(recvData)
def confirm(command):
    # Send the command line to the server and wait for an 'ok' acknowledgment.
    # Returns True on 'ok'; otherwise implicitly returns None (falsy).
    sock.send(command)
    data = sock.recv(1024)
    if data == 'ok':
        return True
try:
    # Connect to the server
    sock.connect((ip,port))
    # Command loop: prompt, confirm with the server, then transfer
    while True:
        command = raw_input('Please Enter(upload file or download file,q\Q exit):')
        if command == 'q' or command == 'Q':
            print 'Bye'
            break
        elif not command:
            continue
        # Expected format: "<upload|download> <filename>"
        action,filename = command.split()
        if action == 'upload':
            if confirm(command):
                sendFile(filename)
        elif action == 'download':
            if confirm(command):
                recvFile(filename)
        else:
            print 'check command...'
except socket.error,e:
    print 'Error:',e
finally:
    sock.close()
d8b3303b5f2e161f1705e78142e0f79f45305c63 | Python | nicecore/mineral-catalog-searching | /mineral_catalog/minerals/tests.py | UTF-8 | 4,375 | 2.515625 | 3 | [] | no_license | from django.test import TestCase
from django.core.urlresolvers import reverse
import unittest
from .models import Mineral
# Fixture: sample mineral in group "Arsenates", used by the view tests below.
mineral_one = {
    "name": "Madamite",
    "image_filename": "240px-Adamite-179841.jpg",
    "image_caption": "Yellow-green adamite in limonite",
    "category": "Arsenate",
    "formula": "Zn<sub>2</sub>AsO<sub>4</sub>OH",
    "strunz_classification": "08.BB.30",
    "crystal_system": "Orthorhombic Dipyramidal H-M Symbol (2/m 2/m 2/m) Space Group: Pnnm",
    "color": "Pale yellow, honey-yellow, brownish yellow, reddish; rarely white, colorless, blue, pale green to green, may be zoned;",
    "cleavage": "{101}, good; {010}, poor",
    "mohs_scale_hardness": "3.5",
    "luster": "Vitreous",
    "streak": "white to pale green",
    "optical_properties": "Biaxial (+/-)",
    "refractive_index": "nα=1.708 - 1.722, nβ=1.742 - 1.744, nγ=1.763 - 1.773",
    "crystal_habit": "Wedge-like prisms typically in druses and radiating clusters - also smooth botryoidal masses.",
    "specific_gravity": "4.32–4.48 measured",
    "group": "Arsenates"
}
# Fixture: sample mineral in group "Silicates", used by the view tests below.
mineral_two = {
    "name": "Blageirne",
    "image_filename": "250px-8336M-aegirine.jpg",
    "image_caption": "Monoclinic crystal of aegirine with orthoclase, from Mount Malosa, Zomba District, Malawi (size: 85 mm x 83 mm; 235 g)",
    "category": "Silicate, Pyroxene",
    "formula": "<sub>231</sub>.<sub>00</sub>",
    "strunz_classification": "09.DA.25",
    "crystal_system": "Monoclinic Prismatic",
    "unit_cell": "a = 9.658 Å, b = 8.795 Å, c = 5.294 Å, β = 107.42°; Z=4",
    "color": "Dark Green, Greenish Black",
    "crystal_symmetry": "Monoclinic 2/m",
    "cleavage": "Good on {110}, (110) ^ (110) ≈87°; parting on {100}",
    "mohs_scale_hardness": "6",
    "luster": "Vitreous to slightly resinous",
    "streak": "Yellowish-grey",
    "diaphaneity": "Translucent to opaque",
    "optical_properties": "Biaxial (-)",
    "refractive_index": "nα = 1.720 - 1.778 nβ = 1.740 - 1.819 nγ = 1.757 - 1.839",
    "crystal_habit": "Prismatic crystals may be in sprays of acicular crystals, fibrous, in radial concretions",
    "specific_gravity": "3.50 - 3.60",
    "group": "Silicates"
}
class MineralViewsTests(TestCase):
    """View tests for the minerals app: list, detail, group filter, search
    by term, and search by first letter.  Both fixture minerals are created
    in the test database before each test.
    """
    def setUp(self):
        self.mineral_one = Mineral.objects.create(**mineral_one)
        self.mineral_two = Mineral.objects.create(**mineral_two)
    def test_mineral_list_view(self):
        # The list view returns 200, includes the fixture, and renders it.
        resp = self.client.get(reverse('minerals:list'))
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.mineral_one, resp.context['minerals'])
        self.assertTemplateUsed(resp, 'minerals/mineral_list.html')
        self.assertContains(resp, self.mineral_one.name)
    def test_mineral_detail_view(self):
        # Detail view looks the mineral up by primary key.
        resp = self.client.get(reverse(
            'minerals:detail',
            kwargs={'pk': self.mineral_two.pk}
        ))
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.mineral_two.name, resp.context['mineral'].values())
        self.assertTemplateUsed(resp, 'minerals/mineral_detail.html')
    def test_mineral_list_sort_by_group(self):
        # Filtering by group keeps mineral_one (Arsenates), drops mineral_two.
        resp = self.client.get(reverse(
            'minerals:group',
            kwargs={'group': 'Arsenates'}
        ))
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.mineral_one, resp.context['minerals'])
        self.assertNotIn(self.mineral_two, resp.context['minerals'])
        self.assertTemplateUsed(resp, 'minerals/mineral_list.html')
    def test_search_by_term(self):
        # NOTE(review): resp2 is fetched but never asserted on — confirm
        # whether assertions for the second query were intended.
        resp = self.client.get(reverse('minerals:search'), {'q': 'madamite'})
        resp2 = self.client.get(reverse('minerals:search'), {'q': 'blageirne'})
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.mineral_one, resp.context['minerals'])
        self.assertTemplateUsed(resp, 'minerals/mineral_list.html')
    def test_search_by_letter(self):
        # Searching by first letter 'm' should match "Madamite".
        resp = self.client.get(
            reverse('minerals:letter', kwargs={'letter': 'm'}))
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.mineral_one, resp.context['minerals'])
        self.assertTemplateUsed(resp, 'minerals/mineral_list.html')
class MineralModelTest(TestCase):
    """Model smoke test: a Mineral can be created from the fixture dict."""
    def test_mineral_creation(self):
        mineral = Mineral.objects.create(**mineral_one)
        self.assertEqual(mineral.name, 'Madamite')
| true |
f87ce1df129cd1c75db60f6b289979d54fa0bcdc | Python | Ran-Dou/MySQL-Practice | /Course Material/week5/connecting_Python_to_mySQL_2019_09_24.py | UTF-8 | 787 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import mysql.connector
mydb = mysql.connector.connect(
host = "localhost",
user = "root",
passwd = "Esther99!",
database = "HW3"
)
print(mydb)
mycursor = mydb.cursor()
mycursor.execute("show tables;")
mycursor.close()
for x in mycursor: print(x)
sql_query = "SELECT * FROM intl_football;"
mycursor = mydb.cursor()
mycursor.execute(sql_query)
myresult = mycursor.fetchall()
mycursor.close()
dta_at_country_date = pd.DataFrame(myresult)
# In[]
#data_url = 'http://bit.ly/2cLzoxH'
#dta_at_country_year = pd.read_csv(data_url)
#dta_at_country_year.to_excel('D:\data_country_db_at_country_year.xlsx')
| true |
7aa0570e6d298acbbfa51a4a38324b275f1ebfaf | Python | lajthabalazs/diet-futar | /src/cache_composit.py | UTF-8 | 4,450 | 2.578125 | 3 | [] | no_license | '''
Created on Aug 11, 2012
@author: lajthabalazs
'''
from google.appengine.api import memcache
from model import DishCategory, MenuItem, Composit,\
CompositMenuItemListItem
from cache_menu_item import getMenuItem
from cache_dish_category import getDishCategory
# Memcache key prefix for per-day composit lists ("COMP_DAY<day>_<category>").
COMPOSIT_FOR_DAY="COMP_DAY"
def createCompositData(compositDb):
    """Convert a Composit datastore entity into a plain, cacheable dict."""
    menuItemKeys=[]
    for component in compositDb.components:
        menuItem = component.menuItem
        # Store only keys; full menu-item data is resolved on read.
        menuItemKeys.append(
            {
                'menuItemKey':str(menuItem.key()),
                'componentKey':str(component.key())
            }
        )
    compositObject={
        'key':str(compositDb.key()),
        'categoryKey':compositDb.categoryKey,
        'price':compositDb.price,
        'day':compositDb.day,
        'active':compositDb.active,
        'menuItemKeys':menuItemKeys,
        'alterable':True
    }
    return compositObject
def fetchMenuItemsForComposit(compositObject):
    """Resolve the cached composit's menu-item keys into full menu-item dicts."""
    # Fetch menu item data for keys
    menuItems=[]
    i = 0
    for keyPair in compositObject['menuItemKeys']:
        menuItemObject = getMenuItem(keyPair['menuItemKey'])
        # 'uid' gets a running-index suffix so it is unique per component.
        menuItemObject['uid'] = compositObject['key'] + str(i)
        menuItemObject['componentKey'] = keyPair['componentKey']
        i = i + 1
        menuItems.append(menuItemObject)
    return menuItems
def getComposit(key):
    """Return the composit dict for *key*, filling memcache on a miss.

    Returns None when the entity does not exist or the key is invalid.
    """
    client = memcache.Client()
    composit = client.get(key)
    if composit == None:
        try:
            compositDb = Composit.get(key)
            if compositDb == None:
                return None
        except:
            # Malformed datastore key — treat it like a missing entity.
            return None
        composit = createCompositData(compositDb)
        client.set(key,composit)
    # Fetch menu item data for keys
    composit['category'] = getDishCategory(composit['categoryKey'])
    composit['components'] = fetchMenuItemsForComposit(composit)
    return composit
def getDaysComposits(day, categoryKey):
    """Return all composit dicts for *day*/*categoryKey*, caching the list."""
    client = memcache.Client()
    key = COMPOSIT_FOR_DAY+ str(day) + "_" + str(categoryKey)
    daysItems = client.get(key)
    if daysItems == None:
        # Cache miss: query the datastore and cache each entity plus the list.
        composits = Composit.all().filter("day = ", day).filter("categoryKey = ", categoryKey)
        daysItems=[]
        for composit in composits:
            compositObject = createCompositData(composit)
            client.set(compositObject['key'], compositObject)
            daysItems.append(compositObject)
        client.set(key,daysItems)
    retItems = []
    # Fetch menu item data for keys
    for composit in daysItems:
        composit['components'] = fetchMenuItemsForComposit(composit)
        retItems.append(composit)
    return retItems
def addComposit(categoryKey, day):
    """Create a new composit for *day* in the given category and cache it."""
    # Add it to database
    composit = Composit()
    composit.day=day
    composit.category=DishCategory.get(categoryKey)
    # New composits start at the category's base price.
    composit.price = composit.category.basePrice
    composit.categoryKey=str(categoryKey)
    composit.put()
    # Adds it to cache
    client = memcache.Client()
    key = COMPOSIT_FOR_DAY+ str(composit.day) + "_" + str(composit.categoryKey)
    daysComposits = client.get(key)
    #If we have something to update
    if daysComposits != None:
        # Just add this menu item
        compositObject = createCompositData(composit)
        daysComposits.append(compositObject)
        client.set(compositObject['key'], compositObject)
        client.set(key,daysComposits)
def modifyComposit(compositDb):
    """Refresh the cached copy of *compositDb* and its entry in the day list."""
    client = memcache.Client()
    daysCompositsKey = COMPOSIT_FOR_DAY+ str(compositDb.day) + "_" + str(compositDb.categoryKey)
    daysComposits = client.get(daysCompositsKey)
    compositKey=str(compositDb.key())
    #If we have something to update
    composit = createCompositData(compositDb)
    client.set(compositKey, composit)
    if daysComposits != None:
        # Rebuild the day list, swapping in the refreshed entry.
        newComposits = []
        for dayItem in daysComposits:
            if (dayItem['key'] == compositKey):
                # Add menu item to new array
                newComposits.append(composit)
            else:
                newComposits.append(dayItem)
        # Finally just add it to the cache
        client.set(daysCompositsKey, newComposits)
def addMenuItemToComposit(compositKey, menuItemKey):
    """Attach the menu item to the composit and refresh the caches."""
    composit = Composit.get(compositKey)
    menuItem = MenuItem.get(menuItemKey)
    compositItem = CompositMenuItemListItem()
    compositItem.menuItem = menuItem
    compositItem.composit = composit
    compositItem.put()
    # Re-cache the composit so its component list reflects the addition.
    modifyComposit(composit)
def deleteComposit(composit):
    """Drop *composit* from the cache: its own entry and the day list entry.

    NOTE(review): only caches are cleared here — the datastore entity is
    not deleted; confirm the caller removes it from the datastore.
    """
    client = memcache.Client()
    key = COMPOSIT_FOR_DAY+ str(composit.day) + "_" + str(composit.categoryKey)
    daysComposits = client.get(key)
    compositKey=str(composit.key())
    #If we have something to update
    if daysComposits != None:
        newComposits = []
        for dayItem in daysComposits:
            if (dayItem['key'] == compositKey):
                # Find item by key
                client.delete(compositKey)
            else:
                # Add menu item to new array
                newComposits.append(dayItem)
        # Finally just add it to the cache
        client.set(key,newComposits)
100b38124b77c0855ad66cb4779d0040142acdee | Python | jiajikang-nlp/Data-structures-and-algorithms | /剑指Offer/[19_03]顺时针打印矩阵.py | UTF-8 | 971 | 3.4375 | 3 | [] | no_license | # -*- coding:utf-8 -*-
class Solution:
    # Input: a 2-D list; returns its elements as a flat list in spiral order.
    def printMatrix(self, matrix):
        """Return the elements of *matrix* in clockwise spiral order.

        Repeatedly takes the first row, then rotates the remaining rows
        counter-clockwise (transpose + reverse) so the next edge becomes
        the new first row.  Runs in O(rows * cols).  Also handles an empty
        matrix, which made the original four-pointer version crash on
        ``len(matrix[0])``.  The input matrix is not modified.
        """
        if not matrix or not matrix[0]:
            return []
        result = []
        # Work on a copy so the caller's matrix is left untouched.
        rows = [list(r) for r in matrix]
        while rows:
            result.extend(rows.pop(0))
            # Rotate the remainder so its right edge becomes the top row.
            rows = [list(col) for col in zip(*rows)][::-1]
        return result
3d5030e0b95b5c5a5219a490eadbc2a9036169e4 | Python | cniajaya/Remedial_Mod1 | /Celine Kurniajaya JCDS 07 Remedial Modul 1.py.py | UTF-8 | 1,771 | 3.84375 | 4 | [] | no_license | # Celine Kurniajaya JCDS 07 Remedial Modul 1
# Soal no 1
def Find_short(s):
    """Print the length of the shortest non-empty word in *s*.

    Words are split on single spaces; empty fragments produced by runs of
    spaces are ignored, exactly as in the original loop.
    """
    word_lengths = [len(word) for word in s.split(' ') if word != '']
    print(sorted(word_lengths)[0])
Find_short("Many people get up early in the morning ")
Find_short("Every office would getting newest monitor ")
Find_short("Create new file after this morning")
# Soal no 2
def persistence(n):
    """Print the multiplicative persistence of *n*: how many times its
    digits must be multiplied together before a single digit remains.
    Single-digit inputs print 0, matching the original's special case.
    """
    steps = 0
    value = n
    while value >= 10:
        product = 1
        for digit in str(value):
            product *= int(digit)
        value = product
        steps += 1
    print(steps)
persistence(39)
persistence(999)
persistence(4)
# Soal no 3
def multiplication_table(rows,cols):
    """Print a rows x cols multiplication table, one row per line.

    Every value is followed by a single space (including the last one in a
    row), matching the original cell-by-cell `end=" "` output exactly.
    """
    for row in range(1, rows + 1):
        line = ''.join(f'{row * col} ' for col in range(1, cols + 1))
        print(line)
multiplication_table(3,3)
multiplication_table(5,3)
multiplication_table(3,5)
# Soal no 4
def alphabet_position(text):
    """Print the 1-based alphabet position of every letter in *text*.

    Case-insensitive; non-letter characters are skipped.  The printed line
    keeps a trailing space after the last number, like the original.
    """
    positions = {letter: rank for rank, letter in
                 enumerate('abcdefghijklmnopqrstuvwxyz', start=1)}
    joined = ''.join(f'{positions[ch]} ' for ch in text.lower() if ch.isalpha())
    print(joined)
alphabet_position("The sunset sets at twelve o' clock.")
alphabet_position("it’s never too late to try")
alphabet_position("Have you done your homework?")
b63a638daf4dcf6fec09c4db65c745c81657c13f | Python | bef1re/aoc2020 | /day05/script1.py | UTF-8 | 307 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env python3
# AoC 2020 day 5 part 1: find the highest boarding-pass seat ID.
# Each pass is binary: B/R are 1-bits, F/L are 0-bits, so the whole string
# decodes directly to the seat ID (row * 8 + column).
highest = 0
with open("input.txt", "r") as f:
    for line in f:
        bits = line.strip().replace("B", "1").replace("F", "0").replace("L", "0").replace("R", "1")
        seat_id = int(bits, 2)
        if seat_id > highest:
            # Bug fix: the original printed `highest` here instead of
            # updating it, so the final answer was always 0.
            highest = seat_id
print(highest)
| true |
8945cfbf36a78c224077ca72717886d3c60913cd | Python | subashreeashok/python-set3 | /problemset3/q3_1.py | UTF-8 | 280 | 3.734375 | 4 | [] | no_license | '''
Name : Subashree
Setno: 3
Question_no:1
Description:
'''
def is_palindrome(str):
print(str)
f=str[0:5:2]
print(f)
if(str==str[::-1]):
print "is palindrome"
else:
print "not palindrome"
str=raw_input("enter string: ")
is_palindrome(str)
| true |
d446cf023828fc444b84d02082f27ab84671161f | Python | lonzor/holbertonschool-higher_level_programming | /0x03-python-data_structures/6-print_matrix_integer.py | UTF-8 | 325 | 3.640625 | 4 | [] | no_license | #!/usr/bin/python3
def print_matrix_integer(matrix=[[]]):
    """Print a matrix of integers, values space-separated, one row per line.

    Produces exactly the original output: values joined by single spaces
    with no trailing space, and a newline after every row (an empty row
    prints a bare newline).  The mutable default is kept for interface
    compatibility; it is never mutated here.
    """
    for row in matrix:
        print(" ".join("{:d}".format(value) for value in row))
| true |
96d51debeedca808dfce66893a01ba808273a14d | Python | jaf9897/Wumpurson | /cogs/ISS.py | UTF-8 | 1,429 | 2.828125 | 3 | [] | no_license | import discord
from discord.ext import commands
import requests
class SpaceStation(commands.Cog):
    """Discord cog exposing an `iss` command that shows the current ISS position."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command()
    async def iss(self, ctx):
        """Fetch the ISS location and send it as an embed with a static map image."""
        # SECURITY(review): Google Maps API key hard-coded in source control —
        # rotate this key and load it from configuration/environment instead.
        gmaps_API = 'AIzaSyB_z3gerJFcy_TId8h9NJJEed94yem9RUM'
        # base url to access the gmaps static API
        gmaps_URL = 'https://maps.googleapis.com/maps/api/staticmap?center='
        # NOTE(review): requests.get is a blocking call inside an async
        # command; it stalls the bot's event loop while the HTTP call runs.
        req = requests.get("http://api.open-notify.org/iss-now.json")
        size = '640x640'
        if req.status_code == 200:
            print("Success")
            iss_pos = req.json()
            # "lat,lon" string used both as the map center and marker position.
            latitude_longitude = '{0},{1}'.format(iss_pos["iss_position"]["latitude"], iss_pos["iss_position"]["longitude"])
            # this abomination is to build the url that will give us the image for the lat long of the ISS
            map_url = '{0}{1}&zoom=6&size={2}&markers=color:red|{1}&key={3}'.format(gmaps_URL, latitude_longitude, size, gmaps_API)
            embed = discord.Embed(title="International Space Station Location",
                                  description=latitude_longitude,
                                  color=discord.Color.dark_grey()
                                  )
            embed.set_image(url=map_url)
            await ctx.send(embed=embed)
        elif req.status_code == 400:
            print("Not found")
            await ctx.send("Error reaching ISS location")
        # NOTE(review): any status other than 200/400 is silently ignored —
        # the user gets no reply at all in that case.
def setup(bot):
    """Extension entry point: discord.py calls this to register the cog."""
    bot.add_cog(SpaceStation(bot))
| true |
d59e7e13a2b6616cc4eacf53af61637491efdaa1 | Python | Sviridovamd/my_study | /scratch_19.py | UTF-8 | 891 | 4.1875 | 4 | [] | no_license | #Bob is preparing to pass IQ test. The most frequent task in this test is to find out which one of the given numbers differs from the others. Bob observed that one number usually differs from the others in evenness. Help Bob — to check his answers, he needs a program that among the given numbers finds one that is different in evenness, and return a position of this number.
# Keep in mind that your task is to help Bob solve a real IQ test, which means indexes of the elements start from 1 (not 0)
def iq_test(numbers):
    """Return the 1-based position of the number that differs in evenness.

    *numbers* is a space-separated string of integers in which exactly one
    entry has a different parity from all the others.
    """
    parities = [int(value) % 2 for value in numbers.split()]
    # The outlier is the even entry (parity 0) when exactly one even entry
    # exists; otherwise it is the single odd entry.
    outlier_parity = 0 if parities.count(0) == 1 else 1
    return parities.index(outlier_parity) + 1
cb548e6c013679a0409918d16418a07b65eff174 | Python | ifran007/Status_Five_Currency | /Test/utils/excelUtils.py | UTF-8 | 2,976 | 3.078125 | 3 | [] | no_license | import openpyxl
from openpyxl.styles import Alignment
def getRowCount(file, sheetName):
    """Return the highest used row index of *sheetName* in workbook *file*."""
    worksheet = openpyxl.load_workbook(file)[sheetName]
    return worksheet.max_row
def getColCount(file, sheetName):
    """Return the highest used column index of *sheetName* in workbook *file*."""
    worksheet = openpyxl.load_workbook(file)[sheetName]
    return worksheet.max_column
def readData(file, sheetName, rowno, colno):
    """Return the value stored at cell (rowno, colno) of *sheetName* in *file*."""
    worksheet = openpyxl.load_workbook(file)[sheetName]
    return worksheet.cell(rowno, colno).value
def writeData(file, sheetName, rowno, colno, data):
    """Write *data* into cell (rowno, colno) and save the workbook in place."""
    wb = openpyxl.load_workbook(file)
    wb[sheetName].cell(rowno, colno).value = data
    wb.save(file)
def readsinglerow(file, sheetName, rownum, colnum):
    """Return the values of row *rownum*, columns 1..colnum, as a list.

    PERF FIX: loads the workbook once instead of once per cell (the old
    version called readData for every column, re-reading the file each time).
    """
    sheet = openpyxl.load_workbook(file)[sheetName]
    return [sheet.cell(rownum, col).value for col in range(1, colnum + 1)]
def readsinglecol(file, sheetName, srow, maxrownum, colnum):
    """Return the values of column *colnum*, rows srow..maxrownum, as a list.

    PERF FIX: loads the workbook once instead of once per cell (the old
    version called readData for every row, re-reading the file each time).
    """
    sheet = openpyxl.load_workbook(file)[sheetName]
    return [sheet.cell(row, colnum).value for row in range(srow, maxrownum + 1)]
def writelistoflist(file, sheetName, data):
    """Write a 2-D list into the sheet starting at cell A1.

    Falsy values (None, 0, '') are skipped, leaving those cells untouched,
    exactly as before.
    """
    wb = openpyxl.load_workbook(file)
    sheet = wb[sheetName]
    for row_index, row_values in enumerate(data, start=1):
        for col_index, value in enumerate(row_values, start=1):
            if value:
                sheet.cell(row_index, col_index).value = value
    wb.save(file)
def writesinglerow(file, sheetName, rownum, colnum, scol, data):
    """Write data[0:colnum] into row *rownum*, starting at column scol+1.

    Raises IndexError when len(data) < colnum.
    """
    wb = openpyxl.load_workbook(file)
    sheet = wb[sheetName]
    for i in range(1, colnum + 1):
        sheet.cell(rownum, i + scol).value = data[i - 1]
        # print(data[i - 1])
    wb.save(file)
def writesinglecol(file, sheetName, rownum, colnum, srow, data):
    """Write data[0:rownum] into column *colnum*, starting at row srow+1.

    NOTE(review): unlike writesinglerow, every value is coerced with str()
    here — confirm that asymmetry is intentional.
    """
    wb = openpyxl.load_workbook(file)
    sheet = wb[sheetName]
    for i in range(1, rownum + 1):
        sheet.cell(i + srow, colnum).value = str(data[i - 1])
        # print(data[i - 1])
    wb.save(file)
def mergecell(file, sheetName, mcell1, mcell2, rownum, colnum, ):
    """Merge the range mcell1:mcell2 and centre-align cell (rownum, colnum)."""
    wb = openpyxl.load_workbook(file)
    sheet = wb[sheetName]
    sheet.merge_cells(f'{mcell1}:{mcell2}')
    cell = sheet.cell(rownum, colnum)
    cell.alignment = Alignment(horizontal='center', vertical='center')
    wb.save(file)
def writeinmergecell(file, sheetName, mcell1, mcell2, rownum, colnum, scol, data):
    """Merge mcell1:mcell2 (anchor at column scol+1), then write *data* in the row.

    Note: the workbook is loaded and saved twice, once by each helper call.
    """
    mergecell(file, sheetName, mcell1, mcell2, rownum, scol + 1)
    writesinglerow(file, sheetName, rownum, colnum, scol, data)
def createlktestresultExcelfile(filepath):
    """Create a results workbook containing the five LK test sheets, in order."""
    wb = openpyxl.Workbook()
    # ws = wb.active
    sheet_titles = ["GetAppointments", "GetMedications", "GetAllergies",
                    "GetImmunizations", "GetLabResults"]
    for position, title in enumerate(sheet_titles):
        wb.create_sheet(title, position)
    # Drop the default sheet that Workbook() creates automatically.
    wb.remove(wb['Sheet'])
    wb.save(filename=filepath)
def remove_items(list1, item):
    """Return a copy of *list1* with every occurrence of *item* removed."""
    return [element for element in list1 if element != item]
| true |
b202bb844b3f67d7f84ba9a4e588d42fec6caefb | Python | risen2513/excel_xlwings | /function.py | UTF-8 | 573 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
def copy_data_search(file_name):
    """Parse a two-line config file.

    Line 1: comma-separated column names to find.
    Line 2: two comma-separated lists separated by '#'.

    Returns (find_cols, set_cols) where set_cols is a list of the two lists.
    """
    set_cols = []
    find_cols = []
    # BUG FIX: use a context manager so the file handle is always closed
    # (the old version opened the file and never closed it).
    with open(file_name) as file_object:
        for line_number, line in enumerate(file_object, start=1):
            line = line.strip()
            if line_number == 1:
                find_cols = line.split(',')
            if line_number == 2:
                arr = line.split('#')
                set_cols.append(arr[0].split(','))
                set_cols.append(arr[1].split(','))
    return find_cols, set_cols
def create_conf(file_name, data):
    """Write *data* to *file_name*, replacing any existing contents.

    Uses a context manager so the file is closed (and flushed) even if the
    write raises.
    """
    with open(file_name, 'w') as file_object:
        file_object.write(data)
| true |
f20969562b15f51c1f7c1a82bc300205e338edd1 | Python | batiaZinger/Posture-Perfect-Project | /python-19-06/main.py | UTF-8 | 5,006 | 2.59375 | 3 | [] | no_license | # מקבל קריאה מהלקוח- אנגולר לשמירת קובץ
# The client uploads a video to the server, which saves it at the path we provide
import os
from flask_cors import CORS
from flask import request, jsonify, send_file, Flask
from app import app
from werkzeug.utils import secure_filename
from workDetection import detection
from lectureDetection import detectionl
from common import user, videos
import sqlFunctions
from datetime import datetime
CORS(app)
# Video file extensions accepted by the upload endpoint (lowercase, no dot).
ALLOWED_EXTENSIONS = {'mp4', 'avi', 'webm'}
# check if the video extention is allowed
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# get a request with a file, user mail, subjectId
# enter to the uploads file and send to the detection function
# and then insert the processed video in the data base
@app.route('/file-upload', methods=['POST'])
def upload_file():
    """Receive an uploaded video, run posture detection and store the scores.

    Expects a multipart POST with a 'file' part plus two form fields whose
    *order* matters: the first is the user's e-mail, the second the subject
    id ('1' = work interview, anything else = lecture).
    """
    # videoPath = ""
    # NOTE(review): this relies on form-field insertion order; named fields
    # would be more robust.
    lst = list(request.form)
    userMail = lst[0]
    subjectId = lst[1]
    # check if the post request/ has the file part
    if 'file' not in request.files:
        resp = jsonify({'message': 'No file part in the request'})
        resp.status_code = 400
        return resp
    file = request.files['file']
    # check if the file is not empty
    if file.filename == '':
        resp = jsonify({'message': 'No file selected for uploading'})
        resp.status_code = 400
        return resp
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        filePath = r"uploadsVideos/" + filename
        # send to detection function
        if subjectId == '1': # work interview
            videoPath, result, score, remark = detection.finalResult(filePath, filename)
        else: # lecture
            videoPath, result, score, remark = detectionl.finalResult(filePath, filename)
        # video = videos.Video(userMail, videoPath, subjectId, score, remark)
        # insert in the data base
        sqlFunctions.videosInsert(userMail, videoPath, subjectId, score, remark, int(result[0]), int(result[1]),
                                  int(result[2]), int(result[3]))
        # Re-format the raw percentage values as display strings for the
        # JSON response (the raw ints were already stored above).
        result[0] = 'face: ' + str(result[0]) + ' %'
        result[1] = 'eyes: ' + str(result[1]) + ' %'
        result[2] = 'smile: ' + str(result[2]) + ' %'
        result[3] = 'hands: ' + str(result[3]) + ' %'
        result[4] = 'movment: ' + str(result[4]) + ' %'
        print(result)
        resp = jsonify({'message': 'File successfully uploaded'},
                       {'path': str(videoPath)}, {'result': str(result)},
                       {'score': str(score)}, {'remark': str(remark)})
        resp.status_code = 201
        return resp
    else:
        resp = jsonify({'message': 'Allowed file types are avi ,mp4,webm'})
        resp.status_code = 400
        return resp
@app.route('/users/add', methods=['POST'])
def add_user():
    """Create a user from a JSON body with keys mail, name and password."""
    # get the new user
    data = request.get_json()
    # create a new user object
    newUser = user.User(data['mail'], data['name'], data['password'])
    # SECURITY FIX: the old code printed the plaintext password to stdout,
    # leaking credentials into server logs; log only non-secret fields.
    print(newUser.mail + " " + newUser.name)
    # insert to the dataBase
    sqlFunctions.userInsert(user.Convert(newUser))
    return jsonify({'message': 'New user successfully added'})
@app.route("/users/all", methods=['GET'])
def getAllUsers():
# call to a sql function to get all the users
lst = list(sqlFunctions.getAllUsers())
return jsonify({'result': [list(row) for row in lst]})
@app.route("/videos/filter/", methods=['GET'])
def getAllVideosFilter():
userMail = request.args['mail']
# call to a sql function to get all the videos from a specific user
lst, s = list(sqlFunctions.getAllVideosFilter(userMail))
return jsonify({'result': s})
@app.route("/userAndLesson/filter/", methods=['GET'])
def getAllUserAndLessonFilter():
userMail = request.args['mail']
subjectId = request.args['subject']
# call to a sql function to get all the videos from a specific user
lst = list(sqlFunctions.getAllUserAndLesson(userMail, subjectId))
return jsonify({'result': [list(row) for row in lst]})
@app.route('/userAndLesson/add', methods=['POST'])
def addUserAndLesson():
    """Link a user to a lesson; JSON body: userMail, lessonId, statusId, subjectId."""
    # get the new userAndLesson
    data = request.get_json()
    # insert to the data base
    # NOTE(review): '%x' is a locale-dependent date format; an explicit ISO
    # format ('%Y-%m-%d') would be safer for database storage.
    sqlFunctions.users_lesson_update(data['userMail'], data['lessonId'], data['statusId'], data['subjectId'],
                                     datetime.now().strftime('%x'))
    return jsonify({'message': 'New userAndLesson successfully added'})
@app.route('/processedVideos/', methods=['POST'])
def getprocessedVideos():
    """Send a processed video as a download; JSON body: {"fileName": ...}."""
    # fileName = json.dumps(request.data)['fileName']
    data = request.get_json()
    # SECURITY FIX: sanitize the client-supplied name so values such as
    # "../../etc/passwd" cannot escape the processedVideos directory
    # (path traversal).  secure_filename is already imported at module level.
    fileName = secure_filename(data['fileName'])
    return send_file('processedVideos/' + fileName, as_attachment=True)
    # return jsonify({'message': fileName})
if __name__ == "__main__":
app.run()
| true |
e4dc128e2e0849c8b9d0a99c6e083ec87be8f751 | Python | Julio-vg/Entra21_Julio | /Exercicios/exercicios aula 05/if_parte2/exercicio16.py | UTF-8 | 1,490 | 4.125 | 4 | [] | no_license | # Exercicio 16
#
# Crie um programa para uma promoção de um posto de combustivel.
#
# O programa deve pedir ao usuário quantos litros ele quer abastecer e qual combustivel: álcool, diessel ou gasolina
#
# A promoção é a seguinte:
# - Para gasolina: Até 20 litros 0% de desconto e acima de 20 litros 10% de desconto
# - Para diessel: Até 10 litro 1.5% de desconto e acima de 10 litros 5% de desconto
# - para álcool: Até 10 litros 5% de desconto e acima de 10 litros 10% de desconto.
#
# Mostre o combustivel que ele selecionou, o total abastecido e a porcentagem de desconto a ser aplicada.
#
# Não precisa calcular o valor do combustivel!
#
litros = int(input("Quantos Litros Você Deseja Colocar:"))
comb = input("Qual combustivel você deseja Alcool,Diessel ou Gasolina:")
# Promotion table: fuel -> (litre threshold, discount up to and including
# the threshold, discount above it).  Same rules as the original if-chain.
_PROMO = {
    'Gasolina': (20, "0%", "10%"),
    'Diessel': (10, "1.5%", "5%"),
    'Alcool': (10, "5%", "10%"),
}
if comb in _PROMO:
    limite, desconto_ate, desconto_acima = _PROMO[comb]
    desconto = desconto_ate if litros <= limite else desconto_acima
    print("Tipo:", comb, "-", litros, "Litros", "-", desconto + " de Desconto.")
else:
    print("Opção Invalida!")
| true |
3555cc65f3003d6c8446763c5646ef1d840e522b | Python | milkonst/first-assignment | /zadanie 1-3).py | UTF-8 | 803 | 3.078125 | 3 | [] | no_license | import pandas as pd
# headers
# NOTE(review): both input paths are hard-coded absolute Windows paths;
# consider making them configurable.
df = pd.read_csv(r'C:\Users\Dell\Desktop\ML\first-assignment-master\first-assignment-master\train.tsv', delimiter='\t',
                 names=["Price", "Number_Of_Rooms", "Area", "Floor_Number", "Address", "Description"])
df2 = pd.read_csv(r'C:\Users\Dell\Documents\GitHub\Machine_Learning\Zestaw 1\description.csv', delimiter=',')
# merge dataframe
df3 = pd.merge(df, df2, left_on="Floor_Number", right_on="liczba", how="inner")
#print(df3)
# create a DataFrame with the selected columns
df4 = pd.DataFrame(df3, columns=["Price", "Number_Of_Rooms", "Area", "Floor_Number", " opis", "Address", "Description"])
print(df4)
# save as csv
# NOTE(review): pandas renamed line_terminator to lineterminator in 1.5;
# this keyword will break on newer pandas versions.
with open('out2.csv', 'w', encoding="utf-8") as csvfile:
    df4.to_csv(csvfile, index=False, line_terminator='\n')
0ff340304a8754a0c959026eeee527ac9e2de1ab | Python | OrestM/vipod-django-project-template | /students/views/validation.py | UTF-8 | 594 | 2.96875 | 3 | [] | no_license | import magic
# Accepted top-level MIME type prefixes for uploads.
# NOTE(review): "MINETYPES" looks like a typo for "MIMETYPES", and this list
# is not referenced anywhere in this module — confirm before renaming/removing.
VALID_IMAGE_MINETYPES = [
    "image"
]
def get_mimetype(fobject):
    """Return the MIME type of a file-like object, detected with libmagic.

    Only the first 1 KiB is inspected; the read position is rewound so the
    caller can re-read the file afterwards.
    """
    mime = magic.Magic(mime=True)
    mimetype = mime.from_buffer(fobject.read(1024))
    fobject.seek(0)
    return mimetype
def valid_image_minetype(fobject):
    """Return True when the file-like object's detected MIME type is image/*."""
    detected = get_mimetype(fobject)
    return detected.startswith('image') if detected else False
MAX_SIZE = 2*1024*1024 # 2 mb
def valid_image_size(image, max_size=MAX_SIZE):
    """Validate that *image* (PIL-like, exposes .size) is not too large.

    Returns (True, image) when accepted, (False, message) otherwise.
    NOTE(review): the pixel count is compared against a byte-style constant
    (2*1024*1024) — confirm that is the intended limit.
    """
    width, height = image.size
    pixel_count = width * height
    if pixel_count <= max_size:
        return (True, image)
    return (False, "Image is too large")
| true |
579fcd1e54151fc8f072c2df0dc2805c81c26e01 | Python | ghowland/versionman | /versioning.py | UTF-8 | 3,234 | 2.65625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | """
Versioning Module for TransAm
Handles all versioning issues for process.py, to separate concerns.
"""
import json
import session
import query
from query import Log, Query, SanitizeSQL
def CreateCommitVersion(session_id, comment=None):
  """Create the commit_version entry to reference all records stored.

  Args:
    session_id: session key used to resolve the committing user.
    comment: optional free-text commit message.

  Returns: int, commit_version.id (or None on failure)
  """
  #TODO(g): Finish Authorize() and fetch the user from session.key
  # Get the user for this session
  user_name = session.GetUser(session_id)
  # Create INSERT SQL without or with comment
  # SECURITY(review): values are escaped with SanitizeSQL and interpolated
  # directly into the SQL string; parameterized queries would be safer if
  # the query layer supports them.
  if not comment:
    sql = "INSERT INTO commit_version (`user`) VALUES ('%s')" % SanitizeSQL(user_name)
  else:
    sql = "INSERT INTO commit_version (`user`, `comment`) VALUES ('%s', '%s')" % (SanitizeSQL(user_name), SanitizeSQL(comment))
  # Insert the commit and get the version
  version = Query(sql)
  return version
def CommitRecordVersion(commit_version, schema, database, table, key, data=None, delete=False):
  """Store a version of a record. Assume not deleting unless specified.

  `data` is JSON-serialized before storage; delete=True stores a tombstone
  row (is_deleted=1) instead of data.
  NOTE(review): the `schema` parameter is accepted but never used here.
  """
  # Create the INSERT SQL for the data storage
  if not delete:
    sql = "INSERT INTO record_version (`version`, `database`, `table`, `record`, `data`) VALUES (%s, '%s', '%s', '%s', '%s')" % \
        (commit_version, SanitizeSQL(database), SanitizeSQL(table), SanitizeSQL(key), SanitizeSQL(json.dumps(data)))
  # Create the INSERT SQL for the delete entry
  else:
    sql = "INSERT INTO record_version (`version`, `database`, `table`, `record`, `is_deleted`) VALUES (%s, '%s', '%s', '%s', 1)" % \
        (commit_version, SanitizeSQL(database), SanitizeSQL(table), SanitizeSQL(key))
  # Execute the record version INSERT
  Query(sql)
def ListCommits(session_id, before_version=None, after_version=None):
  """List all the commits, optionally before/after version to limit view.

  Returns: dict, keyed on stringified version id, value is dict of commit info
  """
  sql = 'SELECT * FROM `commit_version`'
  # Collect the optional bound clauses, then join them in a single step.
  clauses = []
  if before_version:
    clauses.append('`id` < %s' % int(before_version))
  if after_version:
    clauses.append('`id` > %s' % int(after_version))
  if clauses:
    sql = '%s WHERE %s' % (sql, ' AND '.join(clauses))
  result = query.Query(sql)
  # Return as a dict keyed on the stringified id.
  return dict((str(row['id']), row) for row in result)
def GetRecordVersions(session_id, database, table, key):
  """Returns all the versions of the database/table/key.
  Returns: dict, key is the commit_version.id(int) and value is dict of the entry.
  Relevant keys are 'data' and 'is_deleted'
  """
  sql = "SELECT * FROM `record_version` WHERE `database` = '%s' AND `table` = '%s' AND `record`='%s'" % \
      (SanitizeSQL(database), SanitizeSQL(table), SanitizeSQL(key))
  # Key each returned row on its stringified version number.
  return dict((str(row['version']), row) for row in Query(sql))
| true |
44e5116131274438c99d7434245ad08435b6a355 | Python | hoani/sl8 | /ode.py | UTF-8 | 739 | 2.84375 | 3 | [] | no_license | import scipy.integrate as integrate
import matplotlib.pyplot as plt
import numpy as np
# Time axis: 200 samples over the first 5 ms.
t = np.linspace(0.0, 5e-3, 200)
# Parallel RC Model
R = 100
C = 10e-6
v_init = [10]
# First-order decay dv/dt = -v/(R*C), starting from 10 V.
v = integrate.odeint(lambda v, t: -v/(R*C), v_init, t)
# with plt.style.context("dark_background"):
#     fig, ax = plt.subplots()
#     ax.plot(t, v)
#     ax.set_xlabel("time (s)")
#     ax.set_ylabel("voltage")
#     plt.show()
# Parallel RCL Model
R = 100
C = 10e-6
L = 12e-3
# State vector is [v, dv/dt]; the lambda implements the second-order system
# v'' = -v'/(R*C) - v/(C*L).
v_init = [10.0, 0.0]
v = integrate.odeint(
    lambda v, t: [v[1], -v[1]/(R * C) - v[0]/(C * L)],
    v_init, t)
with plt.style.context("dark_background"):
    fig, ax = plt.subplots()
    # Plot only the voltage component of the state vector.
    ax.plot(t, v[:, 0])
    ax.set_xlabel("time (s)")
    ax.set_ylabel("voltage")
    plt.show()
| true |
e81b259f2e6e803dd5530a238f3d06710709d470 | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_8/gmdnko003/question3.py | UTF-8 | 735 | 3.625 | 4 | [] | no_license | '''Program to encrypt message by converting all lowercase letters to next letter alphabetically
nkosi gumede
8 may 2014'''
# Maps each lowercase letter to the next one (z wraps to a); space and '.'
# map to themselves.  Characters not in D are passed through unchanged.
D={'a':'b','b':'c','c':'d','d':'e','e':'f','f':'g','g':'h','h':'i','i':'j','j':'k','k':'l','l':'m','m':'n','n':'o','o':'p','p':'q','q':'r','r':'s','s':'t','t':'u','u':'v','v':'w','w':'x','x':'y','y':'z','z':'a',' ':' ','.':'.'}
# Module-level accumulator, kept for backward compatibility: like the
# original, repeated encrypt() calls keep appending to it.
listed=[]
def encrypt(message):
    """Print the encrypted form of *message* (and a header line).

    BUG FIX: the old one-character-per-call recursion hit Python's
    recursion limit on long messages; this iterative version produces the
    same output for any length.
    """
    for ch in message:
        listed.append(D.get(ch, ch))
    print("Encrypted message:")
    print("".join(listed))
if __name__=='__main__':
    # Read one message from the user and print its encrypted form.
    x=input("Enter a message:\n")
    encrypt(x)
66c440db375389c86f45c06371df285de6ad206e | Python | MehtaAkshita/BC71_FORTIFY | /Code/Road Traffic/Time_Update.py | UTF-8 | 200 | 2.96875 | 3 | [
"MIT"
] | permissive | mylist=[4,2,1,1]
# Total of the raw weights in mylist; used to normalise each entry below.
sl=sum(mylist)
lst=[]
def myround(x, base=4):
    """Round *x* to the nearest multiple of 1/base (default: quarters)."""
    scaled = round(x * base)
    return scaled / base
# Scale each weight to its share of 4 units, rounded to the nearest quarter.
for a in range(4):
    j=mylist[a]*4/sl
    j=myround(j)
    lst.append(j)
    #print(lst[a])
print(lst)
| true |
0b3d137a1659e7f1f1b8bca1582a8994190cc5ac | Python | edwinfmesa/hoc | /no/comiladoresVisitor.py | UTF-8 | 725 | 2.8125 | 3 | [] | no_license | #compiladores Visitor
# Reconstructed from garbled lecture notes (the original text was not valid
# Python: `def init`, `def visit.BinOp`, `self.dot + =`, broken quoting and a
# truncated `def generic`).  This is the classic visitor pattern that emits a
# GraphViz "dot" description of an AST.  NodeVisitor is defined first so that
# DotVisitor can inherit from it.
class NodeVisitor(object):
    """Base visitor: dispatches visit() to visit_<ClassName> per node type."""
    def visit(self, node):
        # Dispatch on the node's class name; fall back to generic_visit.
        if node:
            method = "visit_" + node.__class__.__name__
            visitor = getattr(self, method, self.generic_visit)
            return visitor(node)
        return None

    def generic_visit(self, node):
        """Fallback for node types without a dedicated visit_ method."""
        return None


class DotVisitor(NodeVisitor):
    """Builds a GraphViz digraph of the AST rooted at *node*."""
    def __init__(self, node):
        self.dot = "digraph AST{\n"
        self.id = 0
        self.visit(node)

    def __str__(self):
        return self.dot + "\n}"

    def Id(self):
        """Return a fresh node identifier: n1, n2, ..."""
        self.id += 1
        return "n%d" % self.id

    def visit_BinOp(self, node):
        # Emit this operator node, then edges to both visited children.
        name = self.Id()
        self.dot += "\t" + name + "[label=\"" + node.Op + "\"]\n"
        L = self.visit(node.left)
        R = self.visit(node.right)
        self.dot += "\t" + name + "->" + L + "\n"
        self.dot += "\t" + name + "->" + R + "\n"
        return name
| true |
f836e2fe49074b9b5ae8a91e733ee7546e592f1c | Python | AdamJacksonData/JanusBankingChurn | /naive_modelling_mlflow.py | UTF-8 | 3,762 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 18:12:58 2021
@author: AdamJackson
"""
# %%
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import mlflow
import argparse
from sklearn.tree import plot_tree
# %%
# NOTE(review): hard-coded local tracking path; make configurable for reuse.
tracking_uri = r'file:C:/Users/AdamJackson/junkdata/mlruns'
parser = argparse.ArgumentParser('Banking Churn - GBC')
parser.add_argument('--n', type=int, default=1, help='Number of experiments to run')
parser.add_argument('--lr', type=float, default=0.01, help='Learning rate')
# Parser
# parse_known_args tolerates extra args (e.g. when run from an IDE/notebook).
args, _ = parser.parse_known_args()
n_experiments = args.n
lr = args.lr
# %%
# Load the aggregated churn data from the current working directory.
file_path = os.getcwd() + r'/Aggregated_data.csv'
raw_data = pd.read_csv(file_path)
# %%
# Feature engineering: integer age in years at the fixed snapshot date.
raw_data['end_date'] = pd.to_datetime('2020-05-31')
raw_data['dob'] = pd.to_datetime(raw_data['dob'])
raw_data['Age'] = (raw_data['end_date'] - raw_data['dob'])/np.timedelta64(1,'Y')
raw_data['Age'] = raw_data['Age'].apply(np.floor)
# Drop identifier/date columns that should not be used as features.
preprocessed_df = raw_data.drop(columns=['customer_id', 'deposit', 'withdrawal', 'dob', 'creation_date', 'account_id', 'end_date'])
#preprocessed_df = preprocessed_df.drop(columns='transaction_date')
preprocessed_df = preprocessed_df.drop(columns='state')
# %%
# Hold out the last month (2020-05) as the test set; earlier months are
# used for training/validation.
test_data = preprocessed_df[preprocessed_df['transaction_date']=='2020-05']
train_val_data = preprocessed_df[preprocessed_df['transaction_date']!='2020-05']
test_data.drop(columns=['transaction_date','last_transaction'],inplace=True)
# Binary churn target derived from the last_transaction flag.
train_val_data['churn'] = train_val_data['last_transaction'].map({True:1,False:0})
train_val_data.drop(columns=['transaction_date','last_transaction'],inplace=True)
# %%
# Stratified 80/20 train/validation split, fixed seed for reproducibility.
train_data, val_data = train_test_split(train_val_data, test_size=0.2, stratify=train_val_data['churn'], shuffle=True, random_state=123)
X_train = train_data.drop(columns='churn')
y_train = train_data['churn']
X_val = val_data.drop(columns='churn')
y_val = val_data['churn']
# %%
# Standardise features; the scaler is fitted on train only to avoid leakage.
scaler=StandardScaler()
X_train = pd.DataFrame(scaler.fit_transform(X_train),columns=X_train.columns)
X_val = pd.DataFrame(scaler.transform(X_val),columns=X_val.columns)
# %%
mlflow.set_tracking_uri(tracking_uri) # if we don't specify, logs are written in cwd
mlflow.set_experiment('Banking Churn - GBC')
# Random search over max_depth and min_samples_leaf; one MLflow run each.
for i in range(n_experiments):
    with mlflow.start_run():
        md = np.random.randint(1, 10)
        msl = np.random.randint(1, 25)
        model = GradientBoostingClassifier(learning_rate=lr, max_depth=md, min_samples_leaf=msl)
        model.fit(X_train, y_train)
        y_hat_train = model.predict(X_train)
        train_acc = accuracy_score(y_train, y_hat_train)
        y_hat_val = model.predict(X_val)
        val_acc = accuracy_score(y_val, y_hat_val)
        mlflow.log_param('max_depth', md)
        mlflow.log_param('min_samples_leaf', msl)
        mlflow.log_param('learning_rate', lr)
        mlflow.log_metric('train_acc',train_acc)
        mlflow.log_metric('val_acc',val_acc)
        mlflow.sklearn.log_model(model, 'BankingChurnGBC')
# %%
# Disabled visualization snippet (kept for reference): exports one of the
# fitted sub-trees to GraphViz and renders it in a notebook.
'''
from sklearn.tree import export_graphviz
sub_tree = model.estimators_[0, 0]
from pydotplus import graph_from_dot_data
from IPython.display import Image
dot_data = export_graphviz(
    sub_tree,
    out_file=None, filled=True, rounded=True,
    special_characters=True,
    proportion=False, impurity=False, # enable them if you want
)
graph = graph_from_dot_data(dot_data)
Image(graph.create_png())
'''
# %%
| true |
7c524bb08bec0692c21fa9278953b8670cae72dc | Python | M-Watkinson/flask-anderson | /app.py | UTF-8 | 1,318 | 2.625 | 3 | [] | no_license | from flask import Flask, render_template, redirect, url_for
from flask_bootstrap import Bootstrap
from modules import character_data
app = Flask(__name__)
# SECURITY(review): DEBUG=True and a hard-coded SECRET_KEY must not ship to
# production — load both from configuration/environment instead.
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'BgzNrDP34D'
Bootstrap(app)
def get_char(source, id):
    """Look up a character row by id and return its display fields.

    Args:
        source: iterable of dicts with keys "id", "character", "actor",
            "movie", "year released" and "movie imdb url".
        id: the id to find; compared as a string against str(row["id"]).

    Returns:
        (id, character, actor, movie, year_released, movie_imdb_url)
        for the first matching row.

    Raises:
        KeyError: when no row matches.  (BUG FIX: the old version fell off
        the loop and raised UnboundLocalError on a miss, and kept scanning
        after a match.)
    """
    id = str(id)
    for row in source:
        if id == str(row["id"]):
            return (id, row["character"], row["actor"], row["movie"],
                    row["year released"], row["movie imdb url"])
    raise KeyError("no character with id %s" % id)
@app.route('/')
def index():
    """Render the home page listing every character's (id, name) pair."""
    ids_list = []
    name_list = []
    for character in character_data:
        ids_list.append(character['id'])
        name_list.append(character['character'])
    # zip() yields an iterator; the template may traverse it only once.
    pairs_list = zip(ids_list, name_list)
    return render_template('index.html', pairs=pairs_list, the_title=("Wes Anderson Characters"))
@app.route('/character/<id>.html')
def character(id):
    """Render the detail page for the character with the given URL id."""
    id, character, actor, movie, year_released, movie_imdb_url = get_char(character_data, id)
    return render_template('character.html', character=character, actor=actor, movie=movie, year_released=year_released, movie_imdb_url=movie_imdb_url)
if __name__ == '__main__':
    # debug=True enables the interactive debugger; development use only.
    app.run(debug=True)
| true |
67e30bfce6e9d8264df6cb730b123dc1b0985754 | Python | JFlamingo26/ELQuotes | /Main.py | UTF-8 | 739 | 3.765625 | 4 | [] | no_license | from random import *
from time import sleep as wait
def RandomDictItem(Dict):
    """Return a uniformly random (key, value) pair from *Dict*.

    Uses random.choice instead of the old randint-into-list indexing, which
    also built list(Dict) twice.
    """
    key = choice(list(Dict))
    return key, Dict[key]
Quotes = {"A Christmas Carol" : {
"Scrooge's introduction" : [
'"a squeezing, wrenching, grasping, scraping, clutching, covetous old sinner"',
'"he was a tight-fisted hand at the grindstone"',
'"solitary as an oyster"'
]
}
}
LitName,LitCont = RandomDictItem(Quotes)
ThemeChar,ThemeCharAns = RandomDictItem(LitCont)
print("In " + LitName + ", give three quotes about " + ThemeChar + ".")
for i in range(0,3):
input()
print("Alright. Here are some examples which you could've used:")
for i in ThemeCharAns:
wait(0.5)
print(i)
| true |
e1a7c39af3072a343bf9c20c95b12c4e32af0b20 | Python | bucsi/AoC2017 | /2b.py | UTF-8 | 437 | 2.921875 | 3 | [
"WTFPL"
] | permissive | #!/bin/pypy3
import itertools
with open("be2.txt", "r") as f: #beolvasás
seged=f.readlines()
be=[]
for sor in seged: #mátrixba szétszedés
asor = sor.split("\n")[0]
be.append(asor.split("\t"))
sordiff=0
for i in range(len(be)):
for j in range(len(be[i])):
be[i][j]=int(be[i][j]) #konvertálás intre
for j in itertools.combinations(be[i], 2):
if max(j)%min(j)==0:
sordiff+=int(max(j)/min(j))
print(sordiff)
| true |
3148f45f517b9d2426b0a052063e8e35fcf697ab | Python | Goldabj/IntroToProgramming | /Tkinter_ttk/.svn/pristine/ce/cee6b9b5b6a0e42014aae21c746f4ff7e4e83e96.svn-base | UTF-8 | 627 | 3.03125 | 3 | [] | no_license | """
Try out Tkinter and ttk!
"""
import tkinter
from tkinter import ttk
def main():
    """Exercise stub: build a small Tkinter/ttk GUI (intentionally empty).

    The comments below are the assignment's TODO list, to be implemented
    in order.
    """
    # Make a window.
    # Put a Frame on it.
    # Put a Button on the frame.
    # Make your Button do something simple.
    # Add a Label and an Entry.
    # Make your Button do something with the Label and Entry.
    pass
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    main()
| true |
da46f2d8f4af0bd87c5cdcca224ab0e976630b8e | Python | ZihaoZhai/Recommendation_System | /featureExtractor/connection.py | UTF-8 | 223 | 2.796875 | 3 | [] | no_license | import psycopg2
def connect(params):
    """Open a PostgreSQL connection from the *params* keyword mapping.

    Returns the open connection, or None when connecting fails (the error
    is printed).
    """
    try:
        return psycopg2.connect(**params)
    except Exception as error:
        # BUG FIX: the old handler also called conn.close(), but conn is
        # always None when connect() raises, so that line itself raised
        # AttributeError.  (The except tuple also referenced
        # psycopg2.DatabaseError redundantly; it is a subclass of Exception.)
        print(error)
        return None
d5ec3b50e37cd3343a4c6336445aac84371aa985 | Python | Schachte/Learn_Python_The_Hard_Way | /Excercise_39/Excercise_39_Extension.py | UTF-8 | 766 | 3.9375 | 4 | [] | no_license | #########################################
######### Excercise 39 Extension#########
# Learning Python The Hard Way###########
####### Ryan Schachte ###################
#########################################
# NOTE: this is a Python 2 script (print statements).
print 'Hello, welcome to the virtual super market!'
#Item with relevant pricing
# Price-per-pound display strings, keyed by fruit name.
market_items = {
    'Apples': '$3.17',
    'Bananas': '$2.81',
    'Peaches': '$1.22'
}
# Units in stock, keyed by fruit name.
market_stock = {
    'Apples': 20,
    'Bananas': 10,
    'Peaches': 5
}
fruits = ['Apples', 'Bananas', 'Peaches']
#Iterate through each element in the two dictionaries without using a counter or a directly stated statement
for each_fruit in fruits:
    print 'There are ' + str(market_stock[each_fruit]) + ' ' + each_fruit + ' which cost ' + market_items[each_fruit] + ' per pound.'
3ae10348056276919558d13a0ba4cb7f6528c54e | Python | rtesselli/knight | /tests/test_parse.py | UTF-8 | 1,276 | 3.265625 | 3 | [] | no_license | import pytest
import string
from knight.parse import parse_coordinate, parse_line, parse_input
from knight.data_model import ChessCoordinate, Statement
def test_parse_coordinate():
    """Every letter+digit pair parses (letter upper-cased); bad inputs raise.

    NOTE(review): the loop accepts letters beyond 'H' — confirm the parser
    intentionally allows files outside a-h.
    """
    for letter in string.ascii_lowercase + string.ascii_uppercase:
        for number in range(1, 9):
            assert parse_coordinate(f"{letter}{number}") == ChessCoordinate(letter=letter.upper(), number=number)
    # Rank 0 is off the board.
    with pytest.raises(ValueError):
        parse_coordinate("A0")
    # Too many characters.
    with pytest.raises(ValueError):
        parse_coordinate("ABC")
    # Missing rank digit.
    with pytest.raises(ValueError):
        parse_coordinate("A")
def test_parse_line():
    """A line is exactly two space-separated coordinates; other counts raise."""
    assert parse_line("A1 B2") == Statement(
        start=ChessCoordinate(letter='A', number=1),
        end=ChessCoordinate(letter='B', number=2)
    )
    # One coordinate is not enough.
    with pytest.raises(ValueError):
        parse_line("A1")
    # Three coordinates are too many.
    with pytest.raises(ValueError):
        parse_line("A1 A1 A1")
def test_parse_input():
    """parse_input yields one Statement per input line, preserving order."""
    assert list(parse_input(["A1 B1", "C2 D4"])) == [
        Statement(
            start=ChessCoordinate(letter='A', number=1),
            end=ChessCoordinate(letter='B', number=1)
        ),
        Statement(
            start=ChessCoordinate(letter='C', number=2),
            end=ChessCoordinate(letter='D', number=4)
        )
    ]
| true |
43e074ab807e63dafccdc8b8750be164105371d8 | Python | zaini/SimpleKNN | /SimpleKNN.py | UTF-8 | 2,432 | 3.1875 | 3 | [
"MIT"
] | permissive | import getopt
import sys
import pandas
def get_data_frame(url):
    """Load a CSV (path or URL) into a pandas DataFrame.

    The rest of this module expects the first column to be the class label
    and the remaining columns to be numeric features.
    """
    return pandas.read_csv(url)
def distance(a, b):
    """Return the Euclidean distance between feature vectors *a* and *b*.

    zip() semantics are preserved: if one vector is longer, its extra
    features are ignored.
    """
    return sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5
def mode(data):
    """Return the most common element of *data*.

    Ties are broken arbitrarily (set iteration order).  data.count inside
    max() makes this O(n^2); fine for small k-neighbour lists.
    """
    return max(set(data), key=data.count)
def is_smaller_find(find, closest_rows):
    """Pick the slot in *closest_rows* that distance *find* should replace.

    closest_rows is [classifications, distances] (two parallel lists).
    Returns the index of the first unused (None) slot; otherwise the index
    of the current farthest neighbour when *find* is closer than it;
    otherwise None (explicitly, instead of the old implicit fall-through).
    """
    classifications, distances = closest_rows
    # Check for Nones before finding rows to replace (fill empty slots first).
    if None in classifications:
        return classifications.index(None)
    farthest_index = distances.index(max(distances))
    if find < distances[farthest_index]:
        return farthest_index
    return None
def get_initial_rows(k):
    """Return [classifications, distances]: two independent lists of k Nones.

    Idiomatic [None] * k replaces the old append loop; the two lists are
    separate objects (as before, via .copy()), so mutating one does not
    affect the other.
    """
    return [[None] * k, [None] * k]
def get_classification(df, k, features):
    """Classify *features* by majority vote of its k nearest rows in *df*.

    Each df row is (label, feature1, feature2, ...); the k closest rows by
    Euclidean distance are tracked and the most common label is returned.
    """
    nearest = get_initial_rows(k)
    labels, dists = nearest[0], nearest[1]
    for sample in df.values:
        d = distance(features, sample[1::])
        slot = is_smaller_find(d, nearest)
        if slot is not None:
            labels[slot] = sample[0]
            dists[slot] = d
    return mode(labels)
# TODO add validation
if __name__ == '__main__':
    # Defaults: no data source, k=11 neighbours, no query features.
    data_url = None
    k = 11
    features = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hd:k:q:", ["help", "data=", "features="])
    except getopt.GetoptError:
        print("SimpleKNN.py -d <data_url> -k <k_value> -q <query_features>")
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            # TODO add more help information
            print("usage: SimpleKNN.py -d <data_url> -k <k_value> -q <query_features>")
            sys.exit(2)
        elif opt in ("-d", "--data"):
            data_url = arg
        elif opt in ("-q", "--features"):
            # Query point, e.g. "70,170" -> [70, 170].
            features = list(map(int, arg.split(",")))
        elif opt == "-k":
            # NOTE(review): k stays a string here; it is int()-converted below.
            k = arg
    if data_url is not None and features is not None:
        print("Classification: {}".format(get_classification(get_data_frame(data_url), int(k), features)))
# python SimpleKNN.py -d weight-height.csv -k 5 --features 70,170
| true |
9fd50970473844cde1de159ce69157ed3ad6b700 | Python | sdyong88/pythonexam | /app/models/UsersModel.py | UTF-8 | 2,849 | 3.015625 | 3 | [] | no_license |
from system.core.model import Model
import re
class UsersModel(Model):
    """Data-access model for registering and authenticating users."""

    def __init__(self):
        super(UsersModel, self).__init__()

    def create_user(self, info):
        """Validate registration *info* and, if valid, insert the new user.

        info: dict with keys 'name', 'alias', 'email', 'password', 'confirm_pw'.
        Returns {"status": True, "user": <row>} on success, otherwise
        {"status": False, "errors": [<messages>]}.
        """
        # Bug fix: the original pattern used "a-za-z" (the lower-case range
        # twice), so any email containing an upper-case letter was rejected,
        # and "[a-za-z]*" allowed an empty top-level domain.
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9\.\+_-]+@[a-zA-Z0-9\._-]+\.[a-zA-Z]+$')
        errors = []
        # validation
        # NOTE(review): `and` reports an error only when BOTH fields are bad;
        # the messages read as if `or` was intended — confirm before changing.
        if not info['name'] and not info['alias']:
            errors.append('Must Enter information in Name and Alias')
        elif len(info['name']) < 2 and len(info['alias']) < 2:
            errors.append('Name and Alias needs to be atleast 2 charactes long')
        if not info['email']:
            errors.append('Email cannot be blank**')
        elif not EMAIL_REGEX.match(info['email']):
            errors.append('Email format is invalid**')
        if not info['password']:
            errors.append('Password cannot be blank**')
        # Bug fix: was `< 7`, which accepted 7-character passwords although
        # the message requires at least 8.
        elif len(info['password']) < 8:
            errors.append('Password needs be atleast 8 characters long**')
        elif info['password'] != info['confirm_pw']:
            errors.append('Password and Confirmation do not match! **')
        if errors:
            return {"status": False, "errors": errors}

        hashed_pw = self.bcrypt.generate_password_hash(info['password'])
        insert_user = "INSERT INTO users (name, username, email, password,created_at,updated_at) VALUES (:name, :alias, :email, :pw_hash,NOW(), NOW()) "
        data = {
            "name": info["name"],
            "alias": info["alias"],
            "email": info['email'],
            "pw_hash": hashed_pw
        }
        self.db.query_db(insert_user, data)
        # NOTE(review): fetching "the newest row" is race-prone under
        # concurrent registrations; prefer the insert's returned id if the
        # DB layer exposes one.
        get_user_query = "SELECT * FROM users ORDER BY id DESC LIMIT 1"
        users = self.db.query_db(get_user_query)
        return {"status": True, "user": users[0]}

    def login_check(self, info):
        """Validate login *info* and verify the stored bcrypt hash.

        Returns {"status": True, "user": <row>} on success, otherwise
        {"status": False, "errors": [<messages>]}.
        """
        errors = []
        if not info['email']:
            errors.append('Email cannot be blank**')
        # Bug fix: was `< 7`; the message (and create_user) require 8.
        elif len(info['password']) < 8:
            errors.append('Password needs be atleast 8 characters long**')
        if errors:
            return {"status": False, "errors": errors}

        user_query = "SELECT users.id AS user_id, users.name , users.password FROM users WHERE email = :email LIMIT 1"
        user = self.db.query_db(user_query, {'email': info['email']})
        # Bug fix: an unknown email used to raise IndexError on user[0];
        # it is now reported the same way as a wrong password.
        if user and self.bcrypt.check_password_hash(user[0]['password'], info['password']):
            return {"status": True, "user": user[0]}
        errors.append('Email and/or Password does not match')
        return {"status": False, "errors": errors}
| true |
9a22d9308e07626d36127e6b0ff01ed760a30f47 | Python | wufans/EverydayAlgorithms | /2018/list/581. Shortest Unsorted Continuous Subarray.py | UTF-8 | 1,637 | 3.75 | 4 | [] | no_license | # -*- coding: utf-8 -*-
#@author: WuFan
"""
Given an integer array, you need to find one continuous subarray that if you only sort this subarray in ascending order, then the whole array will be sorted in ascending order, too.
You need to find the shortest such subarray and output its length.
Example 1:
Input: [2, 6, 4, 8, 10, 9, 15]
Output: 5
Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the whole array sorted in ascending order.
Note:
Then length of the input array is in range [1, 10,000].
The input array may contain duplicates, so ascending order here means <=.
"""
class Solution(object):
    def findUnsortedSubarray(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # Compare against a fully sorted copy: the answer spans from the
        # first index that disagrees to the last index that disagrees.
        ordered = sorted(nums)
        n = len(nums)
        lo = 0
        while lo < n and nums[lo] == ordered[lo]:
            lo += 1
        if lo == n:
            # Already sorted (or empty) — nothing needs re-sorting.
            return 0
        hi = n - 1
        while nums[hi] == ordered[hi]:
            hi -= 1
        return hi - lo + 1
print(Solution().findUnsortedSubarray([]))
| true |
afa26145e1c9d9df8ac2decfd03fd220ea7ac8ff | Python | toktok911/100DaysOfCode | /1 day one -day ten/6/day_6.py | UTF-8 | 3,682 | 3.15625 | 3 | [] | no_license | # streaming the tweets directly from twitter in realtime.
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy import API
from tweepy import Cursor
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from textblob import TextBlob
import re
import twitter_credentials
class TwitterClient():
    """Thin wrapper holding an authenticated tweepy API for one user's timeline."""

    def __init__(self, twitter_user=None):
        # twitter_user=None means "the authenticated account's own timeline".
        self.auth = TwitterAuthenticator().authenticate_twitter()
        self.twitter_client = API(self.auth)
        self.twitter_user = twitter_user

    def get_twitter_client_api(self):
        """Expose the raw tweepy API object."""
        return self.twitter_client

    def get_user_tweets(self, num_tweets):
        """Return up to *num_tweets* status objects from the user's timeline."""
        cursor = Cursor(self.twitter_client.user_timeline, id=self.twitter_user)
        return [tweet for tweet in cursor.items(num_tweets)]
#we want to create a class which will allow us to print the tweets.
#we call it MyListener, and it is going to inherit from StreamListener.
#MyListener has some methods that we want to override.
class TwitterAuthenticator():
    """Builds an OAuth handler from the credentials in twitter_credentials."""

    def authenticate_twitter(self):
        handler = OAuthHandler(twitter_credentials.consumer_key,
                               twitter_credentials.consumer_secret)
        handler.set_access_token(twitter_credentials.access_token,
                                 twitter_credentials.access_token_secret)
        return handler
class TwitterStreamer():
    """Streams live tweets matching a hashtag list into a file via MyListener."""

    def __init__(self):
        self.twitter_authenticator = TwitterAuthenticator()

    def stream_tweets(self, tweets_filename, hashtags_list):
        """Open a filtered stream; raw tweet JSON is appended to *tweets_filename*."""
        auth = self.twitter_authenticator.authenticate_twitter()
        stream = Stream(auth, MyListener(tweets_filename))
        stream.filter(track=hashtags_list)
class MyListener(StreamListener):
    """StreamListener that appends each raw tweet payload to a file."""

    def __init__(self, tweets_filename):
        self.tweets_filename = tweets_filename

    def on_data(self, data):
        """Persist one raw tweet; return True to keep the stream open."""
        try:
            print(data)
            with open(self.tweets_filename, 'a') as tf:
                tf.write(data)
            return True
        # Bug fix: was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the stream unstoppable.
        except Exception as e:
            print("Error on_data %s" % str(e))
            return True

    def on_error(self, status):
        # HTTP 420 means Twitter is rate-limiting us; returning False
        # disconnects the stream instead of hammering the endpoint.
        if status == 420:
            return False
        print(status)
class TweetAnalyzer():
    """Turns raw tweepy status objects into a DataFrame and scores sentiment."""

    def tweets_to_dataframe(self, tweets):
        """Build a DataFrame with one row per tweet (text, id, len, date, ...)."""
        frame = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])
        frame['id'] = np.array([tweet.id for tweet in tweets])
        frame['len'] = np.array([len(tweet.text) for tweet in tweets])
        frame['date'] = np.array([tweet.created_at for tweet in tweets])
        frame['source'] = np.array([tweet.source for tweet in tweets])
        frame['likes'] = np.array([tweet.favorite_count for tweet in tweets])
        frame['retweets'] = np.array([tweet.retweet_count for tweet in tweets])
        return frame

    def clean_tweet(self, tweet):
        """Strip @mentions, URLs and punctuation, then collapse whitespace."""
        stripped = re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet)
        return ' '.join(stripped.split())

    def analyze_sentiment(self, tweet):
        """Return 1 / 0 / -1 for positive / neutral / negative polarity."""
        polarity = TextBlob(self.clean_tweet(tweet)).sentiment.polarity
        if polarity > 0:
            return 1
        if polarity == 0:
            return 0
        return -1
if __name__ == '__main__':
    # Fetch the 20 most recent tweets of the given account and attach a
    # per-tweet sentiment column (1 / 0 / -1) before printing a preview.
    twitter_client = TwitterClient()
    api = twitter_client.get_twitter_client_api()
    # NOTE(review): count is passed as the string "20"; tweepy accepts it,
    # but an int would be cleaner — confirm before changing.
    tweets = api.user_timeline(screen_name="toktok911", count="20" )
    tweet_analyzer=TweetAnalyzer()
    df = tweet_analyzer.tweets_to_dataframe(tweets)
    df['sentiment'] = np.array([tweet_analyzer.analyze_sentiment(t) for t in df['Tweets']])
    print(df.head(5))
fedf6608012a5b936ef45a0905559a75651b5bcb | Python | dfonovic/dbtest | /db-plot.py | UTF-8 | 1,285 | 2.75 | 3 | [] | no_license | import sqlite3
import datetime
import time
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from dateutil import parser
from matplotlib import style
style.use('fivethirtyeight')
# Open the local SQLite sensor database and plot every logged reading.
conn = sqlite3.connect('sensor.db')
c = conn.cursor()
# def read_from_db():
# Row layout (observed from the indexing below): row[1] is a timestamp
# string, row[2] is the reading — presumably a temperature; confirm schema.
c.execute('SELECT * FROM log')
data = c.fetchall()
dates = []
values = []
for row in data:
    dates.append(parser.parse(row[1]))
    values.append(row[2])
# Time-series plot; blocks until the window is closed.
plt.plot_date(dates,values,'-')
plt.show()
#print(data)
#temps[]
#for row in data:
#	print(row)
#	print(row[2])
#	plt.plot(row[2])
#	plt.show()
def data_entry():
    """Insert one simulated temperature reading (30-40) into the `sensor`
    table, timestamped with the current local time, then commit.

    NOTE(review): relies on the module-level `c` cursor and `conn`
    connection, and writes to `sensor` while the plotting code above reads
    from `log` — confirm which table is intended.
    """
    epoch_seconds = int(time.time())
    stamp = str(datetime.datetime.fromtimestamp(epoch_seconds).strftime('%Y-%m-%d %H:%M:%S'))
    reading = random.randint(30, 40)
    print(stamp)
    print(reading)
    c.execute("INSERT INTO 'sensor' (time, temp) VALUES (?, ?)", (stamp, reading))
    conn.commit()
#plt.legend()
#plt.show()
#for i in range(20):
# data_entry()
# time.sleep(1)
# for x in range(3, 25):
# temperatura = random.randint(30, 40)
# print (temperatura)
# print (x)
# Insert
# c.execute ("INSERT INTO `sensor` (time, temp) VALUES (datetime('now'), ?)", (temperatura))
# conn.commit()
conn.close()
| true |
0d4273fea6a2dc4166b5938dd993b232c8edd9f2 | Python | Johnmaras/taxi_data_processing | /worker/src/worker.py | UTF-8 | 5,691 | 2.90625 | 3 | [] | no_license | import json
import os
import time
from random import randint
from typing import Tuple
import boto3
import math
def get_distance(latitude: Tuple, longitude: Tuple):
    """Haversine great-circle distance in metres between the pickup point
    (latitude[0], longitude[0]) and the drop-off point (latitude[1],
    longitude[1])."""
    pickup_lat, dropoff_lat = float(latitude[0]), float(latitude[1])
    pickup_lon, dropoff_lon = float(longitude[0]), float(longitude[1])
    earth_radius = 6371e3  # metres
    phi1 = pickup_lat * math.pi / 180  # φ, λ in radians
    phi2 = dropoff_lat * math.pi / 180
    delta_phi = (dropoff_lat - pickup_lat) * math.pi / 180
    delta_lambda = (dropoff_lon - pickup_lon) * math.pi / 180
    half_chord = (math.sin(delta_phi / 2) * math.sin(delta_phi / 2)
                  + math.cos(phi1) * math.cos(phi2)
                  * math.sin(delta_lambda / 2) * math.sin(delta_lambda / 2))
    angular_dist = 2 * math.atan2(math.sqrt(half_chord), math.sqrt(1 - half_chord))
    return earth_radius * angular_dist  # in metres
def get_quadrant(latitude: Tuple, longitude: Tuple) -> str:
    """Map a trip's pickup point to one of four quadrants around the
    midtown-Manhattan reference coordinate.

    Only the pickup entries (latitude[0], longitude[0]) are used; the
    drop-off entries of the tuples are ignored.

    Bug fix: the original used strict comparisons on every branch, so a
    point lying exactly on either reference axis fell through and returned
    "" (which then polluted the areas dict in process_data).  Boundaries
    are now assigned deterministically: north and west are inclusive.
    """
    ny_lat = 40.76793672
    ny_lon = -73.98215480
    lat = latitude[0]
    lon = longitude[0]
    north = lat >= ny_lat
    west = lon <= ny_lon
    if north and west:
        return "Area 1"
    if north:
        return "Area 2"
    if west:
        return "Area 3"
    return "Area 4"
def process_data(data: json):
    """Run the five per-batch analytics queries over one batch of taxi trips.

    *data* is a dict with a single key (the batch id) mapping to a list of
    trip records; each record is a dict with pickup/dropoff coordinates,
    trip_duration (seconds), passenger_count and vendor_id.
    Returns {batch_id: {query_key: results, ...}} for the five queries.

    NOTE(review): the annotation `data: json` names the json module, not a
    type — it behaves as untyped; a dict is what is actually expected.
    """
    # Query 1 init: trips per quadrant
    key_1 = "Routes_Per_Quadrant"
    areas = {"Area 1": 0, "Area 2": 0, "Area 3": 0, "Area 4": 0}
    key_1_results = []
    # End Query 1 init
    # Query 2 init: "special" routes (long, slow, multi-passenger)
    key_2 = "Routes_With_Spec_Chars"
    key_2_results = []
    # End Query 2 init
    # Query 3 init: quadrant containing the longest-duration trip
    key_3 = "Biggest_Route_Quadrant"
    max_duration = 0
    max_duration_area = ""
    # End Query 3 init
    # Query 4 init: the single longest route by distance
    key_4 = "Longest_Route"
    # End Query 4 init
    # Query 5 init: total passengers per vendor
    key_5 = "Passengers_Per_Vendor"
    vendors_passengers = {}
    key_5_results = []
    # End Query 5 init
    # print(type(data))
    batch_id = list(data.keys())[0]
    print(f"Processing batch: {batch_id}")
    data = data[batch_id]
    longest_route_len = 0
    longest_route_record = {}
    # Single pass over the batch: all five aggregations update together.
    for record in data:
        # print("Record" + str(record))
        # Get values
        latitude = (float(record["pickup_latitude"]), float(record["dropoff_latitude"]))
        longitude = (float(record["pickup_longitude"]), float(record["dropoff_longitude"]))
        # Query 1
        quadrant = get_quadrant(latitude, longitude)
        area_count = areas.get(quadrant,
                               0)  # In case something wrong happend and no valid quadrant was returned by get_quadrant()
        area_count += 1
        areas[quadrant] = area_count
        # End Query 1
        # Query 2: route length > 1 km, duration > 10 min, more than 2 passengers
        l_R = get_distance(latitude, longitude)
        trip_duration = int(record["trip_duration"])
        t_R = trip_duration / 60
        p_R = int(record["passenger_count"])
        if l_R > 1000 and t_R > 10 and p_R > 2:
            key_2_results.append(record)
        # End Query 2
        # Query 3: track the longest trip by duration and its quadrant
        if max_duration < trip_duration:
            max_duration = trip_duration
            max_duration_area = quadrant
        # End Query 3
        # Query 4: track the longest trip by haversine distance
        if longest_route_len < l_R:
            longest_route_len = l_R
            longest_route_record = record
        # End Query 4
        # Query 5: accumulate passenger totals keyed by vendor
        vendor_id = record["vendor_id"]
        passengers = int(record["passenger_count"])
        num_of_passengers = vendors_passengers.get(vendor_id, 0)
        num_of_passengers += passengers
        vendors_passengers[vendor_id] = num_of_passengers
        # End Query 5
    key_1_results = areas
    key_3_results = {"Area": max_duration_area, "Route Time(secs)": max_duration}
    key_4_results = {"Record": longest_route_record, "Route Length": longest_route_len}
    # Flatten the vendor totals into a list of small records for the reducer.
    for vendor_id in vendors_passengers:
        passengers = vendors_passengers[vendor_id]
        results_record = {"Vendor ID": vendor_id, "Passenger": passengers}
        key_5_results.append(results_record)
    results = {batch_id: {key_1: key_1_results,
                          key_2: key_2_results,
                          key_3: key_3_results,
                          key_4: key_4_results,
                          key_5: key_5_results}}
    return results
def get_message_deduplication_id(base_data_id: str) -> str:
    """Return *base_data_id* suffixed with the current epoch time in
    nanoseconds, giving a practically unique SQS deduplication id."""
    return "{}{}".format(base_data_id, time.time_ns())
def send_message(data, queue_url, sqs_client):
    """Publish *data* (JSON-serialisable) to the FIFO queue at *queue_url*.

    A random group id spreads messages across FIFO message groups, and the
    nanosecond-stamped deduplication id keeps retries distinct.
    """
    base_data_id = "reducer-data-group"
    group_id = base_data_id + str(randint(1, 1000))
    dedup_id = base_data_id + str(time.time_ns())
    sqs_client.send_message(
        QueueUrl=queue_url,
        MessageBody=json.dumps(data),
        MessageGroupId=group_id,
        MessageDeduplicationId=dedup_id,
    )
def handler(event, context):
    """AWS Lambda entry point: process one SQS record and forward results.

    Reads the batch payload from the first SQS record, runs process_data,
    publishes the results to the reducer queue, then deletes the consumed
    message from the worker queue.
    """
    worker_queue_url = os.getenv("WORKER_QUEUE_URL")
    record = event["Records"][0]
    message_id = record["messageId"]
    receipt_handle = record["receiptHandle"]
    data = record["body"]
    # The body arrives double-encoded (a JSON string containing JSON),
    # hence the nested loads.
    json_data = json.loads(json.loads(data))
    results = process_data(json_data)
    # One client serves both the publish and the delete below; the original
    # redundantly created a second identical client.
    sqs_client = boto3.client("sqs")
    # Send results to reducer queue
    reducer_queue_url = os.getenv("REDUCER_QUEUE_URL")
    results_json = json.dumps(results)
    send_message(results_json, reducer_queue_url, sqs_client)
    response = sqs_client.delete_message(QueueUrl=worker_queue_url, ReceiptHandle=receipt_handle)
    if response and response["ResponseMetadata"]["HTTPStatusCode"] == 200:
        print(f"Message deleted successfully: {message_id}")
| true |
de5c6ec6ad825513b6b189004dd22301da59626b | Python | MMcintire96/cardirection_project | /imgserve/run_analysis.py | UTF-8 | 2,266 | 2.78125 | 3 | [] | no_license | import subprocess
import time
import cv2
import numpy as np
import paho.mqtt.publish as publish
from picamera import PiCamera
from picamera.array import PiRGBArray
# MQTT broker hostname — left blank here; TODO confirm it is filled in at
# deploy time, otherwise publish.single() below will fail.
MQTT_SERVER = ''
# Minimum absolute MSE drift that counts as motion (see the capture loop).
motion_filter = .005
# starts the mosquitto daemon in the background
subprocess.call(["mosquitto", "-d"])
def captureFirst():
    """Take the reference photo and return it as a grayscale array.

    Uses the module-level `camera` (created just below this definition, but
    initialised before the first call).
    """
    print("Taking the first frame photo for motion analysis")
    camera.capture('firstimg.jpg')
    reference = cv2.imread('firstimg.jpg')
    return cv2.cvtColor(reference, cv2.COLOR_BGR2GRAY)
# load camera - sleep to let the sensor warm up before the first capture
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=(320, 240))
time.sleep(5)
# Reference frame that every later frame is compared against.
grayStill = captureFirst()
# i counts warm-up frames; mseArr is the rolling MSE window (see frame_arr).
i = 0
mseArr = []
def mse(grayStill, grayFrame):
    """Mean squared error between two equal-sized grayscale frames."""
    diff = grayStill.astype("float") - grayFrame.astype("float")
    return np.sum(diff ** 2) / float(grayStill.shape[0] * grayStill.shape[1])
def init_mse_arr(grayFrame):
    """Seed the rolling error list with this frame's MSE vs the reference."""
    initial_error = mse(grayStill, grayFrame)
    mseArr.append(initial_error)
def frame_arr(image):
    """Append *image*'s MSE (vs the reference frame) to the rolling window
    and return the drift between the newest and oldest entries.

    Bug fix: the `image` parameter was previously ignored in favour of the
    global `grayFrame`, which only worked because the caller happens to
    pass that same global.
    """
    MSE = mse(grayStill, image)
    mseArr.append(MSE)
    err = mseArr[len(mseArr)-1] - mseArr[0]
    # Keep the window at most `i` (the warm-up frame count) entries long.
    if len(mseArr) >= i:
        mseArr.pop(0)
    return err
def pub_message(image, err, MSE):
    """Capture a still, then publish the motion stats and the JPEG via MQTT.

    Publishes "lot_id,err,MSE" on topic `full_send` and the raw JPEG bytes
    on topic `full_img`.
    """
    camera.capture('photos/test.jpg')
    # Bug fix: the file handle was previously left open (resource leak);
    # the local was also named `pub_message`, shadowing this function.
    with open('photos/test.jpg', 'rb') as f:
        file_content = f.read()
    lot_id = 12  # hard-coded parking-lot identifier
    payload_text = str(lot_id) + ',' + str(err) + ',' + str(MSE)
    publish.single('full_send', payload=payload_text, hostname=MQTT_SERVER)
    publish.single('full_img', payload=file_content, hostname=MQTT_SERVER)
# Main loop: grab frames forever, compare each against the reference still,
# and publish over MQTT whenever the MSE drift exceeds motion_filter.
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # cleans each frame (reset the buffer for the next capture)
    rawCapture.truncate()
    rawCapture.seek(0)
    # get ready for motion analysis
    image = frame.array
    grayFrame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # init the mseError array with the first 10 frames before detecting
    if i < 10:
        init_mse_arr(grayFrame)
        i += 1
        print("Preparing the video stream with %s frames" %i)
    else:
        err = frame_arr(grayFrame)
        MSE = mse(grayStill, grayFrame)
        print(err)
        if abs(err) > motion_filter:
            print('Motion detected - publishing to mqtt')
            pub_message(image, err, MSE)
| true |
65e7e9c0aa5841bc540db97fe3e7188d013f3a19 | Python | aditimishra1290/object-detection-effectiveness | /my_package/data/dataset.py | UTF-8 | 3,277 | 3.125 | 3 | [] | no_license | #Imports
import json
import re
from PIL import Image
import numpy as np
import ast
class Dataset(object):
    '''
    A class for the dataset that will return data items as per the given index
    '''

    def __init__(self, annotation_file: str, transforms: list = None):
        '''
        Arguments:
        annotation_file: path to the annotation file (one dict literal per line)
        transforms: list of transforms (class instances)
                    For instance, [<class 'RandomCrop'>, <class 'Rotate'>]
        '''
        # Bug fix: transforms defaulted to None but __getitem__ iterated it
        # unconditionally, crashing with TypeError; normalise to an empty
        # list here.  (Also removed leftover debug prints and the large
        # blocks of commented-out parsing alternatives.)
        self.transforms = transforms if transforms is not None else []
        with open(annotation_file) as f:
            content = f.readlines()
        # Each line is a Python dict literal; literal_eval parses it safely.
        self.annotations = [ast.literal_eval(line) for line in content]

    def __len__(self):
        '''
        return the number of data points in the dataset
        '''
        return len(self.annotations)

    def __getitem__(self, idx: int) -> np.ndarray:
        '''
        return the dataset element for the index: "idx"
        Arguments:
            idx: index of the data element.
        Returns: a tuple of
            image: numpy array of shape (3, H, W), values scaled to [0, 1]
            gt_bboxes: N x 5 list where each entry is [class, x1, y1, x2, y2]
        '''
        img_ann = self.annotations[idx]
        img = Image.open(img_ann["img_fn"])
        for transform in self.transforms:
            img = transform(img)
        # Each annotated box becomes [category_id, x1, y1, x2, y2].
        gt_bboxes = [[x["category_id"]] + x["bbox"] for x in img_ann["bboxes"]]
        # HWC uint8 -> CHW float scaled into [0, 1].
        img = np.array(img)
        img = img / 255
        img = img.transpose(2, 0, 1)
        return img, gt_bboxes
| true |
73ecd57455786807da380e7a4c26432646f65f13 | Python | MrColour/Advent_of_Code_python | /2017/day_02/p_2017_02_02.py | UTF-8 | 419 | 3.296875 | 3 | [] | no_license | def divisible_nums(row):
    """Return big // small for the first pair in *row* where one value
    evenly divides the other (Advent of Code 2017 day 2, part 2).

    Returns None implicitly if no such pair exists; equal values yield 1.
    """
    for i in range(len(row)):
        for j in range(i + 1, len(row)):
            if (row[i] % row[j] == 0 or row[j] % row[i] == 0):
                if (row[i] > row[j]):
                    return (row[i] // row[j])
                else:
                    return (row[j] // row[i])
# Read the tab-separated puzzle grid and sum each row's division result.
# NOTE(review): `input` shadows the builtin and the file handle is never
# closed — harmless in a one-shot script, but worth tidying.
input = open("input.txt").read().rstrip()
rows = [list(map(int, row.split("\t"))) for row in input.split("\n")]
total = [divisible_nums(row) for row in rows]
print(sum(total))
a155216b93b89d5e63fccf4790ae6e41fda7115a | Python | mbirkegaard/supreme-rotary-phone | /document_manager/document_manager.py | UTF-8 | 1,681 | 3.21875 | 3 | [] | no_license | import glob
from spacy.tokens.doc import Doc
from typing import NamedTuple, Optional, Dict
import spacy
import time
class StoredDocument(NamedTuple):
    # One entry per data/*.txt file; `doc` caches the spaCy parse and stays
    # None until the document is first requested via get_document().
    name: str
    path: str
    doc: Optional[Doc]
class DocumentManager:
    """Lazily loads and caches spaCy-processed documents found in data/*.txt."""

    def __init__(self):
        self.nlp = None  # spaCy pipeline, loaded on first use
        self.documents: Dict[str, StoredDocument] = {}
        for path in glob.glob('data/*.txt'):
            name = path.split('/')[-1]
            self.documents[name] = StoredDocument(name, path, None)

    def get_document_names(self):
        """Names of every known document, processed or not."""
        return [stored.name for stored in self.documents.values()]

    def get_document(self, name):
        """Return the spaCy Doc for *name*, processing and caching it on
        first access.  Raises KeyError for unknown names."""
        if name not in self.documents:
            raise KeyError('{} is not the name of a stored document'.format(name))
        stored = self.documents[name]
        if stored.doc is None:
            processed = self.load_file_and_process_doc(stored.path)
            self.documents[name] = StoredDocument(stored.name, stored.path, processed)
        return self.documents[name].doc

    def load_file_and_process_doc(self, path):
        """Load the file at *path* and run it through the spaCy pipeline.

        NOTE(review): only the first line of the file is read — confirm the
        data files are single-line.
        """
        with open(path, 'r', encoding='utf-8') as handle:
            if self.nlp is None:
                print('Loading spacy')
                self.nlp = spacy.load('en')
                print('Loaded spacy')
            text = handle.readline()
            started = time.time()
            doc = self.nlp(text)
            print('Det tog {} sekunder'.format(time.time() - started))
            print('Teksten indeholder {} ord'.format(len(doc)))
            return doc
return doc
| true |
20965d34a911104b5ad89e46664108f29dd50b89 | Python | chaosgoo/Exercise_Fluent_Python | /Chapter 14/C14E14.py | UTF-8 | 454 | 3.296875 | 3 | [] | no_license | import itertools
def vowel(c):
    """Return True when *c* lower-cases to an ASCII vowel."""
    lowered = c.lower()
    return lowered in "aeiou"
# itertools filtering demo over "Aardvark":
# keep only vowels -> ['A', 'a', 'a']
print(list(filter(vowel,"Aardvark")))
# keep only non-vowels -> ['r', 'd', 'v', 'r', 'k']
print(list(itertools.filterfalse(vowel,"Aardvark")))
# drop the leading vowels, keep the rest -> ['r', 'd', 'v', 'a', 'r', 'k']
print(list(itertools.dropwhile(vowel,"Aardvark")))
# take only the leading vowels -> ['A', 'a']
print(list(itertools.takewhile(vowel,"Aardvark")))
# keep chars whose selector is truthy (stops at the shorter input) -> ['A', 'r', 'd', 'a']
print(list(itertools.compress("Aardvark",(1,0,1,1,0,1))))
# first four chars -> ['A', 'a', 'r', 'd']
print(list(itertools.islice("Aardvark",4)))
# chars 4..6 -> ['v', 'a', 'r']
print(list(itertools.islice("Aardvark",4,7)))
# chars 1..6 stepping by 2 -> ['a', 'd', 'a']
print(list(itertools.islice("Aardvark",1,7,2)))
| true |