blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2ff442a9ccf66ba152b9664c8c2353ec7ff19386 | Python | heidonomm/mhopRL | /scripts/create_constrained_training_data.py | UTF-8 | 1,778 | 3.09375 | 3 | [] | no_license | import json
from collections import defaultdict
# file_to_read_from = "toy_data/verb_only/training_data.txt"
# file_to_write_to = "toy_data/verb_only/constrained_training_data.txt"
"""
get counts of predicates based on previous constrained dataset
"""
with open("toy_data/constrained_training_data.txt", "r") as in_file:
pred_counter = defaultdict(int)
for line in in_file:
row = json.loads(line)
for pred in row['fact1_pred']:
pred_counter[pred] += 1
for pred in row['fact2_pred']:
pred_counter[pred] += 1
"""
Script to remove all questions that have a fact associated with it
that only has ('be') as its predicate (because count says theres 332 in total)
also questions which have a fact which has a predicate which occurs less than X times
"""
# The input/output paths were commented out at the top of the module,
# leaving `file_to_read_from` / `file_to_write_to` undefined (NameError).
# Restore them from the commented-out defaults.
file_to_read_from = "toy_data/verb_only/training_data.txt"
file_to_write_to = "toy_data/verb_only/constrained_training_data.txt"

# Minimum number of occurrences a predicate needs in order to be kept.
threshold = 3

with open(file_to_read_from, 'r') as in_file, open(file_to_write_to, "w") as out_file:
    for index, sample in enumerate(in_file):
        row = json.loads(sample)

        # Keep only sufficiently frequent predicates for fact1; drop the
        # whole question if none survive.
        constrained_predicates = [
            pred for pred in row['fact1_pred'] if pred_counter[pred] >= threshold
        ]
        if not constrained_predicates:
            continue
        row['fact1_pred'] = constrained_predicates

        # Same filtering for fact2.
        constrained_predicates = [
            pred for pred in row['fact2_pred'] if pred_counter[pred] >= threshold
        ]
        if not constrained_predicates:
            continue
        row['fact2_pred'] = constrained_predicates

        # Reaching this point means every kept predicate occurs at least
        # `threshold` times, so the sample is written out.
        out_file.write(f"{json.dumps(row)}\n")
| true |
4f66705e24864ceaef28e8d51b1124cdf837ac94 | Python | shreyjj/RockPaperScissors | /RPS.py | UTF-8 | 1,002 | 3.828125 | 4 | [] | no_license | import random
# Each move maps to a number; the winner is decided by the parity of the
# difference between the two values (rock-paper-scissors-lizard-spock trick).
tracers = {"scissors":1, "paper":2, "rock":3, "lizard":4, "spock":5}
choice_names = ["scissors", "paper", "rock", "lizard", "spock"]

# Re-prompt until the input is valid.  (The original called `Print` — a
# NameError — validated only once, and then crashed with another NameError
# on `computer_choice` whenever the first answer was invalid.)
user_choice = input("Pick either rock, paper, scissors lizard or spock")
while user_choice not in choice_names:
    print("not a valid choice")
    user_choice = input("Pick either rock, paper, scissors lizard or spock")

computer_choice = random.choice(choice_names)
print("The computer picked", computer_choice)

user_value = tracers[user_choice]
computer_value = tracers[computer_choice]
number = user_value - computer_value

if number == 0:
    # Identical moves: a draw.  (The original fell into the even branch
    # here and wrongly printed "You lose".)
    print("You drew")
elif number % 2 == 0:
    # Even difference: the higher-valued move wins.
    if user_value > computer_value:
        print("Congratualtions, you won")
    else:
        print("You lose")
else:
    # Odd difference: the lower-valued move wins.
    if user_value < computer_value:
        print("Congratualtions, you won")
    else:
        print("You lose")
337570ff24ddabf7c3c9e6f16f0ef7a0ac8a2d6f | Python | DarisaLLC/python-code | /deep-learning/keras-functional/utils.py | UTF-8 | 381 | 2.71875 | 3 | [] | no_license | from scipy.io import loadmat
import os
# loads utk face images/labels from .mat file and returns python lists
def load_data(mat_path):
    """Load the UTK-face dataset from a .mat file.

    Returns a tuple ``(images, genders, ages, ethnicities, img_size)``,
    where the label arrays are the first row of their stored matrices and
    ``img_size`` is a scalar.
    """
    mat = loadmat(mat_path)
    images = mat["images"]
    genders = mat["genders"][0]
    ages = mat["ages"][0]
    ethnicities = mat["ethnicities"][0]
    img_size = mat["img_size"][0, 0]
    return images, genders, ages, ethnicities, img_size
# makes directory, catching exceptions
def mk_dir(dir):
    """Create directory *dir*, silently ignoring any OSError
    (most commonly: the directory already exists).

    NOTE: the parameter name shadows the ``dir`` builtin; it is kept
    unchanged for interface compatibility with existing callers.
    """
    try:
        os.mkdir(dir)
    except OSError:
        pass
| true |
14e4ba3d22b4f87b31c5b3319289b1ff826e0072 | Python | markelov-alex/py-sockets | /napalm/socket/test/test_parser.py | UTF-8 | 6,239 | 2.5625 | 3 | [] | no_license | from unittest import TestCase
from napalm.socket.parser import CommandParser
class TestCommandParser(TestCase):
    """Unit tests for ``napalm.socket.parser.CommandParser``.

    Exercises room-code (de)serialisation, the ``||`` / ``;;`` / ``,,`` /
    ``::`` delimited command format, ``##`` command separation, and the
    ``&dblstick&`` escaping of literal ``||`` inside payload strings.
    """

    def setUp(self):
        super().setUp()
        # Fresh parser per test; some helpers under test are static methods.
        self.parser = CommandParser()

    def test_parse_room_code(self):
        """Room codes are `gameid_variation_gametype_roomtype`; missing
        trailing parts default to -1, and variation may be non-numeric."""
        game_id, game_variation, game_type, room_type = CommandParser.parse_room_code("1_7_10_5")
        self.assertEqual(game_id, 1)
        self.assertEqual(game_variation, 7)  # can be numeric
        self.assertEqual(game_type, 10)
        self.assertEqual(room_type, 5)

        game_id, game_variation, game_type, room_type = CommandParser.parse_room_code("1_H_10_5")
        self.assertEqual(game_id, 1)
        self.assertEqual(game_variation, "H")
        self.assertEqual(game_type, 10)
        self.assertEqual(room_type, 5)

        # A trailing underscore yields an empty (hence -1) component.
        game_id, game_variation, game_type, room_type = CommandParser.parse_room_code("1_H_10_")
        self.assertEqual(game_id, 1)
        self.assertEqual(game_variation, "H")
        self.assertEqual(game_type, 10)
        self.assertEqual(room_type, -1)

        game_id, game_variation, game_type, room_type = CommandParser.parse_room_code("1_H_")
        self.assertEqual(game_id, 1)
        self.assertEqual(game_variation, "H")
        self.assertEqual(game_type, -1)
        self.assertEqual(room_type, -1)

        game_id, game_variation, game_type, room_type = CommandParser.parse_room_code("1_")
        self.assertEqual(game_id, 1)
        self.assertEqual(game_variation, -1)
        self.assertEqual(game_type, -1)
        self.assertEqual(room_type, -1)

        game_id, game_variation, game_type, room_type = CommandParser.parse_room_code("1")
        self.assertEqual(game_id, 1)
        self.assertEqual(game_variation, -1)
        self.assertEqual(game_type, -1)
        self.assertEqual(room_type, -1)

        # Empty input: every component defaults to -1.
        game_id, game_variation, game_type, room_type = CommandParser.parse_room_code("")
        self.assertEqual(game_id, -1)
        self.assertEqual(game_variation, -1)
        self.assertEqual(game_type, -1)
        self.assertEqual(room_type, -1)

    def test_make_room_code(self):
        """Serialisation drops omitted trailing components entirely."""
        room_code = CommandParser.make_room_code("1", "H", 10, 5)
        self.assertEqual(room_code, "1_H_10_5")

        room_code = CommandParser.make_room_code("1", "H", 10)
        self.assertEqual(room_code, "1_H_10")

        room_code = CommandParser.make_room_code("1", "H")
        self.assertEqual(room_code, "1_H")

        room_code = CommandParser.make_room_code("1")
        self.assertEqual(room_code, "1")

        room_code = CommandParser.make_room_code()
        self.assertEqual(room_code, "")

    def test_split_commands(self):
        """`##` separates commands; a trailing `##` yields an empty item."""
        commands = self.parser.split_commands("a||b||c##d||e||f||")
        self.assertEqual(commands, ["a||b||c", "d||e||f||"])

        commands = self.parser.split_commands("a||b||c##d||e||f||##")
        self.assertEqual(commands, ["a||b||c", "d||e||f||", ""])

    def test_parse_command(self):
        """`||` splits params, `;;` splits list/dict items, `,,` sub-lists,
        `::` key-value pairs; a key without `::` maps to None."""
        params_list = self.parser.parse_command(
            "1||k1::a,,b,,c;;k2::;;k3::v3;;k4||a,,b,,c,,d;;abc;;d,,e,,f;;g,,h||a,,b,,c")
        self.assertEqual(len(params_list), 4)
        self.assertEqual(params_list[0], "1")
        self.assertEqual(params_list[1], {"k1": ["a", "b", "c"], "k2": "", "k3": "v3", "k4": None})
        self.assertEqual(params_list[2], [["a", "b", "c", "d"], "abc", ["d", "e", "f"], ["g", "h"]])
        self.assertEqual(params_list[3], ["a", "b", "c"])

    def test_decode_string(self):
        """`&dblstick&` decodes back to a literal `||`."""
        string = self.parser.decode_string("some&dblstick&text")
        self.assertEqual(string, "some||text")

    def test_encode_string(self):
        """A literal `||` is escaped so it cannot split parameters."""
        string = self.parser.encode_string("some||text")
        self.assertEqual(string, "some&dblstick&text")

    def test_join_commands(self):
        string_commands = None  # (no state needed; simple concatenation check)
        commands = self.parser.join_commands(["1||param1||param2##", "4||param1||param2##"])
        self.assertEqual(commands, "1||param1||param2##4||param1||param2##")

    def test_make_command(self):
        """Nested lists/dicts serialise with `,,`/`;;`/`::` and a `##` suffix."""
        command_params = self.parser.make_command(["10", [3, 100, 200], 50,
                                                  [[23123, "name1", 2000], [65332, "name2", 2300]]])
        self.assertEqual(command_params, "10||3,,100,,200||50||23123,,name1,,2000;;65332,,name2,,2300##")

        command_params = self.parser.make_command(["10", {"0": "some", 5: ["a", "b", 7]}])
        self.assertEqual(command_params, "10||0::some;;5::a,,b,,7##")

    # protected

    def test_str_items(self):
        """Scalars stringify (True->"1", None->""); nested containers fall
        back to JSON encoding."""
        items = self.parser._str_items(["abc", 123, True, False, None,
                                        ["abc", 123, True, False, None],
                                        {"a": "abc", "b": 123, "c": True, "d": False, "e": None}
                                        ])
        self.assertEqual(items, ["abc", "123", "1", "0", "",
                                 '["abc", 123, true, false, null]',
                                 '{"a": "abc", "b": 123, "c": true, "d": false, "e": null}'
                                 ])

    def test_serialize_dict(self):
        string = self.parser._serialize_dict({"k1": "v1", "k2": ["a", "b", "c"]})
        self.assertEqual(string, "k1::v1;;k2::a,,b,,c")
        # Note: due to performance we use str() instead of _str_items()
        # string = self.parser._serialize_dict({"k1": "v1", "k2": ["a", 2, ["a", 2, "", True, False, None]],
        #                                       "k3": 123, "k4": None, "k5": {"a": "abc", "d": False, "e": None}})
        # self.assertEqual(string, 'k1::v1;;k2::a,,2,,["a", 2, "", true, false, null];;
        #                           k3::123;;k4::;;k5::{"a": "abc", "d": false, "e": null}')

    def test_serialize_complex_list(self):
        string = self.parser._serialize_complex_list([["a", "b", "c", ["a", 2, ""], 10], "v1"])
        self.assertEqual(string, 'a,,b,,c,,["a", 2, ""],,10;;v1')
        # Note: due to performance we use str() instead of _str_items()
        # string = self.parser._serialize_complex_list([["a", "", 2, True, False, None,
        #                                                ["a", 2, "", True, False, None]],
        #                                               "v1"])
        # self.assertEqual(string, 'a,,,,2,,1,,0,,,,["a", 2, "", true, false, null];;v1')
| true |
d528884c48defecc949942d010c6077c107507f6 | Python | kokorinosoba/contests | /AtCoder/ABC2/ABC259/C.py | UTF-8 | 214 | 2.953125 | 3 | [] | no_license | s=input()
t = input()
ans = "Yes"

# The two strings must use exactly the same set of characters.
if set(s) != set(t):
    print("No")
    exit(0)

for ch in set(s):
    count_s = s.count(ch)
    count_t = t.count(ch)
    if count_s == count_t:
        continue
    # A character can only gain copies, and only when s already has at
    # least two of it; otherwise the transformation is impossible.
    if count_s > count_t or count_s < 2:
        ans = "No"
        break

print(ans)
6da176a4cf7f111aed92bd7e79981e597d69ef83 | Python | ViniciusTrajano/Projeto-P1_LP1 | /projeto/tela inicial.py | UTF-8 | 1,839 | 3 | 3 | [] | no_license | import pygame
import sys
from pygame.locals import *
largura, altura = 800 , 500
relogio = pygame.time.Clock()
branco=(255,255,255)
preto=(0,0,0)
vermelho=(255,0,0)
verde=(0,255,0)
azul=(0,0,255)
tela = pygame.display.set_mode((largura,altura))
def tela_inicial(cor):
    """Show the start screen and return when the player clicks "jogar".

    Clicking "Sair" or closing the window quits the program.
    *cor* is the RGB colour used for the on-screen text.
    """
    baner = pygame.image.load('imagens/imagem.jpg')
    gato = pygame.image.load('imagens/gato.png')
    pygame.init()
    tela = pygame.display.set_mode((largura, altura))

    def texto(msg, cor, tam, x, y):
        # Render *msg* at (x, y) on this screen's surface.
        font = pygame.font.SysFont(None, tam)
        texto1 = font.render(msg, True, cor)
        tela.blit(texto1, [x, y])

    rodando = True
    while rodando:
        relogio.tick(60)
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                x, y = pygame.mouse.get_pos()
                if 177 <= x < 319 and 302 <= y < 349:
                    # "jogar" button.  The original used a bare `break`,
                    # which only exited the event `for` loop, so the menu
                    # never actually ended; the flag ends the outer loop.
                    rodando = False
                    break
                elif 500 <= x < 624 and 302 <= y < 349:
                    # "Sair" button: quit the game.
                    pygame.quit()
                    sys.exit()
        if not rodando:
            break
        tela.blit(baner, (0, 0))
        tela.blit(gato, (70, 50))
        texto('BEM VINDO AO SAD CAT SPACE', cor, 40, 250, 100)
        pygame.draw.rect(tela, (0, 0, 0), [177, 302, 141, 47])
        pygame.draw.rect(tela, (0, 0, 0), [500, 302, 125, 47])
        texto("jogar(C)", cor, 50, 180, 307)
        texto("Sair(S)", cor, 50, 505, 307)
        pygame.display.update()
def texto(msg, cor, tam, x, y):
    """Draw *msg* in colour *cor* at font size *tam* at position (x, y)
    on the module-level screen surface `tela`."""
    fonte = pygame.font.SysFont(None, tam)
    superficie = fonte.render(msg, True, cor)
    tela.blit(superficie, [x, y])
tela_inicial(vermelho)
| true |
f4aa33f872bbd34356a5a7bad748e77f823ba923 | Python | catboost/catboost | /contrib/python/Pygments/py3/pygments/lexers/whiley.py | UTF-8 | 4,018 | 2.671875 | 3 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | """
pygments.lexers.whiley
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Whiley language.
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Comment, Keyword, Name, Number, Operator, \
Punctuation, String, Text
__all__ = ['WhileyLexer']
class WhileyLexer(RegexLexer):
    """
    Lexer for the Whiley programming language.

    .. versionadded:: 2.2
    """
    name = 'Whiley'
    url = 'http://whiley.org/'
    filenames = ['*.whiley']
    aliases = ['whiley']
    mimetypes = ['text/x-whiley']

    # See the language specification:
    # http://whiley.org/download/WhileyLanguageSpec.pdf

    # Two-state machine: 'root' tokenises everything outside string
    # literals; an opening '"' pushes the 'string' state, which handles
    # escapes until the closing '"' pops back to 'root'.
    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Text),

            # Comments
            (r'//.*', Comment.Single),
            # don't parse empty comment as doc comment
            (r'/\*\*/', Comment.Multiline),
            (r'(?s)/\*\*.*?\*/', String.Doc),
            (r'(?s)/\*.*?\*/', Comment.Multiline),

            # Keywords
            (words((
                'if', 'else', 'while', 'for', 'do', 'return',
                'switch', 'case', 'default', 'break', 'continue',
                'requires', 'ensures', 'where', 'assert', 'assume',
                'all', 'no', 'some', 'in', 'is', 'new',
                'throw', 'try', 'catch', 'debug', 'skip', 'fail',
                'finite', 'total'), suffix=r'\b'), Keyword.Reserved),
            (words((
                'function', 'method', 'public', 'private', 'protected',
                'export', 'native'), suffix=r'\b'), Keyword.Declaration),

            # "constant" & "type" are not keywords unless used in declarations
            (r'(constant|type)(\s+)([a-zA-Z_]\w*)(\s+)(is)\b',
             bygroups(Keyword.Declaration, Text, Name, Text, Keyword.Reserved)),

            (r'(true|false|null)\b', Keyword.Constant),
            (r'(bool|byte|int|real|any|void)\b', Keyword.Type),

            # "from" is not a keyword unless used with import
            (r'(import)(\s+)(\*)([^\S\n]+)(from)\b',
             bygroups(Keyword.Namespace, Text, Punctuation, Text, Keyword.Namespace)),
            (r'(import)(\s+)([a-zA-Z_]\w*)([^\S\n]+)(from)\b',
             bygroups(Keyword.Namespace, Text, Name, Text, Keyword.Namespace)),
            (r'(package|import)\b', Keyword.Namespace),

            # standard library: https://github.com/Whiley/WhileyLibs/
            (words((
                # types defined in whiley.lang.Int
                'i8', 'i16', 'i32', 'i64',
                'u8', 'u16', 'u32', 'u64',
                'uint', 'nat',

                # whiley.lang.Any
                'toString'), suffix=r'\b'), Name.Builtin),

            # byte literal
            (r'[01]+b', Number.Bin),

            # decimal literal
            (r'[0-9]+\.[0-9]+', Number.Float),
            # match "1." but not ranges like "3..5"
            (r'[0-9]+\.(?!\.)', Number.Float),

            # integer literal
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),

            # character literal
            (r"""'[^\\]'""", String.Char),
            (r"""(')(\\['"\\btnfr])(')""",
             bygroups(String.Char, String.Escape, String.Char)),

            # string literal (push into the 'string' state below)
            (r'"', String, 'string'),

            # operators and punctuation
            (r'[{}()\[\],.;]', Punctuation),
            (r'[+\-*/%&|<>^!~@=:?'
             # unicode operators
             r'\u2200\u2203\u2205\u2282\u2286\u2283\u2287'
             r'\u222A\u2229\u2264\u2265\u2208\u2227\u2228'
             r']', Operator),

            # identifier
            (r'[a-zA-Z_]\w*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\[btnfr]', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\.', String),
            (r'[^\\"]+', String),
        ],
    }
| true |
63a51fecf4d3e169bfd8e8ba5eee8b64665511b8 | Python | AranGarcia/Cookie | /text-normalizer/run.py | UTF-8 | 1,685 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Text normalizing script.
The input is any of the files stored in the Shepard repository, as they need to be preprocessed
before any information extraction or ETL processes can be performed upon them. This script will
do the following:
1) Stop-words filtering
Removal of common words that are redundant and do not give much meaning to the text.
2) Lemmatization
Since words can be derived into multiple conjugations, normalization to the lemma of each
word will provide the main text data source for information retrieval.
Important resources:
https://www.analyticsvidhya.com/blog/2019/08/how-to-remove-stopwords-text-normalization-nltk-spacy-gensim-python/
"""
import sys
# Spacy
import spacy
# YAML
from yaml import Loader, dump, load
if len(sys.argv) < 2:
print("Usage\n\trun.py FILE [DEST]")
exit(1)
# Unserialize data from YAML file
fname = sys.argv[1]
with open(fname) as f:
data = load(f, Loader=Loader)
nlp = spacy.load("es_core_news_sm")
def iter_items(items):
    """Recursively normalise the ``text`` field of every legal-file item.

    Each item's text is lower-cased and run through the module-level spaCy
    pipeline ``nlp``; stop words and punctuation are dropped and the
    remaining tokens are replaced by their lemmas (modified in place).
    Child items under ``content.items`` are processed recursively.
    """
    for item in items:
        doc = nlp(item["text"].lower())
        lemmas = [
            tok.lemma_
            for tok in doc
            if not tok.is_stop and not tok.is_punct
        ]
        item["text"] = " ".join(lemmas)

        nested = item.get("content", {}).get("items")
        if nested:
            iter_items(nested)
# Remove stop words
iter_items(data["items"])
# Reserialize in another file
fname = "result.yaml"
with open(fname, "w") as f:
dump(data, stream=f, encoding="utf-8", allow_unicode=True)
| true |
3584e8bc29933de885a7a10f68fde75f0a57cfe6 | Python | RaulAstudillo06/astudillo_raul_orie6125 | /hw2/problem3/my_search.py | UTF-8 | 907 | 3.921875 | 4 | [] | no_license | import numpy as np
from binary_search import binary_search
def my_search(arr, item):
    """
    Search *item* in an array of the form described in problem 3.

    The range is narrowed until one half is known to be sorted and to
    bracket *item*, in which case the search is delegated to
    ``binary_search`` on that half; once at most three candidates remain
    they are scanned linearly.

    :param arr: list of numbers as described in problem 3.
    :param item: number to be searched.
    :return: index of *item* in ``arr``, or ``None`` if absent
             (a message is printed in that case).
    """
    l = 0
    r = len(arr) - 1
    while r - l > 2:
        # Integer midpoint; avoids the float round-trip of np.floor((l+r)/2).
        m = (l + r) // 2
        if arr[m] > arr[r]:
            # Left half [l, m-1] is sorted.
            if arr[l] <= item <= arr[m - 1]:
                return l + binary_search(arr[l:m], item)
            l = m
        else:
            # Right half [m+1, r] is sorted.
            if arr[m + 1] <= item <= arr[r]:
                return m + 1 + binary_search(arr[m + 1:r + 1], item)
            r = m
    # At most three candidates remain: scan them directly.
    for k in range(l, r + 1):
        if arr[k] == item:
            return k
    print('{} is not in input array.'.format(item))
    return None
| true |
08eed0d3c9681eaedaf0690728ef7d72319da9ec | Python | nikitaboyko/HOG-SVM-Python3-object-detector | /gather_annotations.py | UTF-8 | 3,336 | 2.8125 | 3 | [] | no_license | import numpy as np
import cv2
import argparse
from imutils.paths import list_images
import os
from tqdm import tqdm
class BoxSelector(object):
    """Interactive bounding-box selector for an OpenCV window.

    Click-drag with the left mouse button on the named window to draw a
    rectangle over the displayed image; the chosen corners are exposed
    (ordered top-left, bottom-right) via the ``roiPts`` property.
    """

    def __init__(self, image, window_name,color=(0,0,255)):
        #store image and an original copy
        self.image = image
        self.orig = image.copy()

        #capture start and end point co-ordinates
        self.start = None
        self.end = None

        #flag to indicate tracking
        self.track = False
        self.color = color
        self.window_name = window_name

        #hook callback to the named window
        cv2.namedWindow(self.window_name)
        cv2.setMouseCallback(self.window_name,self.mouseCallBack)

    def mouseCallBack(self, event, x, y, flags, params):
        """OpenCV mouse callback implementing the drag state machine."""
        #start tracking if left-button-clicked down
        if event==cv2.EVENT_LBUTTONDOWN:
            self.start = (x,y)
            self.track = True

        #capture/end tracking while mouse-move or left-button-click released
        elif self.track and (event==cv2.EVENT_MOUSEMOVE or event==cv2.EVENT_LBUTTONUP):
            self.end = (x,y)
            if not self.start==self.end:
                # Redraw on a fresh copy so only the current rectangle shows.
                self.image = self.orig.copy()
                #draw rectangle on the image
                cv2.rectangle(self.image, self.start, self.end, self.color, 2)
                if event==cv2.EVENT_LBUTTONUP:
                    self.track=False

        #in case of clicked accidently, reset tracking
        else:
            self.image = self.orig.copy()
            self.start = None
            self.track = False

        cv2.imshow(self.window_name,self.image)

    @property
    def roiPts(self):
        """Return [(top-left), (bottom-right)] of the drawn box, or [] if none."""
        if self.start and self.end:
            pts = np.array([self.start,self.end])
            # Order corners by coordinate sum: the smallest sum is the
            # top-left corner, the largest the bottom-right.
            s = np.sum(pts,axis=1)
            (x,y) = pts[np.argmin(s)]
            (xb,yb) = pts[np.argmax(s)]
            return [(x,y),(xb,yb)]
        else:
            return []
#parse arguments
# Hard-coded input/output locations (the original CLI parsing below was
# commented out in favour of these fixed paths).
dataset_path = r'd:\Users\boykni\Desktop\object_detection\Object-Detector-master\dataset'
annotations_path = r"d:\Users\boykni\Desktop\object_detection\Object-Detector-master\annot.npy"
images_path = r"d:\Users\boykni\Desktop\object_detection\Object-Detector-master\images.npy"

# ap = argparse.ArgumentParser()
# ap.add_argument("-d","--dataset",required=True,help="path to images dataset...")
# ap.add_argument("-a","--annotations",required=True,help="path to save annotations...")
# ap.add_argument("-i","--images",required=True,help="path to save images")
# args = vars(ap.parse_args())

print('hi')  # leftover debug output

#annotations and image paths
annotations = []
imPaths = []

#loop through each image and collect annotations
for im in tqdm(os.listdir(dataset_path)):
    imagePath = os.path.join(dataset_path,im)

    #load image and create a BoxSelector instance
    image = cv2.imread(imagePath)
    bs = BoxSelector(image,"Image")

    # Block until the user has drawn a box and pressed a key.
    cv2.imshow("Image",image)
    cv2.waitKey(0)

    #order the points suitable for the Object detector
    pt1,pt2 = bs.roiPts
    (x,y,xb,yb) = [pt1[0],pt1[1],pt2[0],pt2[1]]
    annotations.append([int(x),int(y),int(xb),int(yb)])
    imPaths.append(imagePath)

#save annotations and image paths to disk
annotations = np.array(annotations)
imPaths = np.array(imPaths,dtype="unicode")
np.save(annotations_path, annotations)
np.save(images_path,imPaths)
| true |
68d668f5cb045b5455e65ac540d89e9c621a8ba8 | Python | id774/sandbox | /python/cycle.py | UTF-8 | 135 | 3.1875 | 3 | [] | no_license | from itertools import cycle
import numpy as np
# Pair each index 0..99 with the letters A-H, repeating the letters
# cyclically (zip stops at the shorter, finite range).
letters = cycle('ABCDEFGH')
for idx in np.arange(0, 100):
    print(idx, next(letters))
| true |
35f33dc9329e2acbe32d95f5b04eeede714cbbd7 | Python | gragragrao/training_deeplearning | /univ_homework/mnist_recog2.py | UTF-8 | 6,256 | 2.9375 | 3 | [] | no_license | def homework(train_X, train_y, test_X):
rng = np.random.RandomState(1234)
random_state = 42
class Autoencoder:
def __init__(self, vis_dim, hid_dim, W, function=lambda x: x):
self.W = W
self.a = tf.Variable(np.zeros(vis_dim).astype('float32'), name='a')
self.b = tf.Variable(np.zeros(hid_dim).astype('float32'), name='b')
self.function = function
self.params = [self.W, self.a, self.b]
def encode(self, x):
u = tf.matmul(x, self.W) + self.b
return self.function(u)
def decode(self, x):
u = tf.matmul(x, tf.transpose(self.W)) + self.a
return self.function(u)
def f_prop(self, x):
y = self.encode(x)
return self.decode(y)
def reconst_error(self, x, noise):
tilde_x = x * noise
reconst_x = self.f_prop(tilde_x)
error = -tf.reduce_mean(tf.reduce_sum(x * tf.log(reconst_x) + (1. - x) * tf.log(1. - reconst_x), axis=1))
return error, reconst_x
class Dense:
def __init__(self, in_dim, out_dim, function):
self.W = tf.Variable(rng.uniform(low=-0.08, high=0.08, size=(in_dim, out_dim)).astype('float32'), name='W')
self.b = tf.Variable(np.zeros([out_dim]).astype('float32'))
# m, v はもしかしたら乱数の方がいいのかもしれない
self.mW = tf.Variable(np.zeros((in_dim, out_dim)).astype('float32'))
self.mb = tf.Variable(np.zeros([out_dim]).astype('float32'))
self.vW = tf.Variable(np.zeros((in_dim, out_dim)).astype('float32'))
self.vb = tf.Variable(np.zeros([out_dim]).astype('float32'))
self.function = function
self.params = [self.W, self.b]
self.train_params = [self.W, self.b, self.mW, self.mb, self.vW, self.vb]
self.ae = Autoencoder(in_dim, out_dim, self.W, self.function)
def f_prop(self, x):
u = tf.matmul(x, self.W) + self.b
self.z = self.function(u)
return self.z
def pretrain(self, x, noise):
cost, reconst_x = self.ae.reconst_error(x, noise)
return cost, reconst_x
class Adam:
def __init__(self, layers, cost, epoch):
self.layers = layers
self.cost = cost
self.alpha = 10 ** (-3)
self.beta = 0.9
self.gamma = 0.999
def shape_params(self):
all_params = []
for layer in self.layers:
all_params.append(layer.train_params)
return all_params
def updates(self, epoch):
all_params = self.shape_params
g_params = tf.gradients(all_params)
updates = []
for param, g_param in zip(all_params, g_params):
_W, _b, _mW, _mb, _vW, _vb = param
gW, gb, gmW, gmb, gvW, gvb = g_param
mW_hat = _mW / (1 - self.gamma ** epoch)
mb_hat = _mb / (1 - self.beta ** epoch)
vW_hat = _vW / (1 - self.gamma ** epoch)
vb_hat = _vb / (1 - self.beta ** epoch)
updates.append(_mW.assign(_mW * self.gamma - (1 - self.gamma) * gmW))
updates.append(_mb.assign(_mb * self.gamma - (1 - self.gamma) * gmb))
updates.append(_vW.assign(_vW * self.beta - (1 - self.beta) * gvW * gvW))
updates.append(_vb.assign(_vb * self.beta - (1 - self.beta) * gvb * gvb))
updates.append(W.assign_add(-self.alpha * vW_hat / mW_hat))
updates.append(b.assign_add(-self.alpha * vb_hat / mb_hat))
return updates
layers = [
Dense(784, 500, tf.nn.sigmoid),
Dense(500, 500, tf.nn.sigmoid),
Dense(500, 500, tf.nn.sigmoid),
Dense(500, 10, tf.nn.softmax)
]
def sgd(cost, params, eps=np.float32(0.1)):
g_params = tf.gradients(cost, params)
updates = []
for param, g_param in zip(params, g_params):
if g_param is not None:
updates.append(param.assign_add(-eps * g_param))
return updates
X = np.copy(train_X)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for l, layer in enumerate(layers[:-1]):
corruption_level = np.float(0.3)
batch_size = 100
n_batches = X.shape[0] // batch_size
n_epochs = 20
x = tf.placeholder(tf.float32)
noise = tf.placeholder(tf.float32)
cost, reconst_x = layer.pretrain(x, noise)
params = layer.params
train = sgd(cost, params)
encode = layer.f_prop(x)
for epoch in range(n_epochs):
X = shuffle(X, random_state=random_state)
err_all = []
for i in range(n_batches):
start = i * batch_size
end = start + batch_size
_noise = rng.binomial(size=X[start:end].shape, n=1, p=1 - corruption_level)
_, err = sess.run([train, cost], feed_dict={x: X[start:end], noise: _noise})
err_all.append(err)
X = sess.run(encode, feed_dict={x: X})
x = tf.placeholder(tf.float32, [None, 784])
t = tf.placeholder(tf.float32, [None, 10])
epoch = tf.placeholder(tf.float32)
def f_props(layers, x):
params = []
for layer in layers:
x = layer.f_prop(x)
params += layer.params
return x, params
y, params = f_props(layers, x)
cost = -tf.reduce_mean(tf.reduce_sum(t * tf.log(y), 1))
adam = Adam(layers, cost, epoch)
updates = adam.updates(epoch)
train = tf.group(*updates)
valid = tf.argmax(y, 1)
n_epochs = 50
batch_size = 100
n_batches = train_X.shape[0] // batch_size
for epoch in range(n_epochs):
train_X, train_y = shuffle(train_X, train_y, random_state=random_state)
for i in range(n_batches):
start = i * batch_size
end = start + batch_size
sess.run(train, feed_dict={x: train_X[start:end], t: train_y[start:end], epoch: epoch})
pred_y = sess.run(valid, feed_dict={x: test_X})
return pred_y
| true |
26139c1e8caeb1a74bacc4deba0c7c1020dfab5c | Python | KirstChat/how-till-spake-norn-irish | /app.py | UTF-8 | 8,081 | 2.65625 | 3 | [] | no_license | import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
@app.route("/")
@app.route("/how_till_spake_norn_irish")
# Render Home Page
def home():
    """Render the landing page."""
    return render_template('index.html')


@app.route("/our_wee_guide")
# Render Dictionary Page
def dictionary():
    """Show every word stored in the dictionary collection."""
    dictionary = mongo.db.dictionary.find()
    return render_template("dictionary.html", dictionary=dictionary)


@app.route("/search", methods=["GET", "POST"])
# Search Functionality
def search():
    """Full-text search of the dictionary using the form's `search` field.

    Relies on a MongoDB text index existing on the collection.
    """
    search = request.form.get("search")
    dictionary = mongo.db.dictionary.find({"$text": {"$search": search}})
    return render_template("dictionary.html", dictionary=dictionary)


"""
Code below is from W3 Schools on using the MongoDB sort function:
https://www.w3schools.com/python/python_mongodb_sort.asp
"""


@app.route("/ascending")
# Ascending Sort Functionality
def ascending():
    """List dictionary words sorted A-Z by the `word` field."""
    dictionary = mongo.db.dictionary.find().sort("word", 1)
    return render_template("dictionary.html", dictionary=dictionary)


@app.route("/descending")
# Descending Sort Functionality
def descending():
    """List dictionary words sorted Z-A by the `word` field."""
    dictionary = mongo.db.dictionary.find().sort("word", -1)
    return render_template("dictionary.html", dictionary=dictionary)
@app.route("/sign_up", methods=["GET", "POST"])
# Render Sign Up Page
def sign_up():
    """Register a new user, log them in, and redirect to their profile.

    GET renders the form; POST validates username uniqueness, stores the
    hashed password, and starts a session.
    """
    if request.method == "POST":
        """
        Check if the username already exists in
        MongoDB user_profile collection
        """
        existing_user = mongo.db.user_profile.find_one(
            {"username": request.form.get("username")})

        if existing_user:
            # Display flash message if username already exists
            flash("Sorry, that wee username is already being used!", "error")
            return redirect(url_for("sign_up"))

        # Add new user details to collection
        new_user = {
            "first_name": request.form.get("first_name").capitalize(),
            "username": request.form.get("username"),
            # Only a salted hash of the password is stored, never plaintext.
            "password": generate_password_hash(request.form.get("password"))
        }
        # Add new user details to user_profile collection in MongoDB
        mongo.db.user_profile.insert_one(new_user)

        # Put the new user into a session cookie
        session["user"] = request.form.get("username")
        # Display flash message if sign up is successful
        flash(
            "You've successfully created a wee account with us. "
            "Welcome to your profile page!",
            "success")
        # Redirect user to their profile
        return redirect(url_for("profile", username=session["user"]))

    return render_template("sign_up.html")
@app.route("/login", methods=["GET", "POST"])
# Render Login Page
def login():
    """Authenticate a user and start their session.

    The same deliberately vague flash message is used for both a wrong
    password and an unknown username, so usernames cannot be enumerated.
    """
    if request.method == "POST":
        # Check if username exists in MongoDB user_profile collection
        existing_user = mongo.db.user_profile.find_one(
            {"username": request.form.get("username")})

        if existing_user:
            # Check if hashed password matches user input
            if check_password_hash(
                    existing_user["password"], request.form.get("password")):
                """
                Put existing user into a 'session' cookie
                using first_name and username
                """
                session["user"] = request.form.get("username")
                flash("Welcome back to your wee profile!", "welcome")
                # Redirect user to their profile
                return redirect(url_for("profile", username=session["user"]))

            # Display flash message if password doesn't match input
            else:
                flash(
                    "That's the wrong username/password ya melter!",
                    "incorrect")
                return redirect(url_for("login"))

        # Display flash message if username doesn't exist
        else:
            flash("That's the wrong username/password ya melter!", "incorrect")
            return redirect(url_for("login"))

    return render_template("login.html")
@app.route("/logout")
# Logout Functionality
def logout():
    """End the current user's session and redirect to the login page."""
    # Display flash message if user has been logged out
    flash("Ach, you've logged out of your wee account!", "logout")
    # Remove user from session cookies
    session.pop("user")
    return redirect(url_for("login"))
@app.route("/profile/<username>", methods=["GET", "POST"])
# Render Profile Page
def profile(username):
    """Show the logged-in user's profile and the words they have added.

    NOTE(review): the `username` URL argument is immediately overwritten
    with the session user's first name, so this route always shows the
    session user's own profile regardless of the URL — confirm intended.
    """
    # Grab the session users first_name from MongoDB user_profile collection
    username = mongo.db.user_profile.find_one(
        {"username": session["user"]})["first_name"]
    # Get dictionary to display words added by user
    dictionary = mongo.db.dictionary.find(
        {"added_by": session["user"]})
    if session["user"]:
        return render_template(
            "profile.html",
            dictionary=dictionary, username=username)

    return redirect(url_for("login"))
@app.route("/add_word", methods=["GET", "POST"])
# Render Add Word Page
def add_word():
    """Add a new word to the dictionary (GET shows the form, POST saves).

    Duplicate words (exact match on the submitted `word`) are rejected
    with a flash message.
    """
    if request.method == "POST":
        existing_word = mongo.db.dictionary.find_one(
            {"word": request.form.get("word")})

        if existing_word:
            # Display flash message if word already exists
            flash(
                "Sorry, that word is already in Our Wee Guide",
                "word_exists")
            return redirect(url_for("add_word"))

        word = {
            "word": request.form.get("word").capitalize(),
            "definition": request.form.get("definition").capitalize(),
            "example": request.form.get("example").capitalize(),
            # Record the submitting user so their profile can list it.
            "added_by": session["user"]
        }
        # Add word to dictionary
        mongo.db.dictionary.insert_one(word)
        # Display flash message if word has been added successfully
        flash("Your word has been added to Our Wee Guide!", "add")
        return redirect(url_for("dictionary"))

    return render_template("add_word.html")
@app.route("/edit_word/<word_id>", methods=["GET", "POST"])
# Render Edit Word Page
def edit_word(word_id):
    """Edit an existing word: GET renders the form, POST saves the changes."""
    if request.method == "POST":
        submit = {
            "word": request.form.get("word"),
            "definition": request.form.get("definition"),
            "example": request.form.get("example"),
            "added_by": request.form.get("added_by")
        }
        # Replace the stored document wholesale.  Collection.update() was
        # deprecated and removed in PyMongo 4; replace_one() is the direct
        # equivalent for a full-document (non-$-operator) update.
        mongo.db.dictionary.replace_one({"_id": ObjectId(word_id)}, submit)
        # Display flash message if word has been successfully updated
        flash("You've successfully updated a word in Our Wee Guide", "edit")
        return redirect(url_for("dictionary"))

    word = mongo.db.dictionary.find_one({"_id": ObjectId(word_id)})
    return render_template("edit_word.html", word=word)
@app.route("/delete_word/<word_id>")
# Delete Functionality
def delete_word(word_id):
    """Remove one word from the dictionary and return to the full list."""
    # Collection.remove() was deprecated and removed in PyMongo 4;
    # delete_one() deletes exactly the single matched document.
    mongo.db.dictionary.delete_one({"_id": ObjectId(word_id)})
    flash("Your word has been deleted from Our Wee Guide", "delete")
    return redirect(url_for("dictionary"))
@app.route("/contact_us")
# Render Contact Us Page
def contact():
    """Render the static contact page."""
    return render_template("contact.html")


# Error Handlers
"""
Code is from Flask documentation for Error Handlers:
https://flask.palletsprojects.com/en/1.1.x/patterns/errorpages/
"""


@app.errorhandler(404)
# 404 Error Handler
def page_not_found(e):
    """Serve the custom 404 page."""
    return render_template("404.html"), 404


@app.errorhandler(500)
# 500 Error Handler
def internal_server_error(e):
    """Serve the custom 500 page."""
    return render_template("500.html"), 500
if __name__ == "__main__":
    # Host and port come from the environment (IP / PORT, Heroku-style
    # config); debug stays off so stack traces are never exposed.
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT")),
            debug=False)
| true |
d83e594e01234ac5929fc8204c85c9754323f7ce | Python | mikedim/RSA-ElGamal-Demo | /elgAlice.py | UTF-8 | 1,093 | 3.375 | 3 | [] | no_license | ######Alice ElGamal######
import support
print("------WELCOME ALICE: ElGamal Encryption------")
print("Generating keys, this may take a few seconds")
#Generate large prime n
#paremeter=num of digits
n=support.primegen(20)
#Alice randomly choose generator g and secret key a
g=support.getprimroot(n)
a=int(support.privkeygen(n,g))
#Alice calculate ga=g**a%p to send Alice-->Bob
ga=support.fastexpo(g,a,n)
#Alice sends to Bob: prime n, generator g, ga
print("Alice sends to Bob: ")
print("N= " + str(n))
print("g= " + str(g))
print("g^a= " + str(ga))
#print(n,g,ga)
print("Alice keeps private key a: ")
print(a)
print("Enter gb received from Bob: ")
gb=input()
print("Alice: encrypt (1) or decrypt (2) message?")
entry=input()
if entry==1:
print("Enter integer to encrypt: ")
xin=input()
ex=support.elgencrypt(n,gb,a,xin)
print("Ciphertext message: ")
print(ex)
elif entry==2:
print("Enter ciphertext message: ")
ex=input()
xout=support.elgdecrypt(n,gb,a,ex)
print("Decrypted message: ")
print(xout)
else:
print("Input error, try again")
| true |
064fdceeab268355f55f737dce9b60213fecf0ed | Python | hjlevy/picar | /test scripts/wait_test_multip.py | UTF-8 | 1,057 | 3.40625 | 3 | [] | no_license | ### This code breaks a wait statement in a function,
### and recognizes if esc is pressed everything should be stopped
### It uses multiprocessing to simultaneously run a function collecting data and performing a straight movement
#note: doesn't work :(
import multiprocessing
from threading import Event
# import keyboard module.
import keyboard
import signal
import sys
exit = Event()
def straight():
print("moving straight")
exit.wait(10)
def get_data():
if keyboard.read_key() == 'space':
print('set')
exit.set()
else:
exit.clear()
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
exit.set()
sys.exit(0)
def main():
straight()
print("All done!")
# perform any cleanup here
if __name__ == '__main__':
# try:
# signal.signal(signal.SIGINT, signal_handler)
p1 = multiprocessing.Process(target=get_data, args=())
p2 = multiprocessing.Process(target=main, args=())
p1.start()
p2.start()
try:
p1.join()
p2.join()
except KeyboardInterrupt:
p1.terminate() # sends a SIGTERM
p2.terminate()
| true |
4d3b6e0303cd790876fce7f68638ff37f7f4f3cb | Python | Jack0427/python_basic | /FileTest/read.py | UTF-8 | 645 | 3.765625 | 4 | [] | no_license | # 文件一定要存在 中文的話要使用utf-8
f = open('text.txt', encoding="utf-8")
# a = f.read() 效率差
# b = f.readline() 一次只讀取一行 下次執行會讀取下一行 會標記指針 使用f.close() f.seek(0) 可以清除指針 在運行會從第一行開始
# c = f.readlines() 回傳list 若文件很大內存會爆炸
# for line in f:
# print(line, end='.')
# for line in f:
# print(line, end='.') # 文件打開後就會標記指針 指針已經到最後就不會再次讀取了
# f.close()
with open('text.txt', encoding="utf-8") as f: # 使用with 就不需使用close
for line in f:
print(line)
| true |
e8a4562207c8c209493d1d730766a1ee72881ea3 | Python | gkantsidis/Utils | /PL/Python/CG/Productivity/Documents/PDF/split.py | UTF-8 | 2,510 | 3.578125 | 4 | [
"Apache-2.0"
] | permissive | """
Split a PDF file into multiple files
"""
import argparse
import os
import sys
from typing import List, NamedTuple
from csv import DictReader
from pathlib import Path
from PyPDF3 import PdfFileWriter, PdfFileReader
Chapter = NamedTuple('Chapter',
[
('name', str),
('start', int),
('stop', int)
])
def _parse_splits_file(filename: str, newline='') -> List[Chapter]:
assert os.path.exists(filename)
output: List[Chapter] = []
with open(filename, newline='') as file:
reader = DictReader(file)
for row in reader:
name = row['name']
start = int(row['start'])
stop = int(row['end'])
entry = Chapter(name, start, stop)
output.append(entry)
return output
def split(filename: str, chapters: List[Chapter], directory: str) -> None:
assert os.path.exists(filename)
for chapter in chapters:
print(f'Writing to {chapter.name} pages {chapter.start} to {chapter.stop}')
# The file should have been opened before the previous loop,
# however, there is a bug in the library, and this is the only way to
# get around it.
with open(filename, 'rb') as input_stream:
input_pdf = PdfFileReader(input_stream)
output = PdfFileWriter()
for page_number in range(chapter.start-1, chapter.stop):
page = input_pdf.getPage(page_number)
output.addPage(page)
output_filename = os.path.join(directory, chapter.name)
with open(output_filename, "wb") as output_stream:
output.write(output_stream)
output_stream.flush()
if __name__ == '__main__':
_parser = argparse.ArgumentParser(description='Split pages of a PDF file')
_parser.add_argument('filename', type=str, help='File to split')
_parser.add_argument('chapters', type=str, help='List of splits to create (in CSV format)')
args = _parser.parse_args()
if os.path.exists(args.filename) is False:
sys.stderr.write(f'Input file {args.filename} does not exist')
exit(-1)
if os.path.exists(args.chapters) is False:
sys.stderr.write(f'Chapter file {args.chapters} already exists')
exit(-1)
_directory = Path(args.filename).parent.absolute()
_chapters = _parse_splits_file(args.chapters)
split(args.filename, _chapters, _directory)
| true |
6722156e356e8bbdad1fec6c6446152d83044b19 | Python | SSymbol/homework3 | /cluster.py | UTF-8 | 2,382 | 2.828125 | 3 | [] | no_license |
#coding=utf-8
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.cluster import KMeans, MeanShift, MiniBatchKMeans
from sklearn.metrics import classification_report
from sklearn import preprocessing
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
data = pd.read_csv('e:/hw3/data/train.csv')
result = open(r'e:/hw3/result/cluter_result.txt','a+')
#预处理
data.drop(['Name'], 1, inplace=True)
data.convert_objects(convert_numeric=True)
data.fillna(0, inplace = True )
label = data['Survived']
data.drop(['Survived'], 1, inplace=True)
#对类别型特征进行转化,成为特征向量
vec=DictVectorizer(sparse=False)
_data=vec.fit_transform(data.to_dict(orient='record'))
_data = preprocessing.scale(_data)
#使用KMean进行聚类模型的训练以及预测分析
clu_kmeans = KMeans(n_clusters=2)
kmeans_pred = clu_kmeans.fit_predict(_data)
print('KMeans:\n',classification_report(kmeans_pred,label))
print('KMeans:\n',classification_report(kmeans_pred,label),file=result)
#使用MeanShift进行聚类模型的训练以及预测分析
clu_mean = MeanShift()
mean_pred = clu_mean.fit_predict(_data)
print('MeanShift:\n',classification_report(mean_pred,label))
print('MeanShift:\n',classification_report(mean_pred,label),file=result)
#绘图
color_value = lambda a: 'r' if a == 1 else 'b'
#原始图像
color_ori = [color_value(d) for d in label]
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(data['Age'],data['Pclass'],data['Fare'],c = color_ori,marker = 'o')
ax.set_xlabel('Age')
ax.set_ylabel('Pclass')
ax.set_zlabel('Fare')
plt.savefig('e:/hw3/output/cluster_original.png')
plt.clf()
#KMeans结果
color_kmeans = [color_value(d) for d in kmeans_pred]
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(data['Age'],data['Pclass'],data['Fare'],c = color_kmeans,marker = 'o')
ax.set_xlabel('Age')
ax.set_ylabel('Pclass')
ax.set_zlabel('Fare')
plt.savefig('e:/hw3/output/cluster_result_kmeans.png')
plt.clf()
#meanshift结果
color_meanshift = [color_value(d) for d in mean_pred]
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(data['Age'],data['Pclass'],data['Fare'],c = color_meanshift,marker = 'o')
ax.set_xlabel('Age')
ax.set_ylabel('Pclass')
ax.set_zlabel('Fare')
plt.savefig('e:/hw3/output/cluster_result_meanshift.png')
plt.clf()
| true |
a953a91c57945f395e9fe948bcb5df3f60b51a7e | Python | ianrecke/covid_vaccine_progress_bot | /.github/utils/EuropeanUnion.py | UTF-8 | 2,157 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | # Standard
import argparse
import os
import sys
import pandas as pd
# =============================================================================
# Constants
# =============================================================================
COUNTRIES = [
"Austria",
"Belgium",
"Bulgaria",
"Croatia",
"Cyprus",
"Czechia",
"Denmark",
"Estonia",
"Finland",
"France",
"Germany",
"Greece",
"Hungary",
"Ireland",
"Italy",
"Latvia",
"Lithuania",
"Luxembourg",
"Malta",
"Netherlands",
"Poland",
"Portugal",
"Romania",
"Slovakia",
"Slovenia",
"Spain",
"Sweden"]
# =============================================================================
# Arguments
# =============================================================================
description = "Publish vaccination data for a country."
parser = argparse.ArgumentParser(description=description)
arg = "--data"
parser.add_argument(arg)
arg = "--output"
parser.add_argument(arg)
args = sys.argv[1:]
args = parser.parse_args(args)
# Rename the command line arguments for easier reference
path = args.data
output = args.output
# =============================================================================
# Functions
# =============================================================================
# =============================================================================
# Main
# =============================================================================
columns=["date", "total_vaccinations", "people_vaccinated", "people_fully_vaccinated"]
eu_data = pd.DataFrame(columns = columns)
for country in COUNTRIES:
path_file = os.path.join(path, country.replace(" ", "") + ".csv")
data = pd.read_csv(path_file, usecols = columns, index_col="date")
if eu_data.empty:
eu_data = data
else:
#eu_data = data.reindex_like(eu_data).fillna(0) + eu_data.fillna(0)
data = data.reindex_like(eu_data).fillna(method="ffill")
print("DATA")
print(data)
print("EU_DATA")
print(eu_data)
eu_data = eu_data.add(data, fill_value=0)
| true |
6f644c10c64a1caf840c54d240214344754309f1 | Python | shihyuanwang/Social_Media_Sentiment_Analysis_for_Ridesharing_Companies | /TweetSentiment.py | UTF-8 | 8,071 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 05:17:55 2020
@author: Shih-Yuan Wang
"""
## import the libraries
import tweepy, codecs, os, sys, csv
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
## fill in Twitter credentials
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
#--------------------------------------------------------------------------------
# Get Uber Tweets
#--------------------------------------------------------------------------------
## let Tweepy set up an instance of the REST API
auth = tweepy.AppAuthHandler(consumer_key, consumer_secret)
#auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
#api = tweepy.API(auth)
#Maximum number of tweets we want to collect
maxTweets = 100000
#The twitter Search API allows up to 100 tweets per query
tweetsPerQry = 100
searchQuery = ('Uber')
tweetCount = 0
#http://www.dealingdata.net/2016/07/23/PoGo-Series-Tweepy/
#https://pastebin.com/9rC7UrVn
## use the codecs library to write the text of the Tweets to a .txt file
#Old way of doing things - which is what you will see a lot of people doing online
max_id = -1
tweetCount = 0
with codecs.open("twitterOut_Uber.csv", "w", "utf-8") as file:
#While we still want to collect more tweets
while tweetCount < maxTweets:
try:
#Look for more tweets, resuming where we left off
if max_id <= 0:
new_tweets = api.search(q=searchQuery, lang = "en", count=tweetsPerQry)
else:
new_tweets = api.search(q=searchQuery, lang = "en", count=tweetsPerQry, max_id=str(max_id - 1))
#If we didn't find any exit the loop
if not new_tweets:
print("No more tweets found")
break
for tweet in new_tweets:
#Make sure the tweet has place info before writing
#if (tweet.geo is not None) and (tweetCount < maxTweets):
if (tweetCount < maxTweets):
if (len(tweet.id_str)==0):
file.write(" ")
else :
file.write(tweet.id_str)
file.write("\t")
if (len(str(tweet.user.id))==0):
file.write(" ")
else :
file.write(str(tweet.user.id))
file.write("\t")
file.write(str(tweet.user.location))
file.write("\t")
file.write(str(tweet.created_at))
file.write("\t'")
file.write(str(tweet.text.encode('utf-8')))
file.write("\t")
#ss = p.clean(str(tweet.text.encode('utf-8')))
#file.write(ss)
#file.write("\t")
#Choose one of the two analysis that fit what you need
#default sentiment analysis
tt = TextBlob(str(tweet.text.encode('utf-8')))
#tt = TextBlob(ss)
file.write(str(tt.sentiment.polarity))
file.write("\t")
file.write(str(tt.sentiment.subjectivity))
file.write("\t")
#if you using NaiveBayes
#tt2 = TextBlob(str(tweet.text.encode('utf-8')), analyzer=NaiveBayesAnalyzer())
#tt2 = TextBlob(ss, analyzer=NaiveBayesAnalyzer())
#file.write(str(tt2.sentiment.classification))
#file.write("\t")
#file.write(str(tt2.sentiment.p_pos))
#file.write("\t")
#file.write(str(tt2.sentiment.p_neg))
file.write("\n")
#tt=TextBlob(str(tweet.text.encode('utf-8')), )
tweetCount += 1
#Display how many tweets we have collected
print("Downloaded {0} tweets".format(tweetCount))
#Record the id of the last tweet we looked at
max_id = new_tweets[-1].id
except tweepy.TweepError as e:
#Print the error and continue searching
print("some error : " + str(e))
file.close()
#--------------------------------------------------------------------------------
# Get Lyft Tweets
#--------------------------------------------------------------------------------
## let Tweepy set up an instance of the REST API
auth = tweepy.AppAuthHandler(consumer_key, consumer_secret)
#auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
#api = tweepy.API(auth)
#Maximum number of tweets we want to collect
maxTweets = 100000
#The twitter Search API allows up to 100 tweets per query
tweetsPerQry = 100
searchQuery = ('Lyft')
tweetCount = 0
#http://www.dealingdata.net/2016/07/23/PoGo-Series-Tweepy/
#https://pastebin.com/9rC7UrVn
## use the codecs library to write the text of the Tweets to a .txt file
#Old way of doing things - which is what you will see a lot of people doing online
max_id = -1
tweetCount = 0
with codecs.open("twitterOut_Lyft.csv", "w", "utf-8") as file:
#While we still want to collect more tweets
while tweetCount < maxTweets:
try:
#Look for more tweets, resuming where we left off
if max_id <= 0:
new_tweets = api.search(q=searchQuery, lang = "en", count=tweetsPerQry)
else:
new_tweets = api.search(q=searchQuery, lang = "en", count=tweetsPerQry, max_id=str(max_id - 1))
#If we didn't find any exit the loop
if not new_tweets:
print("No more tweets found")
break
for tweet in new_tweets:
#Make sure the tweet has place info before writing
#if (tweet.geo is not None) and (tweetCount < maxTweets):
if (tweetCount < maxTweets):
if (len(tweet.id_str)==0):
file.write(" ")
else :
file.write(tweet.id_str)
file.write("\t")
if (len(str(tweet.user.id))==0):
file.write(" ")
else :
file.write(str(tweet.user.id))
file.write("\t")
file.write(str(tweet.user.location))
file.write("\t")
file.write(str(tweet.created_at))
file.write("\t'")
file.write(str(tweet.text.encode('utf-8')))
file.write("\t")
#Choose one of the two analysis that fit what you need
#default sentiment analysis
tt = TextBlob(str(tweet.text.encode('utf-8')))
#tt = TextBlob(ss)
file.write(str(tt.sentiment.polarity))
file.write("\t")
file.write(str(tt.sentiment.subjectivity))
file.write("\t")
file.write("\n")
#tt=TextBlob(str(tweet.text.encode('utf-8')), )
tweetCount += 1
#Display how many tweets we have collected
print("Downloaded {0} tweets".format(tweetCount))
#Record the id of the last tweet we looked at
max_id = new_tweets[-1].id
except tweepy.TweepError as e:
#Print the error and continue searching
print("some error : " + str(e))
file.close()
| true |
9905708e9b777b28430ed4ca36915c2b3aa73155 | Python | gbriones1/python-training | /basic/zoo/zoo.py | UTF-8 | 2,347 | 3.734375 | 4 | [] | no_license | class Life(object):
YEAR = 2010
BEINGS = []
@classmethod
def time_passes(cls, years):
for being in cls.BEINGS:
being.grow_up(years)
cls.YEAR += years
class Animal(object):
def __init__(self, age):
self.age = age
Life.BEINGS.append(self)
def recieve_name(self, name):
self.name = name
def grow_up(self, years):
self.age += years
print self.__class__.__name__+" now has "+str(self.age)+" years old"
def is_pet(self):
if 'Pet' in [c.__name__ for c in list(self.__class__.__bases__)]:
return True
return False
def speak(self):
print "I have been living for "+str(self.age)+" years"
class Mammal(Animal):
def speak(self):
print "I was born from my mom's belly"
super(Mammal, self).speak()
class Fish(Animal):
def speak(self):
print "I have scales"
super(Fish, self).speak()
class Bird(Animal):
def speak(self):
print "I have feathers"
class Pet(Animal):
def be_adopted(self, name, master):
super(Pet, self).recieve_name(name)
self.master = master
print name+" was adopted by "+self.master.name
def speak(self):
print self.master.name+" is my master"
super(Pet, self).speak()
class Dog(Mammal, Pet):
def speak(self):
print "Woof!"
super(Dog, self).speak()
class GoldenFish(Fish, Pet):
def speak(self):
print "My color is golden"
super(GoldenFish, self).speak()
class Wild(Animal):
def speak(self):
print "I cannot be tamed"
super(Wild, self).speak()
class Tiger(Mammal, Wild):
def speak(self):
print "I am a tiger"
super(Tiger, self).speak()
class Person(Mammal):
def __init__(self, name):
super(Person, self).__init__(0)
super(Person, self).recieve_name(name)
self.pets = []
print self.name+" was born."
def adopt_pet(self, animal, name):
if animal.is_pet():
animal.be_adopted(name, self)
self.pets.append(animal)
else:
print animal.__class__.__name__+" is wild and cannot be adopted"
def play_with_pets(self):
for pet in self.pets:
print "Playing with "+pet.name
pet.speak()
me = Person("Gabriel")
Life.time_passes(20)
me.adopt_pet(Dog(3), "Icy")
Life.time_passes(5)
me.adopt_pet(GoldenFish(2), "Juicy")
me.adopt_pet(Tiger(2), "Fury")
puppy = Dog(0)
me.play_with_pets()
Life.time_passes(2)
| true |
193e25642576c83f998b5a0c552692df6f2464df | Python | Aasthaengg/IBMdataset | /Python_codes/p03078/s338892211.py | UTF-8 | 748 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python3
import heapq
def main():
x, y, z, k = map(int, input().split())
a = list(reversed(sorted(map(int, input().split()))))
b = list(reversed(sorted(map(int, input().split()))))
c = list(reversed(sorted(map(int, input().split()))))
q = [(-a[0] - b[0] - c[0], 0, 0, 0)]
queued = {(0, 0, 0)}
for i in range(k):
v, s, t, u = heapq.heappop(q)
print(-v)
for e, f, g in [(s, t, u + 1), (s, t + 1, u), (s + 1, t, u)]:
if e >= x or f >= y or g >= z:
continue
if (e, f, g) in queued:
continue
heapq.heappush(q, (-(a[e] + b[f] + c[g]), e, f, g))
queued.add((e, f, g))
if __name__ == "__main__":
main()
| true |
a5574f9d6f2f93d33955a2eb1a917bae492d94bc | Python | skarone/PipeL | /install/serial/clientsManager.py | UTF-8 | 1,735 | 2.828125 | 3 | [] | no_license | import sqlite3 as lite
import sys
import os
class ClientsManager(object):
"""handle sql database of the clients"""
def __init__(self, path = 'D:/test.db'):
self._path = path
def create(self):
"""create clients table"""
con = lite.connect( self._path )
with con:
cur = con.cursor()
cur.execute( '''CREATE TABLE IF NOT EXISTS Clients( ID INTEGER PRIMARY KEY AUTOINCREMENT,
CLIENT_NAME TEXT NOT NULL,
SERIAL_NO TEXT UNIQUE NOT NULL,
LICENSE_NO INT NOT NULL DEFAULT 1,
VALIDATIONS_NO INT NOT NULL DEFAULT 0 );''' )
def addClient(self, name, serial, License_number ):
"""docstring for addClient"""
con = lite.connect( self._path )
with con:
cur = con.cursor()
cur.execute("INSERT INTO Clients(CLIENT_NAME, SERIAL_NO, LICENSE_NO ) VALUES ('" + name + "','" + serial + "'," + str(License_number) + ");")
def clientInstall(self, serial):
"""when client made an install, add 1 to validations_no"""
clientData = self.getClientData( serial )
if clientData:
if clientData[3] == clientData[4]:
#we reach the limit of installation
return 'installations-reached'
#add one to the validations limit and return True!
con = lite.connect( self._path )
with con:
cur = con.cursor()
cur.execute( "UPDATE Clients SET VALIDATIONS_NO=" +str( clientData[4] + 1 ) + " WHERE Id=" + str( clientData[0] ) + ";" )
print clientData
return True
return 'wrong-serial'
def getClientData(self, serial):
"""return Bool if serial is exists"""
con = lite.connect( self._path )
with con:
cur = con.cursor()
cur.execute("select * from Clients where serial_no=:serial", {"serial": serial})
return cur.fetchone()
| true |
b74e9ff7b0da2e446eb8b4eb501b2476ef675bb6 | Python | eric-r-xu/klaviyo-weather-app | /async_api_and_email_service.py | UTF-8 | 2,264 | 2.734375 | 3 | [
"MIT"
] | permissive | import asyncio
import aiohttp
import aiosmtplib
# ... other imports remain the same
# Async fetch function using aiohttp
async def fetch(session, url):
async with session.get(url) as response:
return await response.text()
# Modify api_and_email_task to be an async function
async def api_and_email_task(sem, cityID, city_name, dateFact, tomorrow, recipients, local_tz, utc_offset_seconds):
async with sem:
# ... rest of the function remains the same, except for the following changes
# Replace time.sleep with asyncio.sleep
await asyncio.sleep(30)
# Replace requests.get with the fetch function
async with aiohttp.ClientSession() as session:
curr_r = await fetch(session, url)
# ... and similarly for other fetch calls
# Replace smtplib.SMTP with aiosmtplib.SMTP
async with aiosmtplib.SMTP(GMAIL_AUTH["mail_server"], 587) as server:
await server.starttls()
await server.login(GMAIL_AUTH["mail_username"], GMAIL_AUTH["mail_password"])
await server.send_message(message)
logging.info(f"Sent email to {recipient}")
# ...
# Modify main to create and use an asyncio event loop
def main():
# ... rest of the function remains the same, until this point
sem = asyncio.Semaphore(5)
tasks = []
for row in tblDimEmailCity_sorted.itertuples(index=True, name="Pandas"):
recipients = str(getattr(row, "email_set")).split(",")
cityID = getattr(row, "city_id")
local_tz = getattr(row, "tz")
utc_offset_seconds = getattr(row, "utc_offset_seconds")
city_name = city_dict[str(cityID)]
logging.info(
f"cityID={str(cityID)}, city_name={city_name}, local_tz={local_tz}, utc_offset_seconds={utc_offset_seconds}"
)
task = api_and_email_task(
sem,
cityID,
city_name,
dateFact,
tomorrow,
recipients,
local_tz,
utc_offset_seconds,
)
tasks.append(task)
# Run the tasks concurrently
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
# ...
if __name__ == "__main__":
main()
| true |
787eca9e3219e214f015bee642b85d78ee88aace | Python | HOIg3r/LINFO1101-Intro-a-la-progra | /Exercices INGI/Session 6/Q Représentation de tableau.py | UTF-8 | 462 | 3.265625 | 3 | [] | no_license | def table(filename_in, filename_out, width):
with open(filename_in,'r') as file_in:
file_in_line = []
for line in file_in:
file_in_line.append(line.strip())
with open(filename_out,'w') as file_out:
file_out.write("+" + "-"*(width+2) + "+\n")
for line in file_in_line:
file_out.write("| {:{}} |\n".format(line[:width], width))
file_out.write("+" + "-"*(width+2) + "+")
| true |
6c883e75e808538d678c7cbdf9acf65de00070d3 | Python | vadrevu-thanuja/list1 | /List2.py | UTF-8 | 108 | 3.125 | 3 | [] | no_license | List2=[12,14,-95,3]
num=0
while(num<len(list2)):
if List2[num]>=0:
print(List2[num],end=" ")
num+=1
| true |
48a6084ae713374465bebc4e8e295b1fb8435694 | Python | jiceR/france-ioi | /python/bornes.py | UTF-8 | 125 | 3.65625 | 4 | [] | no_license |
borneX = int(input());
borneY = int(input());
if borneX > borneY:
print(borneX - borneY);
else:
print(borneY - borneX); | true |
c8a0f714be360ceb5fc13e3fdeeed625344ce00d | Python | Milittle/learning_of_python | /com/mizeshuang/functional_programming/higher_order_function.py | UTF-8 | 1,092 | 4.78125 | 5 | [] | no_license | # 用实际的代码进行阐述
# 变量可以指向函数,这里使用自带函数abs进行测试
f = abs
b = f(-1)
print(b)
# 这说明一个现象就是,函数名也可以赋值给一个变量,调用这个变量和调用函数的功能是一致的
# 函数名也是变量
# 如果把abs指向其他变量,那么会发生什么呢?
#abs = 10
print(abs(-1))# 那么他就会报错,因为现在abs是一个int型变量
# 注:由于abs函数实际上是定义在import builtins模块中的,所以要让修改abs变量的指向在其它模块也生效,
# 要用import builtins; builtins.abs = 10。
# 传入函数
# 既然变量可以指向函数,函数的参数可以接受变量,那么,一个函数也就能接收另一个函数作为参数了,这种函数就叫做高阶函数
def add(x,y,f):
return f(x) + f(y)
# 调用
x = -5
y = 9
f = abs
print(add(x,y,f))
# 编写高阶函数,就是让函数的参数能够接收别的函数
# 把函数作为参数传入,这样的函数就是告诫函数,函数式编程就是之这种高度抽象的编程范式 | true |
a591310a8bfcced6bb51d80ffcbd409093d14b6c | Python | atiselsts/feature-group-selection | /feature-selection/ml_state.py | UTF-8 | 8,062 | 2.546875 | 3 | [] | no_license | import os
import numpy as np
import copy
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import ShuffleSplit, KFold
from sklearn.metrics import f1_score
import sys
sys.path.append("..")
sys.path.append("../energy-model")
import utils
import energy_model
from ml_config import *
class State:
def __init__(self):
# whether to use accuracy only of the combined energy accuracy score
self.use_accuracy_only = False
# whether to operate at group or individual vector level
self.do_subselection = False
def load_subset(self, dataset, name):
filename = os.path.join("..", "datasets", dataset, name, "features.csv")
data = np.asarray(utils.load_csv(filename, skiprows=1))
filename = os.path.join("..", "datasets", dataset, name, "y_{}.txt".format(name))
activities = np.asarray(utils.load_csv(filename)).ravel()
filename = os.path.join("..", "datasets", dataset, name, "subject_{}.txt".format(name))
subjects = np.asarray(utils.load_csv(filename)).ravel()
return data, activities, subjects
def load(self, dataset):
self.train, self.train_y, self.train_subjects = self.load_subset(dataset, "train")
self.validation, self.validation_y, self.validation_subjects = self.load_subset(dataset, "validation")
self.test, self.test_y, self.test_subjects = self.load_subset(dataset, "test")
if USE_N_FOLD_CROSS_VALIDATION:
self.alltrain = np.concatenate((self.train, self.validation, self.test))
self.alltrain_y = np.concatenate((self.train_y, self.validation_y, self.test_y))
self.alltrain_subjects = np.concatenate((self.train_subjects, self.validation_subjects, self.test_subjects))
# just pick the first one
self.subject_left_out = self.alltrain_subjects[0]
self.cv = []
self.cv_y = []
self.left_out = []
self.left_out_y = []
for i in range(len(self.alltrain_subjects)):
subject = self.alltrain_subjects[i]
if subject == self.subject_left_out:
self.left_out.append(self.alltrain[i])
self.left_out_y.append(self.alltrain_y[i])
else:
self.cv.append(self.alltrain[i])
self.cv_y.append(self.alltrain_y[i])
print("number of the subject left out:", int(self.subject_left_out))
self.cv = np.asarray(self.cv)
self.cv_y = np.asarray(self.cv_y).ravel()
self.left_out = np.asarray(self.left_out)
self.left_out_y = np.asarray(self.left_out_y).ravel()
filename = os.path.join("..", "feature_names.csv")
self.names = utils.read_list_of_features(filename)
if self.do_subselection:
self.groups = [n[1] for n in self.names]
else:
# need to preserve order, so cannot uniquify via the usual way (via a set)
self.groups = []
for n in self.names:
if n[2] not in self.groups:
self.groups.append(n[2])
self.num_features = len(self.groups) # number of features
# get the energy for raw data, used to stop iterating
self.energy_for_raw = self.eval_energy_for_raw()
print("Stopping energy value is {:.4f}".format(self.energy_for_raw))
def evaluate_baseline(self):
validation_scores = []
test_scores = []
for i in range(10):
clf = RandomForestClassifier(n_estimators = NUM_TREES, random_state=i,
class_weight = "balanced")
clf.fit(self.train, self.train_y)
hypothesis = clf.predict(self.validation)
f1 = f1_score(self.validation_y, hypothesis, average="micro")
validation_scores.append(f1)
hypothesis = clf.predict(self.test)
f1 = f1_score(self.test_y, hypothesis, average="micro")
test_scores.append(f1)
s_test = np.mean(test_scores)
s_validation = np.mean(validation_scores)
validation_scores = ["{:.4f}".format(x) for x in validation_scores]
test_scores = ["{:.4f}".format(x) for x in test_scores]
print("validation:" , "{:.4f}".format(s_validation), validation_scores)
print("test :" , "{:.4f}".format(s_test), test_scores)
def eval_accuracy(self, indexes):
if len(indexes) == 0:
return RANDOM_ACCURACY, RANDOM_ACCURACY
selector = utils.select(self.names, self.groups, indexes, self.do_subselection)
if USE_N_FOLD_CROSS_VALIDATION:
features = self.cv[:,selector]
left_out_features = self.left_out[:,selector]
validation_score = 0
test_score = 0
# rs = ShuffleSplit(n_splits = NUM_VALIDATION_ITERATIONS, test_size = 0.33)
rs = KFold(n_splits = NUM_VALIDATION_ITERATIONS) #, test_size = 0.33)
scores = []
# use balanced weigths to account for class imbalance
# (we're trying to optimize f1 score, not accuracy)
clf = RandomForestClassifier(n_estimators = NUM_TREES, random_state=0,
class_weight = "balanced")
for train_index, test_index in rs.split(features):
clf.fit(features[train_index], self.cv_y[train_index])
# s1 = clf.score(features[test_index], self.cv_y[test_index])
# s2 = clf.score(left_out_features, self.left_out_y)
hypothesis = clf.predict(features[test_index])
s1 = f1_score(self.cv_y[test_index], hypothesis, average="micro")
hypothesis = clf.predict(left_out_features)
s2 = f1_score(self.left_out_y, hypothesis, average="micro")
scores.append("{:2.2f} ({:2.2f})".format(s1, s2))
validation_score += s1
test_score += s2
validation_score /= NUM_VALIDATION_ITERATIONS
test_score /= NUM_VALIDATION_ITERATIONS
# names = [self.groups[i] for i in indexes]
# print(names)
# print("validation/test:" , scores)
else:
# simply train and then evaluate
features_train = self.train[:,selector]
features_validation = self.validation[:,selector]
scores = []
for i in range(NUM_TRIALS):
# use balanced weigths to account for class imbalance
# (we're trying to optimize f1 score, not accuracy)
clf = RandomForestClassifier(n_estimators = NUM_TREES, random_state=i,
class_weight = "balanced")
clf.fit(features_train, self.train_y)
#validation_score = clf.score(features_validation, self.validation_y)
hypothesis = clf.predict(features_validation)
#c = (self.validation_y == hypothesis)
f1 = f1_score(self.validation_y, hypothesis, average="micro")
scores.append(f1)
validation_score = np.mean(scores)
# check also the results on the test set
features_test = self.test[:,selector]
hypothesis = clf.predict(features_test)
f1 = f1_score(self.test_y, hypothesis, average="micro")
test_score = f1
#print("validation={:.2f} test={:.2f}".format(validation_score, test_score))
return validation_score, test_score
def eval_energy(self, indexes):
names = [self.groups[i] for i in indexes]
#print("names=", names)
return sum(energy_model.calc(names))
def combined_score(self, indexes):
av, at = self.eval_accuracy(indexes)
b = self.eval_energy(indexes)
score = roundacc(W_ACCURACY * av) + W_ENERGY * b
return score, av, at, b
def eval_energy_for_raw(self):
return sum(energy_model.calc_raw())
| true |
8266e2f0cbe4219caabd148100479da7b182a5ca | Python | EmersonBraun/python-excercices | /cursoemvideo/ex033.py | UTF-8 | 372 | 4.375 | 4 | [] | no_license | # Faça um programa que leia três números
# e mostre qual é o maior e qual é o menor
num1 = float(input('Digite o primeiro número: '))
num2 = float(input('Digite o segundo número: '))
num3 = float(input('Digite o terceiro número: '))
numeros = [num1, num2, num3]
numeros = sorted(numeros)
print('O maior número é {} e o menor {}'.format(numeros[2], numeros[0])) | true |
4a20388623e6fa823998c377d0aa8337e9a085c4 | Python | r4gus/Sichere_Programmierung | /Praktikum1/SP-P1-Sobott-Sugar/Code/mcrypt.py | UTF-8 | 833 | 3.65625 | 4 | [] | no_license | def gcd(a, b):
"""
Calculates the greatest common divisor of a and b.
"""
if b == 0:
return a
else:
return gcd(b, a % b)
def mul_inverse(n, m):
"""
Calculates the multiplicative inverse n^-1 of n (mod m).
def: n * n^(-1) = 1 (mod m)
Returns: n^-1 e [0,1,2..m-1] if there is exactly one multiplicative
inverse (e.g. gcd(n, m) != 1), None otherwise.
"""
#if gcd(n, m) != 1:
# return None
module = m
x = 1
y = 0
q = []
while m != 0:
q += [n // m]
(n, m) = (m , n % m)
if n != 1:
return None
q.reverse()
for t in q:
_x = y
_y = x - ( _x * t )
x = _x
y = _y
return (x + module) % module
if __name__ == '__main__':
print(mul_inverse(75, 38))
| true |
e74e295fd6629aa5f50a4a7076ac887f3f8bc84a | Python | RosaGeorge31/C34 | /C34/DSA/lab5/prg1.py | UTF-8 | 6,445 | 2.953125 | 3 | [] | no_license | class TreeNode:
def __init__(self):
self.parent= None
self.left = None
self.right = None
self.val = None
self.ht = 1
class AVLTree:
    """AVL-style binary search tree.

    Nodes cache their subtree height (``ht``).  ``insert`` does a plain BST
    insert and then applies one of four hand-written rotation cases around
    the lowest unbalanced ancestor found on the insertion path.
    """
    def __init__(self):
        # Sentinel root with val=None marks an empty tree.
        self.root = TreeNode()
    def insert(self,key):
        """Insert *key* (duplicates descend left) and rebalance once if needed."""
        if self.root.val == None:
            self.root.val = key
            return
        temp = TreeNode()
        temp.val = key
        par = self.root.parent
        ptr = self.root
        # Standard BST descent; f records whether the last step went left.
        while ptr is not None:
            par = ptr
            f = 0
            if key <= ptr.val:
                ptr = ptr.left
                f=1
            else:
                ptr = ptr.right
        if f ==1:
            par.left = temp
            temp.parent = par
        else:
            par.right = temp
            temp.parent = par
        # Refresh the cached heights on the path back to the root.
        self.inc_ht(temp)
        ptr = par
        z = None
        # Walk upwards to find the lowest node whose children's heights
        # differ by >= 2: the unbalanced node z.
        while ptr is not None:
            if ptr.left is None:
                diff = ptr.right.ht
                if diff >= 2:
                    z = ptr
                    break
            elif ptr.right is None:
                diff = ptr.left.ht
                if diff >= 2:
                    z = ptr
                    break
            else:
                diff = abs( ptr.left.ht - ptr.right.ht )
                if diff >= 2:
                    z = ptr
                    break
            ptr = ptr.parent
        if z is None:
            return
        print("z is : " +str(z.val))
        # y is z's child and x is z's grandchild along the insertion path.
        prpr = temp
        pr = temp.parent
        ptr = temp.parent.parent
        while ptr is not z :
            prpr = pr
            pr = ptr
            ptr = ptr.parent
        x = prpr
        y = pr
        print("y is : " +str(y.val))
        print("x is : " +str(x.val))
        if z.left is y and y.left is x:
            # Left-left case: single right rotation around z.
            if z.parent is None: #root
                print("CASE 1 a")
                self.root = y
                z.left = y.right
                z.parent = y
                y.right = z
                y.parent = None
                y.ht = x.ht + 1
                z.ht = x.ht
                self.inc_ht(z)
                return
            else:
                print("CASE 1 b")
                pt = z.parent
                y.parent = pt
                # NOTE(review): this always attaches y as pt's *left* child;
                # if z was pt's right child the links break — confirm.
                pt.left = y
                z.left = y.right
                z.parent = y
                y.right = z
                y.ht = x.ht + 1
                z.ht = x.ht
                self.inc_ht(z)
                return
        elif z.right is y and y.right is x:
            # Right-right case: single left rotation around z.
            if z.parent is None: #root
                print("CASE 2 a")
                self.root = y
                z.right = y.left
                z.parent = y
                y.left = z
                y.parent = None
                y.ht = x.ht + 1
                z.ht = x.ht
                self.inc_ht(z)
                return
            else:
                print("CASE 2 b")
                pt = z.parent
                y.parent = pt
                # NOTE(review): mirror of CASE 1 b — assumes z was pt's right child.
                pt.right = y
                z.right = y.left
                z.parent = y
                y.left = z
                y.ht = x.ht + 1
                z.ht = x.ht
                self.inc_ht(z)
                return
        elif z.left is y and y.right is x:
            # Left-right case: intended double rotation.
            pt = z.parent
            if pt is None:
                print("CASE 3 a")
                z.left = x
                x.parent = z
                x.left = y
                # NOTE(review): x.left was just set to y, so this makes
                # y.right point at y itself — looks like a bug (probably
                # meant to use x's *former* left child). Confirm.
                y.right = x.left
                y.parent = x
                y.ht = x.ht
                x.ht = y.ht + 1
                z.left = x.right
                z.parent = x
                x.right = z
                x.parent = pt
                self.root = x
                x.ht = y.ht + 1
                z.ht = y.ht
                self.inc_ht(x)
            else:
                print("CASE 3 b")
                z.left = x
                x.parent = z
                x.left = y
                # NOTE(review): same self-link issue as CASE 3 a.
                y.right = x.left
                y.parent = x
                y.ht = x.ht
                x.ht = y.ht + 1
                self.inc_ht(y)
                z.left = x.right
                z.parent = x
                x.right = z
                x.parent = pt
                # NOTE(review): unconditionally attaches x on pt's left; see CASE 1 b.
                pt.left = x
                x.ht = y.ht + 1
                z.ht = y.ht
                self.inc_ht(x)
        else:
            # Right-left case.
            # NOTE(review): unlike the other cases this never reattaches x to
            # z's parent/self.root and never calls self.inc_ht — confirm.
            print("CASE 4")
            pt = z.parent
            z.right = x
            x.parent = z
            y.left = x.right
            y.parent = x
            x.right = y
            y.ht = x.ht
            x.ht = y.ht + 1
            z.right = x.left
            z.parent = x
            x.left = z
            x.parent = pt
            x.ht = y.ht + 1
            z.ht = y.ht
    def inc_ht(self,v):
        """Recompute the cached height of every ancestor of *v* (bottom-up)."""
        ptr = v.parent
        while ptr is not None:
            if ptr.left is None:
                ptr.ht = ptr.right.ht +1
            elif ptr.right is None:
                ptr.ht = ptr.left.ht +1
            else:
                ptr.ht = max(ptr.left.ht,ptr.right.ht) + 1
            ptr = ptr.parent
    def display(self,x):
        """Print the subtree rooted at *x* in pre-order as 'val ht: h' lines."""
        if x == None:
            return
        else:
            print(str(x.val) + " ht: " + str(x.ht))
            self.display(x.left)
            self.display(x.right)
def main():
    """Smoke test: insert a fixed key sequence, dumping the tree after each insert.

    The original repeated the insert/display/print triple six times by hand;
    a loop over the same key order produces identical output.
    """
    tree = AVLTree()
    for key in (10, 12, 8, 14, 16, 13):
        tree.insert(key)
        tree.display(tree.root)
        print('')
    print('root : ' + str(tree.root.val))


if __name__=='__main__':
    main()
| true |
560d60a5649371ac90346c97471231c037840bc1 | Python | Saifur43/iWeather | /crawler.py | UTF-8 | 714 | 2.90625 | 3 | [] | no_license | from bs4 import BeautifulSoup
import requests
import urllib
def get(city_name):
    """Fetch the timeanddate.com weather page for *city_name* and return the headline text."""
    page_url = "https://www.timeanddate.com/weather/bangladesh/" + city_name
    response = requests.get(page_url)
    parsed = BeautifulSoup(response.text, 'html.parser')
    headline = parsed.find('div', attrs={'class': 'h2'})
    return headline.text.strip()
def get_img(city_name):
    """Download the current-weather icon for *city_name* into images/weather.png.

    Fix: the module only does ``import urllib``, which does not make the
    ``urllib.request`` submodule reachable by itself; import it explicitly
    here so ``urlretrieve`` is always available.
    """
    import urllib.request

    url = "https://www.timeanddate.com/weather/bangladesh/" + city_name
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    img = soup.find('img', attrs={'id': 'cur-weather'})
    img_a = img.get('src')
    # The page uses a protocol-relative '//...' src; prefix a scheme.
    img_c = "http://" + str(img_a[2:])
    name = "images/weather.png"
    urllib.request.urlretrieve(img_c, name)
a201115019fd10aeb9a839f93ef4c06e888bbc64 | Python | xiongmao2015/yiyao | /proxy_pool/kuaidaili.py | UTF-8 | 2,051 | 2.53125 | 3 | [] | no_license | # coding: utf-8
import time
import random
from datetime import datetime
import requests
from lxml import etree
from proxy_log.logs import logs
class Proxy(object):
    """Scraper for the kuaidaili.com free proxy listings (Python 2 code).

    ``start`` yields proxy tuples
    (ip, port, country, anonymous, http_type, from_site, crawl_time).
    """
    def __init__(self):
        # Page templates for the anonymous (inha) and transparent (intr) listings.
        self.ha_url = 'http://www.kuaidaili.com/free/inha/{page}/' # 1,2,3
        self.tr_url = 'http://www.kuaidaili.com/free/intr/{page}/'
        # Rotated per-request to look less like a bot.
        self.user_agent = [
            'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2117.157 Safari/537.36',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/59.0.3071.109 '
            'Chrome/59.0.3071.109 Safari/537.36'
        ]
    def get_proxy(self, url):
        """Yield one proxy tuple per table row of *url*; rows with empty cells are skipped."""
        r = requests.get(url, headers={
            'User-Agent': random.choice(self.user_agent)
        })
        html = etree.HTML(r.content)
        # Every table row that actually has data cells.
        all_proxy = html.xpath('//table//tr[td]')
        print len(all_proxy), '-------len(all_proxy)------'
        logs.debug('-----all ip num-----%s-----url: %s -----' % (len(all_proxy), url))
        for i in all_proxy:
            ip = i.xpath('./td[1]/text()')[0]
            port = i.xpath('./td[2]/text()')[0]
            http_type = i.xpath('./td[4]/text()')[0]
            country = i.xpath('./td[5]/text()')[0]
            anonymous = i.xpath('./td[3]/text()')[0]
            from_site = 'kuaidaili'
            crawl_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            proxy = (ip, port, country, anonymous, http_type, from_site, crawl_time)
            # Only yield rows where every field is non-empty.
            if all(proxy):
                yield proxy
    def start(self):
        """Crawl pages 1-4 of both listings, sleeping 1s between page fetches."""
        for page in range(1, 5):
            ha_url = self.ha_url.format(page=page)
            time.sleep(1)
            for proxy in self.get_proxy(url=ha_url):
                yield proxy
        for page in range(1, 5):
            tr_url = self.tr_url.format(page=page)
            time.sleep(1)
            for proxy in self.get_proxy(url=tr_url):
                yield proxy
if __name__ == '__main__':
    # Manual run: crawl both free-proxy listings and dump each proxy tuple.
    p = Proxy()
    for p_ip in p.start():
        print p_ip
| true |
46f6880fc9ae36b75274b862c2c8cb8e94f0a158 | Python | ArthurBernard/Fynance | /fynance/backtest/plot_tools.py | UTF-8 | 7,071 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# coding: utf-8
""" Functions to plot backtest. """
# Built-in packages
# External packages
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
# Internal packages
from fynance.features.money_management import iso_vol
from fynance.features.metrics import drawdown, roll_sharpe
from fynance.backtest.print_stats import set_text_stats
# Set plot style
# NOTE(review): the bare 'seaborn' style name was removed in newer matplotlib
# releases — confirm the pinned matplotlib version supports it.
plt.style.use('seaborn')

# Public API of this module.
__all__ = ['display_perf']
# =========================================================================== #
# Printer Tools #
# =========================================================================== #
def compute_perf(logret, signal, fee):
    """Cumulative performance of *signal* applied to log-returns *logret*,
    net of a *fee* charged on every change of position."""
    fee_paid = np.zeros(logret.shape)
    # A fee is paid proportionally to how much the position changed.
    fee_paid[1:] = (signal[1:] - signal[:-1]) * fee
    simple_ret = np.exp(logret) - 1 - fee_paid
    return np.cumprod(simple_ret * signal + 1)
def display_perf(
    y_idx, y_est, period=252, title='', params_iv={},
    plot_drawdown=True, plot_roll_sharpe=True, x_axis=None,
    underlying='Underlying', win=252, fees=0,
):
    """ Plot performance and print KPI of backtest.

    Print dynamic plot of performance indicators (perf, rolling sharpe
    and draw down) of a strategy (raw and iso-volatility) versus its
    underlying.

    Parameters
    ----------
    y_idx : np.ndarray[np.float64, ndim=1]
        Time series of log-returns of the underlying.
    y_est : np.ndarray[np.float64, ndim=1]
        Time series of the signal's strategy.
    period : int, optional
        Number of period per year. Default is 252.
    title : str or list of str, optional
        Title of performance strategy, default is empty.
    plot_drawdown : bool, optional
        If true plot drawdowns, default is True.
    plot_roll_sharpe : bool, optional
        If true plot rolling sharpe ratios, default is True.
    x_axis : list or np.asarray, optional
        x-axis to plot (e.g. list of dates).
    underlying : str, optional
        Name of the underlying, default is 'Underlying'.
    win : int, optional
        Size of the window of rolling sharpe, default is 252.
    fees : float, optional
        Fees to apply at the strategy performance.

    Returns
    -------
    perf_idx : np.ndarray[np.float64, ndim=1]
        Time series of underlying performance.
    perf_est : np.ndarray[np.float64, ndim=1]
        Time series of raw strategy performance.
    perf_ivo : np.ndarray[np.float64, ndim=1]
        Time series of iso-vol strategy performance.

    See Also
    --------
    PlotBackTest, set_text_stats
    """
    if x_axis is None:
        x_axis = range(y_idx.size)
    # Compute perf. (underlying, raw strategy and iso-vol-scaled strategy)
    perf_idx = np.exp(np.cumsum(y_idx))
    perf_est = compute_perf(y_idx, y_est, fees)
    iv = iso_vol(np.exp(np.cumsum(y_idx)), **params_iv)
    perf_ivo = compute_perf(y_idx, y_est * iv, fees)
    # Print stats. table
    txt = set_text_stats(
        y_idx, period=period,
        Strategy=y_est,
        Strat_IsoVol=y_est * iv,
        underlying=underlying,
        fees=fees
    )
    print(txt)
    # Plot results: 1-3 stacked subplots depending on the flags.
    n = 1 + plot_roll_sharpe + plot_drawdown
    f, ax = plt.subplots(n, 1, figsize=(9, 6), sharex=True)
    if n == 1:
        # NOTE(review): 'ax_dd' is assigned twice here; the second one was
        # probably meant to be 'ax_roll' — confirm.
        ax_perf, ax_dd, ax_dd = ax, None, None
        ax_perf.set_xlabel('Date')
    elif n == 2:
        ax_perf = ax[0]
        (ax_dd, ax_roll) = (ax[1], None) if plot_drawdown else (None, ax[1])
        ax[-1].set_xlabel('Date')
    else:
        ax_perf, ax_dd, ax_roll = ax[0], ax[1], ax[2]
    # Plot performances (percent scale).
    # NOTE(review): the 'LineWidth' kwarg capitalization is non-standard
    # ('linewidth'/'lw'); confirm it is accepted by the pinned matplotlib.
    ax_perf.plot(
        x_axis,
        100 * perf_est,
        color=sns.xkcd_rgb["pale red"],
        LineWidth=2.
    )
    ax_perf.plot(
        x_axis,
        100 * perf_ivo,
        color=sns.xkcd_rgb["medium green"],
        LineWidth=1.8
    )
    ax_perf.plot(
        x_axis,
        100 * perf_idx,
        color=sns.xkcd_rgb["denim blue"],
        LineWidth=1.5
    )
    # Set notify motion function: refresh legends with the values under the cursor.
    def motion(event):
        N = len(ax_perf.lines[0].get_ydata())
        w, h = f.get_size_inches() * f.dpi - 200
        x = max(event.x - 100, 0)
        # Map cursor pixel position to a data index.
        j = int(x / w * N)
        ax_perf.legend([
            'Strategy: {:.0f} %'.format(ax_perf.lines[0].get_ydata()[j] - 100),
            'Strat Iso-Vol: {:.0f} %'.format(ax_perf.lines[1].get_ydata()[j] - 100),
            '{}: {:.0f} %'.format(underlying, ax_perf.lines[2].get_ydata()[j] - 100),
        ], loc='upper left', frameon=True, fontsize=10)
        if plot_drawdown:
            ax_dd.legend([
                'Strategy: {:.2f} %'.format(ax_dd.lines[0].get_ydata()[j]),
                'Strat Iso-Vol: {:.2f} %'.format(ax_dd.lines[1].get_ydata()[j]),
                '{}: {:.2f} %'.format(underlying, ax_dd.lines[2].get_ydata()[j]),
            ], loc='upper left', frameon=True, fontsize=10)
        if plot_roll_sharpe:
            ax_roll.legend([
                'Strategy: {:.2f}'.format(ax_roll.lines[0].get_ydata()[j]),
                'Strat Iso-Vol: {:.2f}'.format(ax_roll.lines[1].get_ydata()[j]),
                '{}: {:.2f}'.format(underlying, ax_roll.lines[2].get_ydata()[j]),
            ], loc='upper left', frameon=True, fontsize=10)
    ax_perf.legend(
        ['Strategy', 'Strat Iso-Vol', underlying],
        loc='upper left', frameon=True, fontsize=10
    )
    ax_perf.set_ylabel('Perf.')
    ax_perf.set_yscale('log')
    ax_perf.set_title(title)
    ax_perf.tick_params(axis='x', rotation=30, labelsize=10)
    # Plot DrawDowns
    if plot_drawdown:
        ax_dd.plot(
            x_axis,
            100 * drawdown(perf_est),
            color=sns.xkcd_rgb["pale red"],
            LineWidth=1.4
        )
        ax_dd.plot(
            x_axis,
            100 * drawdown(perf_ivo),
            color=sns.xkcd_rgb["medium green"],
            LineWidth=1.2
        )
        ax_dd.plot(
            x_axis,
            100 * drawdown(perf_idx),
            color=sns.xkcd_rgb["denim blue"],
            LineWidth=1.
        )
        ax_dd.set_ylabel('% DrawDown')
        ax_dd.set_title('DrawDown in percentage')
        ax_dd.tick_params(axis='x', rotation=30, labelsize=10)
    # Plot rolling Sharpe ratio
    if plot_roll_sharpe:
        ax_roll.plot(
            x_axis,
            roll_sharpe(perf_est, period=period, w=win),
            color=sns.xkcd_rgb["pale red"],
            LineWidth=1.4
        )
        ax_roll.plot(
            x_axis,
            roll_sharpe(perf_ivo, period=period, w=win),
            color=sns.xkcd_rgb["medium green"],
            LineWidth=1.2
        )
        ax_roll.plot(
            x_axis,
            roll_sharpe(perf_idx, period=period, w=win),
            color=sns.xkcd_rgb["denim blue"],
            LineWidth=1.
        )
        ax_roll.set_ylabel('Sharpe ratio')
        ax_roll.set_yscale('log')
        ax_roll.set_xlabel('Date')
        ax_roll.set_title('Rolling Sharpe ratio')
        ax_roll.tick_params(axis='x', rotation=30, labelsize=10)
    f.canvas.mpl_connect('motion_notify_event', motion)
    plt.show()
    return perf_idx, perf_est, perf_ivo
| true |
3dff2834412b59502b9aed75f181b137427a8cbe | Python | Kyefer/baba-is-ai | /Levels.py | UTF-8 | 6,588 | 2.6875 | 3 | [] | no_license |
import os
from tkinter import Tk, Label, Entry, Button, Menu, Listbox, Frame
import tkinter.filedialog as fd
import numpy as np
from Game import Entity, Level, Object, Modifier, Link
# Registry of playable levels, populated at import time below.
levels = []
def level00():
    """Build level "00" (33x18): a walled corridor with BABA, three pushable
    rocks blocking the path and a FLAG behind them."""
    level = Level("00", 33, 18)
    # Rule text tiles: "BABA IS YOU", "FLAG IS WIN", "WALL IS STOP", "ROCK IS PUSH".
    level.setup_entities({
        (11, 6): Entity.NOUN(Object.BABA),
        (12, 6): Entity.IS(),
        (13, 6): Entity.MOD(Modifier.YOU),
        (19, 6): Entity.NOUN(Object.FLAG),
        (20, 6): Entity.IS(),
        (21, 6): Entity.MOD(Modifier.WIN),
        # (7, 20): Entity.OBJ(Object.BABA),
        (11, 14): Entity.NOUN(Object.WALL),
        (12, 14): Entity.IS(),
        (13, 14): Entity.MOD(Modifier.STOP),
        (19, 14): Entity.NOUN(Object.ROCK),
        (20, 14): Entity.IS(),
        (21, 14): Entity.MOD(Modifier.PUSH),
    })
    # Two horizontal wall rows (y=8 and y=12) forming the corridor.
    walls = {}
    for i in range(11):
        walls[(i + 11, 8)] = Object.WALL
        walls[(i + 11, 12)] = Object.WALL
    level.setup_objects(walls)
    # A vertical line of rocks between BABA (left) and the FLAG (right).
    level.setup_objects({
        (16, 9): Object.ROCK,
        (16, 10): Object.ROCK,
        (16, 11): Object.ROCK,
        (12, 10): Object.BABA,
        (20, 10): Object.FLAG,
    })
    return level
def level01():
    # NOTE(review): unfinished stub — builds a level but never returns it,
    # so callers get None. Confirm the intended content before using it.
    level = Level("01", 24, 18)
    level.setup_entities({
        (8, 7): Entity.NOUN(Object.FLAG),
    })
# Register the only finished level at import time (level01 is still a stub).
levels.append(level00())
def get_values():
    """Return the palette of editor cell labels: a blank entry, the link
    words, object names (upper- and lower-case) and the modifier names."""
    vals = ["", Link.IS.name, Link.AND.name]
    vals += [obj.name for obj in Object]
    vals += [obj.name.lower() for obj in Object]
    vals += [mod.name for mod in Modifier]
    return vals
class Editor:
    """Tkinter level editor: a resizable grid of labelled tiles that can be
    loaded from / saved to a CSV file of cell labels."""
    def __init__(self, parent):
        # Grid of cell labels (dtype=object); None in a cell means "being edited".
        self.board: np.ndarray = None
        self.parent = parent
        self.menu = None
        self.filemenu = None
        # Path of the currently open/last saved CSV, or None.
        self.filename: str = None
        self.dim_frame = None
        self.x_ent = None
        self.y_ent = None
        self.set_btn = None
        self.tile_frame = None
        # NOTE(review): 'grid' is never used anywhere in this class.
        self.grid = None
        self.create_gui()
    def create_gui(self):
        """Build the menu bar and the width/height entry row."""
        self.parent.title("Baba is You Level Editor")
        self.menu = Menu(self.parent)
        self.parent.config(menu=self.menu)
        self.filemenu = Menu(self.menu)
        self.menu.add_cascade(label="File", menu=self.filemenu)
        self.filemenu.add_command(label='Open...', command=self.open_file)
        self.filemenu.add_separator()
        # Save entries stay disabled until a board exists / a file is chosen.
        self.filemenu.add_command(label='Save', state="disabled", command=self.save_file)
        self.filemenu.add_command(label='Save As...', state="disabled", command=self.save_as_file)
        self.filemenu.add_separator()
        self.filemenu.add_command(label='Exit', command=self.parent.quit)
        self.dim_frame = Frame(self.parent)
        self.dim_frame.grid(row=0, column=0)
        Label(self.dim_frame, text="x").grid(row=0, column=0)
        Label(self.dim_frame, text="y").grid(row=0, column=2)
        self.x_ent = Entry(self.dim_frame)
        self.y_ent = Entry(self.dim_frame)
        self.x_ent.insert(0, 4)
        self.y_ent.insert(0, 3)
        self.x_ent.grid(row=0, column=1)
        self.y_ent.grid(row=0, column=3)
        self.set_btn = Button(self.dim_frame, text="SET", width=10, command=self.set_dim)
        self.set_btn.grid(row=0, column=5)
    def set_dim(self):
        """Create an empty w x h board from the entry fields and render it.

        Non-numeric entries are silently ignored (ValueError swallowed).
        """
        try:
            w, h = int(self.x_ent.get()), int(self.y_ent.get())
            self.board = np.empty(shape=(w, h), dtype=object)
            for i in range(w):
                for j in range(h):
                    self.board[i][j] = ""
            self.load_board()
            self.filemenu.entryconfig("Save As...", state="normal")
        except ValueError:
            return
    def load_board(self):
        """(Re)render the board as a grid of labels; right-click toggles a
        tile between its label and a listbox of all possible values."""
        if self.tile_frame:
            self.tile_frame.grid_forget()
        self.tile_frame = Frame(self.parent)
        self.tile_frame.grid(row=1)
        self.tile_frame.grid(pady=10)
        for j in range(self.board.shape[1]):
            for i in range(self.board.shape[0]):
                subframe = Frame(self.tile_frame, borderwidth=1, relief="solid")
                subframe.grid(row=j, column=i, padx=1, pady=1)
                lbl = Label(subframe, text=self.board[i, j], width=10, height=5)
                lbl.grid(row=0)
                def click(event):
                    # Cell coordinates come from the grid position of the
                    # clicked widget's parent frame.
                    x, y = event.widget.master.grid_info()['row'], event.widget.master.grid_info()['column']
                    if self.board[x, y] is not None:
                        # First right-click: replace the label with a value picker;
                        # None in the board marks the cell as "being edited".
                        lstbox = Listbox(event.widget.master, relief="flat", width=10, height=4, bg="#F0F0F0", activestyle="none", highlightthickness=0, selectmode="single")
                        for k, lst in enumerate(get_values()):
                            lstbox.insert(k, lst)
                        lstbox.grid(row=0, padx=0, pady=0)
                        lstbox.see(get_values().index(event.widget.cget("text")))
                        lstbox.select_set(get_values().index(event.widget.cget("text")))
                        self.board[x, y] = None
                        lstbox.bind("<Button-3>", click)
                    else:
                        # Second right-click: commit the selection back to the board.
                        self.board[x, y] = event.widget.get(event.widget.curselection())
                        lbl = Label(event.widget.master, text=self.board[x, y], width=10, height=5)
                        lbl.grid(row=0)
                        lbl.bind("<Button-3>", click)
                lbl.bind("<Button-3>", click)
    def get_filename(self):
        """Ask the user for a file to open and remember the chosen path."""
        self.filename = fd.askopenfilename(initialdir=os.getcwd(), )
    def open_file(self):
        """Load a comma-separated board file into self.board and render it."""
        self.get_filename()
        if self.filename:
            self.board = np.loadtxt(self.filename, delimiter=",", dtype=str)
            print(self.board)
            self.load_board()
            self.filemenu.entryconfig("Save", state="normal")
            self.filemenu.entryconfig("Save As...", state="normal")
    def save_file(self):
        """Write the board back to the currently remembered file, if any."""
        if self.filename:
            np.savetxt(self.filename, self.board, delimiter=",", fmt='%s')
    def save_as_file(self):
        """Ask for a target path and write the board there.

        NOTE(review): unlike save_file, a cancelled dialog (empty filename)
        is not guarded against before np.savetxt — confirm intended.
        """
        self.filename = fd.asksaveasfilename(initialdir=os.getcwd(), title="Select file to save level to",
                                             filetypes=[("Text Files", "*.txt")])
        np.savetxt(self.filename, self.board, delimiter=",", fmt='%s')
        self.filemenu.entryconfig("Save", state="normal")
def editor():
    """Launch the level-editor window and enter the Tk main loop."""
    root = Tk()
    Editor(root)
    root.mainloop()


if __name__ == "__main__":
    editor()
| true |
afddeedb0eea3b9badb84b0662ce1028b868e191 | Python | maschlr/thesis_simulation | /weatherdata.py | UTF-8 | 5,742 | 3.125 | 3 | [] | no_license | # Script to read the weater data supported the Universidad de Piura in some ugly format
import sqlite3
import csv
import os
import re
from numpy import array, floor
class weatherData(object):
def __init__(self):
self.data = []
self.listOfFiles=[]
self.year = None
def setYear(self, year):
self.year=year
def getAllWeatherFiles(self,directory=os.path.join(os.getcwd(),'weatherfiles/')):
dataFiles = os.listdir(directory)
dataFiles.sort()
for file in dataFiles:
print file
answer = raw_input('Should these files be scanned for weather data? (y/n)')
if answer == 'no' or answer == 'n':
answer = raw_input('Do you want to pop some out of the list or scan another directory? (p/a)')
if answer == 'p' or answer == 'pop':
for i in range(len(dataFiles)):
print i, ': ', dataFiles[i]
pop = raw_input('Please type in the index of the files that should be excluded, separated by commata: ')
n = 0
for i in pop.split(','):
dataFiles.pop(int(i)-n)
n+=1
elif answer == 'a':
directory = raw_input('Which directory should be scanned for files instead? \n Full path: ')
self.getAllWeatherFiles(directory)
elif answer=='yes' or answer=='y':
pass
else:
print("You didn't answer the question properly, we'll try another time!")
self.getAllWeatherFiles(directory)
self.listOfFiles = []
for dataFile in dataFiles:
self.listOfFiles.append(os.path.join(directory, dataFile))
def readDataFromFiles(self):
self.data = []
for file in self.listOfFiles:
f = open(file, 'r')
f.readline()
f.readline()
yearMinutes = 0
i = 1
for l in f.readlines():
l = l.split()
l[0] = l[0].split('/')
l[1] = l[1].split(':')
if i < 2:
yearMinutes = self.getYearMinutes([int(b) for b in l[0]])
yearMinutes += int(l[1][0])*60
self.setYear(int(l[0][2]))
# The colums are:(day, month, year, hours, minutes, yearMinutes, temperature, humidity, windspeed, insolation)
self.data.append([int(l[0][0]),int(l[0][1]),int(l[0][2]),int(l[1][0]),int(l[1][1]),yearMinutes+i*30,float(l[2]),float(l[5])/100,float(l[7]),float(l[19])])
i+=1
f.close()
def getYearMinutes(self, dayMonthYear):
#Computes the minutes passed since 01.Jan 0:00
hoursTotal = 0
leapyears = range(1900,2200,4)
leapyears.remove(1900)
leapyears.remove(2100)
daysInMonth = [31, 28, 31, 30, 31, 30, 31 ,31, 30, 31, 30, 31]
if dayMonthYear[2] in leapyears:
daysInMonth[1] = 29
if dayMonthYear[1]>1:
for i in range(dayMonthYear[1]-1):
hoursTotal += daysInMonth[i]*24
return (hoursTotal+(dayMonthYear[0]-1)*24)*60
def writeFile(self, filename='default.csv', path=''):
f = open(os.path.join(path,filename), 'w')
header = ('day', 'month', 'year', 'hours', 'minutes', 'yearMinutes', 'temperature', 'humidity', 'windspeed', 'insolation')
csv_writer = csv.writer(self.f)
csv_writer.writerow(header)
for row in self.data:
csv_writer.writerow(row)
f.close()
def writeDB(self, dbfile='weather.db', path='', tableName='weatherData'):
conn = sqlite3.connect(os.path.join(path, dbfile))
db = conn.cursor()
db.execute("CREATE TABLE IF NOT EXISTS %s (day int, month int, year int, hours int, \
minutes int, yearMinutes int, temperature real, humidity real, windspeed real, insolation real)" %tableName)
for row in self.data:
# create a tuple to pass it as argument to the execute function
db.execute('INSERT INTO %s VALUES (?,?,?,?,?,?,?,?,?,?)' %tableName, tuple(row))
conn.commit()
db.close()
def readDB(self, month, startDay, startTime, timeSpan, year=None, dbfile='weather.db', path='', tableName='weatherData'):
startMinutes = self.getYearMinutes([startDay, month, (year or self.year)])+startTime*60
endMinutes = startMinutes+timeSpan*60
conn = sqlite3.connect(os.path.join(path,dbfile))
db = conn.cursor()
db.execute('SELECT * FROM %s WHERE month=? AND yearMinutes>=? AND yearMinutes<=? ORDER BY yearMinutes' %tableName, (month, startMinutes, endMinutes) )
self.relevantData = db.fetchall()
db.close()
if __name__ == '__main__':
    # Interactive driver (Python 2): asks a few questions, then writes both
    # a CSV and an SQLite database from the files found in this directory.
    print('Welcome to the data reading script that takes the ugly files,')
    print('provided by the Piura weather radar station and gives you ')
    print('either a .csv file or a sqlite3 database with the numbers that')
    print('really matter without all this useless shit. ')
    print('Your output format do you want to use?')
    # NOTE(review): inputChoice, directorySearch and switchContinue are
    # collected but never acted on — both outputs are always produced.
    inputChoice = raw_input(' 1 : .csv 2 : sqlite3 3 : both ')
    print('')
    print('Do you want to search the current directory for files?')
    directorySearch = raw_input('[y/n] ')
    # Only get the files with the known pattern
    dataFiles = filter(lambda x: re.match('[0-9]{6}\.txt', x), os.listdir(os.getcwd()))
    dataFiles.sort()
    print('')
    print('I have found the following files with potential data:')
    print(dataFiles)
    switchContinue = raw_input('Continue? [y/n] ')
    # NOTE(review): weatherData.__init__ takes no arguments, so this call
    # raises TypeError; the file list is also never handed to the instance.
    wd = weatherData(dataFiles)
    wd.writeFile()
    wd.writeDB()
| true |
3659bde2bbc0f7a317a2ad2117f0712ca6981db4 | Python | syurskyi/Python_Topics | /125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/51_v2/miller.py | UTF-8 | 940 | 3.28125 | 3 | [] | no_license | from datetime import datetime
# https://pythonclock.org/
PY2_DEATH_DT = datetime(year=2020, month=1, day=1)
BITE_CREATED_DT = datetime.strptime('2018-02-26 23:24:04', '%Y-%m-%d %H:%M:%S')


def py2_earth_hours_left(start_date=BITE_CREATED_DT):
    """Return how many hours, rounded to 2 decimals, Python 2 has
    left on Planet Earth (calculated from start_date)"""
    remaining = PY2_DEATH_DT - start_date
    return round(remaining.total_seconds() / 3600, 2)


def py2_miller_min_left(start_date=BITE_CREATED_DT):
    """Return how many minutes, rounded to 2 decimals, Python 2 has
    left on Planet Miller (calculated from start_date)"""
    # Interstellar's Planet Miller: 1 hour there is 7 Earth years.
    earth_hours = py2_earth_hours_left(start_date)
    earth_years = earth_hours / (365 * 24)
    miller_hours = earth_years / 7
    return round(miller_hours * 60, 2)
| true |
2fdd469cdbe71552c2b6a5005c4debef222222bf | Python | almaudoh/mlsequential | /Seq2Seq/trainer.py | UTF-8 | 3,389 | 2.78125 | 3 | [] | no_license | import torch
class Trainer(object):
    """Mini-batch trainer for a PyTorch sequence model.

    Collects per-batch loss, epoch index and average gradient magnitudes
    into ``self.stats`` while fitting.
    """
    def __init__(self, optimizer=None, criterion=None):
        # Both must be set before fit() is called (asserted there).
        self.optimizer = optimizer
        self.criterion = criterion
        self.stats = {
            'epoch': [],
            'loss': [],
            'gradient_flow': [],
        }
    def fit(self, model, X, Y, epochs=10, learning_rates=None, batch_size=500):
        """Train *model* on (X, Y) for *epochs* epochs.

        learning_rates, if given, is a list of (epoch, lr) pairs applied in
        order when the given epoch is reached; it is consumed (pop) in place.
        """
        assert self.optimizer is not None
        assert self.criterion is not None
        # Check device
        if torch.cuda.is_available():
            device = torch.device('cuda')
            print("Training on GPU")
            report_interval = 100
        else:
            device = torch.device('cpu')
            print("Training on CPU")
            report_interval = 10
        model.to(device)
        X = X.to(device)
        Y = Y.to(device)
        # Training Run
        # batchsize = X.shape[0]
        # input_seq_len = X.shape[1]
        next_lr = self.optimizer.defaults['lr']
        # Default sentinel: never triggers unless a schedule entry replaces it.
        next_lr_epoch = epochs + 1
        print("LR: {:.4f}".format(next_lr))
        if learning_rates and len(learning_rates):
            next_lr_epoch, next_lr = learning_rates.pop(0)
        # Update training statistics.
        self.stats['loss'] = []
        self.stats['epoch'] = []
        for epoch in range(1, epochs + 1):
            # Variable LR adjustments.
            if next_lr_epoch == epoch:
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = next_lr
                print("LR: {:.4f}".format(next_lr))
                if learning_rates and len(learning_rates):
                    next_lr_epoch, next_lr = learning_rates.pop(0)
            # Shuffle the input before taking batches
            shuffled = torch.randperm(X.shape[0])
            for i in range(0, X.shape[0], batch_size):
                # Clears existing gradients from previous epoch
                self.optimizer.zero_grad()
                # model.zero_grad() # Clears existing gradients from previous epoch
                indices = shuffled[i:i + batch_size]
                batch_x, batch_y = X[indices], Y[indices]
                output = model(batch_x)
                # Have to convert to 2D Tensor since pytorch doesn't handle 3D properly.
                # NOTE(review): assumes output is (batch, seq, n_classes) and
                # batch_y holds class indices — confirm against the model.
                loss = self.criterion(output.view(-1, output.shape[2]), batch_y.view(-1))
                # loss = self.criterion(output, Y)
                # Does back propagation and calculates gradients
                loss.backward()
                # Updates the weights accordingly
                self.optimizer.step()
                # Update training statistics.
                self.save_gradient_flow(model.named_parameters())
                self.stats['loss'].append(loss.item())
                self.stats['epoch'].append(epoch)
            if epoch % report_interval == 0:
                print('Epoch: {}/{}.............'.format(epoch, epochs), end=' ')
                print("Loss: {:.4f}".format(loss.item()))
    def save_gradient_flow(self, named_parameters):
        """Append the mean absolute gradient of every non-bias parameter to the stats."""
        ave_grads = []
        layers = []
        for n, p in named_parameters:
            if p.requires_grad and "bias" not in n:
                layers.append(n)
                ave_grads.append(p.grad.abs().mean())
        self.stats['gradient_flow'].append({
            'layers': layers,
            'grads': ave_grads,
        })
| true |
5747b6198e1d9515ef9e18abbbff150cb82cbfb0 | Python | igordejanovic/parglare | /tests/func/regressions/test_issue31_glr_drop_parses_on_lexical_ambiguity.py | UTF-8 | 752 | 2.890625 | 3 | [
"MIT",
"Python-2.0"
] | permissive | from parglare import Grammar, GLRParser
def test_issue31_glr_drop_parses_on_lexical_ambiguity():
    """Regression test for issue #31: the GLR parser must keep all parse
    trees when two terminals ('title' and 'table_title') lexically match
    the same input string.

    Fix: the local variable was named ``input``, shadowing the builtin;
    renamed to ``input_str``.
    """
    grammar = """
    model: element+;
    element: title
    | table_with_note
    | table_with_title;
    table_with_title: table_title table_with_note;
    table_with_note: table note*;
    terminals
    title: /title/; // <-- This is lexically ambiguous with the next.
    table_title: /title/;
    table: "table";
    note: "note";
    """
    # this input should yield 4 parse trees.
    input_str = "title table title table"
    g = Grammar.from_string(grammar)
    parser = GLRParser(g, debug=True, debug_colors=True)
    results = parser.parse(input_str)
    # We should have 4 solutions for the input.
    assert len(results) == 4
| true |
5d36c31abe62d3bc24967d257bd7acde33fa81c8 | Python | matthew-lowe/RoboJosh | /extensions/info_commands.py | UTF-8 | 2,325 | 2.859375 | 3 | [] | no_license | import datetime
import discord
from discord.ext import commands
class InfoCommands(commands.Cog):
    """Discord cog with user-information commands (;avatar and ;info)."""
    def __init__(self, bot):
        self.bot = bot
    # Displays the avatar
    @commands.command(help="Show the avatar of a user", usage=";avatar [user]")
    async def avatar(self, ctx, target=None):
        """Send an embed with the tagged user's (or the author's) avatar."""
        utils = self.bot.get_cog("Utils")
        # Set the target user if given, else message author
        user = utils.get_target(ctx, target)
        if user is None:
            await ctx.send("`Invalid user! Please tag a member of the server`")
            return
        # URL Discord stores avatars in
        url = "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png?size=1024".format(user)
        # Send avatar as a swanky embed
        embed = discord.Embed(title="Avatar URL", url=url, description=user.name + '#' + user.discriminator)
        embed.set_image(url=url)
        utils.style_embed(embed, ctx)
        await ctx.send(embed=embed)
    # Gives some info about a user
    @commands.command(help="Get information about a user", usage=";info [user]")
    async def info(self, ctx, target=None):
        """Send an embed with ID, display name, dates and roles of a user."""
        utils = self.bot.get_cog("Utils")
        # Set the target user if given, else message author
        user = utils.get_target(ctx, target)
        if user is None:
            await ctx.send("`Invalid user! Please tag a member of the server`")
            return
        member = ctx.guild.get_member(user.id)
        # NOTE(review): author_avatar is computed but never used below.
        author_avatar = "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png?size=1024".format(ctx.author)
        user_avatar = "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png?size=1024".format(user)
        # Omits the first value (@everyone)
        member_roles = member.roles[1:]
        role_string = ""
        for role in member_roles:
            role_string += f"<@&{role.id}> "
        embed = discord.Embed(title=f"{user.name}#{user.discriminator}")
        embed.add_field(name="User ID:", value=user.id, inline=True)
        embed.add_field(name="Display name:", value=member.display_name, inline=True)
        embed.add_field(name="Account Created:", value=user.created_at.strftime('%A %d %b %Y, %I:%M %p'), inline=False)
        embed.add_field(name="Guild Join Date:", value=member.joined_at.strftime('%A %d %b %Y, %I:%M %p'), inline=False)
        embed.add_field(name="Server Roles:", value=role_string, inline=False)
        embed.set_thumbnail(url=user_avatar)
        utils.style_embed(embed, ctx)
        await ctx.send(embed=embed)
def setup(bot):
    # discord.py extension entry point: register this cog on the bot.
    bot.add_cog(InfoCommands(bot))
| true |
2facfbae9aa0223b17d2b76947635d3b855c3c9e | Python | aalepere/IRB | /app.py | UTF-8 | 730 | 3.1875 | 3 | [] | no_license | """ Streamlit app for portfolio analysis and capital requirements """
import matplotlib.pyplot as plt
import pandas as pd
import streamlit as st
st.title("Credit risk portfolio analysis and capital requirements")


# Cached so the CSV is only read once per session.
@st.cache
def load_portfolio():
    """
    Load portfolio data
    """
    data = pd.read_csv("tests/test_data_excel_csv.csv")
    return data


# Placeholder text swapped out once the data is available.
data_load_state = st.text("Loading data...")
data = load_portfolio()
data_load_state.text("Portfolio data has been loaded")
if st.checkbox("Show raw data"):
    st.subheader("Raw data")
    st.write(data)
# Histogram of obligor probabilities of default, in percent.
st.subheader("PD distribution")
plt.hist(data["PD"] * 100, bins=20)
plt.xlabel("PD - Probability of defaults")
plt.ylabel("Number of obligors")
st.pyplot()
| true |
1b986e1d64ff843a33f1ae2567176b5229b06a28 | Python | m9ra/bot-trading | /bot_trading/core/exceptions.py | UTF-8 | 352 | 3.109375 | 3 | [] | no_license | class TradeEntryNotAvailableException(Exception):
def __init__(self, pair: str, timestamp: float = None, entry_index: int = None):
super().__init__(f"Requested entry is not available for {pair} at {entry_index} on {timestamp}")
class PortfolioUpdateException(Exception):
def __init__(self, message):
super().__init__(message)
| true |
963d777487d1182b235e0f8000154f32c699ebef | Python | zjkang/PythonPractice | /soccer/*** footballDB.py | UTF-8 | 6,974 | 3.125 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3
import sys
from types import *
def create_schemas():
    """Return every '<country>_<division>' combination as a list of strings."""
    countries = [u"西班牙", u"英格兰", u"德国", u"意大利", u"法国"]
    divisions = [u"超级", u"甲级", u"乙级"]
    return [c + '_' + d for c in countries for d in divisions]
def create_table_team(dbname, teamTable):
    """(Re)create *teamTable* (id, name, country, level) in SQLite db *dbname*."""
    con = sqlite3.connect(dbname)
    with con:
        cur = con.cursor()
        cur.execute("DROP TABLE IF EXISTS " + teamTable)
        # Table names cannot be bound as parameters; teamTable must be trusted.
        ddl = ("CREATE TABLE " + teamTable + "(id INTEGER PRIMARY KEY AUTOINCREMENT, "
               "name TEXT, country TEXT, level TEXT)")
        cur.execute(ddl)
def create_table_match(dbname, matchTable):
    """(Re)create *matchTable* (id, match_name, team ids, scores, date) in *dbname*."""
    con = sqlite3.connect(dbname)
    with con:
        cur = con.cursor()
        cur.execute("DROP TABLE IF EXISTS " + matchTable)
        # Table names cannot be bound as parameters; matchTable must be trusted.
        ddl = ("CREATE TABLE " + matchTable + "(id INTEGER PRIMARY KEY AUTOINCREMENT, "
               "match_name TEXT, home_id INTEGER, guest_id INTEGER, "
               "home_score INTEGER, guest_score INTEGER, date TEXT)")
        cur.execute(ddl)
def create_table_odd(dbname, oddTable):
    """(Re)create *oddTable* in the SQLite database *dbname*, dropping any existing one.

    NOTE(review): the schema duplicates the match columns and leaves
    ``match_id`` untyped (legal in SQLite, but probably unintended) —
    confirm the intended odds schema before relying on it.
    """
    con = sqlite3.connect(dbname)
    with con:
        cursor = con.cursor()
        cursor.execute("DROP TABLE IF EXISTS " + oddTable)
        statement = "CREATE TABLE " + oddTable + "(id INTEGER PRIMARY KEY AUTOINCREMENT, " + \
            "match_id , home_id INTEGER, guest_id INTEGER, " + \
            "home_score INTEGER, guest_score INTEGER, date TEXT)"
        # Fix: print as a function call so the module parses under both
        # Python 2 and Python 3 (was the py2-only statement form).
        print(statement)
        cursor.execute(statement)
def insert_table_team(dbname, teamTable):
    """Insert the hard-coded set of eight teams into *teamTable* of db *dbname*."""
    teams = [
        (u"巴萨罗那", u"西班牙", u"甲级"),
        (u"皇家马德里", u"西班牙", u"甲级"),
        (u"马德里竞技", u"西班牙", u"甲级"),
        (u"瓦伦西亚", u"西班牙", u"甲级"),
        (u"曼联", u"英格兰", u"超级"),
        (u"曼城", u"英格兰", u"超级"),
        (u"拜仁慕尼黑", u"德国", u"甲级"),
        (u"多特蒙德", u"德国", u"甲级"),
    ]
    con = sqlite3.connect(dbname)
    with con:
        # The with-block commits on success; executemany binds all rows at once.
        con.cursor().executemany(
            "INSERT INTO " + teamTable + "(name, country, level) VALUES(?, ?, ?)", teams)
def insert_table_match(dbname, teamTable, matchTable):
    """Insert the five demo fixtures into *matchTable*.

    Club names in the fixture data are resolved to their row ids in
    *teamTable* before insertion, so the team rows must already exist.
    """
    matches = [(u"西甲", u"巴萨罗那", u"皇家马德里", 2, 1, "2016-10-02T03:00:00"),
               (u"西甲", u"马德里竞技", u"瓦伦西亚", 3, 0, "2016-10-03T05:00:00"),
               (u"英超", u"曼联", u"曼城", 1, 1, "2016-10-05T05:00:00"),
               (u"欧冠", u"拜仁慕尼黑", u"多特蒙德", 3, 2, "2016-10-07T05:20:00"),
               (u"德甲", u"拜仁慕尼黑", u"多特蒙德", 0, 2, "2016-10-10T20:20:00")]
    con = sqlite3.connect(dbname)
    with con:
        cursor = con.cursor()

        def team_id(team_name):
            # Resolve a club name to its primary key in teamTable.
            cursor.execute("SELECT id, name FROM " + teamTable + " WHERE name = ?",
                           (team_name,))
            return cursor.fetchone()[0]

        resolved = []
        for match_name, home, guest, home_score, guest_score, date in matches:
            resolved.append((match_name, team_id(home), team_id(guest),
                             home_score, guest_score, date))
        cursor.executemany("INSERT INTO " + matchTable +
                           "(match_name, home_id, guest_id, home_score, guest_score, date) "
                           "VALUES(?, ?, ?, ?, ?, ?)", resolved)
def select_teams_by_country(teamTable, country, dbname='football.db'):
    """Return a cursor over (id, name, country, level) rows for *country*.

    Bug fix: the original read a module-level global ``dbname`` that only
    exists when this file runs as a script, so calling the function from
    another module raised NameError. It is now an explicit parameter whose
    default matches the script's database file.
    """
    con = sqlite3.connect(dbname)
    with con:
        cursor = con.cursor()
        statement = ("SELECT id, name, country, level FROM " + teamTable +
                     " WHERE country = ? ")
        rows = cursor.execute(statement, (country,))
    # The `with` block commits but leaves the connection open, so the
    # returned cursor can still be iterated by the caller.
    return rows
def select_all_matches(teamTable, matchTable, dbname='football.db'):
    """Return a cursor over all matches joined with home/guest club names.

    Row shape: (id, match_name, home_name, guest_name, home_score,
    guest_score, date). LEFT JOINs keep matches whose team ids are missing.

    Bug fixes: the original read a module-level global ``dbname`` (NameError
    when imported) -- now an explicit parameter -- and contained a leftover
    Python-2-only debug ``print statement``, which has been removed.
    """
    con = sqlite3.connect(dbname)
    with con:
        cursor = con.cursor()
        statement = ("SELECT M.id, M.match_name, T1.name AS home_name, T2.name AS guest_name, "
                     "M.home_score, M.guest_score, M.date "
                     "FROM " + matchTable + " as M "
                     "LEFT JOIN " + teamTable + " as T1 on M.home_id = T1.id "
                     "LEFT JOIN " + teamTable + " as T2 on M.guest_id = T2.id")
        rows = cursor.execute(statement)
    # Connection is left open so the caller can iterate the cursor.
    return rows
# def create_table_match(country_match):
# conn = sqlite3.connect(dbname)
# def drop_tables(dbname):
# conn = sqlite3.connect(dbname)
# cursor = conn.cursor()
# cursor.execute('drop table ' + country)
# conn.commit()
# cursor.close()
# conn.close()
# def insert(teams, country):
# # users = (u'腾讯qq', 'qq@example.com')
# # conn = sqlite3.connect(dbname)
# # cursor = conn.cursor()
# # cursor.execute("insert into userinfo(name, email) values(?, ?)", users)
# # conn.commit()
# # cursor.close()
# # conn.close()
# conn = sqlite3.connect(dbname)
# cursor = conn.cursor()
# for team in teams:
# sql = 'insert into ' + country + '(id, name) values(?, ?)'
# cursor.execute(sql, team)
# conn.commit()
# cursor.close()
# conn.close()
# teams = [(1, u'皇家马德里'), (2, u'巴萨罗那')]
# def select(text):
# conn = sqlite3.connect(dbname)
# cursor = conn.cursor()
# print "select name from userinfo where email='%s'" % text
# rows = cursor.execute("select * from userinfo where email= ? ", (text,))
# print rows
# for row in rows:
# print row[0].encode('utf8'), row[1]
if __name__ == '__main__':
    # Demo driver (Python 2: note the `print x,` statements below).
    # Builds the demo database from scratch, then prints every match with
    # club names resolved.
    dbname = 'football.db'
    teamTable = "team"
    matchTable = "match"
    oddInitialTable = "oddInitial"
    oddBeforeTable = "oddBefore"
    oddAfterTable = "oddAfter"
    create_table_team(dbname, teamTable)
    insert_table_team(dbname, teamTable)
    create_table_match(dbname, matchTable)
    insert_table_match(dbname, teamTable, matchTable)
    # select teams by country
    # country = u"西班牙"
    # rows = select_teams_by_country(teamTable, country)
    # for row in rows:
    #     # print row[0], row[1].encode('utf8'), row[2].encode('utf8'), row[3].encode('utf8')
    #     print row
    # test = u"德国"
    # print test.encode('utf8')
    rows = select_all_matches(teamTable, matchTable)
    for row in rows:
        # Encode text columns to UTF-8 bytes for Python-2 console output;
        # integer columns (IntType from `types`) are passed through as-is.
        result = [r if type(r) is IntType else r.encode('utf8') for r in row]
        # print result
        for r in result:
            print r,
        print
    # select match by date
    # select match
    # try:
    #     drop_tables(dbname)
    # except:
    #     pass
    # create_tables(dbname)
    # insert(teams, country)
    # select("qq@example.com")
    # drop_tables(dbname)
f6b3a9b4cce5d128c344e7c30f11b1379a0b9a0a | Python | lukevs/charity-explorer | /utils.py | UTF-8 | 104 | 3.296875 | 3 | [] | no_license | def batch(xs, batch_size):
    # Yield consecutive slices of `xs` of length `batch_size`; the final
    # slice may be shorter when len(xs) is not a multiple of batch_size.
    for i in range(0, len(xs), batch_size):
        yield xs[i:i+batch_size]
| true |
55382a5a6283a88e6e8d0e5222bcb96f9d6b473f | Python | XomakNet/teambuilder | /experiments/experiment4/preferences_clustering.py | UTF-8 | 15,802 | 3 | 3 | [] | no_license | from math import ceil
from typing import List
from typing import Set
from experiments.experiment5.balancer import Balancer
from models.user import User
__author__ = 'Xomak'
class PreferencesClustering:
    """Clusters users into teams based on their "selected people" preferences.

    A directed graph is built where an edge u -> v means user u selected
    user v. Teams are grown from mutually-linked components first, then from
    one-way links, then singletons, and are optionally balanced afterwards
    by an external Balancer using :meth:`get_distance_between_users`.
    """
    DEBUG = False
    def __init__(self, users: Set[User], teams_number: int, need_balance: bool=True):
        """
        Instantiates algorithms.
        :param users: Set of users
        :param teams_number: Required teams number
        :param need_balance: Whether to run the Balancer on the result
        """
        self.need_balance = need_balance
        self.users = users
        self.teams_number = teams_number
    def clusterize(self) -> List[Set[User]]:
        """
        Performs clustering
        :return: List of sets of users
        """
        return self.__class__.cluster(self.users, self.teams_number, self.need_balance)
    @classmethod
    def _construct_graph(cls, users):
        """
        Construct graph, based on users set
        :param users: Given users set
        :return: dict mapping each User to its Node (edges wired both ways)
        """
        nodes = dict()
        for user in users:
            nodes[user] = Node(user)
        for current_user, node in nodes.items():
            selected_people = current_user.get_selected_people()
            for selected_user in selected_people:
                selected_user_node = nodes[selected_user]
                node.add_outbound(selected_user_node)
                selected_user_node.add_inbound(node)
        return nodes
    @classmethod
    def _construct_teams(cls, nodes, teams, team_size, only_mutual):
        """
        Goes through the graph nodes and find connected components in it, then add such sets to teams
        :param nodes: Graph with connections between users
        :param teams: Teams
        :param team_size: Maximal size of constructed teams
            (currently unused -- the size-reduction call below is commented out)
        :param only_mutual: Construct, if users has mutual link
        :return:
        """
        for user, node in nodes.items():
            sb = cls._construct_team(node, only_mutual)
            if len(sb) > 0:
                #cls._reduce_set(sb, team_size)
                teams.append(sb)
    @classmethod
    def _construct_teams_on_lonely_users(cls, nodes, teams):
        """
        Distribute all lonely users from the graph to their own team, including only themselves
        :param nodes: Graph with connections
        :param teams: Teams set, in which new teams will be added
        :return:
        """
        for user, node in nodes.items():
            # Direct access to Node._is_free (rather than is_free()) -- kept
            # as-is; Node must continue to expose this attribute name.
            if node._is_free:
                sb = set()
                sb.add(node)
                node.set_not_free()
                teams.append(sb)
    @staticmethod
    def get_distance_between_users(user1: User, user2: User):
        # Symmetric preference distance: 0.5 per direction of selection,
        # so 1.0 for a mutual pick, 0.5 for one-way, 0 for none.
        result = 0
        if user1 in user2.get_selected_people():
            result += 0.5
        if user2 in user1.get_selected_people():
            result += 0.5
        return result
    @classmethod
    def cluster(cls, users, teams_number, need_balance):
        """
        Divides users into teams_number teams
        :param need_balance: It balancing required
        :param users: Set of users
        :param teams_number: Required teams number
        :return: List of sets of users
        """
        if teams_number > len(users):
            raise ValueError("Teams number should be less or equal to users count")
        nodes = cls._construct_graph(users)
        teams = list()
        team_size = int(ceil(len(users) / teams_number))
        # First pass: only mutually-selected pairs; second pass: one-way links.
        cls._construct_teams(nodes, teams, team_size, True)
        cls._construct_teams(nodes, teams, team_size, False)
        cls._construct_teams_on_lonely_users(nodes, teams)
        # cls._balance_teams(nodes, teams, team_size, teams_number)
        # cls._divide_teams(teams, team_size, teams_number)
        result = []
        for team in teams:
            new_team = set()
            for node in team:
                new_team.add(node.get_user())
            result.append(new_team)
        if need_balance:
            b = Balancer(teams_number, result, PreferencesClustering.get_distance_between_users)
            b.balance()
        return result
    @classmethod
    def _find_set_to_merge_with(cls, teams, current_set, nodes, set_size):
        """
        Finds (or creates from existing) set of size set_size, mostly suitable to merge with current_set
        :param teams: Teams sets
        :param current_set: Set, for which we find
        :param nodes: Graph
        :param set_size: Required set size
        :return:
        """
        merge_candidates = cls._get_sets_with_nearest_size(teams, set_size, current_set)
        set_to_merge = None
        max_connections = None
        if cls.DEBUG:
            print("Looking up for set to merge with %s" % next(iter(current_set)))
        merge_candidates.sort(key=lambda x: len(x))
        for candidate in merge_candidates:
            connections_number = cls._calculate_set_connections(current_set, candidate)
            if cls.DEBUG:
                print("Candadate %s (%d) with cn: %s" % (next(iter(candidate)), len(candidate), connections_number))
            if max_connections is None or connections_number > max_connections:
                max_connections = connections_number
                set_to_merge = candidate
        # If the winner is too big, shed its worst-connected members and
        # re-home them as singleton teams.
        if len(set_to_merge) > set_size:
            cls._reduce_set(set_to_merge, set_size)
            cls._construct_teams_on_lonely_users(nodes, teams)
        return set_to_merge
    @classmethod
    def _balance_teams(cls, nodes, teams, max_team_size, teams_number):
        """
        Balances teams and reduces their count to teams_number or less than teams_number
        :param nodes: Graph with connections between users
        :param teams: Teams set
        :param max_team_size: Maximum team size
        :param teams_number: Number of team
        :return:
        """
        is_balanced = cls._check_team_set_balance(teams, max_team_size)
        while not is_balanced or len(teams) > teams_number:
            # From small teams to bigger
            teams.sort(key=lambda x: len(x))
            for current_set in teams:
                current_length = len(current_set)
                if current_length < max_team_size - 1 or (is_balanced and current_length < max_team_size):
                    if cls.DEBUG:
                        print("Trying to merge with {}".format(current_set))
                    needed_merge_size = max_team_size - current_length
                    set_to_merge = cls._find_set_to_merge_with(teams, current_set, nodes, needed_merge_size)
                    cls._merge_teams(teams, current_set, set_to_merge)
                    break
            is_balanced = cls._check_team_set_balance(teams, max_team_size)
    @classmethod
    def _find_poorest_two(cls, teams, max_team_size):
        """
        Finds at least two poorest members
        :param teams: teams set
        :param max_team_size: Maximal team size
        :return: dict with poorest members as keys and their team as values
        """
        poorest_members = dict()
        size = max_team_size
        # Scan team sizes downward until at least two candidates are found.
        while len(poorest_members) < 2:
            for current_set in teams:
                if len(current_set) == size:
                    poorest_members[cls._find_poorest_member(current_set)] = current_set
            size -= 1
        return poorest_members
    @classmethod
    def _divide_teams(cls, teams, max_team_size, required_teams_number):
        """
        Creates new teams, while total number of teams is less then required.
        New teams are formed from worst-connected members from existing full teams.
        :param teams: Teams set
        :param max_team_size: Maximal team size
        :param required_teams_number: Required number of teams
        :return:
        """
        if len(teams) < required_teams_number:
            while len(teams) < required_teams_number:
                poorest_members = cls._find_poorest_two(teams, max_team_size)
                sorted_members = list(poorest_members.keys())
                sorted_members.sort()
                member1 = sorted_members.pop()
                member2 = sorted_members.pop()
                # Remove members from their current teams
                poorest_members[member1].remove(member1)
                poorest_members[member2].remove(member2)
                t = set()
                t.add(member1)
                t.add(member2)
                teams.append(t)
    @classmethod
    def _check_team_set_balance(cls, teams, max_team_size):
        """
        Check, if all the teams are balanced.
        Balance means, that they have max_team_size or (max_team_size-1) members
        :param teams: Teams set
        :param max_team_size: Maximal team size
        :return:
        """
        for current_set in teams:
            if len(current_set) < max_team_size - 1:
                return False
        return True
    @classmethod
    def _merge_teams(cls, teams, team1, team2):
        """
        Merges team2 into team1 and removed team2 from the teams set
        :param teams: Teams set
        :param team1: Team 1
        :param team2: Team 2
        :return:
        """
        teams.remove(team2)
        teams.remove(team1)
        teams.append(team1.union(team2))
    @classmethod
    def _get_sets_with_nearest_size(cls, sets, needed_size, except_set):
        """
        Returns all sets, having size equal or less than needed_size.
        If no such set exists, returns first set with size more than needed_size.
        Except_set is not considered.
        :param sets: List of sets
        :param needed_size: Size
        :param except_set: This set will not be present in result
        :return: list of found sets
        """
        if needed_size < 1:
            raise ValueError("needed_size should be > 0: %s" % needed_size)
        sizes = dict()
        for current_set in sets:
            if current_set != except_set:
                size = len(current_set)
                if size not in sizes.keys():
                    sizes[size] = list()
                sizes[size].append(current_set)
        result = []
        size = needed_size
        # Collect everything at or below needed_size, largest sizes first.
        while size > 0:
            try:
                result += sizes[size]
            except KeyError:
                pass
            size -= 1
        size = needed_size + 1
        # Fallback: nothing small enough existed -- take the smallest larger size.
        while len(result) == 0:
            try:
                result += sizes[size]
            except KeyError:
                pass
            size += 1
        return result
    @classmethod
    def _calculate_set_connections(cls, set1, set2):
        """
        Calculates number of connections between nodes from set1 and set2
        :param set1:
        :param set2:
        :return: Number of connections
        """
        connections = 0
        for node in set1:
            connections += len(node.get_outbound().intersection(set2))
            connections += len(node.get_inbound().intersection(set2))
        return connections
    @classmethod
    def _reduce_set(cls, set_to_reduce, needed_size):
        """
        Reduces set size to the given size.
        Reducing is based on removing nodes with poorest connection number
        :param set_to_reduce: Given set
        :param needed_size: Size
        :return:
        """
        if needed_size < len(set_to_reduce):
            cls._calculate_connections_number(set_to_reduce)
            nodes_list = list(set_to_reduce)
            # Nodes sort by their connectivity rank (Node.__lt__), so the
            # worst-connected come first and are removed.
            nodes_list.sort()
            i = 0
            for current_size in range(len(set_to_reduce), needed_size, -1):
                set_to_reduce.remove(nodes_list[i])
                nodes_list[i].set_free()
                i += 1
    @classmethod
    def _find_poorest_member(cls, team):
        """
        Returns member of the team, who has minimal connectivity rank with other members in his team
        :param team: Team - users set
        :return: Poorest member
        """
        cls._calculate_connections_number(team)
        return min(team)
    @classmethod
    def _calculate_connections_number(cls, set_to_calculate):
        """
        Calculates connections number between nodes and sets these values as properties in nodes
        :param set_to_calculate:
        :return:
        """
        for node in set_to_calculate:
            node.clear_metrics()
            for neighbor in node.get_outbound():
                if neighbor not in set_to_calculate:
                    node.external_connections_number += 1
                else:
                    if neighbor in node.get_inbound():
                        node.mutual_connections_number += 1
                    else:
                        node.single_connections_number += 1
            for neighbor in node.get_inbound():
                if neighbor not in set_to_calculate:
                    node.external_connections_number += 1
                else:
                    if neighbor not in node.get_outbound():
                        node.single_connections_number += 1
    @classmethod
    def _construct_team(cls, start_node, only_mutual):
        """
        Construct team, starting from start_node and connecting another linked nodes
        :param start_node: BFS will start from this node
        :param only_mutual: Set to True, if all connections between nodes should be mutual
        :return: set of connected nodes
        """
        subgraph = set()
        queue = list()
        queue.append(start_node)
        while len(queue) > 0:
            current_node = queue.pop()
            if current_node.is_free():
                current_node.set_not_free()
                subgraph.add(current_node)
                for neighbor in current_node.get_outbound():
                    if not only_mutual or current_node in neighbor.get_outbound():
                        queue.insert(0, neighbor)
                # NOTE(review): when only_mutual is False this re-queues the
                # same outbound neighbours a second time; it looks like this
                # loop may have been intended to walk get_inbound() instead.
                # Harmless for correctness (visited nodes are skipped via
                # is_free), but worth confirming against the original intent.
                if not only_mutual:
                    for neighbor in current_node.get_outbound():
                        queue.insert(0, neighbor)
        # A single-node component is not a team; release the node so the
        # lonely-users pass can pick it up later.
        if len(subgraph) == 1:
            subgraph.pop().set_free()
        return subgraph
class Node:
    """A node in the user-preference connectivity graph.

    Attribute names (``_is_free`` and the three ``*_connections_number``
    counters) are read and written by PreferencesClustering directly, so
    they are part of this class's de-facto interface and must not change.
    """
    def __init__(self, user):
        self._user = user
        self._inbound_connections = set()
        self._outbound_connections = set()
        self._is_free = True

    def __str__(self):
        return str(self._user)

    def clear_metrics(self):
        """Reset the per-grouping connectivity counters to zero."""
        # Mutual links to members of the candidate group.
        self.mutual_connections_number = 0
        # One-way links to members of the candidate group.
        self.single_connections_number = 0
        # Links (either direction) leaving the candidate group.
        self.external_connections_number = 0

    def set_not_free(self):
        """Mark the node as claimed by a team and reset its metrics."""
        self._is_free = False
        self.clear_metrics()

    def set_free(self):
        self._is_free = True

    def is_free(self):
        return self._is_free

    def get_outbound(self):
        return self._outbound_connections

    def get_inbound(self):
        return self._inbound_connections

    def add_outbound(self, node):
        self._outbound_connections.add(node)

    def add_inbound(self, node):
        self._inbound_connections.add(node)

    def get_user(self):
        return self._user

    def __lt__(self, other):
        return self.__cmp__(other) < 0

    def __cmp__(self, other):
        """Connectivity rank difference; negative means self ranks lower."""
        mutual = self.mutual_connections_number - other.mutual_connections_number
        single = self.single_connections_number - other.single_connections_number
        # Deliberately reversed: more external connections lower the rank.
        external = other.external_connections_number - self.external_connections_number
        return 0.5 * mutual + 0.25 * single + 0.5 * external
| true |
cef6bfe882a1b5ee0ff90002817811cad8aafd44 | Python | alex-thibodeau/QB_Nebulae_V2 | /Code/nebulae/nconfig.py | UTF-8 | 373 | 2.640625 | 3 | [
"MIT"
] | permissive | import ConfigParser
class NConfig:
    """Thin read-only wrapper around the ``./nebulae.opt`` option file.

    This module targets Python 2 (it uses the ``ConfigParser`` module).
    """
    def __init__(self):
        self.config = ConfigParser.SafeConfigParser()
        self.config.read("./nebulae.opt")

    def getValue(self, section, var, defvalue):
        """Return config[section][var], or *defvalue* when it is missing
        or unreadable.

        Fixes: the bare ``except:`` (which also swallowed SystemExit and
        KeyboardInterrupt) is narrowed to ``except Exception``, and the
        Python-2-only print statement is parenthesised so the single-argument
        form behaves identically on Python 2 and 3.
        """
        try:
            val = self.config.get(section, var)
            print("config " + section + ":" + var + "=" + str(val))
        except Exception:
            val = defvalue
        return val
| true |
5390b4a844b43ba75664952cd1b998f9cd5576a8 | Python | bb13135811/Introducing_Python | /Chpater4/About None.py | UTF-8 | 420 | 3.859375 | 4 | [
"MIT"
] | permissive | thing = None
# `thing` (assigned above) is None, which is falsy, so the else branch runs.
if thing:
    print("There's something")
else:
    print("Empty")
# Distinguish None from False: `is None` is an identity test that matches
# only None itself, not other falsy values such as 0, "" or [].
if thing is None:
    print("It's nothing")
else:
    print("It's something")
def is_none(thing):
    """Print whether *thing* is None, truthy, or falsy (but not None)."""
    if thing is None:
        print("It's None")
        return
    print("It's True" if thing else "It's False")
# Exercise is_none() with one value from each category: None itself,
# booleans, a truthy number, and the falsy non-None values "", 0 and [].
for sample in (None, True, False, 11, "", 0, []):
    is_none(sample)
ac5845ffd2a9e26a1e9899408169c8911b749bb0 | Python | chandankuiry/datastructure | /fibonaccia.py | UTF-8 | 376 | 3.40625 | 3 | [] | no_license |
def fib2(n):
    """Print the Fibonacci numbers strictly less than *n* as a list.

    The series is printed rather than returned so that the script shows its
    result when invoked from the command line.

    Fixes: replaced the placeholder docstring and the Python-2-only
    ``print result`` statement with the parenthesised form, which behaves
    identically for a single argument on Python 2 and 3.
    """
    result = []
    a, b = 0, 1
    while a < n:
        result.append(a)
        a, b = b, a + b
    print(result)
if __name__ == "__main__":
    import sys
    # The first command-line argument is the exclusive upper bound.
    fib2(int(sys.argv[1]))
| true |
3c5ed526a3b68eabed2cb48899be6f8f65544a2a | Python | 0equals2/self-study | /파이썬 기본 실습/소수구하기.py | UTF-8 | 558 | 3.890625 | 4 | [] | no_license | #2부터 100까지 numbers 리스트에 넣기
numbers=[]
# Fill `numbers` with the candidates 2..99 (range's upper bound is exclusive).
for i in range(2,100,1):
    numbers.append(i)
prime=[] # list that will collect the primes found
for i in numbers:
    am_i_prime=True # assume i is prime until a divisor is found
    # To decide whether i is prime, test whether any smaller number
    # (2 .. i-1) divides it evenly; if one does, i is not prime.
    for j in range(2,i,1):
        if i%j==0:
            am_i_prime=False
            break
    # If i survived the divisor check, record it as a prime.
    if am_i_prime:
        prime.append(i)
print(prime)
| true |
40514b67de526d7b21dcfac20384d1c3159b8049 | Python | osushkov/gym_simple | /tabular_qlearner.py | UTF-8 | 2,980 | 2.953125 | 3 | [] | no_license |
import agent
import math
import numpy as np
from gym.spaces.discrete import Discrete
class TabularQLearner(agent.Agent):
    """Tabular Q-learning agent over a discretized continuous state space.

    The (Box-like) observation space is split into ``space_buckets`` equal
    buckets per dimension; Q-values live in a dense numpy table indexed by
    the bucket tuple plus the discrete action. Epsilon-greedy exploration
    and the learning rate both decay geometrically per episode.

    Bug fix: observations lying exactly on the observation space's upper
    bound (or slightly outside it) used to produce a bucket index one past
    the table and raise IndexError; indices are now clamped into range.
    """

    def __init__(self, action_space, observation_space, total_episodes, discount=0.99,
                 init_learn_rate=0.1, final_learn_rate=0.01,
                 init_egreedy=1.0, final_egreedy=0.1, space_buckets=50):
        self._learning_flag = True
        self._action_space = action_space
        self._observation_space = observation_space
        self._discount = discount
        self._init_learn_rate = init_learn_rate
        self._learn_rate = self._init_learn_rate
        # Per-episode multiplicative decay reaching final_learn_rate after
        # total_episodes episodes.
        self._learn_rate_decay = math.pow(final_learn_rate / init_learn_rate, 1.0 / total_episodes)
        self._init_egreedy = init_egreedy
        self._egreedy = self._init_egreedy
        self._egreedy_decay = math.pow(final_egreedy / init_egreedy, 1.0 / total_episodes)
        # Width of one bucket per observation dimension.
        self._bucket_size = (observation_space.high - observation_space.low) / space_buckets
        # One axis of size space_buckets per observation dimension, plus a
        # final axis over the discrete actions; small random init breaks ties.
        q_shape = (space_buckets,)*self._bucket_size.shape[0] + (action_space.n,)
        self._q_table = np.random.normal(0.0, 0.1, q_shape)
        self._last_action = None
        self._last_state = None
        if not isinstance(action_space, Discrete):
            # Kept as a warning (not an exception) to preserve behaviour.
            print("not discrete action space")

    def initialize_episode(self, episode_count):
        """Set epsilon and learning rate for the given episode number."""
        self._egreedy = self._init_egreedy * (self._egreedy_decay ** episode_count)
        self._learn_rate = self._init_learn_rate * (self._learn_rate_decay ** episode_count)

    def act(self, observation):
        """Choose an action (epsilon-greedy while learning) and remember the
        state/action pair for the next feedback() call."""
        if np.random.rand() < self._egreedy and self._learning_flag:
            action = self._random_action()
        else:
            action = self._best_action(observation)
        self._last_action = action
        self._last_state = observation
        return action

    def feedback(self, resulting_state, reward, episode_done):
        """Update the Q-table from the observed transition (one-step target)."""
        if episode_done:
            target = reward
        else:
            index = self._bucket_index(resulting_state)
            target = reward + self._discount * np.max(self._q_table[index])
        self._update_q_table(self._last_state, self._last_action, target)

    def set_learning(self, learning_flag):
        """Enable/disable exploration and learning updates."""
        self._learning_flag = learning_flag

    def _random_action(self):
        return self._action_space.sample()

    def _best_action(self, observation):
        index = self._bucket_index(observation)
        return np.argmax(self._q_table[index])

    def _bucket_index(self, observation):
        """Map a continuous observation to its bucket tuple in the Q-table."""
        index = ((observation - self._observation_space.low) / self._bucket_size).astype(int)
        # Observations exactly at the upper bound (or with numerical noise
        # beyond the limits) would index one past the table; clamp into range.
        index = np.clip(index, 0, np.array(self._q_table.shape[:-1]) - 1)
        return tuple(index)

    def _q_value(self, observation, action):
        return self._q_table[self._bucket_index(observation)][action]

    def _update_q_table(self, observation, action, target_value):
        # Move the stored Q-value a learn_rate-sized step toward the target.
        shift = target_value - self._q_value(observation, action)
        shift *= self._learn_rate
        self._q_table[self._bucket_index(observation)][action] += shift
| true |
bb4ef8f3ae265cea86dad4c2ecaaef8862ba1fc4 | Python | riffelllab/GCMS_peakid | /integrate_whole_processed_csv_directory_to_one_csv.py | UTF-8 | 4,691 | 3.078125 | 3 | [] | no_license | import sys #loading a default module, sys
#(sys allows you to pass in file instead of coding the file name into the script)
import csv #loads default module, csv; allows Python to read .csv files
import glob
import os.path
import os
import dicttocsv_csvtolist_v2 as conv
def get_name_and_area_from_gcms(csv_filename):
    """
    Read processed GCMS data from the utf8-encoded csv file named by
    *csv_filename* and return a list of [chemical name, peak area] pairs
    for that sample (areas come from column 2, names from column 3).
    """
    areas = [float(raw) for raw in conv.read_unicode_csv_col_to_list(csv_filename, 2)]
    names = conv.read_unicode_csv_col_to_list(csv_filename, 3)
    pairs = []
    for position, area in enumerate(areas):
        pairs.append([names[position], area])
    return pairs
def get_names_and_name_to_area(gcms_filepath):
    """Collapse one sample file's peaks by chemical name.

    Returns (list_of_names, name_to_area): the unique chemical names in
    first-seen order, and a dict mapping each name to its total area. When
    several peaks were identified as the same chemical (e.g. closely related
    compounds), their areas are summed.
    """
    list_of_names = []
    name_to_area = {}
    for name, area in get_name_and_area_from_gcms(gcms_filepath):
        if name in name_to_area:
            name_to_area[name] += area
        else:
            list_of_names.append(name)
            name_to_area[name] = area
    return list_of_names, name_to_area
def make_list_of_names_and_name_to_area_dictionary_for_all_csvs(directory):
    """Process every ``*.csv`` file in *directory*.

    Returns a pair: a dict keyed by file path whose values are that file's
    name->area dict, and a master list of every unique chemical name seen
    across all files (in order of first appearance).
    """
    filepath_to_name_to_area_dictionaries_dictionary = {}
    master_list_of_names = []
    for csv_file in glob.glob(directory + "*.csv"):
        names, name_to_area = get_names_and_name_to_area(csv_file)
        for name in names:
            if name not in master_list_of_names:
                master_list_of_names.append(name)
        filepath_to_name_to_area_dictionaries_dictionary[csv_file] = name_to_area
    return filepath_to_name_to_area_dictionaries_dictionary, master_list_of_names
def get_dictionary_of_names_and_list_of_areas_in_order(filepath_to_name_to_area_dictionaries_dictionary, master_list_of_names, list_of_filenames):
    """Build the wide table: {chemical name -> [area per sample file]}.

    The area list for each name follows the order of *list_of_filenames*;
    files that lack the chemical contribute 0.
    """
    table = {}
    for name in master_list_of_names:
        table[name] = [
            filepath_to_name_to_area_dictionaries_dictionary[filename].get(name, 0)
            for filename in list_of_filenames
        ]
    return table
def make_csv_of_name_area(csv_filename, name_to_areas_dictionary, master_list_of_names, list_of_filepaths):
    """Write the aggregated table to *csv_filename*.

    Header: "Chemical_Name" followed by one column per sample (the file's
    base name without directory or extension). Each row is a chemical name
    followed by its integer areas in sample order.

    Bug fix: the original located the base name by searching for "/" and
    ".csv" by hand, which raised on paths with no directory part and broke
    on Windows separators; os.path.basename/splitext handle both.
    """
    header = ["Chemical_Name"]
    for filepath in list_of_filepaths:
        base = os.path.basename(filepath.rstrip(os.sep))
        filename = os.path.splitext(base)[0]
        header.append(filename)
    rows = []
    for name in master_list_of_names:
        row = [name]
        for area in name_to_areas_dictionary[name]:
            # NOTE: `unicode` is the Python 2 builtin; this module is
            # Python 2 code throughout.
            row.append(unicode(int(area)))
        rows.append(row)
    conv.write_unicode_lists_into_csv_listsasrows(csv_filename, rows, header)
def integrate_csvs_to_one_by_name(csv_output_file, directory_containing_csv_files):
    """Aggregate every processed csv in the directory into one wide csv:
    one column per sample file and one row per chemical name.

    Fix: the Python-2-only ``print "..."`` statements are parenthesised so
    the single-argument form behaves identically on Python 2 and 3.
    """
    print("start")
    filepath_to_name_to_area_dictionaries_dictionary, master_list_of_names = make_list_of_names_and_name_to_area_dictionary_for_all_csvs(directory_containing_csv_files)
    list_of_filenames = filepath_to_name_to_area_dictionaries_dictionary.keys()
    table = get_dictionary_of_names_and_list_of_areas_in_order(filepath_to_name_to_area_dictionaries_dictionary, master_list_of_names, list_of_filenames)
    make_csv_of_name_area(csv_output_file, table, master_list_of_names, list_of_filenames)
    print("done")
| true |
5acc69a30e4f357bcd172d1a83ad05aadd89d753 | Python | Pratiknarola/PyTricks | /objgetnamedattribute.py | UTF-8 | 150 | 3.609375 | 4 | [
"MIT"
] | permissive | #! /usr/bin/env python3
""" Return the value of the named attribute of an object """
class obj():
    """Demo class holding a single class attribute."""
    attr = 1

# Look the attribute up dynamically by name with getattr().
foo = "attr"
value = getattr(obj, foo)
print(value)
| true |
204c3eb660d341b6cf228857a6bab56ec0b4f8c6 | Python | Stanford-PERTS/yosemite | /unit_testing/test_example.py | UTF-8 | 1,637 | 3.46875 | 3 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | """Provide example code for writing unit tests."""
import unittest
@unittest.skip("Remove this line in test.example.py to see examples.")
class ExampleTest(unittest.TestCase):
    """Collection of unit test examples, showcasing features.
    Read more about the kinds of assertions here:
    https://docs.python.org/2/library/unittest.html#unittest.TestCase
    """
    def test_true(self):
        """This should succeed but doesn't."""
        # NOTE(review): Python 2 print statement -- this demo module targets
        # Python 2 (see the /2/ docs link above).
        print "@@@ Look! Print statements are copied into failure details!"
        my_var = 2
        self.assertEqual(1, my_var,
                         "Handy debugging message: {}".format(my_var))
    @unittest.skip("demonstrating skipping")
    def test_skip_me_1(self):
        """A test to skip."""
        self.fail("shouldn't happen")
    @unittest.skipIf(1 > 0, "this is a reason why we skipped this test")
    def test_skip_me_2(self):
        """Another test to skip."""
        self.fail("shouldn't happen")
    @unittest.skipUnless(0 > 1, "this is a reason why we skipped this test")
    def test_skip_me_3(self):
        """Another test to skip."""
        self.fail("shouldn't happen")
    @unittest.expectedFailure
    def test_fail(self):
        """This test is designed to fail."""
        self.fail("this is supposed to fail")
    @unittest.expectedFailure
    def test_succeed(self):
        """This test is expected to fail but doesn't."""
        # Note that returning False doesn't make the test fail.
        # (It is reported as an "unexpected success".)
        return False
    def test_raise_exception(self):
        """This test raises an exception."""
        raise Exception("This is the exception message.")
| true |
f5646458f601449714d975b39922e55319ee6a03 | Python | GoncaloKLopes/rnndissect | /model/bisarnn.py | UTF-8 | 2,948 | 2.5625 | 3 | [] | no_license | import torch
import torch.nn as nn
class BinarySARNN(nn.Module):
    """Binary sentiment classifier: embedding -> RNN/LSTM encoder -> linear.

    Bug fix: the original defined ``forward`` twice, so the second
    definition (plain, unpacked sequences) silently overwrote the first
    (packed padded sequences with ``text_lengths``). The two are merged
    into one method with an optional ``text_lengths`` argument, preserving
    the previously-effective single-argument call while restoring the
    packed-sequence path.
    """

    def __init__(self, config):
        """Build the model from a config object exposing d_hidden,
        vocab_size, d_embed, batch_size, n_layers, dropout, bidir, arch
        ("RNN" or "LSTM") and, for RNN, nonlin ("tanh" or "relu")."""
        super(BinarySARNN, self).__init__()
        self.hidden_dim = config.d_hidden
        self.vocab_size = config.vocab_size
        self.embed_dim = config.d_embed
        self.batch_size = config.batch_size
        self.num_labels = 2
        self.num_layers = config.n_layers
        self.dropout = config.dropout
        self.bidir = config.bidir
        self.embed = nn.Embedding(self.vocab_size, self.embed_dim)
        self.arch = config.arch
        if config.arch == "RNN":
            if config.nonlin not in ("tanh", "relu"):
                raise ValueError("Invalid activation function", config.nonlin,
                                 ". Expected \"tanh\" or \"relu\".")
            self.nonlin = config.nonlin
            self.rnn = nn.RNN(input_size=self.embed_dim,
                              hidden_size=self.hidden_dim,
                              num_layers=self.num_layers,
                              dropout=self.dropout,
                              nonlinearity=self.nonlin,
                              bidirectional=self.bidir)
        elif config.arch == "LSTM":
            self.rnn = nn.LSTM(input_size=self.embed_dim,
                               hidden_size=self.hidden_dim,
                               num_layers=self.num_layers,
                               dropout=self.dropout,
                               bidirectional=self.bidir)
        else:
            raise ValueError("Invalid architecture string" + config.arch)
        # Bidirectional encoders concatenate both directions' final states.
        self.hidden_to_label = nn.Linear(self.hidden_dim * (1 + self.bidir), self.num_labels)

    def forward(self, batch, text_lengths=None):
        """Classify a batch of token-id sequences.

        :param batch: LongTensor of shape (seq_len, batch).
        :param text_lengths: optional per-sequence lengths; when given, the
            embeddings are packed so padding does not affect the final state.
        :return: logits of shape (batch, 2).
        """
        embeddings = self.embed(batch)
        if text_lengths is not None:
            rnn_input = nn.utils.rnn.pack_padded_sequence(embeddings, text_lengths)
        else:
            rnn_input = embeddings
        if self.arch == "RNN":
            rnn_out, hidden = self.rnn(rnn_input)
        else:  # "LSTM" (the constructor rejects anything else)
            rnn_out, (hidden, cell) = self.rnn(rnn_input)
        if self.bidir:
            # Last layer's forward and backward final states, concatenated.
            hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1).squeeze(0)
        elif self.num_layers > 1:
            hidden = hidden[-1]
        else:
            hidden = hidden.squeeze(0)
        return self.hidden_to_label(hidden)
| true |
56f6de42acc68d0c5cfc8a1e99c30528c62c4cf4 | Python | royerguerrero/hackmer | /products/templatetags/product_extras.py | UTF-8 | 439 | 2.625 | 3 | [] | no_license | """Product Tags Extras"""
# Django
from django import template
register = template.Library()
@register.filter(name='format_to_cop')
def format_to_cop(value):
    """Render a numeric *value* as Colombian pesos, e.g. 1234567 -> '$ 1,234,567 COP'."""
    return f'$ {value:,.0f} COP'
@register.filter(name='get_main_picture')
def get_main_picture(obj):
    # Template filter: delegate to the object's own get_main_picture().
    # Assumes `obj` is a model instance exposing that method -- TODO confirm
    # against the Product model.
    return obj.get_main_picture()
@register.filter(name='get_discount_price')
def get_discount_price(price, discount):
    """Template filter: apply a percentage *discount* to *price*."""
    rebate = (price * discount) / 100
    return price - rebate
| true |
bfa4eb651f7920a94c9c785e8d5745a704c91b9c | Python | HARI-VELIVELA/leetcode-python | /66.py | UTF-8 | 608 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[27]:
"""Given a non-empty array of digits representing a non-negative integer, plus one to the integer.
The digits are stored such that the most significant digit is at the head of the list, and each element in the array contain a single digit.
You may assume the integer does not contain any leading zero, except the number 0 itself."""
class Solution(object):
def plusOne(self, digits):
digits = [str(i) for i in digits]
digits = int("".join(digits))
digits = digits+1
return [int(i) for i in str(digits)]
# In[ ]:
| true |
7b1212bc6c38cb14048dcd2a9abb6101a68936c0 | Python | VitamintK/AlgorithmProblems | /google-code-jam/2018/round1a/B_bit_party.py | UTF-8 | 1,058 | 3.390625 | 3 | [] | no_license | #could this just be a binary search problem?
#seems easy for a codejam rnd1b...
#but i don't see anything wrong with this approach
import math
class Cashier:
    """One cashier in the Bit Party problem.

    m -- maximum bits this cashier can sell,
    s -- seconds per bit,
    p -- fixed setup/payment time in seconds.
    """
    def __init__(self, m, s, p):
        self.m = m
        self.s = s
        self.p = p

    def __repr__(self):
        # Debuggability aid; the original class had no useful repr.
        return "Cashier(m=%r, s=%r, p=%r)" % (self.m, self.s, self.p)
# Module-level list of Cashier objects for the current test case;
# rebuilt by the input loop below and read by is_possible().
cashiers = []
def is_possible(time):
    """Can B bits be bought within `time` using at most R of the global cashiers?

    Each cashier can serve at most min((time - p) // s, m) bits (never
    negative); greedily take the R largest capacities and compare to B.
    Reads the module globals `cashiers`, `R` and `B`.
    """
    capacities = []
    for cashier in cashiers:
        capacities.append(max(0, min((time - cashier.p) // cashier.s, cashier.m)))
    capacities.sort(reverse=True)
    return sum(capacities[:R]) >= B
# Google Code Jam 2018 Round 1A problem B driver: for each test case,
# binary-search the minimal time for which is_possible() holds.
T = int(input())
for t in range(T):
    # R = robots (max simultaneous cashiers), B = bits to buy, C = cashiers.
    R, B, C = map(int, input().split())
    cashiers = []
    for c in range(C):
        cashiers.append(Cashier(*[int(x) for x in input().split()]))
    # Upper bound: worst case is max p + max m * max s, with generous slack.
    maxint = 1000000001
    hi = 1000*maxint + 1000*maxint*maxint + 1001 #for good measure
    lo = 0
    # Standard binary search over the monotone predicate is_possible.
    while(hi>lo):
        #print(lo, hi)
        mid = (hi+lo)//2
        if is_possible(mid):
            #print(mid, "worked")
            hi = mid
        else:
            #print(mid, "didn't work")
            lo = mid+1
    print("Case #{}: {}".format(t+1, hi))
| true |
4c29b73963df28011e6a3a925051ecafdd4a1878 | Python | math4youbyusgroupillinois/Python-Integration | /apitemplateio_lib.py | UTF-8 | 1,911 | 2.640625 | 3 | [
"MIT"
] | permissive | import requests, json
class APITemplateIO:
    """Minimal client for the APITemplate.io REST API.

    All requests authenticate via the ``X-API-KEY`` header.
    """

    def __init__(self, api_key):
        """Store the API key used on every request."""
        self.api_key = api_key

    def download(self, download_url, save_to):
        """Stream the file at `download_url` into the local path `save_to`."""
        with open(save_to, 'wb') as output:
            response_get = requests.get(download_url, stream=True)
            for chunck in response_get.iter_content():
                output.write(chunck)

    def create_pdf(self, template_id, data, pdf_file_path):
        """Render `template_id` with `data` and save the resulting PDF."""
        return self.create(template_id, data, pdf_file_path, None)

    def create_image(self, template_id, data, jpeg_file_path, png_file_path):
        """Render `template_id` with `data`; save the JPEG and optional PNG."""
        return self.create(template_id, data, jpeg_file_path, png_file_path)

    def create(self, template_id, data, save_to_1, save_to_2):
        """Create a document from a template.

        Returns True when the API reports success and the file(s) were
        downloaded, False otherwise.
        """
        response = requests.post(
            F"https://api.apitemplate.io/v1/create?template_id={template_id}",
            headers = {"X-API-KEY": F"{self.api_key}"},
            json = data
        )
        resp_json = json.loads(response.content)
        if resp_json['status'] == 'success':
            self.download(resp_json['download_url'], save_to_1)
            # A PNG download URL is only present for image templates.
            if 'download_url_png' in resp_json and save_to_2 is not None:
                self.download(resp_json['download_url_png'], save_to_2)
            return True
        return False

    def get_account_information(self):
        """Return the account-information payload, or None on failure."""
        response = requests.get(
            F"https://api.apitemplate.io/v1/account-information",
            headers = {"X-API-KEY": F"{self.api_key}"}
        )
        resp_json = json.loads(response.content)
        if resp_json['status'] == 'success':
            return resp_json
        return None

    def list_templates(self):
        """Return the raw list-templates payload as parsed JSON."""
        response = requests.get(
            F"https://api.apitemplate.io/v1/list-templates",
            headers = {"X-API-KEY": F"{self.api_key}"}
        )
        # Fix: removed leftover debug print of the raw response body.
        resp_json = json.loads(response.content)
        return resp_json
| true |
43b40318cea159c04b5756b1ebda010c54e26d2f | Python | youwi/ApiTestPlatform | /test/tmp2.py | UTF-8 | 865 | 2.84375 | 3 | [] | no_license | import datetime
from Common.utils.DataMaker import DataMaker
t = datetime.datetime.now()
t.isoformat()
print(t.isoformat())
print(t.isoweekday())
print(t.timestamp())
TPL_TIME_ISO = "%Y-%m-%dT%H:%M:%S%z"
print((datetime.date.today() + datetime.timedelta(days=0)).strftime(TPL_TIME_ISO))
print((datetime.datetime.now()).strftime(TPL_TIME_ISO))
# print(DataMaker.number())
print(t.utcnow())
from datetime import datetime, tzinfo, timedelta
class simple_utc(tzinfo):
    """Fixed-offset tzinfo implementing UTC (zero offset, no DST)."""

    def tzname(self, dt):
        # Fix: datetime passes the instant positionally, so the original
        # keyword-only signature ``tzname(self, **kwargs)`` would raise
        # TypeError whenever the datetime machinery called it.
        return "UTC"

    def utcoffset(self, dt):
        return timedelta(0)

    def dst(self, dt):
        # UTC never observes daylight saving time; the tzinfo base class
        # would otherwise raise NotImplementedError from dst().
        return timedelta(0)
print(datetime.utcnow().replace(tzinfo=simple_utc()).isoformat())
# '2014-05-16T22:51:53.015001+00:00'
print(DataMaker.number(1, 2))
print(DataMaker.email())
for i in range(1, 100):
print(DataMaker.telephone())
for i in range(1, 100):
print(DataMaker.chinese_name())
| true |
b5f2923eaadb96f8c32b69112555979173136347 | Python | LuFernandez/PASA | /TP1/codigo/eigenvals.py | UTF-8 | 1,036 | 2.875 | 3 | [] | no_license | import channel_simulator
import numpy as np
import scipy
from matplotlib import pyplot as plt
import math
import pandas as pd
def autocorr(x):
    """Biased sample autocorrelation of `x` for non-negative lags."""
    full_corr = np.correlate(x, x, mode='full')
    normalized = full_corr / len(x)
    # np.correlate in 'full' mode returns lags -(N-1)..(N-1);
    # keep only the non-negative half, starting at lag 0.
    midpoint = len(normalized) // 2
    return normalized[midpoint:]
sims = 5000
N = 4
samples_per_bit = 16
n_bits = math.ceil(N/samples_per_bit)
mus_max = np.zeros(sims)
for sim in range(sims):
if not sim % 50:
print("simulacion ", sim+1)
x = channel_simulator.generate_random_sequence(n_bits)
u = channel_simulator.tx_channel(x)
r = autocorr(u[:N])
R = scipy.linalg.toeplitz(r)
eigvals = np.linalg.eigvals(R)
# mus_max[sim] = max(eigvals)
mus_max[sim] = 2 / max(eigvals)
mus_max.sort()
plt.scatter(range(sims), mus_max)
plt.grid(which='both')
plt.xlabel('Número de simulación')
plt.ylabel('$\mu$ máximo')
plt.show()
plt.hist(x=mus_max, density=True, rwidth=0.95)
plt.xlabel('$\mu$ máximo')
plt.ylabel('Frecuencia (normalizado)')
plt.grid(which='both')
plt.show()
df = pd.DataFrame(
{
'mu': mus_max
}
)
df.to_csv(path_or_buf='eigvals_alpha=1e-3.csv', index=False) | true |
bba0be5dc748b67fecd2bcbe55965dfc07c866fa | Python | dsimpson1980/project_euler | /problem20.py | UTF-8 | 397 | 3.890625 | 4 | [] | no_license | """Factorial digit sum
Problem 20
n! means n x (n - 1) x ... x 3 x 2 x 1
For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
Brute force
"""
def factorial_digit_sum(n):
    """Return the sum of the decimal digits of n! (brute force)."""
    prod = 1
    for k in range(1, n + 1):
        prod *= k
    return sum(int(c) for c in str(prod))


# Decomposed into a reusable, testable helper; the single-argument
# print(...) form behaves identically under Python 2 and 3.
total = factorial_digit_sum(100)
print('Total = %s' % total)
| true |
48b1efe210f406749dcbcc0dcb7757b72e322d17 | Python | Fama18/Programmation_Python | /Nombre_Secret.py | UTF-8 | 426 | 4.375 | 4 | [] | no_license |
def nombre_secret() :
    """Number-guessing game: one player sets a secret number, a second
    player guesses it with higher/lower hints; the attempt count is shown."""
    a = int(input("Donner le nombre secret : "))
    b = int(input("Donner le nombre déviné par le second utilisateur : "))
    i = 1  # number of guesses made so far (the first guess counts)
    while a != b :
        if b > a :
            print("Trop grand")
        else :
            print("Trop petit")
        i += 1
        b = int(input("Donner un autre nombre : "))
    print("vous avez trouvé le nombre aprés", i, " tentatives")
# Run the game immediately when the script is executed.
nombre_secret()
742a35fbcec57c68d928a36577d6c50bdf66b23f | Python | luxiaolei930/python3_book | /第六章/6.3 文本处理和分析.py | UTF-8 | 2,376 | 3.25 | 3 | [] | no_license | # 导入json库
import json
import nltk
from nltk import FreqDist
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'w+')
with open("./reviews.json", encoding="utf8") as f:
data = json.load(f)
# 获取所有评论内容
reviews = [i["review"] for i in data]
########################### 去除停用词与标点符号 ##############################
# 打开英文停用词表en.json文件
with open("./en.json", encoding="utf-8") as f:
stopwords = json.load(f)
# 英文标点符号
puncation = [".", ",", "?", "'", "/", "\\", "\"", ">", "<", "=", "-", "(", ")", "*", "&", "^", "#", "@", "!", " ``"]
stopwords.extend(puncation)
# 利用sent_tokenize()进行分句
result = []
for review in reviews:
review_docs = nltk.sent_tokenize(review)
# 存放去除停用词后的句子
de_sw_result = []
for review_doc in review_docs:
review_tokens = [word.lower() for word in nltk.word_tokenize(review_doc) if word.lower() not in stopwords]
# 去除停用词
de_sw_result.append(review_tokens)
result.append(de_sw_result)
########################### 词频统计 ##############################
# flatten()将多维列表转为一维列表
def flatten(a):
    """Recursively flatten arbitrarily nested lists into one flat list."""
    if isinstance(a, list):
        flat = []
        for element in a:
            flat.extend(flatten(element))
        return flat
    # A non-list value becomes a single-element list.
    return [a]
flatten_result = flatten(result)
text=nltk.text.Text(flatten_result)
fd = FreqDist(text)
fd.plot(20)
########################### 绘制词云图 ##############################
# 导入wordcloud库
import wordcloud
# 初始化WordCloud对象
w = wordcloud.WordCloud()
# 将列表转化为字符串
text_str = " ".join(flatten_result)
# 向对象w中加载text_str
w.generate(text_str)
# 导出png文件
w.to_file("wc.png")
print("已导出词云wc.png")
########################### 词性标注 ##############################
print(nltk.pos_tag(flatten_result))
########################### 关键字检索 ##############################
print(text.concordance("prison", width=100))
########################### 统计每个评论的形符类符比 ##############################
def cal_ttr(words):
    """Return the type/token ratio of `words` as a percentage."""
    unique_words = set(words)  # the distinct "types"
    return len(unique_words) / len(words) * 100
ttr_list = []
for review in reviews:
word_token = nltk.word_tokenize(review)
ttr_list.append(cal_ttr(word_token))
print(ttr_list) | true |
129ae1b6b873814db30d2ce6895b658ef26936d9 | Python | mhq1065/respose | /homework_8_py/11.2.py | UTF-8 | 1,775 | 3.296875 | 3 | [] | no_license | words = ['traceback','define','identifier','valid','invalid','syntax','indent','unexpected','indices']
expla = ['追溯','定义','标识符','有效的','无效的','语法','缩进','以外的','下标index的复数形式']
def bubleSort(words, expla):
    """In-place bubble sort of `words`, keeping `expla` aligned by index."""
    n = len(words)
    for done in range(n - 1):
        # After each pass the largest remaining word bubbles to the end.
        for k in range(n - 1 - done):
            if words[k] > words[k + 1]:
                words[k], words[k + 1] = words[k + 1], words[k]
                expla[k], expla[k + 1] = expla[k + 1], expla[k]
def insertSort(words, expla):
    """In-place insertion sort of `words`, keeping `expla` aligned by index.

    Bug fix: the original shifted only `words` while leaving `expla`
    untouched, so meanings fell out of sync with their words (unlike the
    sibling bubleSort, which moves both lists together).
    """
    for i in range(1, len(words)):
        word, mean = words[i], expla[i]
        j = i - 1
        # Shift larger entries (and their meanings) one slot to the right.
        while j >= 0 and word < words[j]:
            words[j + 1] = words[j]
            expla[j + 1] = expla[j]
            j -= 1
        words[j + 1] = word
        expla[j + 1] = mean
#简排不写了
def found(L, key):
    """Binary search in sorted list `L`; return the index of `key` or None."""
    lo, hi = 0, len(L) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if L[mid] == key:
            return mid
        if key < L[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return None
def add(word, mean, L, M):
    """Insert `word` and its meaning into the sorted parallel lists L and M."""
    for pos, existing in enumerate(L):
        if existing >= word:
            L.insert(pos, word)
            M.insert(pos, mean)
            return
    # Greater than everything present: goes at the end of both lists.
    L.append(word)
    M.append(mean)
def add2(word, mean, L, M):
    """Textbook variant of add(): append, then bubble leftwards into place."""
    L.append(word)
    M.append(mean)
    pos = len(L) - 1
    # Swap the new entry towards the front until order is restored.
    while pos > 0 and L[pos - 1] > L[pos]:
        L[pos - 1], L[pos] = L[pos], L[pos - 1]
        M[pos - 1], M[pos] = M[pos], M[pos - 1]
        pos -= 1
# Interactive dictionary: sort the word/meaning lists once, then look words
# up forever, adding unknown words (with a user-supplied meaning) on the fly.
bubleSort(words,expla)
print(words)
print(expla)
while 1:
    word = str(input("请输入单词"))
    n=found(words,word)  # binary search; None when the word is absent
    if n!=None:
        print("该单词的意思是",expla[n])
    else:
        print("没有该单词")
        mean=input("该单词的已加入字典,请输入它的词义 ")
        add(word,mean,words,expla)  # sorted insert keeps `found` usable
| true |
5cf1840967d86cb03b217d0663bb09b8c43684fb | Python | AidanDai/graduation-design | /paper/reference/run.py | UTF-8 | 498 | 2.5625 | 3 | [
"MIT"
] | permissive | import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
ENP = 32
CWP = 37
CPP = 38
ENG = 40
GPIO.setup(ENP, GPIO.OUT)
GPIO.setup(ENG, GPIO.OUT)
GPIO.setup(CWP, GPIO.OUT)
GPIO.setup(CPP, GPIO.OUT)
def forward(delay):
    """Pulse the output pins twice with `delay` seconds between steps.

    NOTE(review): only CWP (w2) differs between the two calls; presumably
    this produces one motor step per cycle -- confirm against the wiring.
    """
    setStep(1, 0, 1, 0)
    time.sleep(delay)
    setStep(1, 1, 1, 0)
    time.sleep(delay)
def setStep(w1, w2, w3, w4):
    """Write the four logic levels to the ENP, CWP, CPP and ENG output pins."""
    GPIO.output(ENP, w1)
    GPIO.output(CWP, w2)
    GPIO.output(CPP, w3)
    GPIO.output(ENG, w4)
while True:
forward(0.5)
GPIO.cleanup()
| true |
bb955fe63b42616d4c21ed609bd7f76ff8957c9a | Python | jinkstudy/python-study | /cWebConn/3_beautifulsoup_class/Ex02_attribute.py | UTF-8 | 608 | 3.59375 | 4 | [] | no_license | from bs4 import BeautifulSoup
html = """
<html>
<body>
<ul>
<li><a href='http://www.naver.com'>네이브</a></li>
<li><a href='http://www.daum.net'>다아음</a></li>
</ul>
</body>
</html>
"""
#리스트의 내용과 해당 경로 추출하기
#attr['속성명'] : 해당 속성값을 얻어주는 함수
'''[출력결과]
네이브>>>http://www.naver.com
다아음>>>http://www.daum.net'''
soup = BeautifulSoup(html,'html.parser')
h1 = soup.find_all('a')
for h in h1:
print(h)
print(h.text,'>>>',h.attrs['href'])
| true |
5a54c98dafa3cf877bc5149417a50ce9ca609715 | Python | rhdp0/e-Gibbs | /e-Gibbs.py | UTF-8 | 6,556 | 2.78125 | 3 | [] | no_license | # e-Gibbs v.1.0
# Desenvolvedor: Rafael Henrique Dias Pereira
import PySimpleGUI as sg
sg.theme('LightBrown13')
class TelaPython:
def __init__(self):
#Layout
layout = [
[sg.Text('Arquivo',size=(8,0)),sg.Input(size=(30,0),key='nome'),sg.FileBrowse()],
[
sg.Text('Número de Moléculas',size=(8,0)),
sg.Input(size=(7,0),key='nmol'),
sg.InputOptionMenu((
'298', '200', '210', '220', '230', '240', '250', '260', '270', '280', '290', '300', '310', '320', '330', '340', '350', '360', '370', '380', '390', '400'),key='Temperatura'),
sg.Text('°K')
],
[sg.Text('Molécula',size=(8,0)),sg.Input(size=(30,0),key='mol'),sg.InputOptionMenu(('Reagente', 'Produto'),key='P-R')],
[sg.Button('Inserir'),sg.Button('Calcular'),sg.Button('Reiniciar')],
[sg.Text('Molécula',size=(10,0)),sg.Text('Entalpia em kJ/mol',size=(17,0)),sg.Text('Entropia em kJ/ K/mol',size=(20,0))],
[sg.Output(size=(100,20),key = '_output_')]
]
#Janela
self.janela = sg.Window("e-Gibbs v.1.0").layout(layout)
def iniciar(self):
produto = [] #Receberá valores do produto
reagente = [] #Receberá valores do reagente
while True:
self.button, self.values = self.janela.Read()
mol = self.values['mol'].upper()
nmol = int(self.values['nmol'])
temperatura = self.values['Temperatura']
tempList = ['298', '200', '210', '220', '230', '240', '250', '260', '270', '280', '290', '300', '310', '320', '330', '340', '350', '360', '370', '380', '390', '400']
temp = tempValue(tempList, temperatura)
result = openArchive(self.values['nome'], temp)
deltaH = float(result[1]) * 4.18 #ΔH * 4.18
deltaS = (float(result[4])/1000) * 4.18 #(ΔS/1000) * 4.18
#Reinicia os valores para novos cálculos
if self.button == 'Reiniciar':
produto = []
reagente = []
self.janela['_output_'].update('')
formProdutoEnta = []
formReagenteEnta = []
formProdutoEntr = []
formReagenteEntr = []
#Ação ao clicar no botão Calcular
if self.button == 'Calcular':
if len(produto) > 1:
for i in range(len(produto)-1):
formProdutoEnta = (produto[i][1]*produto[i][2]) + produto[i+1][1]*produto[i+1][2] #n*KCAL/MOL + KCAL/MOL entalpia
formProdutoEntr = (produto[i][1]*produto[i][3]) + produto[i+1][1]*produto[i+1][3] #n*CAL/K/MOL + CAL/K/MOL entropia
else:
formProdutoEnta = produto[0][1]*produto[0][2] #n*KCAL/MOL
formProdutoEntr = produto[0][1]*produto[0][3] #n*CAL/K/MOL
if len(reagente) > 1:
for i in range(len(reagente)-1):
formReagenteEnta = (reagente[i][1]*reagente[i][2]) + reagente[i+1][1]*reagente[i+1][2] #n*KCAL/MOL + KCAL/MOL entalpia
formReagenteEntr = (reagente[i][1]*reagente[i][3]) + reagente[i+1][1]*reagente[i+1][3] #n*CAL/K/MOL + CAL/K/MOL entropia
else:
formReagenteEnta = reagente[0][1]*reagente[0][2] #n*KCAL/MOL
formReagenteEntr = reagente[0][1]*reagente[0][3] #n*CAL/K/MOL
entalpia = float(formProdutoEnta) - float(formReagenteEnta) #(Soma dos produtos - soma dos reagentes)
entropia = float(formProdutoEntr) - float(formReagenteEntr) #(Soma dos produtos - soma dos reagentes)
deltaG = entalpia - (int(temperatura)*entropia) #Temperatura * entropia
termoEntropico = -int(temperatura)*(float('%.5f'%entropia)) # -(Temperatura) * entropia
print('')
print ('ΔrH: ', '%.5f'%entalpia,'kJ/mol\n')
print ('ΔrS: ', '%.5f'%entropia,'kJ/mol', ' '*5, 'Termo Entrópico: ','%.5f'%termoEntropico,'kJ/mol\n')
print ('ΔG: ', '%.5f'%deltaG,'kJ/mol')
print("__"*50)
#Evento ao clicar no botão Inserir
if self.button == 'Inserir':
if self.values['P-R'] == 'Produto':
produto.append([mol, nmol, deltaH, deltaS])
print('')
print(mol, ' '*(21 - len(mol)), '%.5f'%deltaH, ' '*28, '%.5f'%deltaS, ' '*30, 'Produto')
print('__'*50)
else:
reagente.append([mol, nmol, deltaH, deltaS, 'Reagente'])
print('')
print(mol, ' '*(21 - len(mol)), '%.5f'%deltaH, ' '*28, '%.5f'%deltaS, ' '*30, 'Reagente')
print('__'*50)
def openArchive(string, temp):
arquivo = open(string, 'r') #Abre o arquivo .out
text = [] #Vai armazenar todo o conteúdo do arquivo
#Adiciona o conteúdo do arquivo na variável text
for line in arquivo:
text.append(line)
#Descobrir quantos átomos tem a molécula
listAtm = [] #Vai conter a linha exata onde é informado o último átomo
stringAtm = ''
#Identifica a linha exata onde é informado o último átomo
for i in text[len(text) - 13]:
if i in '-1234567890.':
stringAtm += i
else:
listAtm.append(stringAtm)
stringAtm = ''
#Captura a str que diz o número de átomos e transforma em int
cont = 0
nAtm = ''
while cont < 7 and nAtm == '':
if listAtm[cont] == '':
cont += 1
else:
nAtm = int(listAtm[cont])
# Pega o ΔH e o ΔS da molécula a 298°
listAux = []
string = ''
for i in text[len(text) - nAtm - 159 + (temp*6)]:
if i in '-1234567890.':
string += i
else:
listAux.append(string)
string =''
#Elimina as str vazias
listMaster = []
for i in listAux:
if i != '':
listMaster.append(i)
arquivo.close()
return listMaster
def tempValue(lista, valor):
    """Return the index of the first occurrence of `valor` in `lista`, or None."""
    for index, item in enumerate(lista):
        if item == valor:
            return index
    return None  # explicit: value not present
tela = TelaPython()
tela.iniciar()
| true |
95d9c2def6e77a779af2cb1f2cef8e41362f6e53 | Python | alainlou/leetcode | /p1032.py | UTF-8 | 1,005 | 3.390625 | 3 | [] | no_license | from DS.Trie import TrieNode
class StreamChecker:
    """LeetCode 1032: check whether any suffix of a letter stream is a word.

    Words are stored reversed in a trie so the stream can be matched
    newest-letter-first against word endings.
    """

    def __init__(self, words: List[str]):
        self.root = TrieNode()
        self.cand = []  # most recent stream letters, newest first
        self.maxlen = float('-inf')
        for word in words:
            self.insert(word)
            self.maxlen = max(self.maxlen, len(word))

    def insert(self, s):
        """Add `s`, reversed, to the trie."""
        node = self.root
        for ch in reversed(s):
            slot = ord(ch) - ord('a')
            child = node.children[slot]
            if child is None:
                child = TrieNode()
                node.children[slot] = child
            node = child
        node.end = True

    def query(self, letter: str) -> bool:
        """Record `letter`; report whether some stream suffix is a word."""
        self.cand.insert(0, letter)
        # No word exceeds maxlen letters, so older letters cannot matter.
        if len(self.cand) > self.maxlen:
            self.cand.pop()
        node = self.root
        for ch in self.cand:
            node = node.children[ord(ch) - ord('a')]
            if node is None:
                return False
            if node.end:
                return True
        return False
| true |
c086efab6358e39a159627c80eb72ed2569ba372 | Python | aaaaaachen/PY1901_0114WORK | /days04/demo04.py | UTF-8 | 5,027 | 3.21875 | 3 | [] | no_license |
import os
import sys
import random
import time
while True:
print("PYTHON1901电商平台用户登录")
print("~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~")
print(" 1.新用户注册")
print(" 2.用户登录")
print(" 3.退出系统")
print("~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~")
print("(温馨提示)请输入:")
a=int(input("请输入选项:"))
if a==1:
os.system("cls")
name=input("请输入您的姓名:")
passwd=input("请输入您的密码:")
print("注册成功")
continue
elif a==2:
print("系统正在升级中。。。。")
name="wu"
passwd="123"
a=input("请输入姓名:")
b=input("请输入密码:")
if a==name and b==passwd:
print("登陆成功")
# time.sleep(3)
os.system("cls")
print("点击任意键继续....")
#商城界面展示
while True:
os.system("cls")
print("~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~")
print(" 1.购物商城")
print(" 2.休闲小游戏")
print(" 3.退出系统")
print("~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~")
print(" 点击任意键继续:")
print("\n\n\n\n")
choice=input("请输入选项(按R键返回主菜单):")
if choice=="1":
os.system("cls")
print("~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~")
print("商品编号\t商品名称\t商品单价\t商品库存\t商品描述")
print("1\t\t苹果\t\t5.00\t\t80\t\t又大又甜")
print("2\t\t橘子\t\t4.00\t\t80\t\t不甜不要钱")
print("3\t\t芒果\t\t8.00\t\t80\t\t味美多汁")
print("4\t\t榴莲\t\t43.00\t\t80\t\t跟屎一样香")
print("~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~")
print(" 点击任意键继续:")
print("\n\n\n\n")
arr=[[1,"苹果",5.00,80,"又大又甜"],\
[2,"橘子",4.00,80,"又大又甜"],\
[3,"芒果",8.00,80,"又大又甜"],\
[4,"榴莲",43.00,80,"又大又甜"]]
id=float(input("\t请选择需要购买的商品:"))
if id>len(arr):
print("\t無商品")
else:
num=float(input("\t请选择购买商品的数量:"))
sum=arr[int(id)-1][2]*num
print("\t您需要支付",sum,"元")
pay=float(input("\t实付金额:"))
print("\n")
input("\t点击任意键结算....")
if pay<sum:
print("\terror")
else:
os.system("cls")
print("~*~*~*~*~*~**~*~*~*~*~*~~*~*~*~*~**~~*")
print("\t商品名称:",arr[int(id)-1][1])
print("\t商品单价:",arr[int(id)-1][2])
print("\t商品个数:",num)
print("\t应付金额:",sum)
print("\t实付金额:",pay)
print("\t找零: ",pay-sum)
print("~*~*~*~*~*~**~*~*~*~*~*~~*~*~*~*~**~~*")
continue
elif choice=="2":
print("****************************************")
print(" 1.石头剪刀布")
print(" 2.老虎棒子鸡")
print(" 3.猜数字")
print("****************************************")
choice=int(input(" 请输入选项:"))
if choice==1:
print("wait")
elif choice==2:
print("wait")
elif choice==3:
print("wait")
# else:
# break
elif choice=="3":
print("客官请慢走....")
sys.exit(1)
elif choice=="R":
print("1s后退出...")
sys.exit(1)
else:
continue
# break
else:
print("用户或密码错误,请重新输入")
continue
elif a==3:
print("系统正在升级中.....")
break
else:
print("请重新输入")
continue | true |
a5de5e09480f2774f2bd202945ec424ea0dc978a | Python | josue0175/Python_Programming | /positional_data.py | UTF-8 | 266 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
import sys
# Read a two-column whitespace-separated file (path given as argv[1]),
# echo each row, and collect both columns as floats. (Python 2 syntax.)
input_file = open(sys.argv[1], "r")
listaa = []  # first column values
listbb = []  # second column values
for line in input_file:
    lista, listb = line.split()
    print lista, listb
    listaa.append(float(lista))
    listbb.append(float(listb))
print listaa, listbb
| true |
252b69bfbd214df0de2c66b771b07e542f132676 | Python | andyweruan/files-here | /cssi/python1/dictionary.py | UTF-8 | 1,447 | 3.484375 | 3 | [] | no_license | my_juice = ['green', 'rubby', 'carrot']
whole_food_price = [9.99, 15.0, 5.0]
print '{} juice costs ${}'.format(my_juice[0], whole_food_price[0])
juice_dic = {}
juice_dic = {'green': 9.99, 'rubby': 15.0, 'carrot': 5.0}
juice_dic['green'] = 2.99
print juice_dic
my_juice.append('orange') #making new thing into the dictionary
juice_dic['orange'] = 1.99
print juice_dic
is_in = 'apple' in my_juice
'apple' in juice_dic #do the same for string, list, and dictionary
1.99 in juice_dic.values() #can check if key or values exist in your dictionary
print '{} juice costs ${}'.format('orange', juice_dic['orange'])
for juice in juice_dic:
print '{} juice costs ${}'.format(juice, juice_dic[juice])
city_info = {'new_york' : { 'mayor' : "Bill DeBlasio",
'population' : 8337000,
'website' : "http://www.nyc.gov"
},
'los_angeles' : { 'mayor' : "Eric Garcetti",
'population' : 3884307,
'website' : "http://www.lacity.org"
},
'miami' : { 'mayor' : "Tomás Regalado",
'population' : 419777,
'website' : "http://www.miamigov.com"
},
'chicago' : { 'mayor' : "Rahm Emanuel",
'population' : 2695598,
'website' : "http://www.cityofchicago.org/"
}
}
def print_name():
print 'Andy Ruan'
print_name()
def print_name(name):
print 'Hello, ' + name + ' Are you sleepy?'
print 'Hello, %s . Are you sleey?' % (name)
print 'Hello, {}.Are you sleepy?'.formant(name)
sentence = print_name('Andy')
| true |
919a40a3c09186961ce8e698fe10fbe6da47fea0 | Python | Toyib32/Python-Projects-Protek- | /Project2_Chapter8_s.py | UTF-8 | 700 | 3.640625 | 4 | [] | no_license | def dataStat(x):
a = sum(x) / len(x)
b = max(x)
c = min(x)
dataHasil = [a, b, c]
return dataHasil
while True:
try:
n = int(input("Silahkan masukkan banyak data yang Anda inginkan (***data = angka***) :"))
break
except ValueError:
print ("incorrect input! silahkan masukkan kembali data sesuai petunjuk")
continue
dataHasil = []
i = 0
while (i < n):
try :
bilangan = int(input("silahkan input data berupa bilangan bulat :"))
dataHasil.append(bilangan)
i +=1
except ValueError :
print ("incorrect input! silahkan masukkan kembali data sesuai petunjuk")
cetak = dataStat(dataHasil)
print(cetak)
| true |
bebd24dcdc36dbd20afc595aea2247c9913b5c6f | Python | feer56/Kitsune1 | /kitsune/search/tests/test_json.py | UTF-8 | 2,253 | 2.515625 | 3 | [] | permissive | from nose.tools import eq_
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.tests import LocalizingClient
class JSONTest(ElasticTestCase):
client_class = LocalizingClient
def test_json_format(self):
"""JSON without callback should return application/json"""
response = self.client.get(reverse('search'), {
'q': 'bookmarks',
'format': 'json',
})
eq_(response['Content-Type'], 'application/json')
def test_json_callback_validation(self):
"""Various json callbacks -- validation"""
q = 'bookmarks'
format = 'json'
callbacks = (
('callback', 200),
('validCallback', 200),
('obj.method', 200),
('obj.someMethod', 200),
('arr[1]', 200),
('arr[12]', 200),
("alert('xss');foo", 400),
("eval('nastycode')", 400),
("someFunc()", 400),
('x', 200),
('x123', 200),
('$', 200),
('_func', 200),
('"></script><script>alert(\'xss\')</script>', 400),
('">', 400),
('var x=something;foo', 400),
('var x=', 400),
)
for callback, status in callbacks:
response = self.client.get(reverse('search'), {
'q': q,
'format': format,
'callback': callback,
})
eq_(response['Content-Type'], 'application/x-javascript')
eq_(response.status_code, status,
'callback "{0}": expected {1} got {2}'
.format(callback, status, response.status_code))
def test_json_empty_query(self):
"""Empty query returns JSON format"""
# Test with flags for advanced search or not
a_types = (0, 1, 2)
for a in a_types:
# NOTE: We need to follow redirects here because advanced search
# is at a different URL and gets redirected.
response = self.client.get(reverse('search'), {
'format': 'json', 'a': a,
}, follow=True)
eq_(response['Content-Type'], 'application/json')
| true |
5ae512b103671c6fa06f19c0facfb7b89e51c637 | Python | iwob/pysv | /pysv/utils.py | UTF-8 | 17,230 | 3.25 | 3 | [
"MIT"
] | permissive | import logging
import sys
import argparse
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def index_of_closing_parenthesis(words, start, left_enc='(', right_enc=')'):
    """Return the index of the token matching the opener at `start`, or -1."""
    depth = 1
    for pos, token in enumerate(words[start + 1:], start=start + 1):
        if token == left_enc:
            depth += 1
        elif token == right_enc:
            depth -= 1
            if depth == 0:
                return pos
    return -1
def index_of_opening_parenthesis(words, start, left_enc='(', right_enc=')'):
    """Return the index of the token opening the closer at `start`, or -1."""
    balance = -1
    for pos in range(start - 1, -1, -1):
        token = words[pos]
        if token == left_enc:
            balance += 1
            if balance == 0:
                return pos
        elif token == right_enc:
            balance -= 1
    return -1
def index_closest_left(words, start, what):
    """Return the index of the nearest `what` strictly left of `start`, or -1."""
    for pos in range(start - 1, -1, -1):
        if words[pos] == what:
            return pos
    return -1
def index_closest_right(words, start, what):
    """Return the index of the nearest `what` strictly right of `start`, or -1."""
    for pos in range(start + 1, len(words)):
        if words[pos] == what:
            return pos
    return -1
def parenthesis_enclosure(words, start, left_enc='(', right_enc=')'):
    """Return the sublist spanning the parenthesis pair that encloses
    `start` (both parentheses included), or [] if no such pair exists."""
    lo = index_of_opening_parenthesis(words, start, left_enc, right_enc)
    if lo == -1:
        return []
    hi = index_of_closing_parenthesis(words, lo, left_enc, right_enc)
    if hi == -1:
        return []
    return words[lo:hi + 1]
def assertify(text):
    """Wrap an SMT-LIB expression in an (assert ...) form."""
    return '(assert {0})'.format(text)
LANG_PYTHON = 'python'
LANG_SMT2 = 'smt2'
def alternative(seq, lang=LANG_PYTHON):
    """Produce a formula joining all elements of `seq` with logical 'or'."""
    if lang == LANG_SMT2:
        return _join_smt2(seq, 'or')
    if lang == LANG_PYTHON:
        return merge_elements_by(seq, ' or ')
    raise Exception("'{0}': Not recognized language!".format(lang))
def conjunction(seq, lang=LANG_PYTHON):
    """Produce a formula joining all elements of `seq` with logical 'and'."""
    if lang == LANG_SMT2:
        return _join_smt2(seq, 'and')
    if lang == LANG_PYTHON:
        return merge_elements_by(seq, ' and ')
    raise Exception("'{0}': Not recognized language!".format(lang))
def conjunct_constrs_smt2(constr_list):
    """Merge SMT-LIB constraints with (and ...).

    Comment lines (leading ';') are ignored when counting constraints;
    a comment-only list yields 'true'.
    """
    real_constrs = [c for c in constr_list if c.lstrip()[0] != ';']
    if not real_constrs:
        return 'true'
    if len(real_constrs) == 1:
        return real_constrs[0]
    # NOTE(review): the full list -- comments included -- is joined here,
    # mirroring the original behavior; confirm that is intended.
    return _join_smt2(constr_list, 'and')
def _join_smt2(seq, conn):
"""Produces a SMT-LIB 2.0 formula containing all elements of the sequence merged by a provided connective."""
if len(seq) == 0:
return ''
elif len(seq) == 1:
return seq[0]
else:
return '({0} {1})'.format(conn, ' '.join(seq))
def merge_elements_by(seq, conn, wrapped_in_par=True):
    """Join elements of `seq` with `conn`, optionally parenthesizing each."""
    if not seq:
        return ''
    if len(seq) == 1:
        return seq[0]  # a lone element is returned bare, never wrapped
    template = '({0})' if wrapped_in_par else '{0}'
    return conn.join(template.format(el) for el in seq)
def str_to_wlist(s, par_open='(', par_close=')'):
    """Tokenize `s` on whitespace, treating parentheses as standalone tokens."""
    padded = s.replace(par_open, ' {0} '.format(par_open))
    padded = padded.replace(par_close, ' {0} '.format(par_close))
    return padded.split()
def str_to_dict_parenthesis(s):
    """Converts a string in the format: '((n1 v1)...(nk vk))' to a dictionary {n1:v1, ..., nk:vk}."""
    # Tokenize first, then parse the resulting token list.
    return wlist_to_dict_parenthesis(str_to_wlist(s))
def wlist_to_dict_parenthesis(words):
    """Convert ['(', '(', n1, v1, ')', ..., '(', nk, vk, ')', ')'] into
    the dictionary {n1: v1, ..., nk: vk}."""
    pairs = {}
    pos = 1  # skip the outer opening parenthesis
    # Each inner group is exactly four tokens: '(' name value ')'.
    while words[pos] == '(':
        pairs[words[pos + 1]] = words[pos + 2]
        pos += 4
    return pairs
class Options(object):
"""Options contains all settings which are used within the framework."""
MODE_NORMAL = 'normal'
MODE_MIN = 'min'
MODE_MAX = 'max'
PYTHON = 'python'
SMT2 = 'smt2'
def __init__(self, opts = None, merge_with_vargs = True):
opts = self._preprocess_opts(opts)
if merge_with_vargs:
opts.extend(sys.argv[1:])
args = self._parse_options(opts)
self.options = vars(args)
if self.silent:
logger.disabled = True
def __str__(self):
return str(self.options)
def _parse_options(self, opts):
parser = argparse.ArgumentParser(description="Synthesize or verify Python programs.", add_help=True)
parser.add_argument("--assertions", type=str, nargs='*', default=[],
help="List of additional assertions to be inserted at the end of the script.")
parser.add_argument("--allow_commutative_constr", type=int, choices=[0, 1], default=False)
parser.add_argument("--assignments_as_lets", type=int, choices=[0, 1], default=False)
parser.add_argument("--lang", type=str, choices=["python", "smt2"], default="python",
help="Language, in which written are precondition, program and postcondition. For languages different than SMT-LIB 2.0 (smt2), conversion to correct semantic-preserving SMT-LIB 2.0 constraints is applied before running the solver. If SMT-LIB 2.0 code is provided as input, then it is assummed that it is in the form ready to be used in the final script (certain other options still may modify this code). (default: python)")
parser.add_argument("--pre", type=str,
help="Precondition of the program.")
parser.add_argument("--program", type=str,
help="Source code of the program.")
parser.add_argument("--post", type=str,
help="Postcondition of the program.")
parser.add_argument("--post_in_cnf", type=int, choices=[0, 1], default=False,
help="If set to 1, postcondition will be transformed to CNF form.")
parser.add_argument("--input_vars", type=str, nargs='*', default=[],
help="List of input variables of the program and their types, in which single element is provided in the form: name:type. (default: [])")
parser.add_argument("--local_vars", type=str, nargs='*', default=[],
help="List of local variables present in the program and their types, in which single element is provided in the form: name:type. (default: [])")
parser.add_argument("--free_vars", type=str, nargs='*', default=[],
help="List of free variables present in the programs, provided as only names. Value of those variables will be determined by solver during verification or synthesis. Free variables should be also properly declared as local variables. (default: [])")
parser.add_argument("--logic", type=str, default="NIA")
parser.add_argument("--loop_unrolling_level", type=int, default=5, help="Number of nested if-expressions loops will be unrolled into.")
parser.add_argument("--name_all_assertions", type=int, choices=[0, 1], default=True)
parser.add_argument("--name_struct_assertions", type=int, choices=[0, 1], default=False)
parser.add_argument("--only_script", action="store_true",
help="SMT-LIB 2.0 script will be produced and printed on the standard output, but solver will not be executed. The script will be printed even if silent mode was set.")
parser.add_argument("--produce_proofs", type=int, choices=[0, 1], default=False,
help="If true, then solver's utility to produce unsatisfiability proofs will be used if decision was unsat. Not all solvers support this functionality. (default: 0)")
parser.add_argument("--produce_unsat_core", type=int, choices=[0, 1], default=False,
help="If true, then solver's utility to produce unsatisfiable core of named top-level variables will be used if decision was unsat. Not all solvers support this functionality. (default: 1)")
parser.add_argument("--produce_assignments", type=int, choices=[0, 1], default=False,
help="If true, then solver's utility to return valuations of named variables will be used. Not all solvers support this functionality. (default: 0)")
parser.add_argument("--seed", type=int,
help="Random seed which will be passed to solver and used within the framework.")
parser.add_argument("--show_comments", type=int, choices=[0, 1], default=False)
parser.add_argument("--silent", type=int, choices=[0, 1], default=False,
help="Silent mode.")
parser.add_argument("--solver", choices=["z3", "cvc4", "mathsat", "other"], type=str, default="z3",
help="Specifies SMT solver to be used. By default, binaries of the selected solver are looked for in the solvers_bin directory, but you can also pass a path to them with --solver_path argument. Apart from that, this parameter sets some solver-specific parameters which will be passed to the selected SMT solver. (default: z3)")
parser.add_argument("--solver_interactive_mode", type=int, choices=[0, 1], default=True,
help="In the interactive mode, application sends a set of constraints as an input to the solver, waits for the sat/unsat decision, and then asks for decision-specific data (e.g. model, unsat-core). In 'normal' mode, a script is sent once and solver's output is read only once, which means that solver is queried for all decision-specific data regardless of its actual response. (default: 1)" )
parser.add_argument("--solver_path", type=str,
help="Path to executable binary of the SMT solver.")
parser.add_argument("--ssa_enabled", type=int, choices=[0, 1], default=True,
help="If true, then before running the solver program will be transformed to the SSA form. SSA form of a program generally is necessary for SMT constraints to be correct. Disable only if you know that your program already is in the SSA form (e.g. it is an SMT-LIB expression returning certain value) to speed up production of the script.")
parser.add_argument("--script_prolog", type=str, default=None,
help="Text to be added to the SMT-LIB 2.0 script before any declaration (but after set-options).")
parser.add_argument("--script_epilog", type=str, default=None,
help="Text to be added to the SMT-LIB 2.0 script after all constraints (but before check-sat).")
parser.add_argument("--solver_timeout", type=int, default=None,
help="Time in miliseconds after which solver will abort search for a solution. In such a case a decision will be 'timeout'.")
parser.add_argument("--save_script_to_file", type=int, default=False,
help="If set to true, then last executed script will be saved to 'script.smt2'.")
parser.add_argument("--test_cases", type=str, default="",
help="Test cases from which will be derived appropriate postcondition.")
parser.add_argument("--output_data", type=str, nargs='*', choices=["decision", "holes_content", "raw", "model", "unsat_core", "assignments", "final_code"], default=["raw"],
help="Determines the way in which solver's output will be printed after computations are finished. Elements are printed according to the provided sequence.")
vs_group = parser.add_mutually_exclusive_group()
vs_group.add_argument("--verify", action="store_true")
vs_group.add_argument("--example", action="store_true")
vs_group.add_argument("--synthesize", action="store_true")
# Subparser for verification.
# subparsers = parser.add_subparsers(title="Module",
# description="Chosen module decides what general task will be realized.")
# parser_ver = subparsers.add_parser('verify',
# parents=[parser],
# help='Module used for verification.')
v_group = parser.add_argument_group('VERIFICATION OPTIONS')
self._add_ver_options(v_group)
# Subparser for synthesis.
# parser_synth = subparsers.add_parser('synthesize',
# parents=[parser],
# help='Module used for synthesis.')
s_group = parser.add_argument_group('SYNTHESIS OPTIONS')
self._add_synth_options(s_group)
args = parser.parse_args(opts, namespace=self)
# Manual checking of some dependencies.
if self.ver_annotate_post and (self.post_in_cnf is None or not self.post_in_cnf):
parser.error("'--ver_annotate_post 1' requires '--post_in_cnf 1'.")
return args
def _add_ver_options(self, group):
    """Register the verification-related command-line options on *group*."""
    # The two 0/1 flags share the same int-typed on/off spec.
    binary_flag = dict(type=int, choices=[0, 1])
    group.add_argument("--ver_flat_formula", default=True, **binary_flag)
    group.add_argument("--ver_annotate_post", default=False, **binary_flag)
    group.add_argument("--ver_mode", choices=["normal", "min", "max"], default="normal")
def _add_synth_options(self, group):
    """Register the synthesis-related command-line options on *group*."""
    add = group.add_argument  # shorthand for the repeated calls below
    add("--synth_substitute_free", type=int, choices=[0, 1], default=True)
    add("--synth_min_passed_tests", type=int, default=None,
        help="Specifies at least how many test cases a solution to the synthesis problem must pass. This option requires the 'max' value for the --synth_mode option.")
    add("--synth_mode", choices=["normal", "min", "max"], type=str, default="normal",
        help="If set to value different than 'normal' then certain measure of correctness will be optimized by the solver. Such a measure may be for example number of passed test cases.")
    add("--synth_holes", type=str, default="",
        help="Definitions of all holes present in the program. Format: 'H0_id,H0_grammarDepth,H0_grammar;H1_id,H1_grammarDepth,H1_grammar'. grammarDepth is optional and may be omitted.")
    add("--tc_rename_vars_in_assignments", type=bool, default=True,
        help="If set to true, then all variables in assertions specified in the 'assertion' parameter will be duplicated and renamed for each test case.")
    add("--tc_fitness_mode", type=str, choices=["normal", "L1"], default="normal",
        help="Specifies, how fitness will be computed. 'normal' means a sum of passed test cases.")
def _preprocess_opts(self, opts):
    """Normalize *opts* into a flat list of command-line string tokens.

    Accepts None (-> []), a whitespace-separated string, a dict (flattened
    via _flatten_dict), or any sequence of values.  Booleans become "1"/"0"
    so they match the int-typed 0/1 flags; everything else is str()-ed.
    """
    if opts is None:
        return []
    if isinstance(opts, str):
        # Split into tokens and fall through to the conversion below
        # (the original recursed here; the result is identical).
        opts = opts.split()
    elif isinstance(opts, dict):
        opts = self._flatten_dict(opts)

    def convert(x):
        # isinstance() instead of type() == bool: same result for real
        # bools, and the idiomatic form.
        if isinstance(x, bool):
            return "1" if x else "0"
        return str(x)

    return [convert(x) for x in opts]
def _flatten_dict(self, opts):
    """Turn {key: value} into a flat [key, value, ...] list.

    Keys whose value is None contribute only the key (flag-style options).
    """
    flat = []
    for key, value in opts.items():
        flat.append(key)
        if value is not None:
            flat.append(value)
    return flat
def get(self, key, default=None):
    """Return option *key* coerced to str, or *default* when absent."""
    try:
        return str(self.options[key])
    except KeyError:
        return default
def get_float(self, key, default=None):
    """Return option *key* coerced to float, or *default* when absent."""
    try:
        return float(self.options[key])
    except KeyError:
        return default
def get_int(self, key, default=None):
    """Return option *key* coerced to int, or *default* when absent."""
    try:
        return int(self.options[key])
    except KeyError:
        return default
def get_bool(self, key, default=None):
    """Return True iff option *key* stringifies to 'true' (case-insensitive).

    When the key is absent, *default* is returned unchanged.
    """
    try:
        value = self.options[key]
    except KeyError:
        return default
    return str(value).lower() == 'true'
405b91357e1e3dd18572449dbd0ee94535179d25 | Python | blueshed/blueshed-py | /src/blueshed/utils/generate_password.py | UTF-8 | 1,062 | 2.640625 | 3 | [
"MIT"
] | permissive | '''
Password generation of four commonly used words from a word list
separated by hyphens.
Created on Apr 7, 2013
@author: peterb
'''
from pkg_resources import resource_filename # @UnresolvedImport
import itertools
import random
from blueshed.utils.email_password import EmailPassword
class GeneratePasswordMixin(object):
    """Mixin that builds memorable passwords from a bundled word list.

    Passwords are *nwords* random dictionary words joined by hyphens,
    drawn with a cryptographically strong RNG (random.SystemRandom).
    """

    def __init__(self):
        # Cached word list; loaded lazily on first access of ``wordlist``.
        self._wordlist_ = None

    @property
    def wordlist(self):
        """Return (and cache) the first 2**11 words of wordlist.txt.

        Each line is expected to be ``<index> <word>``; only the word
        column is kept.
        """
        if self._wordlist_ is None:
            nbits = 11
            filename = resource_filename("blueshed.utils", "wordlist.txt")
            # Use a context manager so the file handle is closed promptly
            # (the original left the handle to the garbage collector).
            with open(filename) as handle:
                self._wordlist_ = [line.split()[1]
                                   for line in itertools.islice(handle, 2 ** nbits)]
        return self._wordlist_

    def generate_password(self, nwords=4):
        """Return *nwords* random words joined by hyphens."""
        choice = random.SystemRandom().choice
        return '-'.join(choice(self.wordlist) for i in range(nwords))

    def _email_password_(self, tmpl_path, email, password, **kwargs):
        # Send the freshly generated password via the EmailPassword helper.
        email = EmailPassword(tmpl_path, email, password=password, **kwargs)
        email.send()
e0a255334f2ae9611cdf418e4234cb351888db34 | Python | yangxueruivs/LeetCode | /RemoveNthNodeFromEndofList.py | UTF-8 | 710 | 3.359375 | 3 | [] | no_license | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
#Double pointer
class Solution(object):
    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end of a singly linked list.

        :type head: ListNode
        :type n: int
        :rtype: ListNode (the possibly-new head, or None when emptied)

        Two-pointer technique: ``lead`` is advanced n-1 nodes ahead of
        ``trail``; when ``lead`` reaches the last node, ``trail`` is the
        node to delete and ``prev`` its predecessor.
        """
        if head is None or head.next is None:
            # Zero- or one-node list: removing any node leaves it empty.
            # (The original returned [] here — the wrong type; callers
            # expect a node or None.)
            return None
        lead = head
        for _ in range(n - 1):
            lead = lead.next
        prev = None
        trail = head
        while lead.next:
            prev = trail
            trail = trail.next
            lead = lead.next
        if prev is None:
            # The head itself is the n-th node from the end.
            return head.next
        prev.next = trail.next
        return head
7031962bb174b76bb446f535601d8804a97c4f9f | Python | yanzv/CodeEval | /ConvertBinary.py | UTF-8 | 679 | 4.03125 | 4 | [] | no_license | #Decimal to Binary from challenge
#You are given a decimal (base 10) number, print its binary representation.
#Yan Zverev
#2015
def convertToBinary(number):
    """Return the binary representation of a non-negative int as a string.

    Works by finding the highest power of two <= number and then testing
    each power downwards.  The original produced "00"/"01" for 0 and 1
    (leading zero); those single-digit cases are now handled explicitly.
    """
    if number <= 1:
        # 0 and 1 are single binary digits; the loop below assumes >= 2.
        return str(number)
    counter = 1
    while counter * 2 <= number:
        counter = counter * 2
    digits = []
    while counter > 0:
        if number >= counter:
            digits.append("1")
            number = number - counter
        else:
            digits.append("0")
        counter = counter // 2
    # join() instead of repeated string += (avoids quadratic build-up)
    return "".join(digits)
# Demo: print 20 in binary, then whether its 3rd and 4th digits match.
binString = convertToBinary(20)
print(binString, ":")
print("true" if binString[2] == binString[3] else "false")
| true |
35f9990a8af041a0adedb98858184e7a9fae6599 | Python | clementselvaprasath/learning | /python/coding/codewars/level4/Sudoku.py | UTF-8 | 2,260 | 3.703125 | 4 | [] | no_license | import math
def validate(numbers, length):
    """Return True iff *numbers* is a permutation of 1..length.

    Works by comparing the concatenated sorted digits against the expected
    "12...length" string; only correct for length <= 9 (multi-digit values
    would concatenate ambiguously).  Unlike the original, the caller's
    list is no longer mutated (sorted() instead of in-place sort()).
    """
    expected = "".join(str(x) for x in range(1, length + 1))
    actual = "".join(str(x) for x in sorted(numbers))
    return expected == actual
def perform(sudoku):
    """Return True when *sudoku* holds a valid solved sudoku grid.

    Accepts either a raw square list-of-lists grid or a Sudoku wrapper
    (the demo below even wraps a Sudoku inside another Sudoku — wrappers
    are unwrapped until a plain grid is reached; the original crashed on
    a raw grid).  Checks every row, every column, and every sqrt(n) x
    sqrt(n) sub-grid.  Debug prints and the dead 3x3 prototype code of
    the original have been removed.
    """
    def _is_permutation(numbers, length):
        # True iff numbers is a permutation of 1..length (length <= 9).
        expected = "".join(str(v) for v in range(1, length + 1))
        return expected == "".join(str(v) for v in sorted(numbers))

    data = sudoku
    while hasattr(data, 'data'):
        data = data.data
    length = len(data)
    # Every row must be a permutation of 1..length.
    for row in data:
        if not _is_permutation(list(row), length):
            return False
    # Every column.
    for y in range(length):
        column = [data[x][y] for x in range(length)]
        if not _is_permutation(column, length):
            return False
    # Every n x n sub-grid, n = sqrt(length).
    n = int(math.sqrt(length))
    for x in range(0, length, n):
        band = data[x:x + n]
        for y in range(0, length, n):
            flattened = [value for row in band for value in row[y:y + n]]
            if not _is_permutation(flattened, length):
                return False
    return True
class Sudoku(object):
    """Thin wrapper that stores a sudoku grid for validation."""
    def __init__(self, data):
        # data: the grid (list of rows).  NOTE(review): the demo code below
        # wraps a Sudoku inside another Sudoku, so ``data`` may itself be a
        # Sudoku instance; it is forwarded to ``perform`` unchanged.
        self.data = data
    def is_valid(self):
        # Delegate validation to the module-level ``perform`` checker.
        return perform(self.data)
# Valid Sudoku
# NOTE(review): the print calls below wrap each board in a *second* Sudoku
# (Sudoku(goodSudoku1) where goodSudoku1 is already a Sudoku) — ``perform``
# only works because of this double wrapping.
goodSudoku1 = Sudoku([
    [7, 8, 4, 1, 5, 9, 3, 2, 6],
    [5, 3, 9, 6, 7, 2, 8, 4, 1],
    [6, 1, 2, 4, 3, 8, 7, 5, 9],
    [9, 2, 8, 7, 1, 5, 4, 6, 3],
    [3, 5, 7, 8, 4, 6, 1, 9, 2],
    [4, 6, 1, 9, 2, 3, 5, 8, 7],
    [8, 7, 6, 3, 9, 4, 2, 1, 5],
    [2, 4, 3, 5, 6, 1, 9, 7, 8],
    [1, 9, 5, 2, 8, 7, 6, 3, 4]
])
goodSudoku2 = Sudoku([
    [1, 4, 2, 3],
    [3, 2, 4, 1],
    [4, 1, 3, 2],
    [2, 3, 1, 4]
])
# Invalid Sudoku
badSudoku1 = Sudoku([
    [0, 2, 3, 4, 5, 6, 7, 8, 9],
    [1, 2, 3, 4, 5, 6, 7, 8, 9],
    [1, 2, 3, 4, 5, 6, 7, 8, 9],
    [1, 2, 3, 4, 5, 6, 7, 8, 9],
    [1, 2, 3, 4, 5, 6, 7, 8, 9],
    [1, 2, 3, 4, 5, 6, 7, 8, 9],
    [1, 2, 3, 4, 5, 6, 7, 8, 9],
    [1, 2, 3, 4, 5, 6, 7, 8, 9],
    [1, 2, 3, 4, 5, 6, 7, 8, 9]
])
badSudoku2 = Sudoku([
    [1, 2, 3, 4, 5],
    [1, 2, 3, 4],
    [1, 2, 3, 4],
    [1]
])
# Python 2 print statements (this file predates print()).
print Sudoku(goodSudoku1).is_valid()
print Sudoku(goodSudoku2).is_valid()
print Sudoku(badSudoku1).is_valid()
print Sudoku(badSudoku2).is_valid()
| true |
f05d0374d077e1c4ecdfed93efc19941474f9d53 | Python | arlyon/hyperion | /hyperion_cli/models/neighbourhood.py | UTF-8 | 2,648 | 2.546875 | 3 | [
"MIT"
] | permissive | import peewee as pw
from playhouse.shortcuts import model_to_dict
from .base import BaseModel
class Neighbourhood(BaseModel):
    """
    Contains information about a police neighbourhood.
    """
    name = pw.CharField()
    code = pw.CharField()
    description = pw.CharField(null=True)
    email = pw.CharField(null=True)
    facebook = pw.CharField(null=True)
    telephone = pw.CharField(null=True)
    twitter = pw.CharField(null=True)

    def serialize(self):
        """Return a JSON-serializable dict of this record.

        The pop/re-insert moves 'links' and 'locations' to the end of the
        dict; 'postcodes' is deliberately excluded from the output.
        """
        data = model_to_dict(self, backrefs=True)
        data["links"] = data.pop("links")
        data["locations"] = data.pop("locations")
        data.pop("postcodes")
        return data

    @staticmethod
    def from_dict(data):
        """Build an unsaved Neighbourhood from a police-API response dict.

        Optional fields default to None when absent; dict.get replaces the
        repetitive ``d[k] if k in d else None`` checks of the original and
        also tolerates a missing 'contact_details' key.
        """
        contact = data.get("contact_details", {})
        return Neighbourhood(
            name=data["name"],
            code=data["id"],
            description=data.get("description"),
            email=contact.get("email"),
            facebook=contact.get("facebook"),
            telephone=contact.get("telephone"),
            twitter=contact.get("twitter"),
        )
class Link(BaseModel):
    """
    Contains a link for a police presence in an area.
    """
    name = pw.CharField()
    url = pw.CharField()
    neighbourhood = pw.ForeignKeyField(Neighbourhood, related_name="links")

    @staticmethod
    def from_dict(neighbourhood, link):
        """Build an unsaved Link from a police-API link dict."""
        return Link(
            neighbourhood=neighbourhood,
            name=link["title"],
            url=link["url"],
        )
class Location(BaseModel):
    """
    Contains data about police stations.
    """
    # NOTE(review): imported inside the class body — presumably to avoid a
    # circular import at module load time; confirm against the package layout.
    from . import postcode
    address = pw.CharField()
    description = pw.CharField(null=True)
    latitude = pw.FloatField(null=True)
    longitude = pw.FloatField(null=True)
    name = pw.CharField()
    neighbourhood = pw.ForeignKeyField(Neighbourhood, related_name="locations")
    postcode = pw.ForeignKeyField(postcode.Postcode)
    type = pw.CharField()

    @staticmethod
    def from_dict(neighbourhood, postcode, location):
        """Build an unsaved Location from a police-API 'location' dict,
        attached to the given neighbourhood and postcode records."""
        return Location(
            address=location["address"],
            description=location["description"],
            latitude=location["latitude"],
            longitude=location["longitude"],
            name=location["name"],
            neighbourhood=neighbourhood,
            postcode=postcode,
            type=location["type"],
        )
)
| true |
5bb15ee93be06285f92f0358ac194751a5389699 | Python | henriquevedoveli/nlpApp | /app/sumarizador.py | UTF-8 | 1,213 | 2.953125 | 3 | [] | no_license | import nltk
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.tokenize import punkt
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from string import punctuation
from heapq import nlargest
from collections import defaultdict
def sumarizador(texto, lang, num_sentencas=4):
    """Extractive summarizer: return the *num_sentencas* highest-scoring
    sentences of *texto*, in score order.

    A sentence's score is the summed corpus frequency of its non-stopword
    tokens.  lang selects the stopword list: 'pt-br' or 'en'; any other
    value raises ValueError (the original crashed with an opaque
    UnboundLocalError).  num_sentencas generalizes the previously
    hard-coded 4 and defaults to it for backward compatibility.
    """
    sentencas = sent_tokenize(texto)
    palavras = word_tokenize(texto.lower())
    if lang == 'pt-br':
        sw = set(stopwords.words('portuguese') + list(punctuation))
    elif lang == 'en':
        sw = set(stopwords.words('english') + list(punctuation))
    else:
        raise ValueError("unsupported language: %r" % (lang,))
    palavras_sem_stopwords = [palavra for palavra in palavras if palavra not in sw]
    frequencia = FreqDist(palavras_sem_stopwords)
    # Score each sentence by the total frequency of its informative words.
    sentencas_importantes = defaultdict(int)
    for i, sentenca in enumerate(sentencas):
        for palavra in word_tokenize(sentenca.lower()):
            if palavra in frequencia:
                sentencas_importantes[i] += frequencia[palavra]
    idx_sentencas_importantes = nlargest(num_sentencas, sentencas_importantes,
                                         sentencas_importantes.get)
    # (the original's ''.join(sentencas[i]) was a no-op on a string)
    return [sentencas[i] for i in idx_sentencas_importantes]
| true |
d1610b6cc6c3c7ac5eb42ee9b31bdc3fece85a35 | Python | ppooiiuuyh/datamining_assignments | /assignment07/assignment07.py | UTF-8 | 1,033 | 3.109375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def fun(x):
    """Reference signal f(x) = |x| * sin(x) used to generate fit data."""
    envelope = np.abs(x)
    return envelope * np.sin(x)
def mypolyfit(x, y, p):
    """Least-squares fit of a degree-*p* polynomial to the points (x, y).

    Returns coefficients [c0, c1, ..., cp] for c0 + c1*x + ... + cp*x**p,
    obtained from the normal equations (X^T X) c = X^T y where X is the
    Vandermonde matrix of x.
    """
    # np.vander replaces the original Python-level double comprehension;
    # increasing=True gives columns x**0 ... x**p.
    X = np.vander(np.asarray(x), p + 1, increasing=True)
    # Solve the normal equations directly; np.linalg.solve is better
    # conditioned than forming the explicit inverse as before.
    return np.linalg.solve(X.T @ X, X.T @ np.asarray(y))
def mypolyval(x, fits):
    """Evaluate the polynomial with coefficients *fits* at the points *x*.

    fits[i] multiplies x**i, matching the layout produced by mypolyfit.
    """
    # Vandermonde matrix times the coefficient vector, vectorized instead
    # of the original element-wise comprehension.
    X = np.vander(np.asarray(x), len(fits), increasing=True)
    return X @ np.asarray(fits)
# Demo: fit polynomials of increasing degree to the signal and plot them.
num = 1001
std = 5
n = np.random.rand(num)
nn = n - np.mean(n)  # zero-mean noise
x = np.linspace(-10,10,num)
y1 = fun(x)
y2 = y1 + nn * std
for i in range(2,10):
    # NOTE(review): the fit uses the noise-free y1, while the noisy y2 is
    # only plotted — confirm whether fitting y2 was intended.
    popt = mypolyfit(x, y1, i)
    out = mypolyval(x,popt)
    plt.plot(x, y1, 'b.', x, y2, 'k.')
    plt.plot(x,out,'r',label="p="+str(i))
    plt.legend()
    plt.show()
| true |
7eb3ecf9cb5c4c6829f7a4e1bf65e062a660a3b8 | Python | scanner/django-asutils | /asutils/views.py | UTF-8 | 6,658 | 2.53125 | 3 | [
"MIT"
] | permissive | #
# File: $Id: views.py 1864 2008-10-27 22:11:00Z scanner $
#
# Python imports.
#
import os.path
# Django imports
#
from django.utils import simplejson
from django.template import loader, RequestContext
from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponsePermanentRedirect, HttpResponseGone
from django.utils.functional import Promise
from django.utils.encoding import force_unicode
from django.conf import settings
from asutils.middleware import allow_anonymous
##
## We copy direct_to_template() and redirect_to() views from an
## earlier version of django.views.generic.simple so we can continue
## using it in some of our utilities until we do a proper replacement
## of them.
##
####################################################################
#
def direct_to_template(request, template, extra_context=None, mimetype=None,
                       **kwargs):
    """
    Render *template* to an HttpResponse.

    Extra URL parameters end up in the context as ``{{ params }}``.
    Entries of *extra_context* are copied in as-is, except callables,
    which are invoked and replaced by their return value.
    """
    context_vars = {'params': kwargs}
    if extra_context:
        for key, value in extra_context.items():
            context_vars[key] = value() if callable(value) else value
    rendered = loader.get_template(template).render(
        RequestContext(request, context_vars))
    return HttpResponse(rendered, mimetype=mimetype)
####################################################################
#
def redirect_to(request, url, permanent=True, **kwargs):
    """
    Redirect to *url*, interpolated with the URL kwargs
    (e.g. '/bar/%(id)s/').

    A None url yields HttpResponseGone (410).  *permanent* selects
    between a 301 and a 302 response.
    """
    if url is None:
        return HttpResponseGone()
    response_class = (HttpResponsePermanentRedirect if permanent
                      else HttpResponseRedirect)
    return response_class(url % kwargs)
####################################################################
#
def noauth_dtt(request, template, extra_context=None, mimetype=None, **kwargs):
    """
    This is a wrapper around django's 'direct_to_template' with the exception
    that we have tagged it with our 'allow_anonymous' function.

    This allows us to render templates directly to the caller without our
    asutils.middleware.RequireLogin middleware to redirect them to need a login
    if they are not authenticated.
    """
    # BUG FIX: the original passed extra_context=None and mimetype=None
    # unconditionally, silently discarding the values supplied by the caller.
    return direct_to_template(request, template, extra_context=extra_context,
                              mimetype=mimetype, **kwargs)
allow_anonymous(noauth_dtt)
####################################################################
#
def direct_to_template_subdir(request, template, subdir = None, **kwargs):
    """
    Wrapper around direct_to_template that prepends *subdir* to the
    template path.

    Useful when a sub-url pattern matches only the file name: a pattern
    like ``^foo/(?P<template>.*\.html)$`` with ``{'subdir': 'foo/'}``
    renders ``foo/bar.html`` instead of the bare ``bar.html``.

    Arguments:
    - `request`: django httprequest object
    - `template`: the template to render
    - `subdir`: optional sub-directory to prepend to the template path
    - `**kwargs`: forwarded to direct_to_template
    """
    full_template = template if subdir is None else os.path.join(subdir, template)
    return direct_to_template(request, full_template, **kwargs)
####################################################################
#
def noauth_dtt_sd(request, template, subdir = None, **kwargs):
    """
    Anonymous-access variant of 'direct_to_template_subdir', tagged with
    'allow_anonymous' so unauthenticated users can fetch these pages
    without being redirected to login by asutils.middleware.RequireLogin.

    Arguments:
    - `request`: django httprequest object
    - `template`: the template to render
    - `subdir`: optional sub-directory to prepend to the template path
    - `**kwargs`: forwarded to direct_to_template_subdir
    """
    return direct_to_template_subdir(request, template, subdir=subdir,
                                     **kwargs)
allow_anonymous(noauth_dtt_sd)
####################################################################
#
# NOTE: This was cribbed directly from:
# http://www.djangosnippets.org/snippets/1157/
#
class LazyEncoder(simplejson.JSONEncoder):
    """JSON encoder that resolves Django lazy-translation proxies
    (Promise instances) to unicode before serialization."""
    def default(self, o):
        if not isinstance(o, Promise):
            return super(LazyEncoder, self).default(o)
        return force_unicode(o)
class JSONResponse(HttpResponse):
    """HttpResponse whose body is *data* serialized to JSON via LazyEncoder
    (so Django lazy-translation strings serialize cleanly)."""
    def __init__(self, data):
        HttpResponse.__init__(
            self, content=simplejson.dumps(data, cls=LazyEncoder),
            #mimetype="text/html",
        )
def ajax_form_handler(
    request, form_cls, require_login=True, allow_get=settings.DEBUG
):
    """
    Some ajax heavy apps require a lot of views that are merely a
    wrapper around the form. This generic view can be used for them.

    Returns a JSONResponse: {'success': True, 'response': ...} when the
    form validates, otherwise {'success': False, 'errors': ...}.

    NOTE: This was cribbed directly from:
    http://www.djangosnippets.org/snippets/1157/
    """
    if require_login and not request.user.is_authenticated():
        raise Http404("login required")
    if not allow_get and request.method != "POST":
        raise Http404("only post allowed")
    # Python 2 code: ``basestring`` covers both str and unicode.
    if isinstance(form_cls, basestring):
        # can take form_cls of the form: "project.app.forms.FormName"
        from django.core.urlresolvers import get_mod_func
        mod_name, form_name = get_mod_func(form_cls)
        form_cls = getattr(__import__(mod_name, {}, {}, ['']), form_name)
    # NOTE(review): request.REQUEST (merged GET+POST) was removed in newer
    # Django versions — confirm the Django version this targets.
    form = form_cls(request, request.REQUEST)
    if form.is_valid():
        return JSONResponse({ 'success': True, 'response': form.save() })
    return JSONResponse({ 'success': False, 'errors': form.errors })
| true |
fab5388db022c36571df2ddb3868a82c388b2cce | Python | brendan-donegan/lp-to-lk | /lp-to-lk/lp-to-lk | UTF-8 | 3,375 | 2.578125 | 3 | [] | no_license | #!/usr/bin/python3
import requests
import sys
from argparse import ArgumentParser
from launchpadlib.launchpad import Launchpad
from requests.auth import HTTPBasicAuth
# Leankit board id and REST endpoints for the Canonical MAAS board.
MAAS_BOARD_ID = "19865958"
MAAS_BOARD_URL = "https://canonical.leankit.com/kanban/api/boards/{boardId}"
MAAS_CARD_URL = "https://canonical.leankit.com/kanban/api/board/{boardId}/AddCard/lane/{laneId}/position/0"
MAAS_BOARD_IDS_URL = "https://canonical.leankit.com/kanban/api/board/{boardId}/GetBoardIdentifiers"
def get_maas_board(lk_user, lk_pass):
    """Fetch the MAAS Leankit board, or return None on an HTTP error."""
    url = MAAS_BOARD_URL.format(boardId=MAAS_BOARD_ID)
    response = requests.get(url, auth=HTTPBasicAuth(lk_user, lk_pass))
    if not response.ok:
        return None
    return response.json()['ReplyData'][0]
def get_defects_identifier(lk_user, lk_pass):
    """Return the card-type id named 'Defect' on the MAAS board.

    Returns None when the request fails or no such card type exists.
    """
    url = MAAS_BOARD_IDS_URL.format(boardId=MAAS_BOARD_ID)
    response = requests.get(url, auth=HTTPBasicAuth(lk_user, lk_pass))
    if response.ok:
        for card_type in response.json()['ReplyData'][0]['CardTypes']:
            if card_type['Name'] == 'Defect':
                return card_type['Id']
    return None
def get_lane_from_name(lane_map, lanes, lane_name):
    """Return the first lane (from *lanes* ids, looked up in *lane_map*)
    whose 'Title' equals *lane_name*, or None when no lane matches."""
    candidates = (lane_map[lane_id] for lane_id in lanes)
    return next((lane for lane in candidates if lane['Title'] == lane_name),
                None)
def add_bug_to_leankit(bug, lane, lk_user, lk_pass):
    """Create a Leankit 'Defect' card for Launchpad *bug* in the lane
    described by *lane*, a slash-separated path like
    'Backlog/Triage' or 'Doing/Review'.

    Silently does nothing when the board cannot be fetched.
    """
    maas_board = get_maas_board(lk_user, lk_pass)
    if maas_board is not None:
        # Index every lane by its id for the path walk below.
        lane_map = {lane['Id']: lane for lane in maas_board['Lanes']}
        lane_tokens = lane.split('/')
        # First path segment selects either the backlog or a top-level lane.
        lane_or_backlog = lane_tokens.pop(0)
        if lane_or_backlog == 'Backlog':
            lane = get_lane_from_name(
                lane_map,
                maas_board['BacklogTopLevelLaneId'],
                lane_or_backlog
            )
        else:
            lane = get_lane_from_name(
                lane_map,
                maas_board['TopLevelLaneIds'],
                lane_or_backlog
            )
        # Walk the remaining path segments down through child lanes.
        # NOTE(review): an unknown segment makes get_lane_from_name return
        # None and the next iteration raise TypeError — confirm intended.
        while len(lane_tokens) > 0:
            lane_name = lane_tokens.pop(0)
            lane = get_lane_from_name(
                lane_map,
                lane['ChildLaneIds'],
                lane_name
            )
        # Now add to Leankit
        print("Adding to Leankit")
        params = {
            'Title': bug.title,
            'Description': bug.description,
            'TypeId': get_defects_identifier(lk_user, lk_pass),
            'Priority': 1,
            'ExternalCardID': str(bug.id),
            'ExternalSystemUrl': bug.web_link,
        }
        uri = MAAS_CARD_URL.format(boardId=MAAS_BOARD_ID, laneId=lane['Id'])
        response = requests.post(
            uri,
            params=params,
            auth=HTTPBasicAuth(lk_user, lk_pass)
        )
def main():
    """Parse CLI args, fetch the Launchpad bug, and import it to Leankit."""
    parser = ArgumentParser("Import a Launchpad bug to Leankit")
    parser.add_argument('lk_user', help='Your Leankit username')
    parser.add_argument('lk_pass', help='Your Leankit password')
    parser.add_argument('lp_id', help='The Launchpad bug number to import')
    parser.add_argument('lane', help='The name of the Leankit board lane to add the card to.')
    args = parser.parse_args()
    # Authenticate against Launchpad production using this script's name.
    lp = Launchpad.login_with(sys.argv[0], 'production')
    bug = lp.bugs[int(args.lp_id)]
    add_bug_to_leankit(bug, args.lane, args.lk_user, args.lk_pass)

if __name__ == "__main__":
    main()
| true |
e68d1e1a9d9012adfad7ff780cb86e8023500b1f | Python | NathanielChavdarov/algorithms-old | /primecruncher.py | UTF-8 | 518 | 3.265625 | 3 | [] | no_license | import primegen as p
def primecruncher():
    """Write every prime up to 1,000,000 to primes.txt, one per line.

    2 and 3 are written directly; remaining candidates (from 5 up) are
    screened with primegen.primetester2.
    """
    # 'with' guarantees the handle is closed even if primetester2 raises
    # (the original relied on an explicit close() that a crash would skip).
    with open("primes.txt", "w") as fileHandler:
        fileHandler.write("2\n3\n")
        for j in range(5, 1000000 + 1):
            if p.primetester2(j):
                fileHandler.write(str(j) + "\n")
if __name__ == "__main__":
    import timeit
    # Double the iteration count until one run takes at least half a
    # second, then report the amortized per-iteration cost.
    n = 1
    while True:
        t = timeit.timeit(primecruncher, number=n)
        if t >= 0.5:
            break
        n <<= 1
    print(f"Time = {t}, Iterations = {n}, time per iteration = {t/n}")
| true |
0470db504db767b7411000822e7d6506afb26103 | Python | fcole90/demotivational-policy-descent | /demotivational_policy_descent/tests/run_data_analysis.py | UTF-8 | 2,273 | 2.796875 | 3 | [
"MIT"
] | permissive | import argparse
import numpy as np
from demotivational_policy_descent.environment.pong import Pong
from demotivational_policy_descent.agents.simple_ai import PongAi
from demotivational_policy_descent.agents.policy_gradient import PolicyGradient
import matplotlib.pyplot as plt
# Command-line flags: --headless disables the pygame window (for servers/CI).
parser = argparse.ArgumentParser()
parser.add_argument("--headless", action="store_true", help="Run in headless mode")
args = parser.parse_args()
def reduce_size(observation):
    """Return *observation* with the first and last 10 entries of axis 1
    removed (crops the left/right margins of a frame)."""
    margin = 10
    return observation[:, margin:-margin]
def plot(observation, title=None, bn=False):
    """Display *observation* (pixel values 0-255) rescaled to [0, 1].

    bn=True renders with the grayscale colormap; otherwise matplotlib's
    default colormap is used.
    """
    image = observation / 255
    if title is not None:
        plt.title(title)
    if bn is True:
        plt.imshow(image, cmap="gray")
    else:
        plt.imshow(image)
    plt.show()
def main():
    """Visual exploration of Pong observations and preprocessing.

    NOTE(review): the bare exit() calls below make everything after the
    first one dead code — this looks like a scratch/debugging script where
    sections were toggled by moving exit() around.
    """
    env = Pong(headless=args.headless)
    episodes = 10
    player_id = 1
    opponent_id = 3 - player_id
    opponent = PongAi(env, opponent_id)
    player = PongAi(env, player_id)
    env.set_names(player.get_name(), opponent.get_name())
    ob1, ob2 = env.reset()
    # Blank the right margin columns of player 1's observation.
    ob1[:, -15:-1, :] = 0
    plot(ob1, "Observation of an initial state")
    exit()
    # --- dead code from here on (see NOTE above) ---
    plot(PolicyGradient.average_black_white(ob1), "Desaturated (average grayscale) observation", bn=True)
    print("Shape:", ob1.shape)
    ob1_p = PolicyGradient.preprocess(ob1)
    print("Shape (preprocessed):", ob1_p.shape)
    plot(ob1_p, "Downsampled, black and white obesrvation", bn=True)
    exit()
    plot(ob2, "State 0, PL2")
    for i in range(5):
        action1 = player.get_action()
        action2 = opponent.get_action()
        (ob1, ob2), (rew1, rew2), done, info = env.step((action1, action2))
    plot(ob1, "State 5, PL1")
    plot(ob2, "State 5, PL2")
    exit()
    for i in range(0, episodes):
        done = False
        while not done:
            action1 = player.get_action()
            action2 = opponent.get_action()
            (ob1, ob2), (rew1, rew2), done, info = env.step((action1, action2))
            if not args.headless:
                env.render()
            if done:
                observation = env.reset()
                # plot(ob1) # plot the reset observation
                print("episode {} over".format(i+1))
    # Needs to be called in the end to shut down pygame
    env.end()

if __name__ == "__main__":
    main()
80a9e302a4c98b8fbb7d9defe3832ff795093672 | Python | ayush1202/BigDataMeetup | /WY_DataAnalysis_Meetup2_Ayush0619.py | UTF-8 | 10,191 | 3.078125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 15:45:51 2018
@author: AyushRastogi
"""
import sqlite3 # database library - included with python3
import pandas as pd # data processing and csv file IO library
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns # python graphing library
plt.style.use('seaborn')
sns.set(style="white", color_codes=True)
plt.rc('figure', figsize=(10, 6)) #plot customization
np.set_printoptions(precision=4, suppress=True)
# plt.rcdefaults() # reset to default matplotlib parameters
import warnings #ignore unwanted messages
warnings.filterwarnings("ignore")
import os
os.path
os.getcwd() # Get the default working directory
# path = r'C:\Users\ayush\Desktop\Meetup2_All Files' #Laptop
path = r'C:\Users\AyushRastogi\OneDrive\Meetup2\Meetup2_BigData' #Computer
os.chdir(path)
# Setting up the connections
# conn = sqlite3.connect(r'C:\Users\ayush\Desktop\Meetup2_All Files\WY_Production.sqlite') # 4,990,092 rows
conn = sqlite3.connect(path+r'\WY_Production.sqlite') # 4,990,092 rows
cur = conn.cursor()
# Merge the data with the fracfocus database
#conn2 = sqlite3.connect(r'C:\Users\ayush\Desktop\Meetup2_All Files\FracFocus.sqlite') # 3,239,346 rows
conn2 = sqlite3.connect(path+r'\FracFocus.sqlite') # 3,239,346 rows
cur2 = conn2.cursor()
# Connections to the two databases complete
# SQL Query 1 - Data from database converted to a dataframe
data = pd.read_sql_query(''' SELECT * FROM Production;''', conn) # Campbell County wells from Production Table in SQL Database
# data2 = pd.read_csv(r'C:\Users\ayush\Desktop\Meetup2_All Files\Converse.csv') #Converse County wells
data2 = pd.read_csv(r'C:\Users\AyushRastogi\OneDrive\Meetup2\Meetup2_BigData\Converse.csv') #Converse County wells
# Append the production files
data_prod = data.append(data2)
print (data_prod.tail(10)) #default head() function prints 5 results
print (data_prod.shape) # in the form of rows x columns
data_prod.columns
data_prod.index
data_prod.describe() # get basic statistics for the dataset, does not include any string type elements
# Number of unique API
data_prod.APINO.nunique() # This gives us 29,737 well records
# SQL Query 2 - Import the data from FracFocus Database while selecting Campbell and Converse counties in Wyoming
data_FF1 = pd.read_sql_query(''' SELECT APINumber AS APINO, TotalBaseWaterVolume, CountyName, CountyNumber
FROM FracFocusRegistry
WHERE (Statenumber = 49 AND (CountyNumber = 5 OR CountyNumber = 9));''', conn2)
data_FF2 = pd.read_sql_query(''' SELECT APINumber AS APINO, TotalBaseWaterVolume, CountyName, CountyNumber
FROM registryupload
WHERE (Statenumber = 49 AND (CountyNumber = 5 OR CountyNumber = 9));''', conn2)
data_FF = pd.merge(data_FF1,data_FF2, on = 'APINO')
# Looking into the FracFocus database
print (data_FF.head(10)) #default head() function prints 5 results
print (data_FF.shape) # in the form of rows x columns
data_FF.columns
data_FF.index
data_FF.APINO.nunique() # This gives us 712 well records
# Look into the format in which APINumber is included in the Fracfocus Registry
data_FF['APINO']
# API Number Format Manipulation
data_prod['StateCode'] = '4900'
data_prod['Trail_zero'] = '0000'
data_prod['APINO'] = data_prod['StateCode'] + data_prod['APINO'].astype(str) + data_prod['Trail_zero']
data_prod['APINO']
data_prod.APINO.nunique() # This gives us 29,737 well records, so we didnt lose any well
# -------------Merging Dataframes (Merge the dataframes based on same API)
data_merged = pd.merge(data_prod,data_FF, on = 'APINO') # Default merge is on 'inner'
data_merged.APINO.nunique() # At this point we have 685 wells
# Column for Cumulative value, Groupby function can be understood as (Split, Apply Function and Combine)
# Also converting the numbers to float
data_merged['cum_oil'] = data_merged.groupby(['APINO'])['Oil'].apply(lambda x: x.cumsum()).astype(float)
data_merged['cum_gas'] = data_merged.groupby(['APINO'])['Gas'].apply(lambda x: x.cumsum()).astype(float)
data_merged['cum_water'] = data_merged.groupby(['APINO'])['Water'].apply(lambda x: x.cumsum()).astype(float)
data_merged['cum_days'] = data_merged.groupby(['APINO'])['Days'].apply(lambda x: x.cumsum()).astype(float)
# Another method for calculating cumulative sum based on a group
#data['cum_oil2'] = data.groupby('APINO')['Oil'].transform(pd.Series.cumsum)
# Sorting the table by APINO
data_merged = data_merged.sort_values(['APINO'])
df = data_merged[['APINO', 'cum_oil', 'cum_gas', 'cum_water', 'cum_days']].astype(float) # New Dataframe with selected columns
df = df[(df[['cum_oil','cum_gas','cum_days']] != 0).all(axis=1)]
df.columns
df = df.reset_index()
df = df.sort_values(['index'])
df.to_csv(os.path.join(path,r'Cumulative_Production_0619.csv')) # Converting the file to csv
df.APINO.nunique() # We have 682 wells left
# Date Manipulation - Convert the date column from string to datetime format
# data['Date'] = pd.to_datetime(data['Date'], infer_datetime_format=True, errors='ignore')
# data['Date'] = pd.to_datetime(data['Date'], errors='ignore')
# filtering the date for production after 2005
# data = data[(data['Date'] > '2005-01-01')]
# data_merged = data_merged.drop(data_merged[data_merged.Days == 99].index)
# multiple rows where days = 99 (incorrect default value)
# data_merged.isnull().sum() # Checking if there is any NULL value in the dataset
# data_merged = data_merged.dropna(axis=0, how='any') # entire row with even a single NA value will be removed - Better option to filter data
# -------------------------INTERPOLATION-------------------------------
# **Interpolation carried out on another script**
df = pd.read_csv(path+r'\Cumulative_Production_OGW_0619.csv')
df
#-------------Brief Statistical Analysis and Visualization - Oil Production------------------------------------------------
# Rename the interpolated columns to friendlier cumulative-production names.
df.rename(columns={'60_Interpol_OIL': '60_day_cum_oil', '90_Interpol_OIL': '90_day_cum_oil', '180_Interpol_OIL': '180_day_cum_oil', '365_Interpol_OIL': '365_day_cum_oil', '730_Interpol_OIL': '730_day_cum_oil' }, inplace=True)
df.columns
df = df[(df[['180_day_cum_oil','365_day_cum_oil','730_day_cum_oil', ]] != 0).all(axis=1)]
# import statsmodels and run basic analysis on 180, 365 and 730 days
# Descriptive Statistics
# NOTE(review): bare expressions like df.describe() / df.set_index('APINO')
# below discard their result in a script — they only display interactively.
df.describe()
df.set_index('APINO')
import statsmodels.formula.api as smf
# Scatter Plot
X1 = df['180_day_cum_oil']
X2 = df['365_day_cum_oil']
Y = df['730_day_cum_oil']
plt.subplot(211)
plt.scatter(X1, Y, marker='o', cmap='Dark2', color='r')
plt.title('Scatter Plot')
plt.xlabel("180 Day Production")
plt.ylabel ("730 Day Production")
plt.subplot(212)
plt.scatter(X2, Y, marker='.', cmap='Dark2',color='g')
plt.xlabel("365 Day Production")
plt.ylabel ("730 Day Production")
# Basic Statistical Analysis Using Statsmodels
# OLS of 730-day cumulative on the 180-day (model1) and 365-day (model2)
# cumulatives.
model1 = smf.ols(formula = 'Y ~ X1', data=df).fit()
print (model1.summary())
# Method 2
model2 = smf.ols(formula = 'Y ~ X2', data=df).fit()
print (model2.params)
# print (model2.summary)
from scipy import stats
from scipy.stats import norm
# Histograms and Density plots for all the columns calculated
# Check for the following statistical parameters
# 1. Normality - checking for the normal distribution
# 2. Homoscedasticity - assumption that dependent variables exhibit equal levels of variance across the range of predictor variables
# 3. Linearity - Good idea to check in case any data transformation is required
# 4. Absence of correlated errors
# Distplot - Visualizing the distribution of a dataset
# Histogram with KDE (Kernel Density Estimation is a non-parametric way to estimate the probability density function of a random variable)
sns.set()
sns.distplot(df['180_day_cum_oil'], axlabel=False, hist=True, kde=True, bins=50, color = 'blue',label ='180 Day Cumulative', hist_kws={'edgecolor':'black'},kde_kws={'linewidth': 4})
sns.distplot(df['365_day_cum_oil'], axlabel=False, hist=True, kde=True, bins=50, color = 'red',label ='365 Day Cumulative', hist_kws={'edgecolor':'black'},kde_kws={'linewidth': 4})
sns.distplot(df['730_day_cum_oil'], axlabel=False, hist=True, kde=True, bins=50, color = 'green',label ='730 Day Cumulative', hist_kws={'edgecolor':'black'},kde_kws={'linewidth': 4}).set_title('Distribution Comparison')
plt.legend()
plt.show()
#skewness and kurtosis
print("Skewness: %f" % df['730_day_cum_oil'].skew())
print("Kurtosis: %f" % df['730_day_cum_oil'].kurt())
#Probability Plot - Quantiles - To get an idea about normality and see where the samples deviate from normality
sns.distplot(df['730_day_cum_oil'], fit = norm)
fig = plt.figure()
res = stats.probplot(df['730_day_cum_oil'], plot=plt)
# Adding the labels
# applying log transformation, in case of positive skewness, log transformations usually works well
df['730_day_cum_oil_trans'] = np.log(df['730_day_cum_oil'])
sns.distplot(df['730_day_cum_oil_trans'], fit = norm)
fig = plt.figure()
res = stats.probplot(df['730_day_cum_oil_trans'], plot=plt)
# Pairtplot - Useful for exploring correlations between multidimensional data
sns.set(style="ticks", color_codes=True)
sns.pairplot(df, size=3, palette="husl", vars=["180_day_cum_oil", "365_day_cum_oil", "730_day_cum_oil"], kind="reg", markers=".")
# Correlation Matrix and Heatmap
df2 = df[["60_day_cum_oil", "90_day_cum_oil", "180_day_cum_oil","365_day_cum_oil","730_day_cum_oil"]]
corr_matrix = df2.corr()
f, ax = plt.subplots(figsize = (6, 6))
cm = sns.light_palette("green", as_cmap=True)
s = sns.heatmap(corr_matrix, vmax=0.8, square=True, annot=True, fmt=".2f", cmap = cm)
# Jointplot - Useful for joint distribution between different datasets
sns.jointplot(X1, Y, data=df, kind='reg')
sns.jointplot(X2, Y, data=df, kind='reg')
# Convert the three dataframes we created to .csv file for tableau
df.to_csv(os.path.join(path,r'Data_Final0619.csv'))
# Close cursors and database connections now that analysis is done.
cur2.close()
conn2.close()
cur.close()
conn.close()
00ef06a541df6a0326ac2ece78fcceb38b6231c7 | Python | joon3007/machine_learning_jobpair | /preprocess/augmentation.py | UTF-8 | 1,918 | 2.71875 | 3 | [] | no_license | import scipy.misc
from scipy.ndimage import zoom
import numpy as np
from PIL import Image, ImageEnhance, ImageFilter
import colorsys
# ref site : https://stackoverflow.com/questions/22937589/how-to-add-noise-gaussian-salt-and-pepper-etc-to-image-in-python-with-opencv/30609854
# ref site : https://stackoverflow.com/questions/22937589/how-to-add-noise-gaussian-salt-and-pepper-etc-to-image-in-python-with-opencv/30609854
def __noisy(img, noise_type = 'gaussian', mean = 0, var = 10):
    """Return a noisy copy of ``img`` (an H x W x C ndarray).

    noise_type:
        'gaussian' - additive Gaussian noise with the given mean/variance
        's&p'      - salt-and-pepper: random pixels set to 1 or 0
        'poisson'  - Poisson-distributed noise generated from the data
        'speckle'  - multiplicative noise: out = img + n * img
    Returns None for an unknown noise_type (unchanged from the original).
    """
    # Gaussian-distributed additive noise
    if noise_type == 'gaussian':
        row, col, ch = img.shape
        sigma = var**0.5
        gauss = np.random.normal(mean, sigma, (row, col, ch))
        gauss = gauss.reshape(row, col, ch)
        noisy = img + gauss
        return noisy
    # Replace random pixels with 0 (pepper) or 1 (salt)
    elif noise_type == 's&p':
        row, col, ch = img.shape  # bugfix: was `image.shape` (undefined name -> NameError)
        s_vs_p = 0.5
        amount = 0.004
        out = np.copy(img)
        # salt mode
        num_salt = np.ceil(amount * img.size * s_vs_p)
        coords = [np.random.randint(0, i - 1, int(num_salt)) for i in img.shape]
        # bugfix: integer-array indices must be a tuple; a list of arrays is
        # rejected by modern NumPy (was deprecated "fancy indexing with a list").
        out[tuple(coords)] = 1
        # pepper mode
        num_pepper = np.ceil(amount * img.size * (1. - s_vs_p))
        coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in img.shape]
        out[tuple(coords)] = 0
        return out
    # Poisson-distributed noise generated from the data
    elif noise_type == 'poisson':
        vals = len(np.unique(img))
        vals = 2 ** np.ceil(np.log2(vals))
        noisy = np.random.poisson(img * vals) / float(vals)
        return noisy
    # Multiplicative noise using out = image + n*image, where n is uniform noise with specified mean & variance
    elif noise_type == 'speckle':
        row, col, ch = img.shape
        gauss = np.random.randn(row, col, ch)
        gauss = gauss.reshape(row, col, ch)
        noisy = img + img * gauss
        return noisy


# add gaussian noise
def gaussian_noise(img, mean = 0, var = 10):
    """Convenience wrapper: additive Gaussian noise with the given mean/variance."""
    return __noisy(img, noise_type = 'gaussian', mean = mean, var = var)
| true |
cd7a6d219b7503bb30a8dc84a49dff114f7cf74c | Python | RKouchoo/ImVideo | /ImVideo.py | UTF-8 | 448 | 2.609375 | 3 | [] | no_license | import cv2
import numpy as np
import glob

frame_delay = 15               # frames per second of the output video
path = '/raw_images'           # directory that holds the source frames
name = 'latest_timelapse.avi'  # output file name

# Bugfix: glob.glob('/raw_images') matches only the directory itself, never the
# image files inside it, so cv2.imread returned None and `.shape` crashed.
# Glob *inside* the directory, in sorted order for a deterministic frame sequence.
img_array = []
size = None
for filename in sorted(glob.glob(path + '/*')):
    img = cv2.imread(filename)
    if img is None:
        # skip unreadable entries (sub-directories, non-image files)
        continue
    height, width, layers = img.shape
    size = (width, height)
    img_array.append(img)

if size is None:
    # guard: VideoWriter would otherwise be called with an undefined size
    raise SystemExit('no readable images found in ' + path)

out = cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*'DIVX'), frame_delay, size)
for frame in img_array:
    out.write(frame)
out.release()
c21c92b4b76e12254470daab949dd56d449e8f88 | Python | jarbus/multiagent-particle-envs | /agents_using_gym/gymMountainCarv0/policy.py | UTF-8 | 3,167 | 2.78125 | 3 | [
"MIT"
] | permissive | import numpy as np
from pyglet.window import key
from multiagent.scenarios.simple import Scenario
# individual agent policy
# individual agent policy
class Policy(object):
    """Abstract base class for per-agent control policies."""

    def __init__(self):
        # One flag per movement direction (left, right, up, down).
        self.move = [False] * 4

    def action(self, obs):
        """Map an observation to an action; subclasses must override."""
        raise NotImplementedError()
# interactive policy based on keyboard input
# hard-coded to deal only with movement, not communication
class InteractivePolicy(Policy):
    """Policy wired to a viewer window's keyboard events.

    Despite the name, ``action`` below derives movement from the observation
    (relative position of the first landmark in obs[2:4]) rather than from the
    keyboard flags; the key callbacks still update ``self.move``.
    """
    def __init__(self, env, agent_index):
        super(InteractivePolicy, self).__init__()
        self.env = env
        #self.agent_index = agent_index
        # movement flags (left, right, up, down) and one flag per comm channel
        self.move = [False for i in range(4)]
        self.comm = [False for i in range(env.world.dim_c)]
        # register keyboard events with this environment's window
        env.viewers[agent_index].window.on_key_press = self.key_press
        env.viewers[agent_index].window.on_key_release = self.key_release
    def action(self, obs):
        """Return the action vector (movement dims + zeroed comm dims).

        Sets the movement flags from the sign of obs[2]/obs[3] — the x/y
        components of the first landmark's position relative to the agent —
        steering the agent toward that landmark.
        """
        '''
        If we try to implement Q-learning in Interactive.action(self, obs),
        we may first need to have a get_reward() function for each agent.
        Or a simpler way is to have Interactive.action(self, obs) return the action space
        each time. Then implement the Q-learning algorithm in bin/interactive.py since interactive.py have access to everything
        and it's more convinient to implement.
        '''
        # obs[2] is the x-axis of the relative position between the first
        # landmark and the agent: negative -> move one way, positive -> other.
        if obs[2] < 0:
            self.move[1] = True
        elif obs[2] > 0:
            self.move[0] = True
        else:
            self.move[0] = False
            self.move[1] = False
        # obs[3] is the y-axis of the same relative position.
        if obs[3] > 0:
            self.move[3] = True
        elif obs[3] < 0:
            self.move[2] = True
        else:
            self.move[2] = False
            self.move[3] = False
        if self.env.discrete_action_input:
            # discrete action: a single integer code per direction
            u = 0
            if self.move[0]: u = 1
            if self.move[1]: u = 2
            if self.move[2]: u = 4
            if self.move[3]: u = 3
        else:
            u = np.zeros(5) # 5-d because of no-move action
            if self.move[0]: u[1] += 1.0
            if self.move[1]: u[2] += 1.0
            if self.move[3]: u[3] += 1.0
            if self.move[2]: u[4] += 1.0
            if True not in self.move:
                u[0] += 1.0
        # append a zeroed communication vector of the world's comm dimension
        return np.concatenate([u, np.zeros(self.env.world.dim_c)])
    # keyboard event callbacks: arrow keys toggle the movement flags
    def key_press(self, k, mod):
        if k==key.LEFT:  self.move[0] = True
        if k==key.RIGHT: self.move[1] = True
        if k==key.UP:    self.move[2] = True
        if k==key.DOWN:  self.move[3] = True
    def key_release(self, k, mod):
        if k==key.LEFT:  self.move[0] = False
        if k==key.RIGHT: self.move[1] = False
        if k==key.UP:    self.move[2] = False
        if k==key.DOWN:  self.move[3] = False
| true |
e922f4f7b3dd0f7e5bc97c008b3a4b52dc34b104 | Python | saubhik/leetcode | /problems/four_sum_ii.py | UTF-8 | 4,160 | 3.484375 | 3 | [] | no_license | from collections import Counter, defaultdict
from typing import List
from unittest import TestCase
class Solution:
    # Brute force over index triples of A, B, C with a hash of D's values.
    # Gets TLEd with one HashMap.
    # Time Complexity: O(n^3); Space Complexity: O(n).
    def fourSumCount(
        self, A: List[int], B: List[int], C: List[int], D: List[int]
    ) -> int:
        n = len(A)
        counter_d = Counter(D)
        total = 0
        for i in range(n):
            for j in range(n):
                partial = A[i] + B[j]
                for k in range(n):
                    # Counter returns 0 for missing keys, so no membership test needed.
                    total += counter_d[-(partial + C[k])]
        return total
class SolutionTwo:
    # Two hash maps: pair sums of (A, B) and of (C, D), then match complements.
    # Time Complexity: O(n^2); Space Complexity: O(n^2).
    def fourSumCount(
        self, A: List[int], B: List[int], C: List[int], D: List[int]
    ) -> int:
        n = len(A)
        ab_sums = defaultdict(int)
        cd_sums = defaultdict(int)
        for i in range(n):
            for j in range(n):
                ab_sums[A[i] + B[j]] += 1
                cd_sums[C[i] + D[j]] += 1
        # Each AB sum s pairs with every CD sum equal to -s.
        total = 0
        for ab_sum, times in ab_sums.items():
            total += times * cd_sums.get(-ab_sum, 0)
        return total
class SolutionThree:
    # Optimized variant of SolutionTwo using a single hash map.
    # Time Complexity: O(n^2); Space Complexity: O(n^2).
    def fourSumCount(
        self, A: List[int], B: List[int], C: List[int], D: List[int]
    ) -> int:
        ab_sums = Counter(a + b for a in A for b in B)
        # For every (c, d) pair, count how many AB pairs cancel it out.
        return sum(ab_sums[-(c + d)] for c in C for d in D)
class SolutionFour:
    # Solve the general case: count zero-sum tuples across k lists by hashing
    # all sums of the first half and scanning all sums of the second half.
    def fourSumCount(
        self, A: List[int], B: List[int], C: List[int], D: List[int]
    ) -> int:
        return self.nSumCount(lists=[A, B, C, D])

    def nSumCount(self, lists: List[List[int]]) -> int:
        """Count tuples (one element per list) whose elements sum to zero."""
        mid = len(lists) // 2

        def all_sums(group: List[List[int]]) -> List[int]:
            # Cartesian-product sums of one element from each list in `group`;
            # an empty group yields the single sum 0.
            sums = [0]
            for values in group:
                sums = [s + v for s in sums for v in values]
            return sums

        left_counts = Counter(all_sums(lists[:mid]))
        return sum(left_counts[-total] for total in all_sums(lists[mid:]))
class TestSolution(TestCase):
    """Runs both LeetCode examples against every implementation above."""

    @staticmethod
    def _implementations():
        # Instantiated lazily so the module-level classes are resolved at call time.
        return (Solution(), SolutionTwo(), SolutionThree(), SolutionFour())

    def test_example_1(self):
        for solver in self._implementations():
            assert solver.fourSumCount(A=[1, 2], B=[-2, -1], C=[-1, 2], D=[0, 2]) == 2

    def test_example_2(self):
        for solver in self._implementations():
            assert (
                solver.fourSumCount(
                    A=[0, 1, -1], B=[-1, 1, 0], C=[0, 0, 1], D=[-1, 1, 1]
                )
                == 17
            )
| true |
078fa7354c097a1d1543347aaba9d2e210f1d995 | Python | Korimse/Baekjoon_Practice | /baekjoon/1520.py | UTF-8 | 618 | 3.015625 | 3 | [] | no_license | from collections import deque
# 4-neighbourhood offsets: dx/dy pairs are (0,1), (0,-1), (-1,0), (1,0).
dx = [0,0,-1,1]
dy = [1,-1,0,0]
def bfs(n, m):
    """Count strictly-descending paths from (0, 0) that reach (n-1, m-1).

    Relies on module globals: `arr` (the height grid) and `dx`/`dy`
    (4-neighbourhood offsets).  Every descending path is enumerated
    separately, so visited cells are deliberately re-queued.
    """
    pending = deque([(0, 0)])
    paths = 0
    while pending:
        row, col = pending.popleft()
        for d_row, d_col in zip(dx, dy):
            nrow, ncol = row + d_row, col + d_col
            if not (0 <= nrow < n and 0 <= ncol < m):
                continue
            if arr[row][col] <= arr[nrow][ncol]:
                continue  # only strictly downhill moves are allowed
            pending.append((nrow, ncol))
            if nrow == n - 1 and ncol == m - 1:
                paths += 1
    return paths
# Read grid dimensions, then n rows of heights, and print the path count.
n, m = map(int, input().split())
arr = []
for i in range(n):
    arr.append(list(map(int, input().split())))
print(bfs(n,m))
1faf90d70072cd60e8266ce190f1c219007b11f0 | Python | tmu-nlp/NLPtutorial2017 | /Omori/tutorial01/train-unigram.py | UTF-8 | 644 | 3.25 | 3 | [] | no_license | import sys
from collections import defaultdict
def train_unigram(input_file, output_file):
    """Estimate a unigram language model from a tokenized corpus.

    Each line of `input_file` is whitespace-tokenized and an end-of-sentence
    marker "</s>" is appended.  Writes "word<TAB>probability" lines to
    `output_file`, most frequent words first.
    """
    counts = defaultdict(int)
    total = 0
    with open(input_file, 'r') as corpus:
        for line in corpus:
            tokens = line.strip().split()
            tokens.append("</s>")
            for token in tokens:
                counts[token] += 1
                total += 1
    with open(output_file, 'w') as model:
        # reverse=True keeps insertion order among equal counts (stable sort),
        # matching a descending sort on the raw counts.
        for token, count in sorted(counts.items(), key=lambda kv: kv[1], reverse=True):
            model.write("{}\t{}\n".format(token, count / total))
if __name__ == "__main__":
    # CLI usage: train-unigram.py <corpus-file> <model-output-file>
    train_unigram(sys.argv[1], sys.argv[2])
| true |
82c2e6bd2385f917ea0fb0e7c2be562ef458233e | Python | Jasonandy/Python-X | /cn/opencv/color/color_four.py | UTF-8 | 1,598 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | import cv2
import numpy as np # import libraries
blue_lower = np.array([100,43,46])
blue_upper = np.array([124,255,255]) # HSV range that corresponds to blue
cap = cv2.VideoCapture(0) # open the default camera
cap.set(3,640)
cap.set(4,480) # set the capture frame width/height
while 1: # main capture loop
    ret,frame = cap.read() # grab one frame from the camera
    frame = cv2.GaussianBlur(frame,(5,5),0) # Gaussian blur to suppress pixel noise
    hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV) # convert BGR to HSV for color thresholding
    mask = cv2.inRange(hsv,blue_lower,blue_upper) # threshold: keep only pixels inside the blue range
    mask = cv2.erode(mask,None,iterations=2) # erode to remove small speckles
    mask = cv2.GaussianBlur(mask,(3,3),0) # smooth the binary mask
    res = cv2.bitwise_and(frame,frame,mask=mask) # masked view of the original frame
    cnts = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2] # contour detection ([-2] picks the contour list across OpenCV return signatures)
    if len(cnts) >0 : # locate the largest blue blob and report its center coordinates
        cnt = max(cnts,key=cv2.contourArea)
        (x,y),radius = cv2.minEnclosingCircle(cnt)
        cv2.circle(frame,(int(x),int(y)),int(radius),(255,0,255),2) # draw the enclosing circle
        print(int(x),int(y))
    else:
        pass
    cv2.imshow('frame',frame) # show the raw frame, the mask and the masked result
    cv2.imshow('mask',mask)
    cv2.imshow('res',res)
    if cv2.waitKey(5) & 0xFF == 27: # exit on ESC (configurable)
        break
cap.release()
cv2.destroyAllWindows() # standard camera cleanup
b76570774b3f65eac1129e40e8a54f997c801a1b | Python | mittalsam20/music_player | /musicplayer.py | UTF-8 | 7,474 | 2.515625 | 3 | [] | no_license | #----------------------------------------------VOLUME FUNCTIONS------------------------------------------
def vup():
    """Raise playback volume by 5% and refresh the volume widgets."""
    mixer.music.set_volume(mixer.music.get_volume() + 0.05)
    level = mixer.music.get_volume() * 100
    voltext.configure(text='{}%'.format(int(level)))
    volbar['value'] = level
def vdown():
    """Lower playback volume by 5% and refresh the volume widgets."""
    mixer.music.set_volume(mixer.music.get_volume() - 0.05)
    level = mixer.music.get_volume() * 100
    voltext.configure(text='{}%'.format(int(level)))
    volbar['value'] = level
def mute():
    """Silence playback, remembering the current level so unmute() can restore it."""
    global cur_vol
    cur_vol = mixer.music.get_volume()  # stash for unmute()
    mixer.music.set_volume(0)
    # swap the mute/unmute buttons and reflect the new state in the UI
    root.mute_button.grid_remove()
    root.unmute_button.grid()
    root.status_song.configure(text='MUTED')
    level = mixer.music.get_volume() * 100
    voltext.configure(text='{}%'.format(int(level)))
    volbar['value'] = level
def unmute():
    """Restore the pre-mute volume and swap the mute/unmute buttons back."""
    root.mute_button.grid()
    root.unmute_button.grid_remove()
    root.status_song.configure(text='Playing..')
    mixer.music.set_volume(cur_vol)  # level saved by mute()
    level = mixer.music.get_volume() * 100
    voltext.configure(text='{}%'.format(int(level)))
    volbar['value'] = level
#------------------------------------------------PLAY/PAUSE----------------------------------------
def songpath():
    """Ask the user for an audio file and store its path in `songname`."""
    chosen = filedialog.askopenfilename(title='Select Audio File')
    songname.set(chosen)
def playsong():
    """Load the currently selected file into the mixer and start playback."""
    root.status_song.configure(text='PLAYING..')
    seekbody.grid()  # reveal the seek-bar widgets
    mixer.music.load(songname.get())
    mixer.music.play()
def resumesong():
    """Resume paused playback and show the pause button again."""
    mixer.music.unpause()
    root.status_song.configure(text='RESUMED')
    root.resume_button.grid_remove()
    root.pause_button.grid()
def pausesong():
    """Pause playback and swap in the resume button."""
    mixer.music.pause()
    root.status_song.configure(text='PAUSED')
    root.pause_button.grid_remove()
    root.resume_button.grid()
def stopsong():
    """Stop playback entirely and update the status label."""
    root.status_song.configure(text='STOPPED')
    mixer.music.stop()
# def realtime():
# seek_pos=mixer.music.get_pos()/1000
# seekbar['value']=seek_pos
# seekbar.after(2,realtime)
#---------------------------------------------GUI FUNCTION-------------------------------------------------
def gui():
    """Build the whole player UI on the global `root` window.

    Widgets the callbacks need later are exposed either as globals
    (voltext, volbar, seekbody, ...) or as attributes on `root`
    (status_song, pause/resume and mute/unmute button pairs).
    """
    global voltext,volbar,status_song,seekbody,song_len,seekbar,endtime
    #-------------------------------------------------LABELS----------------------------------------------
    browse_song=Label(root,text="Select Audio Track",bg="lawn green",font=('Comic Sans MS',20,'bold'))
    browse_song.grid(row=0,column=0,padx=10,pady=0)
    root.status_song=Label(root,text="No Track Selected",bg="lawn green",font=('Comic Sans MS',20,'bold'))
    root.status_song.grid(row=1,column=2,ipadx=5,ipady=5)
    credit=Label(root,text="MADE BY SAMARTH GARG(18BEC094) \n AND \n SAMAKSH MITTAL(18BEC093)",bg="lawn green",font=('arial',14,'bold'))
    credit.grid(row=3,column=1,padx=0,pady=0,columnspan=3)
    #-------------------------------------------------ENTRIES----------------------------------------------
    # Entry bound to `songname`, filled by the Browse dialog.
    song_entry=Entry(root,font=('arial',20,'bold'),width=40,textvariable=songname)
    song_entry.grid(row=0,column=1,columnspan=3,padx=20,pady=0)
    #-------------------------------------------------BUTTONS------------------------------------------
    browse_button=Button(root,text='Browse',font=('Comic Sans MS',25,'bold'),width=8,activebackground='grey30',command=songpath,bd=5)
    browse_button.grid(row=0,column=4,padx=10,pady=20)
    play_button=Button(root,text='PLAY',bg="lawn green",font=('Comic Sans MS',25,'bold'),width=8,bd=5,activebackground='green4',command=playsong)
    play_button.grid(row=1,column=0,pady=10)
    # Resume and Pause share one grid cell; the callbacks toggle which is visible.
    root.resume_button=Button(root,text='RESUME',font=('Comic Sans MS',25,'bold'),width=8,activebackground='grey30',command=resumesong,bd=5)
    root.resume_button.grid(row=2,column=0,pady=10)
    root.pause_button=Button(root,text='PAUSE',font=('Comic Sans MS',25,'bold'),width=8,bd=5,activebackground='grey30',command=pausesong)
    root.pause_button.grid(row=2,column=0,pady=10)
    stop_button=Button(root,text='STOP',bg="firebrick1",font=('Comic Sans MS',25,'bold'),width=8,bd=5,activebackground='red4',command=stopsong)
    stop_button.grid(row=3,column=0)
    vup_button=Button(root,text='Vol +',font=('Comic Sans MS',25,'bold'),width=8,bd=5,activebackground='grey30',command=vup)
    vup_button.grid(row=1,column=4,padx=20,pady=0)
    vdown_button=Button(root,text='Vol -',font=('Comic Sans MS',25,'bold'),width=8,bd=5,activebackground='grey30',command=vdown)
    vdown_button.grid(row=2,column=4,padx=0,pady=0)
    # Mute and Unmute also share one cell, toggled by mute()/unmute().
    root.unmute_button=Button(root,text='UNMUTE',font=('Comic Sans MS',25,'bold'),width=8,bd=5,activebackground='grey30',command=unmute)
    root.unmute_button.grid(row=3,column=4,padx=0,pady=0)
    root.mute_button=Button(root,text='MUTE',font=('Comic Sans MS',25,'bold'),width=8,bd=5,activebackground='grey30',command=mute)
    root.mute_button.grid(row=3,column=4,padx=0,pady=0)
    #----------------------------------------------------------VOLUME BAR-------------------------------------------------
    vollabel=Label(root,text='',bg='red',bd=1)
    vollabel.grid(row=1,column=5,rowspan=3,padx=10,ipadx=0,pady=20)
    volbar=Progressbar(vollabel,orient=VERTICAL,mode='determinate',value=100,length=220)
    volbar.grid(row=0,column=0,ipadx=8)
    voltext=Label(vollabel,text='100%',bg='lightgray',width=4,font=('arial',10,'bold'))
    voltext.grid(row=0,column=0)
    #----------------------------------------------------------SEEK BAR-------------------------------------------------
    # Hidden until playsong() calls seekbody.grid().
    seekbody=Label(root,text='',bg='red',bd=1)
    seekbody.grid(row=2,column=1,columnspan=3,padx=0,pady=0)
    seekbody.grid_remove()
    starttime=Label(seekbody,text='0:00',bg='red',bd=1)
    starttime.grid(row=0,column=0,padx=0,pady=0)
    endtime=Label(seekbody,text='3:00',bg='red',bd=1)
    endtime.grid(row=0,column=3,padx=0,pady=0)
    seekbar=Progressbar(seekbody,orient=HORIZONTAL,mode='determinate',value=40,length=530)
    seekbar.grid(row=0,column=2,ipady=2)
#-----------------------------------------------------------------MAIN--------------------------------------------------
from tkinter import * # tkinter is imported for GUI
from tkinter import filedialog # for browsing through the files
from tkinter.ttk import Progressbar # for volume and seek bar
from pygame import mixer # for different functions like mixer.music.play()
# from mutagen.mp3 import MP3
# import datetime
mixer.init() # intializing mixer into the program(a function from pygame)
root=Tk()
root.geometry('1150x370+120+150') # 1200X370 is the dimensions of appication dialog box
root.title('MUSIC PLAYER') # 100 is the margin from left side and 150 is the margin from top
root.resizable(0,0)
root.configure(bg='gray25')
songname=StringVar()
gui() # calling user defined function gui(all the frontend is in this function)
root.mainloop() # infinte loop
#--------------------------------------MADE BY SAMAKSH MITTAL AND SAMARTH GARG--------------------------------------------------
#-----------------------------------------------END OF PROGRAM------------------------------------------------------------------- | true |
abc71348921b57342f14e5036686f72e2276b048 | Python | AlphaWolf384/pythonlearning | /Game/RockPaperScissorv1.2.py | UTF-8 | 1,497 | 3.984375 | 4 | [] | no_license | ''' Rock, Paper, Scissors v1.2 '''
from random import randint
print('Press X to stop game')
p_score = 0
c_score = 0
d_score = 0
while True:
print('Player: ' + str(p_score) + ' & Computer: ' + str(c_score) + ' & Draw: ' + str(d_score))
player = raw_input('Rock (r), Paper (p), or Scissors (s)? ')
if player == 'r' or player == 'p' or player == 's':
chosen = randint(1,3)
if player == 'r':
player_sign = 'O'
elif player == 'p':
player_sign = '___'
else:
player_sign = '>8'
if chosen == 1:
computer = 'r'
computer_sign = 'O'
elif chosen == 2:
computer = 'p'
computer_sign = '___'
else:
computer = 's'
computer_sign = '>8'
print(player_sign + " vs " + computer_sign)
if player == computer:
print('DRAW!')
d_score += 1
elif player == 'r' and computer == 's':
print('PLAYER WIN!')
p_score += 1
elif player == 'p' and computer == 'r':
print('PLAYER WIN!')
p_score += 1
elif player == 's' and computer == 'p':
print('PLAYER WIN!')
p_score += 1
else:
print('COMPUTER WIN!')
c_score += 1
elif player == 'x' or player == 'X':
break
else:
print('Please Try again')
| true |
d2d7682d630303bcb04e95d9f5bfff9a2eeda0ab | Python | IanOlin/linearityFinal | /simulation.py | UTF-8 | 836 | 3.484375 | 3 | [] | no_license | import math
import numpy as np
class Qubit(object):
    '''This defines a qubit in our simulation, we will be using a spin-(1/2) particle
    spin is defined as (alpha)|0> + (beta)|1>

    Bloch-sphere parameterisation: alpha = cos(theta/2),
    beta = e^(i*phi) * sin(theta/2).

    NOTE(review): theta and phi are derived through sqrt() magnitudes, so the
    signs of x, y, z are discarded -- confirm that is intended.
    '''
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z
        # polar angle from the z-axis and azimuthal angle in the x-y plane
        self.theta = math.atan2(math.sqrt(x**2 + y**2), math.sqrt(z**2))
        self.phi = math.atan2(math.sqrt(x**2 + y**2), math.sqrt(x**2))
        self.alpha = math.cos(self.theta/2.)
        self.beta = (math.e**(1j*self.phi))*math.sin(self.theta/2.)

    def check(self):
        """Validate normalisation |alpha|^2 + |beta|^2 == 1; print and return the verdict.

        Bugfix: the original compared the probability sum to 1 with exact
        float equality, which can fail for valid states due to rounding;
        compare within a small tolerance instead.
        """
        prob_total = abs(self.alpha)**2 + abs(self.beta)**2
        if abs(prob_total - 1.0) < 1e-9:
            print("Good spin")
            return True
        else:
            print("Bad spin")
            return False
def test():
    """Smoke test: build a qubit along +x and validate its normalisation."""
    qubit = Qubit(1, 0, 0)
    qubit.check()
if __name__ == '__main__':
    # run the smoke test when executed as a script
    test()
| true |
6c74c5a83cc9f0c646636109dc03beb7b410ba64 | Python | cn5036518/xq_py | /other/a1接口层框架设计1-0326/src22/001/002.py | UTF-8 | 385 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# h1=0
class H2:
    """Toy class: the `h1` attribute only exists after test() has been called."""

    def __init__(self):
        pass

    def test(self):
        # lazily create the attribute and hand back its value
        self.h1 = 2
        return self.h1
# Module-level instance; test() has not been called on it, so h11.h1 does not exist yet.
h11 = H2()
# ret1 = h11.test()
# print(ret1)
# print(h1)
class H1:
    """Toy class that reads `h1` off a fresh H2 instance."""

    def test2(self):
        # Bugfix: H2.__init__ never creates `h1`, so `H2().h1` raised
        # AttributeError.  Call test() first so the attribute exists.
        instance = H2()
        instance.test()
        return instance.h1
h33 = H1()
# NOTE(review): relies on H1.test2() obtaining an `h1` attribute from H2 — verify it exists.
print(h33.test2())
c24f14f7afac629106010c2052064c5dba32ceaf | Python | Dearyyyyy/TCG | /data/3920/AC_py/515567.py | UTF-8 | 170 | 3.3125 | 3 | [] | no_license | # coding=utf-8
aNum = input()
a1 = int(aNum[0])
a2 = int(aNum[1])
a3 = int(aNum[2])
aNum = int(aNum)
if aNum == a1**3+a2**3+a3**3:
print("YES")
else:
print("NO") | true |
43c5fb82d3e4611a047001cd9e7cc4d4e05480f0 | Python | Real-Fael/faculdade_atividades | /data_structure/extensible_hash/Hash_extensivel.py | UTF-8 | 12,971 | 3.0625 | 3 | [] | no_license | import sys
import csv
import numpy as np
import time
TAMPAG_DEFAULT = 3000 # page size that worked well for every tested case
FILE_DEFAULT = "D_a2_i20000.csv" # default input file with the operations to replay
class Registro(object):
    """A record: an integer key followed by the remaining payload fields.

    NOTE(review): the `campos=None` default falls back to [[], []] and
    int([]) raises TypeError -- callers always pass a real field list.
    """

    def __init__(self, campos=None):
        if campos is None:
            campos = [[], []]
        self.key = int(campos[0])   # first column is the record key
        self.campos = campos[1:]    # everything else is payload

    def __sizeof__(self):
        # Record size = key size + the byte size of each payload value.
        payload_bytes = sum(sys.getsizeof(campo) for campo in self.campos)
        return sys.getsizeof(self.key) + payload_bytes

    def __str__(self):
        return "{}:{}".format(self.key, self.campos)

    def getKey(self):
        # Accessor kept for compatibility with the rest of the file.
        return self.key

    def getRegistro(self):
        return self
class Bucket(object):
    """A page of records for the extendible hash; splits are driven by the local depth."""

    def __init__(self, tamBuket=1024, profundidade=2):
        self.profundidade = profundidade   # local depth of this bucket
        self.tamBuket = tamBuket           # page capacity in bytes
        self.registros = []                # stored records

    def __str__(self):
        body = "".join("(" + str(reg) + ")" + " , " for reg in self.registros)
        return "|Profundidade:{} ".format(self.profundidade) + body + "| "

    def __sizeof__(self):
        # Bucket size is the sum of its records' sizes (page overhead ignored).
        return sum(sys.getsizeof(reg) for reg in self.registros)

    def estaVazia(self):
        # True when the bucket holds no record at all.
        return not self.registros

    def estaCheia(self):
        # An empty bucket always has room.
        if self.estaVazia():
            return False
        # Full when the free space cannot fit another record the size of the first one.
        free = self.tamBuket - self.__sizeof__()
        return free < self.registros[0].__sizeof__()

    def inserirNoBucket(self, elemento, ignore=False):
        """Append `elemento`; on overflow (unless `ignore`) deepen and return a sibling bucket."""
        if not self.estaCheia():
            self.registros.append(elemento)
            return None
        # Overflowing record is kept here anyway; the caller redistributes later.
        self.registros.append(elemento)
        if ignore:  # ignore=True means "accept the overflow on this page"
            return None
        self.profundidade += 1
        return Bucket(self.tamBuket, self.profundidade)
class Hash_extensivel(object):  # extendible hash
    """Extendible hash: a directory (`vetBucket`) of pointers to Bucket pages.

    `profundidade_global` is the directory depth; each Bucket keeps its own
    local depth.  Keys map to directory slots via `key % 2**depth`.
    """
    def __init__(self, tamBucket=1024):
        self.profundidade_global = 2  # initial global (directory) depth
        self.tamBucket = tamBucket  # page size used for every bucket
        # directory with 2**global_depth entries, each pointing at its own empty bucket
        self.vetBucket = [Bucket(self.tamBucket) for x in range(2 ** self.profundidade_global)]
    def removerHash(self, chaveElemento):  # remove a record by key
        # locate the element: position inside the bucket plus the bucket itself
        pos, auxBucket = self.buscarHash(chaveElemento)
        if pos is not None and auxBucket is not None:
            # pop the record so the caller can still process it
            aux = auxBucket.registros.pop(pos)
            return aux
        else:
            # key not present, nothing to remove
            return None
    def buscarHash(self, chaveElemento):  # look up a record by key
        # global hash tells us which directory pointer to start from
        chaveHash = chaveElemento % (2 ** self.profundidade_global)
        profundidadeAterior = self.profundidade_global  # previous depth used as reference
        # local depth of the bucket referenced by the global hash
        prufundidadeAtual = self.vetBucket[chaveHash].profundidade
        while profundidadeAterior > prufundidadeAtual:  # re-hash at the shallower local depth
            profundidadeAterior = prufundidadeAtual  # previous takes the current value
            chaveHash = chaveElemento % (
                2 ** prufundidadeAtual)  # new slot computed from the bucket's local depth
            prufundidadeAtual = self.vetBucket[chaveHash].profundidade  # refresh the current depth
        # chaveBucket = chaveElemento % (2 ** auxBucket.profundidade)
        auxBucket = self.vetBucket[chaveHash]  # bucket selected by the final hash
        encontrou = False
        cont = -1
        pos = -1
        for x in auxBucket.registros:  # linear scan of the bucket's records
            cont += 1
            if x.getKey() == chaveElemento:  # key match
                encontrou = True
                registro = x
                pos = cont
        if encontrou:  # element matching the key was found
            return pos, auxBucket  # position and bucket holding the record
        else:
            return None, None
    # version 2 of the insert
    def insereHash(self, elemento):  # insert a record into the hash
        chaveHash = elemento.getKey() % (2 ** self.profundidade_global)  # global hash
        novoBucket = self.vetBucket[chaveHash].inserirNoBucket(elemento)
        # a non-None return means the bucket overflowed and split
        if novoBucket is not None:
            # print("bucket dividido")
            if novoBucket.profundidade > self.profundidade_global:  # deeper bucket: directory must double
                self.profundidade_global += 1  # grow the global depth
                divisorAnterior = 2 ** (self.profundidade_global - 1)
                divisorAtual = 2 ** self.profundidade_global
                print("aumentando tamanho do HEAD para:", divisorAtual)
                # walk the new half of the directory, which doubles in size
                for x in range(divisorAnterior, divisorAtual):
                    # new slots alias the buckets already reachable under the previous modulus
                    # self.vetBucket[x] = self.vetBucket[x % divisorAnterior]#test
                    self.vetBucket.append(self.vetBucket[x % divisorAnterior])
            # install the new bucket at every directory slot that addresses it:
            # first slot is key % 2**(depth-1) + 2**(depth-1); the rest follow
            # at a stride of 2**local_depth up to 2**(global-local) entries.
            profAnt= novoBucket.profundidade-1
            total=int(self.profundidade_global-novoBucket.profundidade)
            primeiro=elemento.getKey() % 2**profAnt + 2**profAnt
            self.vetBucket[primeiro] = novoBucket
            t= 2**(total)
            for cont in range(1,t):
                proximo=primeiro + cont*2**novoBucket.profundidade
                self.vetBucket[proximo]=novoBucket
            # take the records from the overflowing bucket to pass through the hash again
            aux = self.vetBucket[chaveHash].registros
            # clear the old bucket before redistributing
            self.vetBucket[chaveHash].registros = []
            self.insereOverFlow(aux)
    def insereOverFlow(self,vetReg):
        # redistribute records after a split; overflow is tolerated here (ignore=True)
        pot= 2 ** self.profundidade_global
        for x in vetReg:
            # self.insereHash(x)
            novaChaveHash = x.getKey() % pot
            self.vetBucket[novaChaveHash].inserirNoBucket(x, ignore=True)
    def __str__(self):  # string rendering of the whole directory
        cont = -1
        aux = ""
        for x in self.vetBucket:
            cont += 1
            aux = aux + "indice {}-->".format(cont) + x.__str__() + "\n"
        return aux
def get_arguments(print_help=False):
    """Parse the command-line options for the hash program.

    -tp/--tamPagina : bucket page size in bytes (default TAMPAG_DEFAULT)
    -f/--filename   : CSV file with the +/- operations to replay
    """
    import argparse

    parser = argparse.ArgumentParser('HashEX')
    # -tp sets the page size
    parser.add_argument('-tp', '--tamPagina', action='store', type=int,
                        default=TAMPAG_DEFAULT,
                        help='Maximum page size (default: ' + str(TAMPAG_DEFAULT) + ')')
    # -f is the input file name
    parser.add_argument('-f', '--filename', action='store', type=str,
                        default=FILE_DEFAULT,
                        help='Input filename (default: ' + FILE_DEFAULT + ')')
    args = parser.parse_args()
    if print_help:
        parser.print_help()
    return args
if __name__ == '__main__':
    inicio = time.time()  # start timing the hash construction
    # parse command-line arguments
    args = get_arguments()
    # open the operations file
    arquivo = open(args.filename)
    dados = csv.DictReader(arquivo)
    # Bmais=Arvore(tamPaginas=526)
    hashPrincipal = Hash_extensivel(args.tamPagina)
    quantidade = 0
    for data in dados:
        operacao = list(data.values())  # turn each dictionary row into a plain list
        if operacao[0] == "+":  # "+" rows insert a record
            quantidade += 1
            aux = [int(a) for a in operacao[1:]]  # convert the row into a list of ints
            aux = Registro(aux)  # build a Registro object from it
            #print("\ninserindo o registro:{n}".format(n=quantidade), " de tamanho", aux.__sizeof__(),
            #      "Chave:{}".format(aux.getKey()))
            hashPrincipal.insereHash(aux)
            # print("iteracao {}".format(quantidade), "\n*******HASH*********\n\n", hashPrincipal)
            #if quantidade ==1000:
            #    print("\n*******HASH*********\n\n", hashPrincipal)
            #    break
            # print(quantidade)
        elif operacao[0] == "-":  # "-" rows remove a record by key
            quantidade += 1
            #print("\nRemovendo o registro:{n}".format(n=quantidade), "Chave:{}".format(operacao[1]))
            hashPrincipal.removerHash(int(operacao[1]))
    fim = time.time()
    print("\nA criação desta HASH demorou:{:4.4f} segundos".format(fim - inicio))
    # interactive menu: search, remove, show the hash, or quit
    while (1):
        entrada = int(input(
            "******Escolha uma opção******\n-1) Buscar Elemento\n-2) Remover Elemento\n-3) Mostrar Hash\n-4) Sair\n"))
        if entrada == 1:
            entrada2 = int(input("digite a chave para buscar: "))
            inicio = time.time()
            pos,baux=hashPrincipal.buscarHash(entrada2)  # search and display the element
            if baux is not None:
                print("encontrado elemento: {} , no Bucket{}".format(baux.registros[pos],baux))
            else:
                print("nao encontrado")
            fim = time.time()
            print("\nA execução desta função demorou:{:4.4f} segundos".format(fim - inicio))
        elif entrada == 2:
            entrada2 = int(input("digite a chave para remoção: "))
            inicio = time.time()
            baux=hashPrincipal.removerHash(entrada2)  # remove and show the removed element
            if baux is not None:
                print("elemento: {} , Removido".format(baux))
            else:
                print("nao encontrado")
            fim = time.time()
            print("\nA execução desta função demorou:{:4.4f} segundos".format(fim - inicio))
        elif entrada == 3:
            print("\n*******HASH*********\n\n", hashPrincipal)  # show the hash
        elif entrada == 4:
            break
        else:
            print("Opção invalida tente novamente!")