blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2cbe147eaa879f496cfa8f57646c8f13e5688a84
|
Python
|
cmartinezbjmu/DatosEnormes
|
/Script/mapReduce/Reuters/taxis/reducer-1.py
|
UTF-8
| 1,262
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import sys
import operator
from collections import Counter

# Hadoop-streaming reducer: counts destinations per (vehicle type, weekday)
# and emits the most frequent destination for each pair.
# dic_principal maps "tipo-veh_dia-semana" -> {destino: count}
dic_principal = dict()

for entrada in sys.stdin:
    # strip() returns a new string; the original discarded the result,
    # so assign it back (this also removes the trailing newline).
    entrada = entrada.strip()
    try:
        tipo_veh, dia_semana, destino = entrada.split(',')
        llave_principal = '{}_{}'.format(tipo_veh, dia_semana)
        # BUG FIX: the original kept a single module-level `conteo_destinos`
        # and incremented it even when the current line belonged to a
        # *different* principal key, corrupting counts when keys interleave.
        # Always update the dict stored for this specific key.
        conteo_destinos = dic_principal.setdefault(llave_principal, dict())
        conteo_destinos[destino] = conteo_destinos.get(destino, 0) + 1
    except ValueError:
        # malformed line (wrong number of comma-separated fields): skip it
        pass

# For each (vehicle, weekday) pair emit "destino,count" for the most
# frequent destination.
salida = dict()
for key, values in dic_principal.items():
    max_ubicacion = max(values, key=values.get)
    salida[key] = '%s,%s' % (max_ubicacion, values[max_ubicacion])
# print(...) works identically in Python 2 and 3 for a single argument.
print(salida)
| true
|
0d74a4311d188d16e18c5d0af6429fe786ac0c52
|
Python
|
kaulraghav/Top-Interview-Questions
|
/1. Easy/2. Strings/3. First Unique Character in a String.py
|
UTF-8
| 469
| 4
| 4
|
[] |
no_license
|
#Dictionary + .index() function
#Time: O(n), Space: O(1)
class Solution:
    def firstUniqChar(self, s: str) -> int:
        """Return the index of the first non-repeating character in s, or -1.

        Time: O(n); Space: O(1) — the dict holds at most the alphabet size.
        """
        # renamed from `dict`, which shadowed the builtin type
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        # Scan positions in order; this avoids the original's s.index(k)
        # re-scan (O(n) per candidate, O(n^2) worst case).
        for i, ch in enumerate(s):
            if counts[ch] == 1:
                return i
        return -1
| true
|
d54cfe61bda47f78f28fafd6522e2cb72965e42e
|
Python
|
rocky-ye/NLP
|
/predict.py
|
UTF-8
| 2,224
| 3.078125
| 3
|
[] |
no_license
|
import gensim, os
from joblib import load
# load models
count_vect = load('./models/CountVectorizer.joblib')
tfidf_transformer = load('./models/TfidfTransformer.joblib')
clf = load('./models/bestModel.joblib')
def clean_doc(text, vocab):
    """Turn a text doc (or list of docs) into cleaned, vocab-filtered text.

    Args:
        text (str or iterable of str): uncleaned text
        vocab (set): vocabulary from the training data

    Returns:
        str when `text` is a single string, otherwise an ndarray of
        cleaned strings, one per input document.
    """
    # local imports keep this file runnable without touching its header
    import re
    import numpy as np

    def _clean_one(doc):
        # BUG FIX: the original used str.replace('[^\w\s]', ''), but
        # str.replace is literal (not regex), so punctuation was never
        # removed from single strings. re.sub applies the intended pattern.
        doc = re.sub(r'[^\w\s]', '', doc.lower())
        # keep only words present in the training vocabulary
        return ' '.join(w for w in doc.split() if w in vocab)

    if isinstance(text, str):  # single string
        return _clean_one(text)
    # Iterable of strings. The original referenced pd/np here without
    # importing them (NameError); plain iteration + numpy replaces that.
    return np.array([_clean_one(str(doc)) for doc in text])
def load_doc(filename):
    """Load a whole text file into memory.

    Args:
        filename (String): file path

    Returns:
        str: the file's full contents
    """
    # `with` guarantees the handle is closed even if read() raises.
    with open(filename, 'r') as handle:
        return handle.read()
def predict(text, count_vect, tfidf_transformer, clf):
    """Predict sentiment for the given documents.

    Args:
        text (iterable of str): input documents
        count_vect (sklearn transformer): turns text into token counts
        tfidf_transformer (sklearn transformer): applies tf-idf weighting
        clf (sklearn model): fitted linear SVM classifier

    Returns:
        the classifier's predictions
    """
    counts = count_vect.transform(text)
    weighted = tfidf_transformer.transform(counts)
    return clf.predict(weighted)
def sentiment(text):
    """Predict the sentiment of `text`; returns None for empty input."""
    # guard clause instead of wrapping the whole body in the if
    if not len(text):
        return None
    # load vocab dictionary from the cleaned training data
    vocab = set(load_doc('./data/cleaned/vocab.txt').split())
    # clean test data
    cleaned = clean_doc(text, vocab)
    print('predicte here')
    return predict([cleaned], count_vect, tfidf_transformer, clf)[0]
| true
|
2d6475ca9dc30b0e546a6b990a2aa587fa91ef99
|
Python
|
SmileXDrus/Selenium-Python_autotests
|
/ExplicitWaitExp.py
|
UTF-8
| 897
| 2.640625
| 3
|
[] |
no_license
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium import webdriver
# Setup: disable W3C mode for this ChromeDriver configuration.
opt = webdriver.ChromeOptions()
opt.add_experimental_option('w3c', False)
# Connect
dr = webdriver.Chrome(chrome_options=opt)
try:
    dr.get("http://suninjuly.github.io/wait2.html")
    # Ask Selenium to poll for up to 5 seconds until the button is clickable.
    button = WebDriverWait(dr, 5).until(
        ec.element_to_be_clickable((By.ID, "check"))
    )
    button.click()
    # Check the result message. find_element_by_id is deprecated/removed in
    # Selenium 4; find_element(By.ID, ...) is the supported form.
    if dr.find_element(By.ID, "check_message").text == "Проверка прошла успешно!":
        print("Верно!")
    else:
        print("Ошибка!")
finally:
    # The original duplicated close()/quit() in both branches and leaked the
    # browser if anything above raised; finally releases it exactly once.
    dr.close()
    dr.quit()
| true
|
9317d3b716d5a43a940bcee25f1088db4604c88f
|
Python
|
kapootot/projects
|
/py_scripts/src/sort.py
|
UTF-8
| 237
| 3.40625
| 3
|
[] |
no_license
|
a = [7, 1, 3, 5, 2, 8]
# a = [1, 7, 3, 5, 2, 8]

def my_sort(arr):
    """Sort `arr` in place with selection sort and return it.

    The original was broken: `arr[len(arr)]` raised IndexError, the loop
    index `i` was never incremented (infinite loop), and it mutated the
    global `a` instead of its `arr` parameter.
    """
    n = len(arr)
    for i in range(n):
        # find the smallest remaining element and swap it into slot i
        min_idx = i
        for j in range(i + 1, n):
            if arr[j] < arr[min_idx]:
                min_idx = j
        arr[i], arr[min_idx] = arr[min_idx], arr[i]
    return arr
| true
|
8bfbbb0d8870921f91583940dbac85cc6b0dae75
|
Python
|
AurelC2G/practice
|
/googlecodejam/2018/1B/Transmutation/code.py
|
UTF-8
| 963
| 2.640625
| 3
|
[] |
no_license
|
import sys
import itertools
import math
import collections
import functools

# Deep recursion is possible: consumeOne chains through transmutation recipes.
sys.setrecursionlimit(10000)


def inputInts():
    # Read one whitespace-separated line of integers (Python 2: raw_input).
    return map(int, raw_input().split())


def consumeOne(what):
    # Try to obtain one unit of metal `what`: take it from stock G if any,
    # otherwise recursively transmute the two ingredients in transforms[what].
    # Returns True when a unit was successfully consumed.
    if G[what]:
        G[what] -= 1
        return True
    if reserved[what]:
        # we are trying to produce this, there must be a loop
        return False
    Tr = transforms[what]
    if Tr[0] == what or Tr[1] == what:
        # this transformation just wastes resources
        return False
    # Mark `what` as in-progress so cycles in the recursion are detected.
    reserved[what] = True
    out = consumeOne(Tr[0]) and consumeOne(Tr[1])
    reserved[what] = False
    return out


# Driver (Google Code Jam 2018 1B "Transmutation"): for each test case read
# M recipes and the stock vector G, then count how many units of metal 0
# can be produced one at a time.
T = int(raw_input())
for testId in range(T):
    M = int(raw_input())
    transforms = []
    for i in xrange(M):
        a,b = inputInts()
        transforms.append([a-1, b-1])
    G = inputInts()
    reserved = [False for i in xrange(M)]
    res = 0
    while consumeOne(0):
        res += 1
    print "Case #{:d}: {:d}".format(testId+1, res)
| true
|
d3b1bc2a52cbce04f477570ffeab8eac2394ec24
|
Python
|
taesookim0412/Python-Algorithms
|
/2020_/05/LeetCode/11M_NumberOfIslands.py
|
UTF-8
| 1,766
| 3.671875
| 4
|
[] |
no_license
|
from typing import List
#05-21-2020_
#Runtime: 156 ms, faster than 41.77% of Python3 online submissions for Number of Islands.
#Memory Usage: 15 MB, less than 9.40% of Python3 online submissions for Number of Islands.
#https://leetcode.com/problems/number-of-islands
#Interesting finds:
#The order for the recursive steps don't matter you could do UDLR, LDRU, RLUD, whichever,
#and it won't matter
#Also the "-1" was unused.
class Solution:
    # last grid handed to numIslands; shared so traverseIsland can reach it
    grid = [[]]

    # O(n^2) runtime, constant extra space (the grid itself is destroyed)
    def numIslands(self, grid: List[List[str]]) -> int:
        """Count 4-connected groups of "1" cells in the grid."""
        self.grid = grid
        total = 0
        print(grid)
        for row in range(len(grid)):
            for col in range(len(grid[row])):
                if grid[row][col] == "1":
                    # sink the whole island, then count it once
                    self.traverseIsland(row, col)
                    total += 1
        return total

    def traverseIsland(self, i: int, j: int):
        """Flood-fill: mark every "1" reachable from (i, j) as "-1"."""
        cells = self.grid
        # out of bounds or not land: stop
        if not (0 <= i < len(cells)) or not (0 <= j < len(cells[i])):
            return
        if cells[i][j] != "1":
            return
        cells[i][j] = "-1"
        # visit up, left, down, right — visit order does not affect the count
        for di, dj in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            self.traverseIsland(i + di, j + dj)

    def strList(self, list: List[int]) -> List[str]:
        """Convert every int in the list to its string form."""
        return [str(value) for value in list]

    def __init__(self):
        print(self.numIslands([self.strList([1, 1, 0, 1]), self.strList([1, 1, 0, 0]), self.strList([1, 1, 0, 1])]))
Solution()
| true
|
a795fd09015b1840afdba949021b23b83cb0d24e
|
Python
|
karlos88/learn-python-the-hard-way
|
/ex05/ex05.py
|
UTF-8
| 1,575
| 3.796875
| 4
|
[] |
no_license
|
# Learn Python the Hard Way, ex05: variables and string formatting.
name = 'Zed A. Shaw'
age = 35  # not a lie
height = 74  # inches
height_cm = height * 2.54
weight = 180  # lbs
weight_kg = weight * 0.454
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'

print(f"Let's talk about {name}.")
print(f"He's {height} inches tall.")
print(f"He's {weight} pounds heavy.")
print("Actually that's not too heavy.")
print(f"He's got {eyes} eyes and {hair} hair.")
print(f"His teeth are usually {teeth} depending on the coffee.")
# this line is tricky, try to get it exactly right
print(f"If I add {age}, {height}, and {weight} I get {age + height + weight}.")

# format-spec equivalents of the %-style conversions:
print(f"Printing a string - {'test'}")
print(f"Printing an int - {10}")
print(f"Printing a float with 3 points decimal precision- {10:.3f}")
print(f"Printing a HEX 10 - {10:X}")

# python % format characters (reference):
# d/i  signed integer decimal        o  unsigned octal
# u    unsigned decimal              x/X  unsigned hex (lower/upper)
# e/E  float exponential             f/F  float decimal
# g/G  "e"/"E" if exponent > -4 or < precision, else "f"/"F"
# c    single character              r  repr()   s  str()
# %    literal "%" character
| true
|
89d660083c67f616966ad5bfc80d4f91871c1f75
|
Python
|
chris-r-harwell/dsp
|
/Prework_5.1/q24StringFrontBack.py
|
UTF-8
| 832
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/env python3
import os
import sys
# Complete the function below.
def front_back(s1, s2):
    """
    INPUT: two strings
    OUTPUT: a-front + b-front + a-back + b-back
    where -front and -back are each string split in half; a string with an
    odd number of characters keeps the extra char in its front half.
    """
    # (len + 1) // 2 rounds the midpoint up, giving the extra char to the front
    mid_a = (len(s1) + 1) // 2
    mid_b = (len(s2) + 1) // 2
    return s1[:mid_a] + s2[:mid_b] + s1[mid_a:] + s2[mid_b:]
s1 = sys.stdin.readline().strip()
s2 = sys.stdin.readline().strip()
res = front_back(s1, s2)
print(res)
| true
|
8ecd4cc877f53debe45e3cdee9c6d46a9c59f12f
|
Python
|
rsoundrapandi/pythonProject1
|
/Ifelse.py
|
UTF-8
| 1,460
| 4.125
| 4
|
[] |
no_license
|
#
# def Leap(year):
# if (year % 4) == 0:
# if (year % 100) == 0:
# if (year % 400) == 0:
# print("leap year")
# else:
# print("Not a leap year")
# else:
# print("Not a leap year")
# else:
# print("Not a leap year")
#
# Leap(2000)
#
# def find():
# fruits = ["apple", "orange", "cherry"]
# for i in fruits:
# if i =='orange':
# print(i)
#
# else:
# print("Test")
#
# return i
#
# find()
# for i in range(0,100):
# if i > 1:
# # check for factors
# for n in range(2, i):
# if i % n == 0:
# print(i, "is not a prime number")
# print(i, "times", i // i, "is", i)
# break
# else:
# print(i, "is a prime number")
#
# # if input number is less than
# # or equal to 1, it is not prime
# else:
# print(i, "is not a prime number")
# Python program to display all the prime numbers within an interval
# lower = 1
# upper = 100
#
# print("Prime numbers between", lower, "and", upper, "are:")
#
# for num in range(lower, upper + 1):
# # all prime numbers are greater than 1
# if num > 1:
# for i in range(2, num):
# if (num % i) == 0:
# break
# else:
# print(num)
# Read a number and report whether it is even or odd.
# int() fix: input() returns a str, so the original `x % 2` raised
# TypeError (str % int attempts %-formatting, not arithmetic).
x = int(input("Enter your value: "))
if x % 2 == 0:
    print("even")
else:
    print("odd")
| true
|
249c399ed616febc28cca89ea124531b6117f6dc
|
Python
|
smpio/gunicorn
|
/gunicorn/metrics/store.py
|
UTF-8
| 1,996
| 2.734375
| 3
|
[
"HPND",
"MIT"
] |
permissive
|
import time
import pickle
from collections import namedtuple
class MetricsStore(object):
    """Accumulates worker metrics and renders them in Prometheus text format."""

    Item = namedtuple('Item', ['metric_name', 'metric_type', 'tags', 'value'])

    def __init__(self, log):
        self.data = {}
        self.log = log

    def __getstate__(self):
        # only the metric data survives pickling; the logger is dropped
        return self.data

    def __setstate__(self, state):
        self.data = state
        self.log = None

    def clear(self):
        self.data = {}

    def add(self, metric_name, metric_type, value, **tags):
        """Record one sample; differently-tagged series get separate slots."""
        key = (metric_name, hash(frozenset(tags.items())))
        self.data[key] = (metric_type, value, tags)

    def add_worker(self, worker):
        """Read a worker's pickled stats and record its idle-time metrics."""
        try:
            stats = pickle.loads(worker.tmp.read())
        except EOFError:
            return
        except Exception:
            self.log.exception('Failed to load worker stats')
            return
        if stats.request_ended_at >= stats.request_started_at:
            # worker is not handling request now
            idle = time.time() - stats.request_ended_at
        else:
            # worker is handling a request now
            idle = 0
        self.add('idle_time_seconds', 'gauge', idle, pid=worker.pid)
        self.add('idle_time_seconds_sum', 'summary', stats.idle_time_sum + idle, pid=worker.pid)

    def __iter__(self):
        for (name, _), (mtype, value, tags) in self.data.items():
            yield self.Item(name, mtype, tags, value)

    def prometheus_iter(self):
        """Yield Prometheus exposition-format lines, one TYPE line per sample."""
        for item in self:
            tags_str = ','.join('{}="{}"'.format(*kv) for kv in item.tags.items())
            if tags_str:
                tags_str = '{%s}' % tags_str
            yield '# TYPE {} {}'.format(item.metric_name, item.metric_type)
            yield '{}{} {}'.format(item.metric_name, tags_str, item.value)

    def prometheus_dump(self):
        """Return the full exposition payload as UTF-8 bytes."""
        return '\n'.join(self.prometheus_iter()).encode('utf-8') + b'\n'
| true
|
6bc696866e5b064da851e32fcfefbac270e06d8c
|
Python
|
ib407ov/LABS---11
|
/Завдання 1.py
|
UTF-8
| 3,169
| 3.765625
| 4
|
[] |
no_license
|
#
# конструктор без параметрів, конструктор з параметрами, конструктор копіювання;
# введення/виведення елементів вектора;
# визначення довжини вектора;
# нормування вектора;
# порівняння з іншим вектором;
# перевантаження операторів + (додавання векторів), – (віднімання векторів), * (знаходження скалярного добутку).
#
import math
class TVektor2D:
    """Two 2-D vectors, (x1, x2) and (y1, y2), with display and arithmetic helpers.

    Implements (per the Ukrainian task header): constructors with 0-4
    arguments, element output, vector length, normalization, comparison,
    and operator overloads for +, - and * (scalar operands).
    """

    def __init__(self, *args):
        # 0 args  -> first vector (0, 0), second vector (1, 1)
        # 1-4 args -> fill x1, x2, y1, y2 in order; the rest default to 0
        arguments_count = len(args)
        if arguments_count == 0:
            self.x1 = 0
            self.x2 = 0
            self.y1 = 1
            self.y2 = 1
        elif arguments_count == 1:
            self.x1 = args[0]
            self.x2 = 0
            self.y1 = 0
            self.y2 = 0
        elif arguments_count == 2:
            self.x1 = args[0]
            self.x2 = args[1]
            self.y1 = 0
            self.y2 = 0
        elif arguments_count == 3:
            self.x1 = args[0]
            self.x2 = args[1]
            self.y1 = args[2]
            self.y2 = 0
        else:
            self.x1 = args[0]
            self.x2 = args[1]
            self.y1 = args[2]
            self.y2 = args[3]

    @property
    def function(self):
        # Print both vectors; returns None (display helper only).
        print('firs vektor = [{0}, {1}]'.format(self.x1, self.x2))
        print('second vektor = [{0}, {1}]'.format(self.y1, self.y2))

    @property
    def lehghtVekror(self):
        # Print the Euclidean distance between the two vectors; `return print(...)`
        # yields None. (Name keeps the original spelling so callers still work.)
        return print(math.fabs(math.sqrt(((self.x1 - self.y1) ** 2 ) + ((self.x2 - self.y2) ** 2))))

    @property
    def normalizationvektor(self):
        # Print each component divided by the sum of squares z.
        # NOTE(review): the last term is self.x2 ** 2; the symmetry of the
        # formula suggests self.y2 ** 2 was intended — confirm before relying on it.
        z = (math.pow(self.x1, 2) + math.pow(self.x2, 2) + (self.y1 ** 2) + (self.x2 ** 2))
        print('x1 = ', self.x1/z)
        print('x2 = ', self.x2/z)
        print('y1 = ', self.y1/z)
        print('y2 = ', self.y2/z)

    @property
    def comparison(self):
        # Return the magnitudes (|first vector|, |second vector|) as a tuple.
        A = math.sqrt(math.pow(self.x1, 2) + math.pow(self.x2, 2))
        B = math.sqrt(math.pow(self.y1, 2) + math.pow(self.y2, 2))
        A = math.fabs(A)
        B = math.fabs(B)
        return (A, B)

    def __add__(self, other):
        # Scalar addition: adds `other` to every component, returns a new vector.
        return TVektor2D(self.x1 + other,
                         self.x2 + other,
                         self.y1 + other,
                         self.y2 + other)

    def __sub__(self, other):
        # Scalar subtraction: subtracts `other` from every component.
        return TVektor2D(self.x1 - other,
                         self.x2 - other,
                         self.y1 - other,
                         self.y2 - other)

    def __mul__(self, other):
        # Scalar multiplication of every component (not a dot product,
        # despite the header's mention of a scalar product).
        return TVektor2D(self.x1 * other,
                         self.x2 * other,
                         self.y1 * other,
                         self.y2 * other)
# Demo driver for TVektor2D. The display properties print their output and
# return None, so `print(p.function)` also prints a trailing "None".
g = TVektor2D(5, 8, 3, 2)
# ------2 element output
g.function
# ------3 distance between the two vectors
g.lehghtVekror
# ------4 normalized components
g.normalizationvektor
# ------5 magnitude comparison tuple
print(g.comparison)
p = g+10
# ------6 scalar addition
print(p.function)
p = g - 10
# ------7 scalar subtraction
print(p.function)
# ------8 scalar multiplication
p = g * 2
print(p.function)
| true
|
84d02f0f17bc5f92c9b010dab07824d17774a1c5
|
Python
|
yigalirani/leetcode
|
/20190501_google_phone_interview_question.py
|
UTF-8
| 1,473
| 2.90625
| 3
|
[] |
no_license
|
''' sorry attempt to automaticalt convert to iteration. idea: can this
def get_alloc_f()
def flat_runner(input,f):
stack=[]
` ans.append(input)
cache={}
while(stack):
top=stack.pop()
ans = cache.get(top,None)
if ans:
if len(stack)==0:
return ans
continue
def get_alloc_iter(n,k):
stack=[]
` ans.append((n,k))
cache={}
while(stack):
top=stack.pop()
if len(stack)==0 and top in cache:
return stack[top]
'''
def get_alloc2(n, k):
    """Return every ordered way to split n into k non-negative parts, as lists."""
    if k == 1:
        return [[n]]
    # give `first` units to the final slot and recurse over the remaining k-1
    return [tail + [first]
            for first in range(n + 1)
            for tail in get_alloc2(n - first, k - 1)]


def get_alloc(n, k):
    """Same allocations as get_alloc2, but as hashable tuples."""
    return [tuple(alloc) for alloc in get_alloc2(n, k)]
def get_alloc_iter(n, k):
    # Iterative (breadth-first) version of get_alloc: each state is a tuple
    # (remaining n, remaining slots k, partial allocation r). One level of
    # the search is expanded per outer-loop pass.
    cur = [(n, k, [])]
    while True:
        nxt = []
        all_done = True
        for n, k, r in cur:
            if k == 0:
                # allocation complete: carry it forward unchanged
                nxt.append((n, k, r))
                continue
            all_done = False
            #print ('>>>>>',n,k,r)
            for i in range(n+1):
                #print(i)
                # for the last slot (k == 1) only i == n keeps the total at n
                if k > 1 or i == n:
                    nxt.append((n-i, k-1, r+[i]))
        if all_done:
            # every state finished: return the allocations as tuples
            return [tuple(x[2]) for x in nxt]
        cur = nxt
a1=set(get_alloc(40,5))
a2=set(get_alloc_iter(20,5))
#print(a1)
print(a2)
print(len(a1))
print (a1.difference(a2))
| true
|
f53df7fcd50e5e3c37054fc3f17f343aa9935c6e
|
Python
|
mangalagb/Leetcode
|
/Medium/ArrayNesting.py
|
UTF-8
| 1,334
| 3.859375
| 4
|
[] |
no_license
|
# A zero-indexed array A of length N contains all integers from 0 to N-1. Find and return
# the longest length of set S, where S[i] = {A[i], A[A[i]], A[A[A[i]]], ... }
# subjected to the rule below.
#
# Suppose the first element in S starts with the selection of element A[i] of index = i,
# the next element in S should be A[A[i]], and then A[A[A[i]]]… By that analogy,
# we stop adding right before a duplicate element occurs in S.
#
# The tricky part here is that the numbers always form a ring, and no matter which number of this
# ring you start with total count will always be same, so no need to step on it one more time......
class Solution(object):
    def arrayNesting(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Each index belongs to exactly one cycle of the permutation, so every
        element only needs to be visited once; visited slots are stamped -1.
        """
        best = 0
        for start in range(len(nums)):
            cycle_len = 0
            pos = start
            # walk the cycle until we reach a slot that was already consumed
            while nums[pos] != -1:
                nxt = nums[pos]
                nums[pos] = -1
                pos = nxt
                cycle_len += 1
            best = max(best, cycle_len)
        return best
my_sol = Solution()
A = [5, 4, 0, 3, 1, 6, 2]
print(my_sol.arrayNesting(A)) #4
| true
|
7a276883ba18a820c8258b0bb6d1f168d6e6e25b
|
Python
|
PyMarcus/Jogo_da_forca
|
/game/baseGame.py
|
UTF-8
| 693
| 3.703125
| 4
|
[] |
no_license
|
from presents import Start
from colorama import Fore
class Base:
    """
    Base of the game: holds a player's name and remaining life.
    """

    def __init__(self, nome, life = 100):
        self.nome = nome
        self.life = life

    def setLife(self, lif):
        """Deduct `lif` points from the player's life and print the new value."""
        self.life -= lif
        # print() returns None, matching the original's `return print(...)`
        print(f"Player {str(self.nome)} -- life: {str(round(self.life))} %")

    def game(self):
        """
        method called for each descount in the life
        """
        print()
        print(Fore.RED + f"Player {str(self.nome)} -- life: {str(self.life)} %")
        return Start().startGame()
if __name__ == '__main__':
ok = Base("Marcus")
print(str(ok))
ok.game()
| true
|
3a8b10b41f75e8bfdded430c80fe77b58fff683b
|
Python
|
xiaohai2016/StatRank
|
/analysis.py
|
UTF-8
| 2,412
| 2.859375
| 3
|
[] |
no_license
|
"""Data anlysis and visualization script(s)"""
from __future__ import print_function
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
import click
from data_loader import load_microsoft_dataset
def tsne_fit(feature_vectors, n_components=2, verbose=1, perplexity=10, n_iter=300):
    """Reduce feature_vectors to n_components dimensions with t-SNE."""
    reducer = TSNE(n_components=n_components, verbose=verbose,
                   perplexity=perplexity, n_iter=n_iter)
    return reducer.fit_transform(feature_vectors)
def pca_fit(feature_vectors, n_components=2):
    """Reduce feature_vectors to n_components dimensions with PCA."""
    reducer = PCA(n_components=n_components)
    return reducer.fit_transform(feature_vectors)
@click.command()
@click.option('--dataset', default='MQ2007', help='MQ2007 or MQ2008')
@click.option('--fold', default=1, help='1 to 5')
@click.option('--algo', default='pca', help='tsne or pca')
@click.option('--plot-count', default=10)
@click.option('--batch-count', default=10)
def plot_analysis(dataset, fold, algo, plot_count, batch_count):
    """Use t-SNE/PCA algorithm to plot feature vectors"""
    # expected dataset layout: resources/<dataset>/Fold<fold>/train.txt
    path = f"resources/{dataset}/Fold{fold}/train.txt"
    data = load_microsoft_dataset(path, feature_count=46)
    data_len = len(data)
    # a negative plot count means "plot everything", batch_count items per figure
    if plot_count < 0:
        plot_count = (data_len // batch_count) + 1
    for tries in range(plot_count):
        all_rel_scores = None
        all_feature_vectors = None
        # concatenate up to batch_count queries into one figure's worth of data
        for idx in range(batch_count):
            data_idx = idx + tries * batch_count
            if data_idx >= data_len:
                break
            # items appear to be (query_id, rel_scores, feature_vectors)
            # — confirm against data_loader.load_microsoft_dataset
            _, rel_scores, feature_vectors = data[data_idx]
            if all_rel_scores is None:
                all_rel_scores = rel_scores.astype(int)
                all_feature_vectors = feature_vectors
            else:
                all_rel_scores = np.concatenate((all_rel_scores, rel_scores.astype(int)), axis=0)
                all_feature_vectors = np.concatenate((all_feature_vectors, feature_vectors), axis=0)
        # nothing collected: past the end of the dataset, stop plotting
        if all_rel_scores is None:
            break
        results = tsne_fit(all_feature_vectors) if algo == "tsne" else pca_fit(all_feature_vectors)
        plt.figure(figsize=(16, 10))
        # color and size the scatter points by relevance score
        sns.scatterplot(
            x=results[:, 0], y=results[:, 1],
            hue=all_rel_scores,
            size=all_rel_scores,
            legend="full"
        )
        plt.show()
# pylint: disable=no-value-for-parameter
if __name__ == '__main__':
plot_analysis()
| true
|
62f49dc02c5ee44296cc73bd2b1a0f652329e7f5
|
Python
|
konstantinAriel/RayTracer
|
/scr/WignerDistrib/getTimeDate.py
|
UTF-8
| 192
| 3.109375
| 3
|
[] |
no_license
|
import datetime

# Snapshot the current local time once so every printed field is consistent.
now = datetime.datetime.now()
print(f"Current year: {now.year}")
print(f"Current month: {now.month}")
print(f"Current day: {now.day}")
print('Current day:', now.day)
| true
|
f0cb75586236360a0511dccb7a340eb19a79a953
|
Python
|
Dupeydoo/DRAG
|
/DRAGProj/forms/custominputform.py
|
UTF-8
| 4,011
| 2.875
| 3
|
[] |
no_license
|
from django import forms
from DRAG.datacontext import context
"""
A module containing the form class and recipes to make the custom beat input form.
Author:
James
Version:
1.0.0
See:
forms.Form,
DRAGProj.views
"""
instrument_choices = (
(1, "Hi-Hat"),
(2, "Hi-Hat and Kick"),
(3, "Hi-Hat and Snare"),
(4, "Snare Drum"),
(5, "Kick Drum"),
(6, "Kick and Snare"),
(7, "High-Tom Drum"),
(8, "Kick and High-Tom"),
(9, "Hi-Hat and High-Tom"),
(10, "High-Tom and Snare"),
(11, "Play Nothing"),
(12, "High-Hat, Kick and Snare"),
(13, "Hi-Hat, Kick and High-Tom"),
(14, "High-Tom, Snare and Kick"),
(15, "Hi-Hat, High-Tom and Snare"),
(16, "All Instruments")
)
"""
instrument_choices (:obj:`tuple` of :obj:`tuple`): The choices to be visible on the input drop-downs.
"""
genres = [
("Rock", "Rock"),
("Jazz", "Jazz"),
("Blues", "Blues")
]
"""
genres (:obj:`list` of :obj:`tuple`): The choices of genre for the input drop-down.
"""
class CustomInputForm(forms.Form):
    """
    The CustomInputForm class has fields for each beat of a 2 bar crotchet 4:4 time_sig track
    where the user may create a custom track from a selection of instruments.

    Attributes:
        beat + number (:obj:`TypedChoiceField`): Each beat has its own drop-down object using a HTML5 Select.
        bpm (:obj:`IntegerField`): An object representing a HTML5 number input for the bpm.
        genre (:obj:`ChoiceField`): An object representing a HTML5 drop-down for choosing track genre.
    """
    # Eight identical drop-downs, one per beat; the submitted value is coerced
    # to int and must be one of the instrument ids (1-16, see instrument_choices).
    beat_one = forms.TypedChoiceField(choices=instrument_choices, coerce=int,
                                      widget=forms.Select(attrs={"class": "form-control cb"}))
    beat_two = forms.TypedChoiceField(choices=instrument_choices, coerce=int,
                                      widget=forms.Select(attrs={"class": "form-control cb"}))
    beat_three = forms.TypedChoiceField(choices=instrument_choices, coerce=int,
                                        widget=forms.Select(attrs={"class": "form-control cb"}))
    beat_four = forms.TypedChoiceField(choices=instrument_choices, coerce=int,
                                       widget=forms.Select(attrs={"class": "form-control cb"}))
    beat_five = forms.TypedChoiceField(choices=instrument_choices, coerce=int,
                                       widget=forms.Select(attrs={"class": "form-control cb"}))
    beat_six = forms.TypedChoiceField(choices=instrument_choices, coerce=int,
                                      widget=forms.Select(attrs={"class": "form-control cb"}))
    beat_seven = forms.TypedChoiceField(choices=instrument_choices, coerce=int,
                                        widget=forms.Select(attrs={"class": "form-control cb"}))
    beat_eight = forms.TypedChoiceField(choices=instrument_choices, coerce=int,
                                        widget=forms.Select(attrs={"class": "form-control cb"}))
    bpm = forms.IntegerField(max_value=250, min_value=60,
                             widget=forms.NumberInput(attrs={"id": "bpm", "name": "bpm"}))
    genre = forms.ChoiceField(choices=genres, widget=forms.Select(attrs={"class": "form-control", "id": "genre"}))

    def clean(self):
        # Cross-field validation run by Django after per-field cleaning.
        cleaned_data = super().clean()
        # NOTE(review): cleaned_data["bpm"] raises KeyError if the bpm field
        # itself failed validation — confirm this form is only cleaned with a
        # valid bpm, or switch to cleaned_data.get("bpm").
        bpm = cleaned_data["bpm"]
        beats = 0
        for key, value in cleaned_data.items():
            if "beat" in key:
                # every beat must be an int inside the 1-16 instrument range
                if value < 1 or value > 16 or not isinstance(value, int):
                    raise forms.ValidationError(
                        str(key) + " was given an incorrect value."
                    )
                beats += 1
        # context["time_signature"] defines how many beats must be supplied
        if beats < context["time_signature"]:
            raise forms.ValidationError(
                "You must provide all the beats of the song."
            )
        if bpm > 250 or bpm < 60:
            raise forms.ValidationError(
                "BPM must be between 60 and 250."
            )
| true
|
8134355096ebc5c06860ffc87ead0c07350a83ea
|
Python
|
Bharanij27/bharanirep
|
/Pys84.py
|
UTF-8
| 53
| 2.953125
| 3
|
[] |
no_license
|
import math

# Read one real number from stdin and print the smallest integer >= it.
n = float(input())
print(math.ceil(n))
| true
|
df1cc09629ae505580adcc27e0f7cf1005d2413d
|
Python
|
rcarbal/Bible-it-Python-server
|
/constants/periods.py
|
UTF-8
| 1,936
| 2.625
| 3
|
[] |
no_license
|
GENERAL_BIBLICAL_PERIODS = [
# Periods of the Bible from Adams synchronological chart
# The Anti Diluvian Period
# {
# 'position': 1,
# 'name': 'The Anti Diluvian',
# 'first_year': -4004,
# 'last_year': -2348
# },
{
'position': 1,
'name': 'The Patriarchal Age',
'first_year': -4004,
'last_year': -1491
},
{
'position': 2,
'name': 'The Mosaic Age',
'first_year': -1491,
'last_year': 33
},
{
'position': 3,
'name': 'The Christian Era',
'first_year': 33,
'last_year': 2019
}
# {
# 'position': 1,
# 'name': 'Patriarchs',
# 'first_year': -2200,
# 'last_year': -1850
# },
# {
# 'position': 2,
# 'name': 'Sojourn',
# 'first_year': -1850,
# 'last_year': -1450
# },
# {
# 'position': 3,
# 'name': 'Wilderness',
# 'first_year': -1450,
# 'last_year': -1400
# },
# {
# 'position': 4,
# 'name': 'Conquest',
# 'first_year': -1400,
# 'last_year': -1350
# },
# {
# 'position': 5,
# 'name': 'Judges',
# 'first_year': -1350,
# 'last_year': -1050
# },
# {
# 'position': 6,
# 'name': 'United Monarchy',
# 'first_year': -1050,
# 'last_year': -930
# },
# {
# 'position': 7,
# 'name': 'Divided Monarchy',
# 'first_year': -930,
# 'last_year': -587
# },
# {
# 'position': 8,
# 'name': 'Exile',
# 'first_year': -587,
# 'last_year': -539
# },
# {
# 'position': 9,
# 'name': 'Pos-exilic period',
# 'first_year': -539,
# 'last_year': -400
# }
]
| true
|
614e13fef86fc5528ed43679e0a1112f14f6d09c
|
Python
|
HarrysDev1ce/ccc
|
/2019/j3.py
|
UTF-8
| 839
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
# CCC 2019 J3: run-length encode each input line.
# FIX: the original reused one `repeated_count` variable both for the number
# of lines and for the inner run counter — it only worked because range()
# evaluates its argument once. Separate names make the intent clear.
line_count = int(input())
encoded_lines = []
for _ in range(line_count):
    line = input()
    pairs = []
    # Position in line
    i = 0
    # We use a `while` loop as we may increment `i` within the loop body.
    while i < len(line):
        run_length = 0
        c = line[i]
        # Count how many consecutive occurrences of `c` start at position i.
        while i < len(line) and line[i] == c:
            run_length += 1
            i += 1
        # Append "<count> <char>" for this run.
        pairs.append(f"{run_length} {c}")
    # Then add the encoded line.
    encoded_lines.append(" ".join(pairs))
# Finally, print all encoded lines.
print("\n".join(encoded_lines))
| true
|
e24d514b88f33042a27ea20a98fce8bbad7a4704
|
Python
|
medeiroscwb/curso_em_video_python
|
/A 8 - Utilizando Módulos.py
|
UTF-8
| 1,523
| 3.6875
| 4
|
[] |
no_license
|
'''
Utilizando Módulos:
Módulos são programas feitos em python que trazem uma série de funcionalidades e soluções
que já foram testadas e aprovadas pela comunidade Python.
Podemos importar um módulo, ou biblioteca utilizando o comando IMPORT. É convencionado
importar os módulos necessários nas primeiras linahs do programa.
>import math (importa a biblioteca "math")
>from math import pi (de dentro da biblioteca "math", importa a função "pi")
Bibllioteca math:
ceil (arredonda para cima)
floor (arredonda para baixo)
trunc (truncar, elimina da virgula para frente)
pow (potência)
sqrt (squareroot, raiz quadrada)
factorial (fatoração)
'''
#import math
#num = 9
#raiz = math.sqrt(num) (importando dessa forma, é preciso colocar "math." antes da função)
#print(raiz)
#>3.0
#from math import sqrt
#num = 9
#raiz = sqrt(num) (dessa forma não é preciso referenciar o modulo MATH)
#print(raiz)
#>3.0
'''Podemos consultar um manual de instruções dos modulos distribuídos em
https://docs.python.org/3/'''
import random

num = random.random()
print(num)
# -> random float in [0.0, 1.0)
# (the bare "> numero racional ..." output-note lines in the original were
# SyntaxErrors; they are kept here as comments, translated to English)
num = random.randint(1, 1000)
print(num)
# -> random integer between 1 and 1000, inclusive
'''
digite import, segure ctrl e aperte espaço para obter a lista dos módulos padrão,
que já vem com a instalação do python + módulos instalados
Além dos módulos padrão, podemos baixar e até mesmo publicar módulos em
pypi.org
Desafio: Ex 16 ao 21'''
| true
|
767539cf8d4b5a5e7fc020978dfea770eeb04848
|
Python
|
jiamin0329/sjtuaero_post
|
/cfdppparser.py
|
UTF-8
| 16,758
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/local/bin/python
#######################################################
# File description
#######################################################
# This class is used to parse infos produced in CFD++.
# Get toltal/inviscid/viscous drag, lift and moment
# from following files:
# 1. case.log => get ref values (pressure, temperature,
# velocities, etc.)
# get boundary infos
# 2. mcfd.info1 => get total/inv/vis forces and moments
# 3. infout1f.inp => get ref geom values and alpha
# 4. infout1f.out => will be used if it is a cl-driver case
#######################################################
# Date Author Comment
# 27-Aug-2017 Jiamin Xu Initial creation
# 28-Oct-2017 Jiamin Xu Add Validate()
# 06-Jan-2018 Jiamin Xu Add symm plane type
# 07-Jan-2018 Jiamin Xu Add FindWing()
#######################################################
# Import module
#######################################################
import os
import math
from enum import Enum
#######################################################
# Constants
#######################################################
const_R = 287.0
const_gamma = 1.4
class SymmetryPlaneType(Enum):
    """Which coordinate plane, if any, the CFD case is mirrored about."""
    none = 0      # no symmetry plane
    xyPlane = 1   # symmetric about the x-y plane
    xzPlane = 2   # symmetric about the x-z plane
#######################################################
# Class
#######################################################
class CFDppParser:
    """Post-processor for a CFD++ case directory.

    Reads ``<case>/<case>.log``, ``<case>/mcfd.info1`` and
    ``<case>/infout1f.inp`` (or ``.out`` for CL-driver cases), and exposes
    reference flow conditions, boundary info, integrated forces/moments and
    the derived aerodynamic coefficients.
    """
    def __init__(self, caseName):
        '''Initialize file names, validate them, and zero all result fields.'''
        # input files
        self.caseName = caseName
        self.logFileName = caseName + "/" + caseName + ".log"
        self.info1FileName = caseName + "/mcfd.info1"
        self.infinFileName = caseName + "/infout1f.inp"
        self.infoutFileName = caseName + "/infout1f.out"
        ## validate input files
        self.__Validate()
        # symmetry plane type; indexLift/Drag/Side select which x/y/z
        # component of the force/moment arrays plays each aerodynamic role
        self.symmPlane = SymmetryPlaneType.none
        self.indexLift = -99
        self.indexDrag = -99
        self.indexSide = -99
        # ref values
        self.refPres = 0.0
        self.refTemp = 0.0
        self.refVels = [0.0, 0.0, 0.0]
        self.refVmag = 0.0
        self.refDens = 0.0
        self.refMach = 0.0
        # bc infos
        self.numBounds = 0
        self.noSlipWalls = []
        # ref geom values
        self.alpha = 0.0
        self.refArea = 0.0
        self.refLength = 0.0
        self.refOrign = [0.0, 0.0, 0.0]
        # output results (x/y/z components)
        self.force_tol = [0.0, 0.0, 0.0]
        self.force_inv = [0.0, 0.0, 0.0]
        self.force_vis = [0.0, 0.0, 0.0]
        self.moment = [0.0, 0.0, 0.0]
        self.div_moment = 0.0
        # center of pressure
        self.xCenterOfPressure = 0.0
        # boundary id of wing
        self.idUpper = -1
        self.idLower = -1
    ## Accessor methods
    def GetCaseName(self):
        '''Return case name'''
        return self.caseName
    def GetAlpha(self):
        '''Return angle of attack [deg]'''
        return self.alpha
    def GetRefArea(self):
        '''Return reference area'''
        return self.refArea
    def GetNumBounds(self):
        '''Return number of boundaries'''
        return self.numBounds
    def GetNoSlipWalls(self):
        '''Return list of no-slip wall boundary ids'''
        return self.noSlipWalls
    def GetForceTol(self):
        '''Return total forces
           [0] - x force
           [1] - y force
           [2] - z force'''
        return self.force_tol
    def GetForceInv(self):
        '''Return inviscid forces
           [0] - x inviscid force
           [1] - y inviscid force
           [2] - z inviscid force'''
        return self.force_inv
    def GetForceVis(self):
        '''Return viscous forces
           [0] - x viscous force
           [1] - y viscous force
           [2] - z viscous force'''
        return self.force_vis
    def GetMoment(self):
        '''Return moments
           [0] - x moment
           [1] - y moment
           [2] - z moment'''
        return self.moment
    def GetRefPres(self):
        '''Return reference pressure'''
        return self.refPres
    def GetRefTemp(self):
        '''Return reference temperature'''
        return self.refTemp
    def GetRefVels(self):
        '''Return reference velocity
           [0] - x velocity
           [1] - y velocity
           [2] - z velocity'''
        return self.refVels
    def GetRefVmag(self):
        '''Return reference velocity magnitude'''
        return self.refVmag
    def GetRefDens(self):
        '''Return reference density'''
        return self.refDens
    def GetMa(self):
        '''Return freestream mach number'''
        return self.refMach
    def GetCoeffLift(self):
        '''Return lift coefficients (wind-axis rotation by alpha)
           [0] - total lift coefficient
           [1] - inviscid lift coefficient
           [2] - viscous lift coefficient'''
        Cl = [0.0, 0.0, 0.0]
        coeff_div = 0.5*self.refDens*self.refVmag*self.refVmag*self.refArea
        # lift coeff - total
        Cl[0] = (self.force_tol[self.indexLift]*math.cos(math.radians(self.alpha)) - self.force_tol[self.indexDrag]*math.sin(math.radians(self.alpha)))/coeff_div
        # lift coeff - inviscid
        Cl[1] = (self.force_inv[self.indexLift]*math.cos(math.radians(self.alpha)) - self.force_inv[self.indexDrag]*math.sin(math.radians(self.alpha)))/coeff_div
        # lift coeff - viscous
        Cl[2] = (self.force_vis[self.indexLift]*math.cos(math.radians(self.alpha)) - self.force_vis[self.indexDrag]*math.sin(math.radians(self.alpha)))/coeff_div
        return Cl
    def GetCoeffDrag(self):
        '''Return drag coefficients (wind-axis rotation by alpha)
           [0] - total drag coefficient
           [1] - inviscid drag coefficient
           [2] - viscous drag coefficient'''
        Cd = [0.0, 0.0, 0.0]
        coeff_div = 0.5*self.refDens*self.refVmag*self.refVmag*self.refArea
        # drag coeff - total
        Cd[0] = (self.force_tol[self.indexLift]*math.sin(math.radians(self.alpha)) + self.force_tol[self.indexDrag]*math.cos(math.radians(self.alpha)))/coeff_div
        # drag coeff - invicid
        Cd[1] = (self.force_inv[self.indexLift]*math.sin(math.radians(self.alpha)) + self.force_inv[self.indexDrag]*math.cos(math.radians(self.alpha)))/coeff_div
        # drag coeff - viscous
        Cd[2] = (self.force_vis[self.indexLift]*math.sin(math.radians(self.alpha)) + self.force_vis[self.indexDrag]*math.cos(math.radians(self.alpha)))/coeff_div
        return Cd
    def GetLDRatio(self):
        '''Return total Lift/Drag ratio'''
        cl = self.GetCoeffLift()
        cd = self.GetCoeffDrag()
        ld = cl[0]/cd[0]
        return ld
    def GetCoeffMoment(self):
        '''Return pitching-moment coefficient about the reference origin'''
        coeff_div = 0.5*self.refDens*self.refVmag*self.refVmag*self.refArea*self.refLength
        Cm = ((self.moment[self.indexSide] + self.force_tol[self.indexLift]*self.refOrign[self.indexDrag] - self.force_tol[self.indexDrag]*self.refOrign[self.indexLift]))/coeff_div
        return Cm
    def GetCenterOfPressure(self):
        '''Return chordwise center of pressure: side-moment / lift force'''
        return self.moment[self.indexSide]/self.force_tol[self.indexLift]
    def GetWingBoundaryIds(self):
        '''Return [upper, lower] wing boundary ids (set by FindWing)'''
        wings = []
        wings.append(self.idUpper)
        wings.append(self.idLower)
        return wings
    ## main methods
    def __Validate(self):
        '''Validate that the case folder and all dependency files exist.
           Returns True when everything is present.'''
        isValidated = True
        ## validate folder
        if not os.path.exists(self.caseName):
            isValidated = False
        ## validate files
        ## check log file
        if isValidated:
            if not os.path.exists(self.logFileName):
                print("File not Found:" + self.logFileName)
                isValidated = False
        ## check mcfd.info1 file
        if isValidated:
            if not os.path.exists(self.info1FileName):
                print("File not Found:" + self.info1FileName)
                isValidated = False
        ## check infout1f file (either the .inp or the .out flavor suffices)
        if isValidated:
            if not os.path.exists(self.infinFileName) and \
               not os.path.exists(self.infoutFileName):
                print("File not Found:" + self.infinFileName + " or " + self.infoutFileName)
                isValidated = False
        return isValidated
    def IsClDriverCase(self):
        '''Check if it is a CL-driver case (log mentions cldriver_controls)'''
        isClDriverCase = False
        try:
            inpFile = open(self.logFileName)
            inpTexts = inpFile.readlines()
            for i in range(len(inpTexts)):
                if "cldriver_controls" in inpTexts[i]:
                    isClDriverCase = True
                    break
            inpFile.close()
        except Exception as e:
            print(e)
            exit(1)
        return isClDriverCase
    def Process(self):
        '''Parse all input files and populate the result fields.'''
        self.__ReadRefVals()
        self.__ReadBcInfos()
        self.__ReadRefGeomVals()
        self.__ReadForces()
    def __ReadRefVals(self):
        '''Read reference flow values (p, T, velocity) from the log file and
           derive |V|, density and Mach number.'''
        try:
            logFile = open(self.logFileName)
            lines = logFile.readlines()
            for line in lines:
                # get ref pressure
                if "aero_pres" in line and len(line.split()) == 3:
                    self.refPres = float(line.split()[2])
                # get ref temperature
                if "aero_temp" in line and len(line.split()) == 3:
                    self.refTemp = float(line.split()[2])
                # get ref velocities
                if "aero_u" in line and len(line.split()) == 3:
                    self.refVels[0] = float(line.split()[2])
                if "aero_v" in line and len(line.split()) == 3:
                    self.refVels[1] = float(line.split()[2])
                if "aero_w" in line and len(line.split()) == 3:
                    self.refVels[2] = float(line.split()[2])
            # get velocity magnitude
            for vel in self.refVels:
                self.refVmag += vel*vel
            self.refVmag = math.sqrt(self.refVmag)
            # get ref density from the ideal-gas law
            self.refDens = self.refPres/const_R/self.refTemp
            # get ref mach number
            sound_speed = math.sqrt(const_gamma*const_R*self.refTemp)
            self.refMach = self.refVmag/sound_speed
            logFile.close()
        except Exception as e:
            print(e)
            exit(1)
    def __ReadBcInfos(self):
        '''Get boundary-condition infos (count, no-slip wall ids) from log'''
        try:
            logFile = open(self.logFileName)
            lines = logFile.readlines()
            # get total number of boundaries
            for line in lines:
                if "mbcons" in line and len(line.split()) == 3:
                    self.numBounds = int(line.split()[2])
                    break
            # get no-slip walls: the wall id sits two lines above the marker
            staIndex = 0
            for line in lines:
                if "# No-slip adiabatic wall" in line:
                    staIndex = lines.index(line, staIndex, len(lines))
                    noSlipWall = lines[staIndex-2].split()[1]
                    self.noSlipWalls.append(int(noSlipWall))
                    staIndex = staIndex + 1
            logFile.close()
        except Exception as e:
            print(e)
            exit(1)
    def __ReadRefGeomVals(self):
        '''Read aerodynamic reference values (alpha, area, length, origin,
           symmetry plane) from the infout1f input/output file.'''
        try:
            infFileName = ""
            if self.IsClDriverCase():
                infFileName = self.infoutFileName
            else:
                infFileName = self.infinFileName
            infFile = open(infFileName)
            lines = infFile.readlines()
            for line in lines:
                if "alpha" in line:
                    self.alpha = float(line.split()[1])
                if "axref" in line:
                    self.refArea = float(line.split()[1])
                if "lxref" in line:
                    self.refLength = float(line.split()[1])
                if "xcen" in line:
                    self.refOrign[0] = float(line.split()[1])
                if "ycen" in line:
                    self.refOrign[1] = float(line.split()[1])
                if "zcen" in line:
                    self.refOrign[2] = float(line.split()[1])
                if "plane" in line:
                    plane = line.split()[1]
                    # BUG FIX: these branches previously assigned to a new
                    # attribute `self.SymmPlaneType` (shadowing the enum's
                    # name), so `self.symmPlane` set in __init__ never
                    # reflected the parsed symmetry plane.
                    if plane == "xy":
                        self.symmPlane = SymmetryPlaneType.xyPlane
                        self.indexDrag = 0
                        self.indexLift = 1
                        self.indexSide = 2
                    else:
                        self.symmPlane = SymmetryPlaneType.xzPlane
                        self.indexDrag = 0
                        self.indexLift = 2
                        self.indexSide = 1
            infFile.close()
        except Exception as e:
            print(e)
            exit(1)
    def __ReadForces(self):
        '''Read integrated forces/moments from mcfd.info1. Each boundary owns
           a 23-line block at the end of the file; only no-slip walls are
           accumulated.'''
        try:
            info1File = open(self.info1FileName)
            lines = info1File.readlines()
            numLines = len(lines)
            staIndex = numLines-23*self.numBounds+1
            for ibc in range(self.numBounds):
                blockIndex = staIndex+ibc*23
                if ibc+1 in self.noSlipWalls:
                    # force x (columns: total, inviscid, viscous)
                    self.force_tol[0] += float(lines[blockIndex+3].split()[2])
                    self.force_inv[0] += float(lines[blockIndex+3].split()[3])
                    self.force_vis[0] += float(lines[blockIndex+3].split()[4])
                    # force y
                    self.force_tol[1] += float(lines[blockIndex+4].split()[2])
                    self.force_inv[1] += float(lines[blockIndex+4].split()[3])
                    self.force_vis[1] += float(lines[blockIndex+4].split()[4])
                    # force z
                    self.force_tol[2] += float(lines[blockIndex+5].split()[2])
                    self.force_inv[2] += float(lines[blockIndex+5].split()[3])
                    self.force_vis[2] += float(lines[blockIndex+5].split()[4])
                    # x/y/z moment
                    self.moment[0] += float(lines[blockIndex+6].split()[2])
                    self.moment[1] += float(lines[blockIndex+7].split()[2])
                    self.moment[2] += float(lines[blockIndex+8].split()[2])
            info1File.close()
        except Exception as e:
            print(e)
            exit(1)
    def FindWing(self):
        '''Find boundary ids of the wing upper/lower surfaces from the log.
           Returns True only when both WINGUPPER and WINGLOWER were found.'''
        isWingUpperFound = False
        # BUG FIX: this was `isWingLowerFoudn = False` (typo), leaving
        # `isWingLowerFound` undefined (NameError on the return line)
        # whenever WINGLOWER was absent from the log.
        isWingLowerFound = False
        inpFile = open(self.logFileName)
        inpTexts = inpFile.readlines()
        for i in range(len(inpTexts)):
            if "WINGUPPER" in inpTexts[i]:
                self.idUpper = int(inpTexts[i].split()[0])
                isWingUpperFound = True
            if "WINGLOWER" in inpTexts[i]:
                self.idLower = int(inpTexts[i].split()[0])
                isWingLowerFound = True
        # close the handle (was previously leaked)
        inpFile.close()
        return isWingUpperFound and isWingLowerFound
#######################################################
# Main Function
#######################################################
if __name__ == '__main__':
    '''unit test case'''
    # Smoke test: requires a ./sample case directory containing the CFD++
    # output files; Process() exits with an error message otherwise.
    cfdppParser = CFDppParser("sample")
    cfdppParser.Process()
    print( "                 Case Name: "+ cfdppParser.GetCaseName() )
    print( "            Cl-driver case: "+ str(cfdppParser.IsClDriverCase()) )
    print( "    Total number of bounds: "+ str(cfdppParser.GetNumBounds()) )
    print( "             No-slip walls: "+ str(cfdppParser.GetNoSlipWalls()) )
    print( "           Angle of attack: "+ str(cfdppParser.GetAlpha()) )
    print( "          Lift Coefficient: "+ str(cfdppParser.GetCoeffLift()) )
    print( "          Drag Coefficient: "+ str(cfdppParser.GetCoeffDrag()) )
    print( "        Moment Coefficient: "+ str(cfdppParser.GetCoeffMoment()) )
    print( "                       L/D: "+ str(cfdppParser.GetLDRatio()) )
    print( "                     Force: "+ str(cfdppParser.GetForceTol()) )
    print( "                    Moment: "+ str(cfdppParser.GetMoment()) )
    print( "Center of Pressure (x-dir): "+ str(cfdppParser.GetCenterOfPressure()))
| true
|
ca13f7ef900457342dd4ce38115e149065935031
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_74/289.py
|
UTF-8
| 521
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from sys import argv
def solve(v):
    """Minimum total time to press the button sequence *v* (GCJ Bot Trust).

    *v* is a list of (robot, position) pairs, robot 0 = Orange, 1 = Blue;
    both robots start at position 1. The list is consumed destructively,
    exactly like the original implementation.
    """
    positions = [1, 1]   # current hallway position of each robot
    slack = [0, 0]       # time each robot has already had to walk/wait
    total = 0
    while v:
        robot, button = v.pop(0)
        # 1 second to press, plus any walking not already covered by slack.
        step = 1 + max(0, abs(positions[robot] - button) - slack[robot])
        total += step
        positions[robot] = button
        slack[robot] = 0
        slack[1 - robot] += step
    return total
# Driver: read the number of cases, then one "W [O|B pos]*W" line per case.
# FIXES: Code Jam output is 1-indexed (the original printed "Case #0: ..."
# first); the Python-2-only `print` statement is now a function call; the
# input file is closed via a context manager.
d = {'O': 0, 'B': 1}
with open(argv[1], 'r') as f:
    n = int(f.readline())
    for case in range(1, n + 1):
        tokens = f.readline().split()
        w = int(tokens.pop(0))
        v = []
        for _ in range(w):
            robot = d[tokens.pop(0)]
            v.append((robot, int(tokens.pop(0))))
        print("Case #%d: %d" % (case, solve(v)))
| true
|
7e0b1398e550c7bb6a50d99bf1b0e974e9fb9ca6
|
Python
|
RebeccaNacshon/ETL_Implementation
|
/ETL/program/server.py
|
UTF-8
| 3,394
| 2.921875
| 3
|
[] |
no_license
|
import logging
import socket
from database.sqlite_program import Database
from heartbeat.heartbeat import Heartbeat
from exception import corrupted_file_exception
import jsocket.tserver
import time
# Module-level logger and the next id assigned to an inserted file record.
logger = logging.getLogger("jsocket.example_servers")
count = 1  # shared mutable counter, rebound via `global count` in the factory thread
""" custom server which inherits ThreadedServer and implementing the _processMessage(obj) method."""
class MyServer(jsocket.tserver.ThreadedServer):
    """Minimal ThreadedServer subclass that just logs incoming messages."""

    def __init__(self):
        super(MyServer, self).__init__()
        self.timeout = 40.0
        logger.warning("MyServer class in customServer is for example purposes only.")

    def _process_message(self, obj):
        """Log one received JSON object; an empty string means no data."""
        if obj == '':
            return
        if obj['message'] == "new connection":
            logger.info("new connection.")
        else:
            logger.info(obj)
class MyFactoryThread(jsocket.tserver.ServerFactoryThread):
    """Factory thread instantiated by the server factory for each new
    connection; persists incoming file metadata into a SQLite store.
    """
    def __init__(self):
        super(MyFactoryThread, self).__init__()
        self.timeout = 40.0

    def _process_message(self, obj):
        """Handle one JSON message received from the client.

        An empty message is ignored; "new connection" is only logged; any
        other payload is treated as a list of {'filename': ...} records,
        which are filtered for corruption and inserted with sequential ids.

        FIXES: the Python-2 `print x` statements (SyntaxError on Python 3)
        are now print() calls; the bare `except:` is narrowed; the loop
        variable no longer shadows the name `json`.
        """
        global count
        if obj == '':
            return
        if obj['message'] == "new connection":
            logger.info("new connection.")
            return
        logger.info(obj)
        # Definition of the database.
        db = Database('test.db')
        db.create_table()
        # If the database already exists, continue numbering after the
        # largest stored id.
        max_id = db.retrieve_max_id()
        print(max_id)
        count = max_id + 1
        print(count)
        # Heartbeat signal sent from server to database.
        # NOTE(review): interval is 3600 seconds but the original comment
        # claimed "every minute" — confirm the intended period.
        Heartbeat(3600, db)
        for record in obj['message']:
            # Walk the payload and persist each file record.
            print(record['filename'])
            file_name = record['filename']
            # check_filename() raises for files flagged as corrupted (named
            # "corrupt"); such records are skipped.
            try:
                corrupted_file_exception.check_filename(file_name)
            except Exception:
                print("corrupted file found!")
                continue
            # Insert the new file into the database.
            db.insert(count, file_name)
            print(count)
            print(file_name)
            count = count + 1
            print(count)
        logger.info("in process method")
if __name__ == "__main__":
    # Listen on this host:8081; each client connection is handled by a fresh
    # MyFactoryThread instance.
    server = jsocket.tserver.ServerFactory(MyFactoryThread, address=socket.gethostname(), port=8081)
    server.start()
    # Keep the main thread alive briefly before exiting (demo only).
    time.sleep(40)
| true
|
0c84b47c773a38fa630b0e224f1df9f6fffc25be
|
Python
|
siddharth1k/git-project
|
/Program2.py
|
UTF-8
| 121
| 4
| 4
|
[] |
no_license
|
# Program 2: echo the number when it exceeds 5, otherwise print a fixed message.
print("Enter any number:")
number = int(input(''))
print(number if number > 5 else " no Less than 5")
| true
|
adb0be95f150810514ef9c36ddc8e174e206f48f
|
Python
|
Andremarcucci98/Python_udemy_geek_university
|
/Capitulos/Secao_13/Exercicios_13/exercicio_25_agenda_turbinada/agenda.py
|
UTF-8
| 3,207
| 3.46875
| 3
|
[] |
no_license
|
from time import sleep
from typing import List
from Capitulos.Secao_13.Exercicios_13.exercicio_25_agenda_turbinada.models.contato import Contato
agenda: List[Contato]
def main() -> None:
    # Program entry point: hand control to the interactive menu.
    menu()
def menu() -> None:
    # Render the main menu and dispatch the chosen action.
    # NOTE(review): every action function calls menu() again instead of
    # returning into a loop, so each interaction deepens the call stack;
    # int(input(...)) also raises ValueError on non-numeric input.
    print('========================')
    print('======== Agenda ========')
    print('========================')
    print('Selecione umas das opções abaixo: ')
    print('---------------------------------')
    print('1 - Criar um contato\n'
          '2 - Remover um contato\n'
          '3 - Pesquisar contato pelo nome\n'
          '4 - Listar todos os contatos\n'
          '5 - Listar contatos cujo nome inicia em dada letra\n'
          '6 - Imprimir os aniversariantes do mês')
    print('---------------------------------')
    opcao: int = int(input('Digite a opção: '))
    if opcao == 1:
        cria_contato()
    elif opcao == 2:
        remove_contato()
    elif opcao == 3:
        pesquisa_contato()
    elif opcao == 4:
        lista_contatos()
    elif opcao == 5:
        lista_contatos_ordem_alfa()
    elif opcao == 6:
        aniversariantes()
    else:
        # Any other option terminates the program.
        print('Nenhuma opção solicitada, tenha um bom dia!')
        quit()
def cria_contato():
    # Menu option 1: prompt for the contact fields and append one
    # "nome /email /telefone" line to the data file, then return to the menu.
    print('Opção 1 selecionada! Informe os dados do contato.')
    nome: str = input('Nome do contato: ')
    email: str = input('E-mail do contato: ')
    telefone: str = input('Telefone: ')
    conta: Contato = Contato(nome, email, telefone)
    with open('exercicio_25_agenda_automatizada.txt', 'a', encoding='UTF-8') as arq_escrita:
        #arq_escrita.write(f'Nome: {conta.nome} /E-mail: {conta.email} /Telefone: {conta.telefone}\n')
        arq_escrita.write(f'{conta.nome} /{conta.email} /{conta.telefone}\n')
    print('Conta criada com sucesso')
    sleep(1)
    menu()
def remove_contato():
    """Menu option 2: remove every line containing the given contact name,
    then return to the menu.

    FIXES: the original called ``lista.remove(line)`` while iterating the
    same list — which skips the element right after every removal, so
    consecutive matches were missed — and rewrote the whole file once per
    match inside the loop. Now the kept lines are computed first and the
    file is rewritten exactly once.
    """
    contato_remove: str = str(input('Digite o contato a ser removido: '))
    with open('exercicio_25_agenda_automatizada.txt', encoding='UTF-8') as arq_leitura:
        linhas = arq_leitura.readlines()
    restantes = [linha for linha in linhas if contato_remove not in linha]
    with open('exercicio_25_agenda_automatizada.txt', 'w', encoding='UTF-8') as arq_escrita:
        arq_escrita.writelines(restantes)
    sleep(1)
    menu()
def pesquisa_contato():
    # Menu option 3: print the first line containing the given name.
    contato = str(input('Digite o nome do contato: ')).strip()
    with open('exercicio_25_agenda_automatizada.txt', 'r', encoding='UTF-8') as arq_leitura:
        lista = arq_leitura.readlines()
        for linha in lista:
            if contato in linha:
                print(linha)
                break
        else:
            # for/else: this branch runs only when no line matched (no break).
            print('Contato não encontrado! Tente novamente')
    sleep(1)
    menu()
def lista_contatos():
    # Menu option 4: print every contact, split back into its
    # [nome, email, telefone] fields.
    with open('exercicio_25_agenda_automatizada.txt', 'r', encoding='UTF-8') as arq_leitura:
        lista = arq_leitura.readlines()
        for linha in lista:
            lista_palavras = linha.strip().split('/')
            print(lista_palavras)
    sleep(2)
    menu()
def lista_contatos_ordem_alfa():
    # TODO: menu option 5 (list contacts starting with a given letter) — not implemented.
    pass
def aniversariantes():
    # TODO: menu option 6 (print the month's birthdays) — not implemented.
    pass
if __name__ == '__main__':
    main()
| true
|
ecb0248986465e57c3dc08ade4bebd96732eb0b6
|
Python
|
jakudapi/aoc2017
|
/day08-1.py
|
UTF-8
| 2,001
| 3.9375
| 4
|
[] |
no_license
|
'''
--- Day 8: I Heard You Like Registers ---
You receive a signal directly from the CPU. Because of your recent assistance with jump instructions, it would like you to compute the result of a series of unusual register instructions.
Each instruction consists of several parts: the register to modify, whether to increase or decrease that register's value, the amount by which to increase or decrease it, and a condition. If the condition fails, skip the instruction without modifying the register. The registers all start at 0. The instructions look like this:
b inc 5 if a > 1
a inc 1 if b < 5
c dec -10 if a >= 1
c inc -20 if c == 10
These instructions would be processed as follows:
Because a starts at 0, it is not greater than 1, and so b is not modified.
a is increased by 1 (to 1) because b is less than 5 (it is 0).
c is decreased by -10 (to 10) because a is now greater than or equal to 1 (it is 1).
c is increased by -20 (to -10) because c is equal to 10.
After this process, the largest value in any register is 1.
You might also encounter <= (less than or equal to) or != (not equal to). However, the CPU doesn't have the bandwidth to tell you what all the registers are named, and leaves that to you to determine.
What is the largest value in any register after completing the instructions in your puzzle input?
'''
from collections import Counter
from operator import lt, gt, eq, ne, ge, le
def largest_register(instructions):
    """Execute the register instructions and return the final Counter.

    Each instruction looks like
    ``<reg> inc|dec <amount> if <reg2> <cmp> <value>``; registers default
    to 0 (Counter semantics). Extracted from the __main__ body so the
    interpreter logic is a pure, testable function; the duplicated
    inc/dec branches are folded into one signed update.
    """
    registers = Counter()
    conditions = {"==": eq, ">": gt, "<": lt, ">=": ge, "<=": le, "!=": ne}
    for instruc in instructions:
        instruc = instruc.strip().split(" ")
        # Condition is always the last three tokens.
        val_condition = int(instruc[-1])
        condition = instruc[-2]
        reg_test = instruc[-3]
        if conditions[condition](registers[reg_test], val_condition):
            delta = int(instruc[2])
            registers[instruc[0]] += delta if instruc[1] == "inc" else -delta
    return registers


if __name__ == "__main__":
    with open("day08input.txt", 'r') as fp:
        registers = largest_register(fp.readlines())
    # most_common(1) yields [(name, value)] for the largest register.
    print(registers.most_common(1))
| true
|
b139575e646b96bd12500ea20aed10d697b67d4f
|
Python
|
spegesilden/kattis
|
/r2/r.py
|
UTF-8
| 70
| 3.078125
| 3
|
[] |
no_license
|
# Read R (the fake result) and S (the reported sum); the real result is 2*S - R.
tokens = input().split()
r = int(tokens[0])
s = int(tokens[1])
print(2 * s - r)
| true
|
71a1a6f6f1940f97da933c7fc07c66bd812765fb
|
Python
|
mauryanidhi/Fake-news-classification
|
/fake_news_machine_learning.py
|
UTF-8
| 4,865
| 3.15625
| 3
|
[] |
no_license
|
# Notebook-style flat script: bare expressions such as `df.head()` only
# display output inside a notebook environment.
import pandas as pd
df=pd.read_csv('train.csv')
df.head()
#Drop Nan Values
df=df.dropna()
## Get the Independent Features
X=df.drop('label',axis=1)
## Get the Dependent features
y=df['label']
X.shape
y.shape
messages=X.copy()
messages['title'][1]
# reset_index so the row positions 0..N-1 line up with the loop below
messages.reset_index(inplace=True)
import nltk
import re
from nltk.corpus import stopwords
nltk.download('stopwords')
### Dataset Preprocessing
from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()
corpus = []
# PERFORMANCE FIX: stopwords.words('english') used to be re-read for every
# word of every title; fetch it once and use a set for O(1) membership tests.
stop_words = set(stopwords.words('english'))
for i in range(0, len(messages)):
    print(i)
    # keep letters only, lowercase, drop stop words, and stem each token
    review = re.sub('[^a-zA-Z]', ' ', messages['title'][i])
    review = review.lower()
    review = review.split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    review = ' '.join(review)
    corpus.append(review)
corpus[3]
## Applying Countvectorizer
# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=5000,ngram_range=(1,3))
X = cv.fit_transform(corpus).toarray()
X.shape
## Divide the dataset into Train and Test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
cv.get_feature_names()[:20]
cv.get_params()
count_df = pd.DataFrame(X_train, columns=cv.get_feature_names())
count_df.head()
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # BUG FIX: normalize *before* drawing. Previously imshow() rendered the
    # raw counts even when normalize=True, so the heatmap colors disagreed
    # with the (normalized) cell labels drawn below.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Annotate each cell, switching text color for contrast against the map.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Train/evaluate a Multinomial Naive Bayes classifier on the bag-of-words
# features, tune alpha, then repeat with TF-IDF features.
from sklearn.naive_bayes import MultinomialNB
classifier=MultinomialNB()
from sklearn import metrics
import numpy as np
import itertools
classifier.fit(X_train, y_train)
pred = classifier.predict(X_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy:   %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred)
plot_confusion_matrix(cm, classes=['FAKE', 'REAL'])
classifier.fit(X_train, y_train)
pred = classifier.predict(X_test)
score = metrics.accuracy_score(y_test, pred)
score
#Multinomial Classifier with Hyperparameter
classifier=MultinomialNB(alpha=0.1)
previous_score=0
for alpha in np.arange(0,1,0.1):
    sub_classifier=MultinomialNB(alpha=alpha)
    sub_classifier.fit(X_train,y_train)
    y_pred=sub_classifier.predict(X_test)
    score = metrics.accuracy_score(y_test, y_pred)
    if score>previous_score:
        classifier=sub_classifier
        # BUG FIX: previous_score was never updated, so *any* alpha whose
        # score beat the initial 0 replaced `classifier` — i.e. the last
        # alpha won instead of the best one.
        previous_score=score
    print("Alpha: {}, Score : {}".format(alpha,score))
## Get Features names
feature_names = cv.get_feature_names()
classifier.coef_[0]
### Most real
sorted(zip(classifier.coef_[0], feature_names), reverse=True)[:20]
### Most fake
sorted(zip(classifier.coef_[0], feature_names))[:5000]
## TFidf Vectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_v=TfidfVectorizer(max_features=5000,ngram_range=(1,3))
X=tfidf_v.fit_transform(corpus).toarray()
X.shape
y=df['label']
## Divide the dataset into Train and Test
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
tfidf_v.get_feature_names()[:20]
tfidf_v.get_params()
count_df = pd.DataFrame(X_train, columns=tfidf_v.get_feature_names())
count_df.head()
import matplotlib.pyplot as plt
from sklearn.naive_bayes import MultinomialNB
classifier=MultinomialNB()
from sklearn import metrics
import numpy as np
classifier.fit(X_train, y_train)
pred = classifier.predict(X_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy:   %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred)
plot_confusion_matrix(cm, classes=['FAKE', 'REAL'])
classifier.fit(X_train, y_train)
pred = classifier.predict(X_test)
score = metrics.accuracy_score(y_test, pred)
score
| true
|
b3aa022074dbcac5d4ea2bf4f22c0e3e65f5d6b8
|
Python
|
Mayberry2021/cursed_comment_filter
|
/db_controller.py
|
UTF-8
| 4,392
| 2.921875
| 3
|
[] |
no_license
|
import sqlite3
import time
class db_controller(object):
    """Small SQLite access layer for the scraped-comment data set.

    Table schema: ``comment_set(part, src, content, length, date)``.
    All query helpers return plain lists of comment strings.
    """

    # Extra SQL length filter per criterion; '{length}' is substituted with
    # the internal integer pivot before execution. 'high' and 'mid-high' are
    # intentionally identical, preserving the original behaviour. This table
    # collapses the two near-identical if/elif cascades the original class
    # carried in get_data()/get_total_data().
    _LENGTH_FILTERS = {
        'all': None,
        'low': 'length <= {length}',
        'mid': 'length > {length}-10 AND length < {length}+10',
        'high': 'length >= {length}',
        'mid-high': 'length >= {length}',
    }

    def __init__(self, db_name):
        # db_name may be a file path or ':memory:'.
        self.db = sqlite3.connect(db_name)
        self.curs = self.db.cursor()

    def checking_duplicate(self, content):
        """Return True when `content` is NOT yet stored (safe to insert)."""
        sql = 'SELECT content FROM comment_set WHERE content==?'
        result = self.curs.execute(sql, [content])
        return result.fetchone() is None

    def create_table(self):
        """Create the comment table (raises if it already exists)."""
        self.curs.execute('CREATE TABLE comment_set (part, src, content, length, date)')

    def insert(self, total_info):
        """Insert (part, src, content, length, date) tuples, skipping rows
        whose content is already stored, then commit once."""
        sql = "INSERT INTO comment_set (part, src, content, length, date) values (?,?,?,?,?)"
        for row in total_info:
            if self.checking_duplicate(row[2]):
                self.curs.execute(sql, (row[0], row[1], row[2], row[3], row[4]))
        self.db.commit()

    def get_total_avg_len(self):
        """Average comment length over the whole table (None when empty)."""
        sql = 'SELECT AVG(length) FROM comment_set'
        return self.curs.execute(sql).fetchone()[0]

    def get_avg_len(self, part):
        """Average comment length within one section."""
        sql = 'SELECT AVG(length) FROM comment_set WHERE part == ?'
        return self.curs.execute(sql, [part]).fetchone()[0]

    def get_total_comment(self):
        """All comment strings in the table."""
        sql = 'SELECT content FROM comment_set'
        return [row[0] for row in self.curs.execute(sql).fetchall()]

    def get_section_comment(self, part):
        """All comment strings within one section."""
        sql = 'SELECT content FROM comment_set WHERE part == ?'
        return [row[0] for row in self.curs.execute(sql, [part]).fetchall()]

    def get_data(self, criteria, part, length):
        """Comments of one section filtered by a length criterion.

        `criteria` is one of the _LENGTH_FILTERS keys; unknown values return
        None, matching the original's implicit fall-through.
        """
        if criteria not in self._LENGTH_FILTERS:
            return None
        sql = 'SELECT content, length FROM comment_set WHERE part==?'
        cond = self._LENGTH_FILTERS[criteria]
        if cond:
            # `length` is an internal integer, so the interpolation is safe.
            sql += ' AND ' + cond.format(length=length)
        return [row[0] for row in self.curs.execute(sql, [part]).fetchall()]

    def get_total_data(self, criteria, length):
        """Comments of the whole table filtered by a length criterion."""
        if criteria not in self._LENGTH_FILTERS:
            return None
        sql = 'SELECT content, length FROM comment_set'
        cond = self._LENGTH_FILTERS[criteria]
        if cond:
            sql += ' WHERE ' + cond.format(length=length)
        return [row[0] for row in self.curs.execute(sql).fetchall()]
| true
|
c9779a06f01b040aaa50c5d42d9d5cb64326fd40
|
Python
|
osmanmusa/LCSIA
|
/models/AtoW_grad.py
|
UTF-8
| 3,740
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
file : AtoW_grad.py
author: Xiaohan Chen
email : chernxh@tamu.edu
date : 2019-02-20
"""
import numpy as np
import tensorflow as tf
import utils.train
from utils.tf import get_subgradient_func, bmxbm, mxbm
class AtoW_grad(object):
    """Learned mapping from a dictionary A to a weight matrix W.

    Implements T unrolled (sub)gradient steps W <- W - B_t * dF(A^T W - I)
    with one trainable step matrix B_t per layer.
    """

    def __init__(self, m, n, T, Binit, eta, loss, Q, scope):
        """Build the model.

        :m, n: dictionary dimensions (A has shape (batch, m, n))
        :T: number of unrolled gradient steps (layers)
        :Binit: np.ndarray used (scaled by eta) as a constant initializer,
                or the string 'uniform' / 'normal' for a random initializer
        :eta: step size folded into a numeric Binit
        :loss: loss name; selects the subgradient function
        :Q: optional weighting matrix for the loss, or None
        :scope: tf variable scope that holds the per-layer B variables
        """
        self._m = m
        self._n = n
        self._Binit = Binit
        self._T = T
        self._loss  = loss
        self._eta = eta
        self._Q = Q
        self._scope = scope

        # subgradient function
        self._subgradient_func = get_subgradient_func(loss)

        # setup layers
        self.setup_layers (scope)

    def setup_layers(self, scope):
        """Create the per-step B variables, the Q constant and the identity.

        :returns: None
        """
        with tf.variable_scope (scope, reuse=False) as vs:
            # B initialization
            if isinstance(self._Binit, np.ndarray):
                Binit = (self._eta * self._Binit).astype(np.float32)
                self._Binit_ = tf.constant(value=Binit,
                                           dtype=tf.float32,
                                           name='Binit')
            # BUG FIX: the two branches below previously tested the undefined
            # name `Binit` (NameError whenever _Binit is not an ndarray);
            # they must test the constructor argument stored on self.
            elif self._Binit == 'uniform':
                self._Binit_ = tf.random_uniform_initializer(-0.01, 0.01,
                                                             dtype=tf.float32)
            elif self._Binit == 'normal':
                self._Binit_ = tf.random_normal_initializer(0.0, 0.01,
                                                            dtype=tf.float32)

            # weights: one trainable B matrix per unrolled step
            for i in range (self._T):
                tf.get_variable (name='B_%d'%(i+1),
                                 dtype=tf.float32,
                                 initializer=self._Binit_)

            # Q matrix in loss and subgradient
            if self._Q is None:
                self._Q_ = None
            else:
                self._Q_ = tf.constant (value=self._Q, dtype=tf.float32, name='Q')

            # identity target for A^T W
            eye = np.eye (self._n)
            self._eye_ = tf.constant (value=eye,
                                      dtype=tf.float32,
                                      name='eye')

    def inference(self, A_):
        """Unroll T gradient steps starting from W = A.

        :A_: A tensor or placeholder with shape (batchsize, m, n)
        :returns: tensor of the same shape holding the mapped weights W
        """
        At_ = tf.transpose (A_, [0,2,1])
        W_ = A_
        Q_ = self._Q_
        with tf.variable_scope (self._scope, reuse=True) as vs:
            for i in range (self._T):
                # residual of A^T W - I, its subgradient, and one descent step
                Z_  = bmxbm (At_, W_, batch_first=True) - self._eye_
                dF_ = self._subgradient_func (Z_, Q_)
                B_  = tf.get_variable ('B_%d'%(i+1))
                W_  = W_ - mxbm (B_, dF_)

        return W_

    def save_trainable_variables (self , sess , savefn):
        """
        Save trainable variables in the model to npz file with current value of each
        variable in tf.trainable_variables().

        :sess: Tensorflow session.
        :savefn: File name of saved file.
        """
        state = getattr (self , 'state' , {})
        utils.train.save_trainable_variables(
                sess, savefn, self._scope, **state )

    def load_trainable_variables (self, sess, savefn):
        """
        Load trainable variables from saved file.

        :sess: Tensorflow session.
        :savefn: File name of saved file.
        :returns: the loaded state dict (also stored on self.state)
        """
        self.state = utils.train.load_trainable_variables(sess, savefn)
| true
|
aa6fa4cdf1b7f7dfa21845ba9a65ae8cbfca7bc0
|
Python
|
cukejianya/leetcode
|
/array_and_strings/check_straight_line.py
|
UTF-8
| 588
| 3.4375
| 3
|
[] |
no_license
|
class Solution:
    def checkStraightLine(self, coordinates: List[List[int]]) -> bool:
        """Return True iff all points lie on one straight line.

        Uses the cross-product identity (x1-x0)*(y-y0) == (y1-y0)*(x-x0),
        which avoids division entirely: no special-casing of vertical lines
        (the old 10**5 sentinel could collide with a genuine slope) and no
        float comparisons.

        BUG FIX: the original tracked the reference slope with
        ``if not slope:``, which treats a legitimate slope of 0 (horizontal
        first segment) as "unset" and re-assigns it every iteration, so
        non-collinear inputs like [[0,0],[1,0],[2,5]] were accepted.
        """
        x0, y0 = coordinates[0]
        x1, y1 = coordinates[1]
        dx, dy = x1 - x0, y1 - y0
        for x, y in coordinates[2:]:
            if dx * (y - y0) != dy * (x - x0):
                return False
        return True
| true
|
6fbc97cdfb8c491a9ebdb507eb7fa46fa921467c
|
Python
|
mad-ops/AdventOfCode
|
/day03/2020_code.py
|
UTF-8
| 801
| 3.390625
| 3
|
[] |
no_license
|
import os
# look mah, im a straight line
# flipping along x
# y = 3x
# Resolve the input file relative to this script so it runs from any CWD.
scriptdir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(scriptdir,'2020_input.txt')) as slopes:
    track = [x.strip() for x in slopes.readlines()]
def ski(track, m):
count, tree, width = 0, 0, None
for row in track[1:]:
count += 1
if width is None:
width = len(row)
track = row.strip() * (1 + int(m*count/width))
tree += 1 if track[count*m] == '#' else 0
return tree
if __name__ == "__main__":
    # Partial application: tree(m) counts trees on slope (right m, down 1).
    tree = lambda x: ski(track,x)
    print(tree(3))
    #ooh this is cool, we just rip out all the tracks we're skipping
    #over and process it as the same as the first run asked for
    # Part 2: product over slopes (1,1) (3,1) (5,1) (7,1) (1,2);
    # track[::2] implements the "down 2" slope by dropping every other row.
    print( tree(1) * tree(3) * tree(5) * tree(7) * ski(track[::2], 1))
| true
|
66f8d68ad5d2eea77c7b54cc184d69a854f4d36d
|
Python
|
spettiett/dsc-capstone-project-v2-online-ds-pt-061019
|
/flatiron_stats.py
|
UTF-8
| 5,772
| 3.0625
| 3
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#flatiron_stats
import numpy as np
import scipy.stats as stats
from scipy.stats import anderson
import matplotlib.pyplot as plt
def monte_carlo_test(var1, var2, popl, col):
    """Two-sample permutation (Monte Carlo) test for a difference in means.

    Repeatedly re-splits `popl` into two random groups of the original sizes
    and counts how often the simulated mean difference exceeds the observed
    one; that frequency is the (one-sided) p-value. Prints the conclusion at
    alpha = 0.05 and plots the simulated distribution.

    :var1, var2: the two observed samples (array-like of the `col` values)
    :popl: pandas DataFrame holding the pooled population
    :col: column of `popl` whose mean is compared
    """
    print(f"Non-Parametric Tests: Using Monte Carlo Test")
    print(f"_____")
    mean_diff = np.mean(var1) - np.mean(var2)
    sample_diffs = []
    counter = 0
    # BUG FIX: the loop previously ran 1,000 iterations while the p-value was
    # divided by 10,000 (and reported as 10,000 simulations), deflating the
    # p-value by a factor of 10. Run and divide by the same count.
    n_sims = 10000
    for i in range(n_sims):
        samp1 = popl.sample(replace=False, n=len(var1))
        samp2 = popl.drop(samp1.index, axis=0)
        sample_diff = samp1[col].mean() - samp2[col].mean()
        sample_diffs.append(sample_diff)
        if sample_diff > mean_diff:
            counter += 1
    alpha = 0.05
    p = (counter / n_sims)
    print(f"P-value: {p}, is derived from 10,000 Monte Carlo simulations")
    if p <= alpha:
        print(f"Test Conclusion: __Reject H0__ \n")
    else:
        print(f"Test Conclusion: __Fail to reject H0__ \n")
    plt.hist(sample_diffs)
    plt.axvline(mean_diff, color='k', label="Mean")
    plt.legend()
    plt.title(f"p-value: {counter/n_sims} | mean value: {np.round(mean_diff,0)}")
    plt.show()
def shapiro_test(*samples):
    """Run the Shapiro-Wilk normality test on each sample given.

    Prints the W statistic, the p-value, and a reject / fail-to-reject
    conclusion at the 5% significance level.  Returns None.
    """
    print(f"Statistical Normality Tests: Using Shapiro-Wilk Test")
    print(f"_____")
    alpha = 0.05
    for sample in samples:
        stat, p = stats.shapiro(sample)
        print(f"Statistic: {round(stat, 4)}, P-value: {p}, Rounded P-value: {np.round((p), 4)}")
        verdict = (
            "__Reject H0__ Sample does NOT look Gaussian (non-normal distribution)"
            if p <= alpha
            else "__Fail to reject H0__ Sample looks Gaussian (normal distribution)"
        )
        print(f"Test Conclusion: {verdict}\n")
def anderson_test(*samples):
    """Run the Anderson-Darling normality test on each sample given.

    For every sample, prints the test statistic and, for each tabulated
    significance level, whether the statistic clears its critical value.
    Returns None.
    """
    alpha = 0.05
    print(f"Statistical Normality Tests: Using Anderson-Darling Test")
    print(f"_____")
    for sample in samples:
        outcome = anderson(sample)
        print('\nStatistic: %.3f' % outcome.statistic)
        p = 0  # kept for parity; Anderson-Darling reports critical values, not p-values
        for sl, cv in zip(outcome.significance_level, outcome.critical_values):
            if outcome.statistic < cv:
                print('%.3f: %.3f __Fail to reject H0__ Data looks normal' % (sl, cv))
            else:
                print('%.3f: %.3f __Reject H0__ Data does NOT look normal' % (sl, cv))
def ks_test(sample1, dist):
    """One-sample Kolmogorov-Smirnov test of `sample1` against `dist`.

    `dist` is a scipy distribution name (e.g. 'norm').  Prints the
    statistic, p-value and a conclusion at the 5% level.  Returns None.
    """
    print(f"Statistical Normality Tests: Using K-S Test")
    print(f"_____")
    alpha = 0.05
    stat, p = stats.kstest(sample1, dist)
    print(f"Statistic: {round(stat, 4)}, P-value: {p}, Rounded P-value: {np.round((p), 4)}")
    verdict = (
        "__Reject H0__ Sample distribution is NOT identical to a normal distribution\n"
        if p <= alpha
        else "__Fail to reject H0__ Sample distribution is identical to a normal distribution"
    )
    print(f"Test Conclusion: {verdict}")
def levene_test(sample1, sample2):
    """Levene's test for equality of variances between two samples.

    Prints the statistic, p-value and a conclusion at the 5% level.
    Returns None.
    """
    alpha = 0.05
    stat, p = stats.levene(sample1, sample2)
    print(f"Statistic: {round(stat, 4)}, P-value: {p}, Rounded P-value: {np.round((p), 4)}")
    verdict = (
        "__Reject H0__ Variances are NOT equal\n"
        if p <= alpha
        else "__Fail to reject H0__ Variances are equal (homoscedasticity)"
    )
    print(f"Test Conclusion: {verdict}")
def cohens_d(group1, group2):
    """Effect size (Cohen's d) between two groups.

    Uses a size-weighted pooled variance built from each input's own
    ``.var()`` default (ddof=0 for numpy arrays, ddof=1 for pandas Series).
    """
    n1, n2 = len(group1), len(group2)
    mean_gap = group1.mean() - group2.mean()
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)
    return mean_gap / np.sqrt(pooled_var)
def welch_t(a, b):
    """Absolute Welch's t statistic for two independent samples.

    Standard error uses sample variances (ddof=1) scaled by sample size.
    """
    se = np.sqrt(a.var(ddof=1) / a.size + b.var(ddof=1) / b.size)
    return np.abs((a.mean() - b.mean()) / se)
def welch_df(a, b):
    """Welch-Satterthwaite effective degrees of freedom for two samples."""
    # Per-sample variance-of-the-mean terms.
    va = a.var(ddof=1) / a.size
    vb = b.var(ddof=1) / b.size
    return (va + vb) ** 2 / (va ** 2 / (a.size - 1) + vb ** 2 / (b.size - 1))
def p_value_welch_ttest(a, b, two_sided=False):
    """P-value for Welch's t-test on two samples.

    One-sided by default; pass ``two_sided=True`` to double the tail
    probability for a two-sided test.
    """
    stat = welch_t(a, b)
    dof = welch_df(a, b)
    one_tail = 1 - stats.t.cdf(np.abs(stat), dof)
    return 2 * one_tail if two_sided else one_tail
############################################################
# # Normality Check?
# # Statistical Normality Tests: Using D’Agostino’s K^2 Test
# from scipy.stats import normaltest
# stat, p = normaltest(bev_EU["OrderTotal"])
# print('Statistics=%.3f, p=%.3f' % (stat, p))
# # interpret
# if p > alpha:
# print(f"p-value is {np.round((p), 4)}, Fail to reject H0, sample looks Gaussian (normal distribution)")
# else:
# print(f"p-value is {np.round((p), 4)}, Reject H0, sample does not look Gaussian (non-normal distribution)")
| true
|
54d0459b082babf9b07b7f112f7e639e13c04e6c
|
Python
|
ejohnso9/programming
|
/codeeval/chal_121/chal_121.py
|
UTF-8
| 1,096
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import sys
"""
Solution for codeeval challenge #121 (LOST IN TRANSLATION)
https://www.codeeval.com/open_challenges/121/
AUTHOR: Erik Johnson
DATE: 2015-NOV-16
"""
"""
def build_map(lines):
d = {} # rv
for i in range(3):
enc = lines[i].rstrip()
dec = lines[i + 3].rstrip()
assert len(enc) == len(dec)
for j in xrange(len(enc)):
if enc[j] != ' ':
d[enc[j]] = dec[j]
return d
"""
# Fixed substitution cipher for the challenge; spaces map to themselves.
# NOTE: this file is Python 2 (print statement below).
XLATE_D = {
    'a': 'y', 'g': 'v', 'm': 'l', 's': 'n',
    'b': 'h', 'h': 'x', 'n': 'b', 't': 'w',
    'c': 'e', 'i': 'd', 'o': 'k', 'u': 'j',
    'd': 's', 'j': 'u', 'p': 'r', 'v': 'p',
    'e': 'o', 'k': 'i', 'q': 'z', 'w': 'f',
    'f': 'c', 'l': 'g', 'r': 't', 'x': 'm',
    'y': 'a', 'z': 'q', ' ': ' ',
}
if __name__ == "__main__":
    # Decode each line of the input file given on the command line.
    lines_out = []
    for line in open(sys.argv[1], 'r'):
        line = line.rstrip()
        lines_out.append(
            ''.join([XLATE_D[c] for c in line]))
    print '\n'.join(lines_out)
    sys.stdout.flush()
| true
|
3546cf42938b48a93005de90e76d758232d1386e
|
Python
|
geezardo/codechef
|
/compete/ISCC2018/T21.py
|
UTF-8
| 109
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
# Read the number of test cases, then for each case read two ints and
# print (m * n) mod 3.
t = int(input())
for i in range(t):
    (m, n) = [int(x) for x in input().split()]
    print((n * m) % 3)
| true
|
bc903328114d9caaa36ac36eff323d8ff4c17c72
|
Python
|
dbbrandt/short_answer_granding_capstone_project
|
/calc_ngrams.py
|
UTF-8
| 377
| 2.578125
| 3
|
[] |
no_license
|
import pandas as pd
from similarity import calculate_containment
# Load the SAG2 answer/question CSVs, keeping ids as strings so leading
# zeros and joins are preserved.
answers = pd.read_csv('data/sag2/answers.csv', dtype={'id':str})
questions = pd.read_csv('data/sag2/questions.csv', dtype={'id':str})
# Compute 1-gram and 2-gram containment features and persist them.
n = [1,2]
ngrams = calculate_containment(questions, answers, n)
df = pd.DataFrame(ngrams, columns=['1','2','correct'])
df.to_csv('data/sag2/answer_ngrams.csv', index=False)
| true
|
052a0b9bd82c713b13bc862a0287b84484396652
|
Python
|
raphelemmanuvel/airflow-dags
|
/sample.py
|
UTF-8
| 580
| 3.0625
| 3
|
[] |
no_license
|
from airflow.operators import bash_operator
from airflow.operators import python_operator
def greeting():
    """Log 'Hello World!' at INFO level; used as the PythonOperator callable."""
    # Imported lazily so parsing the DAG file stays side-effect free.
    import logging
    logging.info('Hello World!')
# An instance of an operator is called a task. In this case, the
# hello_python task calls the "greeting" Python function.
hello_python = python_operator.PythonOperator(
    task_id='hello',
    python_callable=greeting)
# Likewise, the goodbye_bash task calls a Bash script.
# NOTE(review): neither task is attached to a DAG here -- presumably the
# enclosing DAG context is supplied elsewhere; confirm before scheduling.
goodbye_bash = bash_operator.BashOperator(
    task_id='bye',
    bash_command='echo Goodbye.')
| true
|
1097d7470a0c7492796e6734406121b77511151c
|
Python
|
TRHX/Python3-Spider-Practice
|
/JSReverse/passport_yhd_com/yhd_login.py
|
UTF-8
| 1,476
| 2.515625
| 3
|
[] |
no_license
|
# ==================================
# --*-- coding: utf-8 --*--
# @Time : 2021-10-09
# @Author : TRHX
# @Blog : www.itrhx.com
# @CSDN : itrhx.blog.csdn.net
# @FileName: yhd_login.py
# @Software: PyCharm
# ==================================
import execjs
import requests
login_url = 'https://passport.yhd.com/publicPassport/login.do'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
}
def get_encrypted_data(username, password):
    """Encrypt the credentials by executing the site's own JS routine.

    Reads yhd_encrypt.js from the working directory and calls its
    getEncryptedData(username, password); returns whatever the JS returns
    (used below as a dict with 'encryptedUsername'/'encryptedPassword').
    """
    with open('yhd_encrypt.js', 'r', encoding='utf-8') as f:
        yhd_js = f.read()
    encrypted_data = execjs.compile(yhd_js).call('getEncryptedData', username, password)
    return encrypted_data
def login(encrypted_data):
    """POST the encrypted credentials to the login endpoint and print the body.

    `encrypted_data` is the dict produced by get_encrypted_data().
    """
    data = {
        'credentials.username': encrypted_data['encryptedUsername'],
        'credentials.password': encrypted_data['encryptedPassword'],
        'sig': '',
        'is_jab': True,
        'captchaToken': '',
        'jab_st': 1,
        'loginSource': 1,
        'returnUrl': 'http://www.yhd.com',
        'isAutoLogin': 0,
        'slideData': ''
    }
    response = requests.post(url=login_url, data=data, headers=headers)
    print(response.text)
def main():
    """Prompt for credentials (Chinese prompts), encrypt them, and log in."""
    username = input('请输入登录账号: ')
    password = input('请输入登录密码: ')
    encrypted_data = get_encrypted_data(username, password)
    login(encrypted_data)
if __name__ == '__main__':
    main()
| true
|
c8a1ee5cbde3e3e2ec18f9d52de5309bb14bbbc0
|
Python
|
dacapo1142/clustering-tools
|
/tools/nmi_str.py
|
UTF-8
| 648
| 2.515625
| 3
|
[] |
no_license
|
from sklearn.metrics.cluster import normalized_mutual_info_score
import sys
from collections import deque
# Compare two clusterings stored as text files: each line is one cluster,
# listing its members' labels separated by whitespace.
file1, file2 = sys.argv[1:3]
v1 = deque()
v2 = deque()
d = dict()
reindex = 0
# First file: assign every member a dense index (reindex) and record its
# cluster id in v1, in index order.
with open(file1) as f:
    for cid, line in enumerate(f):
        vlabels = line.strip().split()
        v1.extend([cid] * len(vlabels))
        for vlabel in vlabels:
            d[vlabel] = reindex
            reindex += 1
v1 = list(v1)
v2 = [0] * len(v1)
# Second file: place each member's cluster id at the index chosen above,
# so v1[i] and v2[i] describe the same member.
with open(file2) as f:
    for cid, line in enumerate(f):
        vlabels = line.strip().split()
        for vlabel in vlabels:
            v2[d[vlabel]] = cid
print(normalized_mutual_info_score(v1, v2))
| true
|
dbd82650037fd2ef8af35fb49b5063782691e5bd
|
Python
|
KimYeong-su/programmers
|
/python/DFS_BFS/tripRoute.py
|
UTF-8
| 728
| 2.90625
| 3
|
[] |
no_license
|
def solution(tickets):
    """Return the lexicographically smallest itinerary using every ticket
    exactly once, starting from 'ICN'.

    tickets -- list of [departure, arrival] airport pairs.

    Improvement: the original stored candidate itineraries in a module-level
    ``global answer``, leaking mutable state across calls; results are now
    kept in a local list with the same DFS + sort behavior.
    """
    ticket_count = len(tickets)
    used = [False] * ticket_count
    routes = []

    def dfs(path):
        # Every ticket consumed: record the completed itinerary.
        if all(used):
            routes.append(path)
            return
        for idx in range(ticket_count):
            if used[idx]:
                continue
            depart, arrive = tickets[idx]
            if depart == path[-1]:
                used[idx] = True
                dfs(path + [arrive])
                used[idx] = False

    dfs(['ICN'])
    routes.sort()
    return routes[0]
# Smoke tests with the two sample cases from the problem statement.
print(solution([['ICN', 'JFK'], ['HND', 'IAD'], ['JFK', 'HND']]))
print(solution([['ICN', 'SFO'], ['ICN', 'ATL'], ['SFO', 'ATL'], ['ATL', 'ICN'], ['ATL','SFO']]))
| true
|
5503e5181cb5adfc7b4bfafe7979dea2389defbd
|
Python
|
tatiaris/tatiame-old
|
/coding_problems/project_euler/2.py
|
UTF-8
| 566
| 4.21875
| 4
|
[] |
no_license
|
# Project Euler problem 2: each Fibonacci term is the sum of the previous
# two; starting from 1 and 2, sum the even-valued terms that do not exceed
# four million.
limit = 4000000
fibonacci = [1, 2]
# Grow the sequence until the next term would reach the limit.
while fibonacci[-1] + fibonacci[-2] < limit:
    fibonacci.append(fibonacci[-1] + fibonacci[-2])
# Total of the even-valued terms.
even_sum = sum(term for term in fibonacci if term % 2 == 0)
print(even_sum)
# Answer: 4613732
| true
|
ecae1ca3d4bb7cbdcd335a2be20b993b601d3ac8
|
Python
|
ttomchy/LeetCodeInAction
|
/greedy/q861_score_after_flipping_matrix/solution.py
|
UTF-8
| 1,003
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
FileName: solution.py
Description:
Author: Barry Chow
Date: 2020/12/7 3:13 PM
Version: 0.1
"""
import math
class Solution:
    """Greedy solver for 'Score After Flipping Matrix' (LeetCode 861)."""

    def matrixScore(self, A):
        """Maximize the sum of the binary numbers encoded in A's rows.

        Strategy: first flip any row whose leading bit is 0 (the most
        significant bit dominates everything after it), then flip each
        later column in which ones are not the majority.  A is modified
        in place; the score is returned as a float because each bit
        weight comes from math.pow.
        """
        rows = len(A)
        cols = len(A[0])
        # Row flips: force every row to start with a 1.
        for r in range(rows):
            if A[r][0] == 0:
                A[r] = [1 - bit for bit in A[r]]
        # Column flips (skipping the first): make 1s the majority.
        for c in range(1, cols):
            ones = sum(row[c] for row in A)
            if ones < rows / 2:
                for r in range(rows):
                    A[r][c] = 1 - A[r][c]
        # Read each row as a binary number and accumulate the total.
        score = 0
        for row in A:
            for c, bit in enumerate(row):
                score += bit * math.pow(2, cols - c - 1)
        return score
if __name__ =='__main__':
    # Sample from the problem statement; expected best score is 39.
    A = [[0,0,1,1],[1,0,1,0],[1,1,0,0]]
    s = Solution()
    res = s.matrixScore(A)
    assert res ==39
| true
|
d5f6f0aded9246414167679cd073eac227ddf3c0
|
Python
|
Franklin-Wu/project-euler
|
/p066.py
|
UTF-8
| 4,138
| 3.6875
| 4
|
[] |
no_license
|
# Diophantine equation
#
# Consider quadratic Diophantine equations of the form:
# x^2 - D * y^2 = 1
#
# For example, when D = 13, the minimal solution in x is 649^2 - 13 * 180^2 = 1.
#
# It can be assumed that there are no solutions in positive integers when D is square.
#
# By finding minimal solutions in x for D = {2, 3, 5, 6, 7}, we obtain the following:
# 3^2 - 2 * 2^2 = 1
# 2^2 - 3 * 1^2 = 1
# 9^2 - 5 * 4^2 = 1
# 5^2 - 6 * 2^2 = 1
# 8^2 - 7 * 3^2 = 1
#
# Hence, by considering minimal solutions in x for D <= 7, the largest x is obtained when D = 5.
#
# Find the value of D <= 1000 in minimal solutions of x for which the largest value of x is obtained.
import math;
import time;
import sys;
start_time = time.time();
N = 1000;
# Extended Euclidean algorithm.
# Finds the gcd and the integral solutions to:
# ax + by = gcd(a, b)
# Source: https://en.wikipedia.org/wiki/Euclidean_algorithm and https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm.
def extended_euclid(a, b):
    """Extended Euclidean algorithm.

    Returns (g, s, t) such that g = gcd(a, b) and a*s + b*t = g (g carries
    the sign produced by the classic iteration, so it can be negative when
    b is negative, as in find_m's extended_euclid(k, -y) call).

    Fix: the original computed the quotient with '/', which is floor
    division on Python 2 but float division on Python 3; '//' is identical
    on Python 2 for these operands and correct on Python 3.
    """
    r0, s0, t0 = a, 1, 0
    r1, s1, t1 = b, 0, 1
    while r1 != 0:
        q = r0 // r1
        # r0 - q*r1 == r0 % r1 for Python's floor-division quotient.
        r0, r1 = r1, r0 - q * r1
        s0, s1 = s1, s0 - q * s1
        t0, t1 = t1, t0 - q * t1
    return (r0, s0, t0)
# Find the m closest to sqrt(D) such that k divides (x + ym), i.e. k | (x + ym).
# kt = x + ym
# kt - ym = x
# kt + -ym = x
# extended_euclid(k, -y) will give a solution (g, r, s) such that kr + (-ys) = g, where g = gcd(k, -y).
# It must be that g|x, or there is no solution (this function checks this). So x/g is integral.
# kr + (-ys) = g
# Multiply by x / g:
# k(rx/g) + -y(sx/g) = gx/g = x
# This means the following are solutions to the third equation above, kt + -ym = x:
# t = rx/g, m = sx/g
# Solutions for m will be in the form kt + m' where m' is any solution for m. Since sx/g is a solution for m:
# m = kt + sx/g
# sx/g will not generally be > 0 and <= k. To get that value:
# m = kt + ((sx/g) % k)
# To get the m closest to sqrt(D):
# m = kt + ((sx/g) % k) ~ sqrt(D)
# kt = sqrt(D) - ((sx/g) % k)
# t = (sqrt(D) - ((sx/g) % k)) / k
# m = kt + ((sx/g) % k)
def find_m(D, x, y, k):
    # Find the m closest to sqrt(D) with k | (x + y*m), via the Bezout
    # coefficients from extended_euclid (see the derivation comments above).
    # NOTE: Python 2 code (print statements, integer '/').
    sqrtD = int(round(math.sqrt(D)));
    (g, r, s) = extended_euclid(k, -y);
    # g must divide x or the congruence x + y*m = 0 (mod k) has no solution.
    if x % g != 0:
        print "Error: %d does not divide %d." % (g, x);
        sys.exit(-1);
    print D, k;
    # z is a particular solution for m reduced into [0, k).
    z = ((s * (x / g)) % k);
    print z;
    t = (sqrtD - z) / k;
    m1 = (k * t) + z;
    m2 = m1 + k;
    print m1, m2;
    # Pick whichever of the two bracketing candidates is closer to sqrt(D).
    if D - (m1 * m1) < (m2 * m2) - D:
        return m1;
    else:
        return m2;
# Source: https://en.wikipedia.org/wiki/Chakravala_method
# Examples from source:
# 3 | 1m + 8 -> 3 | 1m + 2 -> m = 3t + 1
# 6 | 5m + 41 -> 6 | 5m + 5 -> m = 6t + 5
# 7 | 11m + 90 -> 7 | 11m + 6 -> m = 7t + 2
# Own examples:
# 22 | 4m + 6 -> 22 | 4m + 6 -> m = 22t + 4
# 11 | 2m + 3 -> 11 | 2m + 3 -> m = 11t + 4
def chakravala(D, x, y, k):
    # One step of the chakravala method: from (x, y, k) with
    # x^2 - D*y^2 = k, produce the next triple (see source link above).
    # NOTE: Python 2 code (print statements, integer '/').
    abs_k = abs(k);
    m = find_m(D, x, y, abs_k);
    x_prime = ((x * m) + (D * y)) / abs_k;
    y_prime = (x + (y * m)) / abs_k;
    k_prime = ((m * m) - D) / k;
    print "m = %d." % m;
    print ((x * m) + (D * y)), abs_k, ((x * m) + (D * y)) % abs_k, x_prime;
    print (x + (y * m)), abs_k, (x + (y * m)) % abs_k, y_prime;
    print (D - (m * m)), k, (D - (m * m)) % k, k_prime;
    return (x_prime, y_prime, k_prime);
# For each non-square D <= N, iterate chakravala from the trivial seed
# until k == 1, i.e. until x^2 - D*y^2 = 1; track the D giving the
# largest minimal x.  NOTE: Python 2 code (print statements).
x_max = 0;
x_argmax_D = 0;
for D in range(N + 1):
    # Exclude perfect squares.
    if (int(math.sqrt(D)) ** 2) != D:
        x = int(math.sqrt(D));
        y = 1;
        k = (x * x) - D;
        while k != 1:
            (x, y, k) = chakravala(D, x, y, k);
            print x, y, k;
            print;
        print x;
        if x > x_max:
            x_max = x;
            x_argmax_D = D;
print "----------------------------------------------------------------------------";
print "x_max = %d, x_argmax_D = %d." % (x_max, x_argmax_D);
print;
print "Execution time = %f seconds." % (time.time() - start_time);
| true
|
2ef3200efefbf94d32cf8478380db41ed3899eef
|
Python
|
Noronha1612/wiki_python-brasil
|
/Estruturas de Decisão/ex10.py
|
UTF-8
| 326
| 4.03125
| 4
|
[] |
no_license
|
# Ask which school shift the user attends and greet accordingly.
# The \33[...m sequences are ANSI color codes; user-facing text is
# Portuguese and left untouched.
print("""\33[32mEm que turno você estuda?
[M] Matutino
[V] Vespertino
[N] Noturno""")
# NOTE(review): [0] raises IndexError on empty input -- confirm intended.
esc = input('Sua opção: ').upper()[0]
print('\33[1;33m')
if esc == 'M':
    print('Bom dia!')
elif esc == 'V':
    print('Boa tarde!')
elif esc == 'N':
    print('Boa noite!')
else:
    print('\33[1;31mValor inválido.')
print('\33[m')
| true
|
e7f5da98411d75e57988c0a94f85c754bdd69089
|
Python
|
yurilifanov/random
|
/alice_and_mathematics/py/test.py
|
UTF-8
| 489
| 3.046875
| 3
|
[] |
no_license
|
# Sanity checks for CRT-style reconstruction modulo M - 1.
M = 10 ** 9 + 7
A = (M - 1) // 2
B = 2
# Bezout coefficients: A*X + B*Y must equal gcd(A, B) = 1.
X = 1
Y = (1 - A) // 2
assert A * X + B * Y == 1, 'Test 1 failed.'
# Reconstruct x mod (M-1) from its residues mod A and mod B.
x = 2 * M
a = x % A
b = x % B
assert (b * X * A + a * Y * B) % (M - 1) == x % (M - 1), 'Test 2 failed.'
from scipy.special import binom
def binom_mod2(n, k):
    """Return True iff C(n, k) is odd.

    By Lucas' theorem mod 2, C(n, k) is odd exactly when every set bit of
    k is also set in n, i.e. k & (n ^ k) == 0.  Prints the 32-bit patterns
    involved and cross-checks the answer against scipy's binom.
    """
    mask = k & (n ^ k)
    print('{}\n{}\n{}'.format(format(n, '032b'), format(k, '032b'), format(mask, '032b')))
    ans = not mask
    assert ans == bool(int(binom(n, k)) % 2), 'Test 3 failed.'
    return ans
| true
|
ba264963e12d83427af836b60c96e41c28705628
|
Python
|
joab40/Alpha
|
/lib/walpha_question_class.py
|
UTF-8
| 788
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE: Python 2 file (print statement, reload, raw_input below).
try:
    import wolframalpha
except ImportError:
    # NOTE(review): 'libpath' is undefined here and 'sys' is imported only
    # after this block -- this error path would itself raise NameError.
    print 'You need wolframalpha in %s ',libpath
    sys.exit(1)
import sys
# Python 2 workaround to default str handling to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
class alpha(object):
    """Thin wrapper around a wolframalpha.Client (Python 2)."""
    def __init__(self, key):
        #creating object
        # `key` is the WolframAlpha API key.
        self.client = wolframalpha.Client(key)
    def ask(self, msg):
        # Query the API; keep the result on self and return the text of
        # the second pod (pod 0 is the input interpretation).
        self.res = self.client.query(msg)
        return self.res.pods[1].text
    def input_ask(self):
        # Interactive variant: prompt, query, then print the answer.
        self.msg = raw_input("What is your question? ")
        self.ask(self.msg)
        self.print_message()
    def print_message(self):
        # Requires ask()/input_ask() to have run first (sets self.res).
        print self.res.pods[1].text
#test = alpha('WOLFRAMALPHA_API_KEY')
#test.ask('who is the fastest runner on earth')
#test.input_ask()
#test.print_message()
| true
|
c7e5d5b84491c15e36f05192539938a1f0de21b4
|
Python
|
BlancaCalvo/Apps2_Scifact
|
/label_evaluation.py
|
UTF-8
| 2,697
| 2.75
| 3
|
[] |
no_license
|
# Evaluate predicted claim/document labels against gold evidence:
# reads the corpus, gold dataset and predictions (jsonlines), optionally
# filters to structured/unstructured abstracts, then prints accuracy,
# F1/precision/recall per class and the confusion matrix.
import argparse
import jsonlines
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix
parser = argparse.ArgumentParser()
parser.add_argument('--corpus', type=str, default='data/corpus.jsonl', required=False)
parser.add_argument('--dataset', type=str, default='data/claims_dev.jsonl', required=False)
parser.add_argument('--label-prediction', type=str, default ='predictions/predicted_label_dev.jsonl', required=False)
parser.add_argument('--filter', type=str, choices=['structured', 'unstructured'])
args = parser.parse_args()
corpus = {doc['doc_id']: doc for doc in jsonlines.open(args.corpus)}
dataset = jsonlines.open(args.dataset)
label_prediction = jsonlines.open(args.label_prediction)
pred_labels = []
true_labels = []
# Class name -> integer id used by the sklearn metrics below.
LABELS = {'CONTRADICT': 0, 'NOT_ENOUGH_INFO': 1, 'SUPPORT': 2}
for data, prediction in zip(dataset, label_prediction):
    assert data['id'] == prediction['claim_id']
    # Optional restriction to (un)structured corpus documents.
    if args.filter:
        prediction['labels'] = {doc_id: pred for doc_id, pred in prediction['labels'].items()
                                if corpus[int(doc_id)]['structured'] is (args.filter == 'structured')}
    if not prediction['labels']:
        continue
    claim_id = data['id']
    for doc_id, pred in prediction['labels'].items():
        pred_label = pred['label']
        #true_label = data['evidence'].get(doc_id)[0]['label'] #{es['label'] for es in data['evidence'].get(doc_id) or []}
        true_label = {es['label'] for es in data['evidence'].get(doc_id) or []}
        #print(LABELS[pred_label])
        #print(true_label)
        assert len(true_label) <= 1, 'Currently support only one label per doc'
        # Documents with no gold evidence default to NOT_ENOUGH_INFO.
        true_label = next(iter(true_label)) if true_label else 'NOT_ENOUGH_INFO' # if one other document has a different label, change it to that??
        pred_labels.append(LABELS[pred_label])
        true_labels.append(LABELS[true_label])
print(pred_labels)
print(true_labels)
print(f'Accuracy {round(sum([pred_labels[i] == true_labels[i] for i in range(len(pred_labels))]) / len(pred_labels), 4)}')
print(f'Macro F1: {f1_score(true_labels, pred_labels, average="macro").round(4)}')
print(f'Macro F1 w/o NEI: {f1_score(true_labels, pred_labels, average="macro", labels=[0, 2]).round(4)}')
print()
print(' [C N S ]')
print(f'F1: {f1_score(true_labels, pred_labels, average=None).round(4)}')
print(f'Precision: {precision_score(true_labels, pred_labels, average=None).round(4)}')
print(f'Recall: {recall_score(true_labels, pred_labels, average=None).round(4)}')
print()
print('Confusion Matrix:')
print(confusion_matrix(true_labels, pred_labels))
| true
|
13e068a124e8148cdeeaea2e06d2d79fb7a27aaa
|
Python
|
nikuzuki/I111_Python_samples
|
/2/20.py
|
UTF-8
| 140
| 2.84375
| 3
|
[] |
no_license
|
# Power computation: last three digits of 293 ** 10_000_000.
# Fix: the original multiplied the full integer up over 10 million
# iterations, building a number with roughly 24.7 million digits before
# reducing; three-argument pow() reduces modulo 1000 at every step and
# prints the identical answer.
a = pow(293, 10000000, 1000)
b = a % 1000  # already reduced; kept so the variable names match the original
print("answer="+str(b))
| true
|
f16eefd79ff2116ef2333352eb7a40496dd0ab72
|
Python
|
Gabriel-ino/python_basics
|
/senha.py
|
UTF-8
| 481
| 4.09375
| 4
|
[] |
no_license
|
# Simple password gate (Portuguese prompts): the user sets a password,
# then must enter it; more than 3 wrong attempts terminates the program.
senha = str(input('Crie uma senha: '))
cont = 1
while True:
    ent = str(input('Digite aqui a sua senha para entrar:'))
    # Re-prompt while the attempt is wrong, counting attempts.
    while ent != senha:
        ent = str(input('Senha errada, tente novamente.'))
        cont += 1
        if cont > 3:
            print('BLOQUEADO! VOCÊ ESTÁ IMPEDIDO DE ACESSAR.')
            quit()
    if ent == senha:
        print('Senha correta, seja bem-vindo.')
        break
print(f'Você precisou de {cont} tentativas para acertar sua senha')
| true
|
512059e7466758e086532102b9d8d5b3debc517a
|
Python
|
shadmanrafid/Machine-Learning
|
/Decision_Tree.py
|
UTF-8
| 7,419
| 3.15625
| 3
|
[] |
no_license
|
import csv
import sys
import os
def str_column_to_int(Input_list, i):
    """Convert column i of every row from string to int, in place,
    trimming surrounding whitespace first."""
    for record in Input_list:
        record[i] = int(record[i].strip())
def make_prediction(node, row):
    """Walk the decision tree from `node` and return the predicted class
    for `row`.

    A dict child is an internal split node ({'field', 'value',
    'left_branch', 'right_branch'}); anything else is a leaf label.
    """
    side = 'left_branch' if row[node['field']] < node['value'] else 'right_branch'
    branch = node[side]
    if type(branch) is dict:
        return make_prediction(branch, row)
    return branch
def group_at_split(Input_list, field_num, value):
    """Partition rows into (left, right): rows whose `field_num` attribute
    is strictly below `value` go left, the rest go right."""
    left_rows, right_rows = [], []
    for record in Input_list:
        bucket = left_rows if record[field_num] < value else right_rows
        bucket.append(record)
    return left_rows, right_rows
def terminal_group_class(group_of_items, fields):
    """Majority class label for a terminal (leaf) group of rows.

    The label lives in the last column (index fields - 1).  Ties go to the
    class appearing first in the group; an empty group yields None.
    """
    labels = [record[fields - 1] for record in group_of_items]
    if not labels:
        return None
    # max with list.count returns the first label reaching the top count,
    # matching the original strict-> scan.
    return max(labels, key=labels.count)
def calculate_gini_index(split_group, classes, fields):
    """Weighted Gini impurity of a candidate split.

    For each group the impurity is 1 - sum(p_c**2) over the class
    proportions p_c; each group is weighted by its share of the total rows.
    The class label is the last column (index fields - 1).

    Bug fixed: the original assigned ``group_gini = occurences ** 2``
    inside the class loop instead of accumulating, so only the last class
    in `classes` contributed to the impurity.
    """
    total_rows = float(sum(len(group) for group in split_group))
    gini_index = 0.0
    for group in split_group:
        group_size = len(group)
        if group_size == 0:
            continue  # an empty side contributes nothing (and avoids /0)
        score = 0.0
        group_labels = [row[fields - 1] for row in group]
        for class_id in classes:
            proportion = group_labels.count(class_id) / group_size
            score += proportion ** 2
        gini_index += (1.0 - score) * (group_size / total_rows)
    return gini_index
def find_split_point(Input_list, fields):
    """Exhaustively search every (feature, threshold) pair and return the
    split with the lowest Gini index.

    Candidate thresholds are the observed values of each feature.  Returns
    a dict with 'field', 'value' and the winning 'branch_grouping'.
    """
    class_values = list(set(row[fields - 1] for row in Input_list))
    best_gini = float('inf')
    best_field = float('inf')
    best_value = float('inf')
    best_groups = None
    for feature in range(fields - 1):
        for row in Input_list:
            candidate = group_at_split(Input_list, feature, row[feature])
            gini = calculate_gini_index(candidate, class_values, fields)
            if gini < best_gini:
                best_field = feature
                best_value = row[feature]
                best_gini = gini
                best_groups = candidate
    return {'field': best_field, 'value': best_value, 'branch_grouping': best_groups}
def split_at_current_node(splitting_node, max_depth, min_reecords, tree_depth, fields):
    """Recursively grow the tree below `splitting_node` (in place).

    A branch becomes a leaf when it is empty, when `max_depth` is reached,
    or when it holds no more than `min_reecords` rows; otherwise the best
    sub-split is found and recursed into.

    Bug fixed: the original collapsed a small *right* branch into a leaf
    computed from the *left* branch's rows (copy-paste error).
    """
    left_branch, right_branch = splitting_node['branch_grouping']
    del splitting_node['branch_grouping']
    # One side empty: both children share the combined majority-class leaf.
    if len(left_branch) == 0 or len(right_branch) == 0:
        leaf = terminal_group_class(left_branch + right_branch, fields)
        splitting_node['left_branch'] = splitting_node['right_branch'] = leaf
        return
    # Depth limit reached: close both branches as leaves.
    if tree_depth >= max_depth:
        splitting_node['left_branch'] = terminal_group_class(left_branch, fields)
        splitting_node['right_branch'] = terminal_group_class(right_branch, fields)
        return
    if len(left_branch) <= min_reecords:
        splitting_node['left_branch'] = terminal_group_class(left_branch, fields)
    else:
        splitting_node['left_branch'] = find_split_point(left_branch, fields)
        split_at_current_node(splitting_node['left_branch'], max_depth, min_reecords, tree_depth + 1, fields)
    if len(right_branch) <= min_reecords:
        splitting_node['right_branch'] = terminal_group_class(right_branch, fields)
    else:
        splitting_node['right_branch'] = find_split_point(right_branch, fields)
        split_at_current_node(splitting_node['right_branch'], max_depth, min_reecords, tree_depth + 1, fields)
def build_decision_tree(Input_list, max_depth, min_records, fields):
    # building the decision tree
    """Train a CART-style decision tree on `Input_list`.

    max_depth limits recursion depth, min_records is the leaf-size
    threshold, fields is the number of columns (last one is the label).
    Returns the root split node (a dict).
    """
    root = find_split_point(Input_list, fields)
    split_at_current_node(root, max_depth, min_records, 1, fields)
    return root
def calculate_accuracy(test_results, accurate_results):
    """Percentage of predictions matching the ground-truth labels."""
    total = len(accurate_results)
    matches = sum(1 for i in range(total) if accurate_results[i] == test_results[i])
    return (matches / total) * 100
if __name__ == '__main__':
    # CLI: argv[1] = training CSV, argv[2] = test CSV (basenames only).
    Input_file = os.path.basename(sys.argv[1])
    Test_data_file = os.path.basename(sys.argv[2])
    Input_list = []
    Test_data = []
    Accurate_results = []
    # taking input from train csv file
    with open(Input_file) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',') #os.path.basename(path)
        for row in readCSV:
            Input_list.append(row)
    # formatting input for processing
    fields = len(Input_list[0])
    for i in range(fields):
        str_column_to_int(Input_list, i)
    # taking input from test csv file
    with open(Test_data_file) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        for row in readCSV:
            Test_data.append(row)
    test_fields = len(Test_data[0])
    # formatting the test data
    for i in range(test_fields):
        str_column_to_int(Test_data, i)
    pseudo_accurate_results = []
    # accuracy_test_file = Input_file[:11] + 'example_predictions.csv'
    # with open(accuracy_test_file) as csvfile:
    #     readCSV = csv.reader(csvfile, delimiter=',')
    #     for row in readCSV:
    #         pseudo_accurate_results.append(row)
    #
    # accuracy_fields = len(pseudo_accurate_results[0])
    # for i in range(accuracy_fields):
    #     str_column_to_int(pseudo_accurate_results, i)
    #
    # for i in range(len(pseudo_accurate_results)):
    #     Accurate_results.append(pseudo_accurate_results[i][0])
    test_results = []
    # Train (depth limit 100, min 3 records per leaf) and predict.
    root = build_decision_tree(Input_list, 100, 3, fields)
    for row in Test_data:
        test_results.append(make_prediction(root, row))
    # accuracy = calculate_accuracy(test_results, Accurate_results)
    # print(accuracy)
    # creating the output csv file
    i = Input_file.find('_')
    output_file = 'blackbox'+Input_file[8:10] + '_predictions.csv'
    output_list = []
    for i in range(len(test_results)):
        l = list()
        l.append(test_results[i])
        output_list.append(l)
    # writeFile = open(output_file, 'w')
    # writer= csv.writer(writeFile)
    # writing to the output csv file
    with open(output_file, 'w') as writeFile:
        writer = csv.writer(writeFile)
        for i in range(len(test_results)):
            l = list()
            l.append(test_results[i])
            writer.writerow(l)
| true
|
416f03774482b9df4aef166db3fb1cf3438d7998
|
Python
|
bn-d/project_euler
|
/000-100/047/main.py
|
UTF-8
| 883
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
import sys
import math
sys.path.insert(0, '../../')
import util
def primes_num(n):
    """Count the distinct prime factors of n (expects n >= 2).

    Trial division: each prime found is fully stripped so it counts once;
    when the trial divisor passes sqrt(remaining), the remainder is itself
    a prime and counted.

    Bug fixed: the original used ``int(cur / cur_div)`` -- float division,
    which silently loses precision for n >= 2**53; integer ``//`` is exact.
    """
    remaining = n
    divisor = 2
    distinct = 0
    while True:
        if remaining % divisor == 0:
            distinct += 1
            # Strip every power of this prime so it is counted only once.
            while remaining % divisor == 0:
                remaining //= divisor
        if remaining == 1:
            break
        # Anything left above sqrt(remaining) must itself be prime.
        if divisor > math.sqrt(remaining):
            distinct += 1
            break
        # After 2, only odd divisors need testing.
        divisor += 1 if divisor == 2 else 2
    return distinct
if __name__ == '__main__':
    # Project Euler 47: find the first run of four consecutive integers
    # each having exactly four distinct prime factors; print the first.
    cur = 644
    count = 0
    while True:
        # A prime has one factor, so it always breaks a run.
        if util.is_prime(cur):
            count = 0
        else:
            if primes_num(cur) == 4:
                count += 1
            else:
                count = 0
        if count == 4:
            print(cur - 3)
            break
        cur += 1
| true
|
3bf83198b99783e87691d7e73e2e3bbdacbb5f9d
|
Python
|
PhilipWafula/k2-connect-python
|
/k2connect/webhooks.py
|
UTF-8
| 3,715
| 2.75
| 3
|
[
"Python-2.0"
] |
permissive
|
"""
This module handles the creation of webhook subscriptions. It creates
a subscription to receive webhooks for a particular event_type.
"""
from k2connect import exceptions
from k2connect import json_builder
from k2connect import service
from k2connect import validation
WEBHOOK_SUBSCRIPTION_PATH = 'webhook-subscription'
class WebhookService(service.Service):
    """
    The WebhookService class contains methods for the creation of a webhook
    subscription.
    Example:
        # initialize webhook service
        >>> k2-connect.WebhookService
        >>> k2-connect.create_subscription('buygoods_transaction_reversed',
        >>>................................'https://myapplication/webhooks',
        >>>................................os.getenv('SOME_UNCRACKABLE_SECRET'))
    """
    def __init__(self, base_url):
        """
        Initialize the service with the Kopo Kopo API base URL.
        :param base_url: base URL for all webhook requests
        :type base_url: str
        """
        super(WebhookService, self).__init__(base_url)
    def create_subscription(self,
                            bearer_token,
                            event_type,
                            webhook_endpoint,
                            webhook_secret):
        """
        Creates a subscription to a webhook service.
        Returns a request response object < class, 'requests.models.Response'>
        :param bearer_token: Access token to be used to make calls to
        the Kopo Kopo API
        :type bearer_token: str
        :param event_type:Type of subscription event. Should be one of:
        buygoods_transaction_received, buygoods_transaction_reversed,
        settlement_transfer_completed, customer_created
        :type event_type: str
        :param webhook_endpoint: HTTP end point to send the webhook.
        :type webhook_endpoint: str
        :param webhook_secret: Secret used to encrypt the request payload using HMAC.
        :type webhook_secret: str
        :return: requests.models.Response
        """
        # event types accepted by the API (checked by substring below)
        event_types_to_check = ['b2b_transaction_received',
                                'buygoods_transaction_received',
                                'buygoods_transaction_reversed',
                                'merchant_to_merchant_transaction_received',
                                'settlement_transfer_completed',
                                'customer_created'
                                ]
        # build subscription url
        subscription_url = self._build_url(WEBHOOK_SUBSCRIPTION_PATH)
        # define headers (copied so the shared defaults are not mutated)
        headers = dict(self.headers)
        # validate string arguments
        validation.validate_string_arguments(bearer_token,
                                             event_type,
                                             webhook_endpoint,
                                             webhook_secret)
        headers['Authorization'] = 'Bearer ' + bearer_token + ''
        # NOTE(review): substring matching -- e.g. a typo containing a valid
        # name would pass; confirm exact matching is not required.
        if not any(check in event_type for check in event_types_to_check):
            raise exceptions.InvalidArgumentError('Event type not recognized by k2-connect')
        # validate webhook endpoint
        validation.validate_url(webhook_endpoint)
        # define subscription payload
        subscription_payload = json_builder.webhook_subscription(event_type=event_type,
                                                                 webhook_endpoint=webhook_endpoint,
                                                                 webhook_secret=webhook_secret)
        return self._make_requests(headers=headers,
                                   method='POST',
                                   url=subscription_url,
                                   payload=subscription_payload)
| true
|
a1a62b3666088dae910431c0375a4d8fe708cb7a
|
Python
|
HenriqueSamii/Assessment-Fundamentos-de-Programa-o-com-Python
|
/AssesmentPython2.py
|
UTF-8
| 336
| 3.921875
| 4
|
[] |
no_license
|
# Assignment: read n from the user and sum all even numbers from 1 to n
# inclusive, then print the total.  (Original comment was in Portuguese.)
holder = 0
geter = int(input("Número - "))
# range(0, n+1, 2) visits exactly the even numbers up to and including n.
for items in range(0,geter+1,2):
    #print(items)
    holder += items
print(holder)
| true
|
223fbd331a4ac305111f5fca14b82ffb9a37f7da
|
Python
|
FERARMAO/github_ci
|
/main.py
|
UTF-8
| 116
| 2.546875
| 3
|
[] |
no_license
|
"""
Add function example
"""
def add(var1, var2):
    """Return the sum of the two operands."""
    total = var1 + var2
    return total
| true
|
d0a5dadab996207dc4f2ed5076d450eaea8dbfe0
|
Python
|
john-zcliu/Kattis-1
|
/plowking.py
|
UTF-8
| 363
| 2.765625
| 3
|
[] |
no_license
|
# Read n (nodes) and m (edges) and greedily build a connected graph whose
# plow cost is minimized, printing the total weight used.
n, m = map(int, input().split())
ans = 0
currNode = 2
currWeight = 1
while currNode <= n:
    # Connect the next node with the cheapest weight still available.
    ans += currWeight
    currWeight = currWeight+1
    m = m-1
    # Spend surplus edges on already-connected nodes, which only bumps
    # the weight of future edges (never the answer).
    edgesWasted = max(0, currNode-2)
    edgesNeeded = n-currNode
    edgesWasted = min(edgesWasted, m-edgesNeeded)
    m -= edgesWasted
    currWeight += edgesWasted
    currNode = currNode+1
print(ans)
| true
|
f3b27fc720442e6b60e6746a4d0b03e31bfc94b3
|
Python
|
edizquierdo/ToddALife2020
|
/src/diff_steep_runscript.py
|
UTF-8
| 291
| 2.53125
| 3
|
[] |
no_license
|
import os
import time
import sys
# argv[1] = number of repetitions forwarded to each simulate.py run.
reps = int(sys.argv[1])
n=15
k=6
# Launch one backgrounded simulate.py per steep_checks value (1..n),
# staggered by a second to avoid a thundering start.
for steep_checks in range(1, n+1):
    print("Number of genes being checked in steepLearn: {}".format(steep_checks))
    os.system('time python simulate.py '+str(k)+' '+str(reps)+' '+str(steep_checks)+' &')
    time.sleep(1)
| true
|
c4d8fc45490f1341a9e06418ec13ad22f9ee725f
|
Python
|
ishmam-hossain/problem-solving
|
/leetcode/1261_find_elements_in_contaminated_bin_tree.py
|
UTF-8
| 953
| 3.78125
| 4
|
[] |
no_license
|
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class FindElements:
    """Recover a contaminated binary tree (all values -1) using the rule
    root = 0, left child = 2*v + 1, right child = 2*v + 2, and answer
    membership queries against the recovered values (LeetCode 1261).

    Bug fixed: the original only added *child* values to the lookup set,
    so find(0) wrongly returned False even though the root's value is 0.
    """

    def __init__(self, root):
        self.elements = set()

        def restore(node, value):
            # Assign the recovered value, remember it, then recurse.
            if node is None:
                return
            node.val = value
            self.elements.add(value)
            restore(node.left, 2 * value + 1)
            restore(node.right, 2 * value + 2)

        restore(root, 0)

    def find(self, target: int) -> bool:
        """Return True iff `target` is a value in the recovered tree."""
        return target in self.elements
# Your FindElements object will be instantiated and called as such:
# obj = FindElements(root)
# param_1 = obj.find(target)
| true
|
c2a295f3eb8f70ddfc47b38d610ea30535580115
|
Python
|
chuandong/python
|
/tcpser.py
|
UTF-8
| 547
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/python
from socket import *
from time import ctime
# Echo-with-timestamp TCP server: prefixes each received message with the
# current time and sends it back on the same connection.
HOST = '127.0.0.1'
PORT = 60536
ADDR = (HOST, PORT)
BUFSIZE = 1024

listener = socket(AF_INET, SOCK_STREAM, 0)
listener.bind(ADDR)
listener.listen(5)

while True:
    print('waiting connection ...', ADDR)
    conn, addr = listener.accept()
    # Serve this client until it closes the connection (empty recv).
    while True:
        data = conn.recv(BUFSIZE).decode()
        if not data:
            break
        conn.send(('[%s] %s' % (ctime(), data)).encode())
        print(data)
    conn.close()

listener.close()
| true
|
67349e2c227e95b44fc6bd31eed61af3355754b9
|
Python
|
malcolmmmm/sscanner
|
/sscanner.py
|
UTF-8
| 1,960
| 2.875
| 3
|
[] |
no_license
|
import requests
def main(social, infile, outfile):
    # Probe each username from `infile` against the chosen social network:
    # an HTTP 404 on the profile URL is taken to mean the name is free.
    # Free names are written to `outfile`, one per line.
    with open(infile,'r') as f:
        name=f.read().split('\n')
    possible_user = list()
    # NOTE(review): the loop starts at index 1, so the first line of the
    # wordlist is never checked - confirm whether line 0 is meant as a header.
    for i in range(1,len(name)):
        try:
            url = (social+name[i])
            r = requests.post(url)
            if r.status_code == 404:
                possible_user.append(str(name[i]))
                print('test['+str(i)+'] '+name[i]+' is not in use')
            else:
                print('test['+str(i)+'] '+name[i]+' is in use')
        # Bare except: any network/request error is swallowed and the name skipped.
        except:
            print('An error has occured trying next instance....')
    possible_user="\n".join(possible_user)
    with open(outfile,'w') as f:
        f.write(possible_user)
# Banner and interactive menu (the triple-quoted banner is printed verbatim).
print('''
===============================================================
# __ _ _ __ #
#/ _\ ___ ___(_) __ _| | / _\ ___ __ _ _ __ _ __ ___ _ __ #
#\ \ / _ \ / __| |/ _` | | \ \ / __/ _` | '_ \| '_ \ / _ \ '__|#
#_\ \ (_) | (__| | (_| | | _\ \ (_| (_| | | | | | | | __/ | #
#\__/\___/ \___|_|\__,_|_| \__/\___\__,_|_| |_|_| |_|\___|_| #
# By:Malcolm McDonough #
================================================================
Want to see if somebody owns an account on social media?
Select your option:
[1]FaceBook
[2]Twitter
[3]Instagram
''')
# Map the menu choice to the profile base URL; reprompt on bad input.
while True:
    social=input("Enter your choice:")
    if social == str(1) or social == str(2) or social == str(3):
        if social == str(1):
            social = 'https://facebook.com/'
        elif social == str(2):
            social = 'https://twitter.com/'
        elif social == str(3):
            social = 'https://instagram.com/'
        break
    else:
        print("you entered a incorrect option...")
# Input wordlist path (must end in .txt; reprompt otherwise).
while True:
    infile=input("Enter the worldlist file path(must be a .txt file:")
    if '.txt' not in infile:
        print('must be a .txt file')
    else:
        break
# Output report path (same .txt requirement).
while True:
    outfile=input("Enter the file where the info will be stored(must be a .txt file):")
    if '.txt' not in outfile:
        print('must be a .txt file')
    else:
        break
main(social,infile,outfile)
print('process complete check results in:'+outfile)
| true
|
e26229f020d99ab3574bc5ff877607dc8ecc3e5d
|
Python
|
hussainsultan/filedrop
|
/app/auth.py
|
UTF-8
| 1,692
| 2.6875
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
from flask import current_app, request, abort
from functools import wraps
from ipaddress import ip_address as ip_address_orig
from ipaddress import ip_network as ip_network_orig
def ip_address(address):
    """Build an ipaddress address object from a raw byte string.

    The backported ipaddress library requires text, so the bytes are
    decoded as UTF-8 first.
    """
    decoded = address.decode('utf8')
    return ip_address_orig(decoded)
def ip_network(address, strict=True):
    """Build an ipaddress network object from a raw byte string.

    The backported ipaddress library requires text, so the bytes are
    decoded as UTF-8 first; *strict* is passed through unchanged.
    """
    decoded = address.decode('utf8')
    return ip_network_orig(decoded, strict)
def is_authenticated(request):
    """Return whether this request's client may upload files.

    A custom auth hook (UPLOADS_CUSTOM_AUTH) takes precedence when it
    returns a non-None verdict; otherwise the client IP is matched
    against the UPLOADS_ALLOW_NETS whitelist.
    """
    custom_auth = current_app.config['UPLOADS_CUSTOM_AUTH']
    if custom_auth is not None:
        verdict = custom_auth(request)
        if verdict is not None:
            return verdict

    allowed_nets = current_app.config['UPLOADS_ALLOW_NETS']
    if allowed_nets is not None:
        remote = ip_address(request.remote_addr)
        return any(remote in ip_network(net) for net in allowed_nets)

    return False
def private(func):
    """Decorator restricting a view to authenticated (IP-allowed) clients.

    Unauthenticated requests are rejected with HTTP 403 via ``abort``.
    """
    @wraps(func)
    def decorated_view(*args, **kwargs):
        if not is_authenticated(request):
            abort(403)
        return func(*args, **kwargs)
    return decorated_view
| true
|
97d50632f36a4d2bb1ff37d209b62840e843a704
|
Python
|
wyjwl/lintcode
|
/35searchInsert.py
|
UTF-8
| 179
| 3.46875
| 3
|
[] |
no_license
|
def searchInsert(nums, target):
    """Return the index of *target* in sorted *nums*, or the index where
    it would be inserted to keep *nums* sorted.

    Equivalent to the original linear scan (first index with
    nums[i] >= target) but O(log n) via binary search; handles an empty
    list (returns 0) without a special case.
    """
    from bisect import bisect_left  # local import keeps the module's import block untouched
    return bisect_left(nums, target)
| true
|
dbc3c3c7d37ce98899f56370c484bf11703c55e3
|
Python
|
oxovu/point_clouds_optimization
|
/triang.py
|
UTF-8
| 1,375
| 2.890625
| 3
|
[] |
no_license
|
from scipy.spatial import Delaunay
import numpy as np
import matplotlib.pyplot as plt
import pcl
from mpl_toolkits.mplot3d import Axes3D
from plotXYZ import plot_bounds
def main():
    """Measure how well an optimized point cloud approximates the original
    by summing each original point's distance to the Delaunay triangulation
    of the optimized cloud."""
    # optimized cloud
    cloud1 = pcl.load('data/lamppost2.pcd')
    # original (unoptimized) cloud
    cloud2 = pcl.load('data/lamppost.pcd')
    points1 = cloud1.to_array()
    points2 = cloud2.to_array()
    # build the Delaunay triangulation of the optimized cloud
    tri = Delaunay(points1)
    x = []
    y = []
    z = []
    for point in points1:
        x.append(float(point[0]))
        y.append(float(point[1]))
        z.append(float(point[2]))
    x = np.array(x)
    y = np.array(y)
    z = np.array(z)
    # optional 3D visualization (disabled)
    # fig = plt.figure()
    # ax = fig.add_subplot(1, 1, 1, projection='3d')
    #
    # plot_bounds(ax, x, y, z)
    #
    # ax.plot_trisurf(x, y, z, triangles=tri.simplices)
    # plt.show()
    error = 0.0
    i = 0
    # accumulate the total error: per original point, the smallest absolute
    # distance to any simplex plane of the triangulation
    for point in points2:
        i += 1
        dist = tri.plane_distance(point)
        min_dist = min(abs(dist))
        print("step ", i, "\terror ", min_dist)
        error += min_dist
    print("\nerror sum ", error)
    print("\nerror sum/points number ", error/points1.size)
if __name__ == "__main__":
    main()
| true
|
db69d6b300b71d1687a8e9b9be743d4743312be8
|
Python
|
nixawk/hello-python3
|
/PEP/pep-008-02-A Foolish Consistency is the Hobgoblin of Little Minds.py
|
UTF-8
| 1,735
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/python
# A Foolish Consistency is the Hobgoblin of Little Minds
# One of Guido's key insights is that code is read much more often than
# it is written. The guidelines provided here are intended to improve
# the readability of code and make it consistent across the wide spectrum
# of Python code. As PEP 20 says, "Readability counts".
# A style guide is about consistency. Consistency with this style guide
# is important. Consistency with a project is more important. Consistency
# within one module or function is the most important.
# However, know when to be inconsistent -- sometimes style guide recommendations
# just aren't applicable. When in doubt, use your best judgment. Look at
# other examples and decide what looks best. And don't hesitate to ask!
# In particular: do not break backwards compatibility just to comply with this PEP !
# Some other good reasons to ignore a particular guideline:
# 1. When applying the guideline would make the code less readable,
# even for someone who is used to reading code that follows this PEP.
# 2. To be consistent with surrounding code that also breaks it (maybe
# for historic reasons) -- although this is also an opportunity to
# clean up someone else's mess (in true XP style).
# 3. Because the code in question predates the introduction of the
# guideline and there is no other reason to be modifying that code.
# 4. When the code needs to remain compatible with older versions of
# Python that don't support the feature recommended by the style guide.
## Referenced
# https://www.python.org/dev/peps/pep-0008/#a-foolish-consistency-is-the-hobgoblin-of-little-minds
# https://www.python.org/dev/peps/pep-0020
| true
|
d28760a2a14cfff28a6998f02de7b933e7fa0051
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p04029/s871525461.py
|
UTF-8
| 42
| 2.59375
| 3
|
[] |
no_license
|
# Sum of 1..n via the closed-form formula.  Integer arithmetic (n*(n+1) is
# always even) avoids the float rounding that int((1/2)*n*(n+1)) suffers
# for large n.
n = int(input())
print(n * (n + 1) // 2)
| true
|
ae009b186405f7807e23d0cd0d59a3ec0421cba3
|
Python
|
Jinmin-Goh/LeetCode
|
/Solved/0145/0145_recursive.py
|
UTF-8
| 824
| 3.453125
| 3
|
[] |
no_license
|
# Problem No.: 145
# Solver: Jinmin Goh
# Date: 20200126
# URL: https://leetcode.com/problems/binary-tree-postorder-traversal/
import sys
# recursive solution of postorder traversal
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node used by the traversal below."""
    def __init__(self, x):
        # Node payload; children start empty and are attached by the caller.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def postorderTraversal(self, root: TreeNode) -> List[int]:
        """Return the post-order (left, right, node) traversal values."""
        if not root:
            return []
        return self.find(root, [])
    def find(self, root: TreeNode, tempAns: List[int]) -> List[int]:
        """Recursively append post-order values of *root* to *tempAns*."""
        if not root:
            return tempAns
        for child in (root.left, root.right):
            if child:
                tempAns = self.find(child, tempAns)
        tempAns.append(root.val)
        return tempAns
| true
|
8367d9f7bd7d401da3034238a780338cd29687c0
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02263/s724543127.py
|
UTF-8
| 449
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
if __name__ == '__main__':
    # Evaluate a space-separated reverse-Polish expression from stdin.
    tokens = input().split(' ')
    stack = []
    for tok in tokens:
        if tok == '+':
            # Order irrelevant for integer addition.
            stack.append(stack.pop(-1) + stack.pop(-1))
        elif tok == '-':
            rhs = stack.pop(-1)
            stack.append(stack.pop(-1) - rhs)
        elif tok == '*':
            stack.append(stack.pop(-1) * stack.pop(-1))
        else:
            stack.append(int(tok))
    print(stack[0])
| true
|
d167dce4eb44dc270bc64abd6db3ec7f67f94d57
|
Python
|
yzwgithub/TeachPython
|
/AI/AI基础/class_23/class_23_04.py
|
UTF-8
| 715
| 3.59375
| 4
|
[] |
no_license
|
# 导入绘图模块
import matplotlib.pyplot as plt
# Build the data (GDP of the four Chinese municipalities)
GDP = [12406.8, 13908.57, 9386.87, 9143.64]
# Handle Chinese-character rendering (avoid mojibake in labels)
plt.rcParams['font.sans-serif'] = ['SimHei']
# Render the minus sign '-' correctly instead of a box
plt.rcParams['axes.unicode_minus'] = False
# Draw a horizontal bar chart
plt.barh(range(4), GDP, align='center', color='orange', alpha=0.5)
# Axis label
plt.xlabel('GDP')
# Chart title ("Municipality GDP comparison")
plt.title('直辖市GDP对比')
# Tick labels (Beijing, Shanghai, Tianjin, Chongqing)
plt.yticks(range(4), ['北京市', '上海市', '天津市', '重庆市'])
# Restrict the value-axis range
plt.xlim([5000, 17000])
# Annotate each bar with its value
for x, y in enumerate(GDP):
    plt.text(y + 100, x, '%s' % y, va='center')
# Show the figure
plt.show()
| true
|
28be9c865a8756194e23fcdb502c249fbef71e75
|
Python
|
ahwitz/conductAtHome
|
/beat.py
|
UTF-8
| 2,152
| 2.625
| 3
|
[] |
no_license
|
from __future__ import division
import pygame.midi
import music21
import time
from threading import Thread, Timer
from multiprocessing import Process, Queue
import serial
import re
import math
import sys
import traceback
import collections
def serialTempoWatcher(initTempo, eventQueue):
    """Watch an Arduino on /dev/ttyACM0 for conducting-beat gestures.

    Reads comma-separated sensor triples; when the sample sum swings from
    a positive running average to negative (sustained for
    thresholdThreshold samples), the time elapsed since the previous beat
    is pushed onto eventQueue.
    NOTE(review): `initTempo` is unused, and `shutdown`/`shift` are not
    defined in this module - presumably supplied elsewhere; confirm.
    Python 2 code (print statements appear later in this file).
    """
    ser = serial.Serial("/dev/ttyACM0", 9600)
    line = ""
    hold = False
    historyVector = [0] * 20
    waitThreshold = 20
    waitCount = waitThreshold + 1
    origTime = time.time()
    thresholdCount = 0
    thresholdThreshold = 2
    while(1):
        if shutdown == True:
            break
        try:
            for c in ser.read():
                if c == '\n' or c == '\r':
                    # A complete sample line must contain exactly three fields.
                    if len(re.findall(",", line)) != 2:
                        hold = True
                    if not hold:
                        #move all this to a separate function
                        line = line.split(",")
                        line = [int(x) for x in line]
                        curSum = sum(line)
                        historyVector = shift(1, historyVector)
                        historyAvg = sum(historyVector)/len(historyVector)
                        historyVector[len(historyVector) - 1] = curSum
                        if thresholdCount:
                            if curSum < 0:
                                thresholdCount += 1
                                if thresholdCount >= thresholdThreshold:
                                    waitCount = 0
                                    eventQueue.put(deltaTime)
                                    thresholdCount = 0
                        elif curSum < 0 and historyAvg > 0 and waitCount > waitThreshold:
                            newTime = time.time()
                            deltaTime = newTime - origTime
                            origTime = newTime
                            thresholdCount = 1
                    waitCount+=1
                    hold = True
                    line = ""
                else:
                    hold = False
                    line += str(c)
        except ValueError as e:
            #move this and IndexError to a separate function as well
            ex_type, ex, tb = sys.exc_info()
            #print "Encountered a ValueError", e
            line = ''
            traceback.print_tb(tb)
        except TypeError as e:
            ex_type, ex, tb = sys.exc_info()
            #print "Encountered a TypeError", e
            line = ''
            traceback.print_tb(tb)
class tempoCalculator(object):
    """Derives the current tempo (BPM) from the interval between beats.

    Python 2 class (uses print statements).
    """
    def __init__(self):
        print "Initiating calculator."
        # Timestamp of the most recent beat, seeded at construction.
        self.lastBeatTimestamp = time.time()
    def beat(self):
        # Tempo in BPM = 60 seconds / seconds since the previous beat.
        curBeat = time.time()
        print "current tempo:", 60 / (curBeat - self.lastBeatTimestamp)
        self.lastBeatTimestamp = curBeat
# Interactive loop: each Enter press marks a beat and prints the implied tempo.
calc = tempoCalculator()
while(1):
    a = raw_input("Beat?")
    calc.beat()
| true
|
a8d7c44cec679dfbe0f48bebb5ee3efa703bc4e0
|
Python
|
sunildkumar/ConnectFour
|
/connect_four.py
|
UTF-8
| 3,727
| 3.296875
| 3
|
[] |
no_license
|
import numpy as np
from Piece import Piece
from PlacementState import PlacementState
class ConnectFour:
    """6x7 Connect-Four board backed by a numpy array of Piece values.

    Rows grow upward: col_capacity[c] is both the number of pieces in
    column c and the row index the next piece will land on.
    """
    def __init__(self):
        self.COLS = 7
        self.ROWS = 6
        self.MIN_COL_INDEX = 0
        self.MAX_COL_INDEX = self.COLS-1
        self.MIN_COL_CAPACITY = 0
        self.MAX_COL_CAPACITY = 6
        self.WINNING_LENGTH = 4
        # Board starts entirely EMPTY; capacities start at zero.
        self.board = np.full((self.ROWS,self.COLS),Piece.EMPTY)
        self.col_capacity = np.zeros(self.COLS, dtype = 'I' )
    #getter for the np array that represents the board
    def get_board(self):
        return self.board
    def get_capacity(self):
        # Per-column piece counts (numpy array of unsigned ints).
        return self.col_capacity
    def get_piece(self,row,col):
        # Raw board access; no bounds checking here.
        return self.board[row][col]
    def board_to_string(self):
        """Render the board top row first, one character per Piece.value."""
        board_string = ""
        for row in reversed(range(self.ROWS)):
            row_string = ""
            for col in range(self.COLS):
                row_string = row_string + self.board[row][col].value
            board_string = board_string + "\n" + row_string
        return board_string
    def place_piece(self, col, piece):
        """Drop *piece* into *col*.

        Returns (PlacementState.fail, (-1, -1)) for an out-of-range or
        full column, else (PlacementState.success, (row, col)).
        """
        if col < self.MIN_COL_INDEX or col > self.MAX_COL_INDEX:
            return (PlacementState.fail,(-1,-1))
        elif self.col_capacity[col] == self.MAX_COL_CAPACITY:
            return (PlacementState.fail, (-1,-1))
        else: #place piece
            row = self.col_capacity[col]
            self.col_capacity[col] += 1
            self.board[row][col] = piece
            return (PlacementState.success, (row,col))
    def check_for_winner(self, row, col):
        """True if the piece just placed at (row, col) completes any 4-in-a-row."""
        if self.is_horizontal_win(row,col) or self.is_vertical_win(row,col) or self.is_left_diagonal_win(row,col) or self.is_right_diagonal_win(row,col):
            return True
        else:
            return False
    def is_horizontal_win(self, row, col):
        # NOTE(review): this varies the ROW while the column stays fixed,
        # i.e. it actually scans the vertical line through (row, col); the
        # method below does the opposite.  The pair together still covers
        # both directions, so check_for_winner remains correct.
        for i in range(4):
            piece_list = [(row-3 +i, col),(row-2 +i,col),(row-1 + i, col),(row + i, col)]
            if self.check_in_a_row(piece_list):
                return True
        return False
    def is_vertical_win(self, row, col):
        # NOTE(review): varies the COLUMN - scans the horizontal line
        # through (row, col); see the naming note above.
        for i in range(4):
            piece_list = [(row,col-3+i),(row,col-2+i),(row,col-1+i),(row,col+i)]
            if self.check_in_a_row(piece_list):
                return True
        return False
    def is_right_diagonal_win(self,row, col):
        # All four-cell windows on the "/"-to-"\" diagonal through (row, col).
        for i in range(4):
            piece_list = [(row-3+i,col-3+i),(row-2+i,col-2+i),(row-1+i,col-1+i),(row+i,col+i)]
            if self.check_in_a_row(piece_list):
                return True
        return False
    def is_left_diagonal_win(self,row, col):
        # All four-cell windows on the opposite diagonal through (row, col).
        for i in range(4):
            piece_list = [(row+3-i,col-3+i),(row+2-i,col-2+i),(row+1-i,col-1+i),(row-i,col+i)]
            if self.check_in_a_row(piece_list):
                return True
        return False
    def check_in_a_row(self,piece_list):
        """True if all four coordinates are on-board and hold the same piece.

        Safe against false EMPTY wins only because every window passed in
        includes the just-placed (non-empty) piece.
        """
        #check that all pieces are valid
        for coords in piece_list:
            if coords[0] >= 0 and coords[0]<self.ROWS and coords[1] >= 0 and coords[1] <self.COLS:
                pass #piece if good
            else:
                return False
        piece_set = set()
        for piece in piece_list:
            piece_set.add(self.get_piece(piece[0],piece[1]).value)
        return len(piece_set) ==1
    #only to be called after checking for a win as the board can be full with a winning position
    def is_tie(self):
        """True when every column is full."""
        for index in self.col_capacity:
            if index == self.MAX_COL_CAPACITY:
                pass
            else:
                return False
        return True
| true
|
b5f811b938c8fd4f6f85d4aeb99f9ed1986a729e
|
Python
|
rot11/nginx_logs_analyzer
|
/timeX.py
|
UTF-8
| 1,186
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# time.py
#
# Copyright 2014 Stazysta <Stazysta@STAZYSTA-KOMP>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# 14/Jul/2014:16:05:15 +0200
#
import time
def nextHour(secs):
    """Return the UTC hour-of-day (0-23) for epoch timestamp *secs*.

    An earlier, commented-out version scanned forward second by second;
    reading the hour field of gmtime directly is equivalent.
    """
    return time.gmtime(secs)[3]
def main():
    # Parse a fixed sample nginx-log timestamp (d/b/Y:H:M:S) and print its
    # local-epoch seconds.  NOTE(review): Python 2 print statement - this
    # module predates Python 3.
    t = time.strptime('14/Jul/2014:16:05:15', '%d/%b/%Y:%H:%M:%S')
    print time.mktime(t)
    #print nextHour(time.time())
    return 0
if __name__ == '__main__':
main()
| true
|
511bdb5f275d5a91f36e6409fe8a837db197c293
|
Python
|
dsdshcym/LeetCode-Solutions
|
/algorithms/set_matrix_zeroes.py
|
UTF-8
| 775
| 3.109375
| 3
|
[] |
no_license
|
#+TAG: NEEDS_IMPROVE
class Solution(object):
    def setZeroes(self, matrix):
        """Zero out every row and column that contains a 0, in place.

        :type matrix: List[List[int]]
        :rtype: void Do not return anything, modify matrix in-place instead.

        Performance fix: the original rebuilt set(zero_row)/set(zero_column)
        on every cell of the write loop, making it O(n*m*k); the sets are
        now built once up front.
        """
        if (matrix == []):
            return matrix
        n_rows = len(matrix)
        n_cols = len(matrix[0])
        # First pass: record which rows/columns contain a zero.
        zero_rows = set()
        zero_cols = set()
        for i in range(n_rows):
            for j in range(n_cols):
                if matrix[i][j] == 0:
                    zero_rows.add(i)
                    zero_cols.add(j)
        # Second pass: clear every marked row and column.
        for i in range(n_rows):
            for j in range(n_cols):
                if i in zero_rows or j in zero_cols:
                    matrix[i][j] = 0
# Quick manual check (NOTE(review): Python 2 print statement below).
m = [[0, 1], [1, 0]]
t = Solution()
t.setZeroes(m)
print m
| true
|
dd904c643ac6aa94aab28a356e882b8986335e4a
|
Python
|
speedbug78/BlueOS
|
/Tools/GUI/BlueOS_tasks.py
|
UTF-8
| 1,204
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
import BlueOS_schedule
import BlueOS_timeline
import BlueOS_support
import random
# Container class to hold task related information
class Tasks:
    """Per-task metadata plus the derived schedule and its timeline view."""
    def __init__(self, canvas):
        self.task_info = {}
        self.task_schedule = []
        self.timeline_block = BlueOS_timeline.Timeline_Block(canvas)
    def update_schedule(self):
        """Recompute the schedule from task_info and redraw the timeline."""
        self.task_schedule = BlueOS_schedule.calc_schedule(self.task_info)
        self.timeline_block.draw_timeline(self.task_schedule, self.task_info)
    def add_task(self, name, info):
        """Register *info* under *name*, tint it randomly, refresh all views."""
        self.task_info[name] = info
        color = self.random_color()
        info["color"] = color
        self.update_schedule()
        ram_size = info["RAM"]
        flash_size = info["FLASH"]
        BlueOS_support.flash_block.add_item(name, 0, flash_size + "K", color)
        BlueOS_support.ram_block.add_item(name, 0, ram_size + "K", color)
        BlueOS_support.flash_block.update()
        BlueOS_support.ram_block.update()
    def random_color(self):
        """Return a random colour as a '#rrggbb' hex string."""
        channels = tuple(random.randint(0, 255) for _ in range(3))
        return "#%02x%02x%02x" % channels
| true
|
b4accf31bf3d5ca0e1e590ae104ffd1d4ad3c187
|
Python
|
rufi156/kryptoS52019-2020
|
/8elgamal/elgamal.py
|
UTF-8
| 5,128
| 2.5625
| 3
|
[] |
no_license
|
import random
import argparse, sys, os
# File names used by the CLI sub-commands below.
GEN = "elgamal.txt"      # domain parameters: prime p and generator g
PRIV = "private.txt"     # private key file (p, g, x)
PUB = "public.txt"       # public key file (p, g, h = g^x mod p)
PLAIN = "plain.txt"      # plaintext input for encryption
CRYPTO = "crypto.txt"    # ciphertext output / decryption input
DECRYPT = "decrypt.txt"  # recovered plaintext output
MSG = "message.txt"      # message to sign / verify
SIG = "signature.txt"    # signature pair (r, s)
VER = "verify.txt"       # verification verdict output
def NWD(a, b):
    """Greatest common divisor ("NWD" in Polish), iterative Euclid."""
    while b != 0:
        a, b = b, a % b
    return a
def modInverse(a, mod):
    """Modular inverse of *a* modulo *mod* via the extended Euclidean
    algorithm.

    Assumes gcd(a, mod) == 1; mod == 1 yields 0 by convention.
    """
    original_mod = mod
    prev_coeff, coeff = 0, 1
    if mod == 1:
        return 0
    while a > 1:
        quotient = a // mod
        a, mod = mod, a % mod
        coeff, prev_coeff = prev_coeff, coeff - quotient * prev_coeff
    if coeff < 0:
        coeff += original_mod
    return coeff
def encrypt(p, g, B, strMsg):
m = int(strMsg.encode().hex(), 16)
k = random.randint(0, p)
gk = pow( g, k, p )
Bk = m*pow( B, k, p)
encryptedStr = str(gk) + ' ' + str(Bk)
return encryptedStr
def decrypt(p, b, cipher):
    """Recover the plaintext string from an `encrypt` output.

    *cipher* is "c1 c2": the shared secret c1^b mod p divides c2 back to
    the message integer, which is then hex-decoded to text.
    """
    parts = cipher.split()
    shared = pow(int(parts[0]), b, p)
    m = int(parts[1]) // shared
    return bytes.fromhex(hex(m)[2:]).decode()
def sigGen(p, g, b, m):
    """Produce an ElGamal signature (r, s) on message integer *m* with
    private key *b* over group (p, g)."""
    # Draw an ephemeral k coprime with p-1 (required so k is invertible).
    while True:
        k = random.randint(1, p - 1)
        if NWD(k, p - 1) == 1:
            break
    r = pow(g, k, p)
    s = modInverse(k, p - 1) * (m - b * r) % (p - 1)
    return r, s
def sigVer(p, g, B, r, x, m):
    """Verify an ElGamal signature (r, s=x) on message integer *m* with
    public key B over group (p, g).

    Accepts iff g^m == B^r * r^s (mod p).
    """
    v1 = pow(g, m, p)
    # Bug fix: the product must be reduced mod p as a whole.  The original
    # only reduced each factor (`pow(B,r,p)%p * pow(r,x,p)%p`), so any
    # valid signature whose factor product exceeded p was rejected.
    v2 = (pow(B, r, p) * pow(r, x, p)) % p
    return v1 == v2
def genKeys(genFile, outPrivFile, outPubFile):
    """Read (p, g) from *genFile*, draw a random private exponent x, and
    write the private key (p, g, x) and public key (p, g, g^x mod p)."""
    with open(genFile, "r") as params:
        param_lines = params.readlines()
    p = int(param_lines[0])
    g = int(param_lines[1])
    x = random.randint(1, p)
    h = pow(g, x, p)
    with open(outPrivFile, "w") as priv:
        priv.write('%s\n%s\n%s' % (str(p), str(g), str(x)))
    with open(outPubFile, "w") as pub:
        pub.write('%s\n%s\n%s' % (str(p), str(g), str(h)))
def encryptMsg(pubFile, msgFile, outFile):
    """Encrypt the first line of *msgFile* under the public key in
    *pubFile* and write the ciphertext to *outFile*.

    Exits with an error if the message does not satisfy m < p.
    """
    with open(pubFile, "r") as pub:
        pub_lines = pub.readlines()
    p = int(pub_lines[0])
    g = int(pub_lines[1])
    h = int(pub_lines[2])
    with open(msgFile, "r") as plain:
        message = str(plain.readlines()[0])
    # Bug fix: ElGamal requires the message *integer* m to be below p; the
    # original compared the character count len(message) to p, which does
    # not bound m (each character contributes a byte, i.e. a factor 256).
    if int(message.encode().hex(), 16) >= p:
        sys.exit("ERROR: m<p test failed.")
    cipher = encrypt(p, g, h, message)
    with open(outFile, "w") as out:
        out.write('%s' % (str(cipher)))
def decryptMsg(privFile, msgFile, outFile):
    """Decrypt the ciphertext in *msgFile* with the private key from
    *privFile* and write the plaintext to *outFile*."""
    with open(privFile, "r") as priv:
        priv_lines = priv.readlines()
    p = int(priv_lines[0])
    x = int(priv_lines[2])
    with open(msgFile, "r") as crypt:
        ciphertext = str(crypt.readlines()[0])
    plaintext = decrypt(p, x, ciphertext)
    with open(outFile, "w") as out:
        out.write('%s' % (plaintext))
def genSignature(privFile, msgFile, outFile):
    """Sign the first line of *msgFile* with the private key from
    *privFile*; write the signature pair (r, s) to *outFile*."""
    with open(privFile, "r") as priv:
        priv_lines = priv.readlines()
    p = int(priv_lines[0])
    g = int(priv_lines[1])
    x = int(priv_lines[2])
    with open(msgFile, "r") as msg:
        content = int(msg.readlines()[0].encode().hex(), 16)
    r, s = sigGen(p, g, x, content)
    with open(outFile, "w") as sig:
        sig.write('%s\n%s' % (str(r), str(s)))
def verifySig(pubFile, msgFile, sigFile, outFile):
    """Check the signature in *sigFile* against the first line of
    *msgFile* with the public key from *pubFile*; print and record the
    verdict (output text kept in the original Polish)."""
    with open(pubFile, "r") as pub:
        pub_lines = pub.readlines()
    p = int(pub_lines[0])
    g = int(pub_lines[1])
    y = int(pub_lines[2])
    with open(msgFile, "r") as msg:
        content = int(msg.readlines()[0].encode().hex(), 16)
    with open(sigFile, "r") as sig:
        sig_lines = sig.readlines()
    r = int(sig_lines[0])
    s = int(sig_lines[1])
    isvalid = sigVer(p, g, y, r, s, content)
    print("Weryfikacja: %s" % isvalid)
    with open(outFile, "w") as verify:
        verify.write('Weryfikacja: %s' % isvalid)
def main(args):
    """Dispatch to the sub-command whose CLI flag was set."""
    mode_flags = [name for name, value in vars(args).items() if value]
    if "k" in mode_flags:
        genKeys(GEN, PRIV, PUB)
    elif "e" in mode_flags:
        encryptMsg(PUB, PLAIN, CRYPTO)
    elif "d" in mode_flags:
        decryptMsg(PRIV, CRYPTO, DECRYPT)
    elif "s" in mode_flags:
        genSignature(PRIV, MSG, SIG)
    elif "v" in mode_flags:
        verifySig(PUB, MSG, SIG, VER)
if __name__ == "__main__":
    # CLI: exactly one mode flag is required (mutually exclusive group).
    parser = argparse.ArgumentParser(description="Encode/decode/sign/verify elgemal")
    actionType = parser.add_mutually_exclusive_group(required=True)
    actionType.add_argument("-k", action="store_true", help="generate pub,priv keys")
    actionType.add_argument("-e", action="store_true", help="encrypt")
    actionType.add_argument("-d", action="store_true", help="decrypt")
    actionType.add_argument("-s", action="store_true", help="sign")
    actionType.add_argument("-v", action="store_true", help="verify signature")
    args = parser.parse_args()
    main(args)
| true
|
42070bbc859e0b5da6a873fb565df36b3b71b785
|
Python
|
cms-sw/cms-bot
|
/generate-json-performance-charts
|
UTF-8
| 4,593
| 2.578125
| 3
|
[] |
no_license
|
#! /usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
from os import listdir
from os import path
import re
import json
#------------------------------------------------------------------------------------------------------------
# This script reads a list of the workflows, steps and parameters for which you want to see the graphs
# It generates a json file with the correct structure and links to each graph, this json file is used
# to create the visualization
#------------------------------------------------------------------------------------------------------------
def get_wfs_ordered(base_dir):
    """List the entries under *base_dir* whose names start with digits,
    ordered by their numeric prefix (the part before the first '_')."""
    numbered = {}
    digit_prefix = re.compile("^[0-9]+.")
    for entry in listdir(base_dir):
        if digit_prefix.match(entry):
            numbered[float(re.sub('_.*$', '', entry))] = entry
    return [numbered[key] for key in sorted(numbered.keys())]
def add_images_to_step(wf, step):
    """Attach to *step* the known result images found in its directory,
    each entry carrying a name and a browsable URL."""
    imgs = []
    step_dir = '%s/%s/%s' % (BASE_DIR, wf['wf_name'], step['step_name'])
    for img_name in listdir(step_dir):
        if img_name in RESULT_FILE_NAMES:
            entry = {'name': img_name}
            entry['url'] = '%s/%s/%s/%s' % (BASE_URL, wf['wf_name'], step['step_name'], entry['name'])
            imgs.append(entry)
            print(entry['name'])
    step['imgs'] = imgs
def add_steps_to_wf(wf):
    """Populate wf['steps'] with one entry (and its images) per step
    sub-directory of the workflow, in sorted order."""
    steps = []
    wf_dir = '%s/%s' % (BASE_DIR, wf['wf_name'])
    for step_name in sorted(listdir(wf_dir)):
        if path.isdir('%s/%s/%s' % (BASE_DIR, wf['wf_name'], step_name)):
            step = {'step_name': step_name}
            add_images_to_step(wf, step)
            steps.append(step)
            print(step_name)
    wf['steps'] = steps
def get_workflows():
    """Build the full workflow -> steps -> images structure from disk,
    skipping any 'bootstrap' entries."""
    workflows = []
    for wf_name in get_wfs_ordered(BASE_DIR):
        if path.isdir('%s/%s/' % (BASE_DIR, wf_name)) and 'bootstrap' not in wf_name:
            print('Adding %s' % wf_name)
            wf = {'wf_name': wf_name}
            add_steps_to_wf(wf)
            workflows.append(wf)
            print()
    return workflows
def print_workflows(wfs):
    """Dump the workflow/step/image structure to stdout for debugging."""
    for workflow in wfs:
        print(workflow['wf_name'])
        for step in workflow['steps']:
            print('\t %s' % step['step_name'])
            for image in step['imgs']:
                print(image)
def add_workflow(results, wf_name):
    """Return the workflow entry named *wf_name* from *results*, creating
    and registering a fresh one if absent."""
    for existing in results['wfs']:
        if existing['wf_name'] == wf_name:
            return existing
    created = {'wf_name': wf_name}
    results['wfs'].append(created)
    return created
def add_step(workflow, step_name):
    """Return the step named *step_name* in *workflow*, creating the
    'steps' list (when missing or falsy, as the original did) and/or the
    step entry on demand."""
    if not workflow.get('steps'):
        workflow['steps'] = []
    for existing in workflow['steps']:
        if existing['step_name'] == step_name:
            return existing
    created = {'step_name': step_name}
    workflow['steps'].append(created)
    return created
def add_param(step, param_name):
    """Return the image/param entry named *param_name* in *step*,
    creating the 'imgs' list and/or the entry on demand."""
    if not step.get('imgs'):
        step['imgs'] = []
    for existing in step['imgs']:
        if existing['name'] == param_name:
            return existing
    created = {'name': param_name}
    step['imgs'].append(created)
    return created
def add_url_to_param(workflow, step, param):
    """Fill in param['url'] by substituting workflow/step/param into the
    Graphite render-URL template and percent-escaping '+'."""
    step_number = step['step_name'].split('_')[0]
    url = BASE_URL.replace('WORKFLOW', workflow['wf_name'])
    url = url.replace('STEP', step_number).replace('PARAM', param['name'])
    url = url.replace('+', '%2B')
    print(url)
    param['url'] = url
#-----------------------------------------------------------------------------------
#---- Parser Options
#-----------------------------------------------------------------------------------
parser = OptionParser(usage="usage: %prog PLOTS_LIST \n PLOTS_LIST list of plots that you want to visualize")
(options, args) = parser.parse_args()
#-----------------------------------------------------------------------------------
#---- Start
#-----------------------------------------------------------------------------------
if (len(args)<1):
    print('you need to specify a list of plots')
    parser.print_help()
    exit()
WF_LIST = args[0]
# Graphite render-URL template; WORKFLOW/STEP/PARAM are substituted per plot.
GRAPH_PARAMS = '&from=-15days&fontBold=true&fontSize=12&lineWidth=5&title=PARAM&yMin=0'
BASE_URL = 'https://cmsgraph.cern.ch/render?target=IBRelVals.slc6_amd64_gcc481.CMSSW_7_1_X.WORKFLOW.STEP.PARAM&height=800&width=800%s'%GRAPH_PARAMS
result = {}
lines = open(WF_LIST, "r").readlines()
result['wfs'] = []
# Each non-comment line is "<workflow> <step> <param>"; build the nested
# wfs -> steps -> imgs structure with one render URL per parameter.
for l in lines:
    if l.startswith("#"):
        continue
    else:
        l = l.replace('\n','')
        parts = l.split(' ')
        wf_name = parts[0]
        step_name = parts[1]
        param_name = parts[2]
        workflow = add_workflow(result,wf_name)
        step = add_step(workflow,step_name)
        param = add_param(step,param_name)
        add_url_to_param(workflow,step,param)
print(result)
# Persist the structure consumed by the visualization front-end.
out_json = open("plots_summary.json", "w")
json.dump(result,out_json,indent=4)
out_json.close()
| true
|
14c596f2216e6ca752184737af2e51620dc756b3
|
Python
|
DesLandysh/My100daysPath
|
/009_of_100/day_9_Blind_Bit_program_for_one_PC_multiusers.py
|
UTF-8
| 616
| 3.921875
| 4
|
[] |
no_license
|
# blind bit program
def clear():
    """Scroll previous output off-screen with a run of blank lines."""
    print("\n" * 20)
print("Welcome to the blind bit program")

# Collect one bid per person; clear the screen between bidders so they
# cannot see each other's bids.
bids = {}
more_bidders = True
while more_bidders:
    name = input("Enter your name: ").capitalize()
    amount = int(input("What's your bit? $"))
    bids[name] = amount
    if input("Is there another person to bit? yes/no :").lower() == "yes":
        clear()
    else:
        more_bidders = False

# Find the highest bidder.
winner = ""
high = 0
for bidder in bids:
    if bids[bidder] > high:
        high = bids[bidder]
        winner = bidder
clear()
print(f"\nAnd the winner is {winner} who bit ${high}.")
| true
|
6d5f2490fef9894328193b887f7571ee604fc05a
|
Python
|
Dershowitz011/randphone
|
/randphone.py
|
UTF-8
| 2,113
| 3.71875
| 4
|
[] |
no_license
|
"""Generate a random valid US phone number."""
import random
def phone():
    """Generate a random NANP-valid US phone number as '(NPA) NXX-XXXX'.

    Rejection-samples 10 random digits until every North American
    Numbering Plan constraint below holds.
    """
    valid = False
    while not valid:
        # Generate a random candidate and assume it is valid.
        number = [random.randint(0, 9) for i in range(10)]
        valid = True
        # Area code is NXX: N is any digit 2 through 9.
        if (number[0] == 0) or (number[0] == 1):
            valid = False
        # Easily recognizable codes (second and third digit equal) are
        # reserved for special services, e.g. 888 toll-free.  This also
        # covers the N11 service codes.
        if number[1] == number[2]:
            valid = False
        # N9X expansion codes are reserved for numbering-plan expansion.
        if number[1] == 9:
            valid = False
        # 37X and 96X blocks are set aside by the INC for unanticipated
        # purposes.  (Bug fix: the original wrote `valid == False` - a
        # comparison, not an assignment - so this rule was never enforced.)
        if ((number[0] == 3 and number[1] == 7) or
                (number[0] == 9 and number[1] == 6)):
            valid = False
        # Exchange code is NXX: first digit 2 through 9.
        if (number[3] == 0) or (number[3] == 1):
            valid = False
        # Second and third digits of the exchange cannot both be 1.
        if (number[4] == 1) and (number[5] == 1):
            valid = False
    return "({}) {}-{}".format(
        "".join([str(d) for d in number[:3]]),
        "".join([str(d) for d in number[3:6]]),
        "".join([str(d) for d in number[6:]]))
| true
|
2eeec35929bdf5b4263bed87cfd9d9090e1454e9
|
Python
|
TimeWz667/LitReviewer
|
/reviewer/summary.py
|
UTF-8
| 986
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
from io import BytesIO
import matplotlib.pyplot as plt
from wordcloud import *
__author__ = 'TimeWz667'
# Public API: only the word-cloud builder is exported.
__all__ = ['make_word_cloud']
# Generic report-section words filtered out of word clouds in addition to
# the wordcloud package's built-in STOPWORDS.
EscapeWords = ['background','methods','results','conclusions','included',
               'used','used','using','use','compared','may','associated',
               'will','conducted','however','identified','will','different',
               'difference','can','provide','based','approach','across','factors',
               'two','potential', 'many', 'much', 'more', 'could', 'would']
def make_word_cloud(txt):
    """Render *txt* as a word-cloud PNG and return it as a BytesIO,
    rewound to the start."""
    stopwords = set(STOPWORDS) | set(EscapeWords)
    wc = WordCloud(max_font_size=50, max_words=100, stopwords=stopwords).generate(txt)
    buffer = BytesIO()
    wc.to_image().save(buffer, 'png')
    buffer.seek(0)
    return buffer
def plot_to_file():
    """Save the current matplotlib figure to an in-memory PNG and return
    the rewound BytesIO buffer."""
    buffer = BytesIO()
    plt.savefig(buffer, format='png')
    buffer.seek(0)
    return buffer
def make_trend_plot(years):
    # Unfinished stub: plt.bar() is called without data, so this raises at
    # runtime.  NOTE(review): `years` is unused - presumably a bar chart of
    # publications per year was intended but never implemented.
    plt.bar()
    return
| true
|
f5266c85c7fc1fea8c0138ce081e177f3d351ac2
|
Python
|
williamroque/profero
|
/src/pagamentos_garantias_package/profero/presentation/slides/dados-operacao/slide.py
|
UTF-8
| 8,402
| 2.625
| 3
|
[] |
no_license
|
from pptx.util import Cm, Pt
from pptx.enum.shapes import MSO_SHAPE
from pptx.enum.text import PP_ALIGN, MSO_ANCHOR
from pptx.dml.color import RGBColor
from profero.framework.presentation.slide import Slide as FSlide
from profero.framework.presentation.row import Row
from profero.framework.presentation.cell import Cell
from profero.presentation.slides.common.header import HeaderRow
from profero.presentation.slides.common.note import NoteCell
import re
NOTE = """
➢ Valores com base em {}
""".strip()
class TableCell(Cell):
    """Cell that renders the two-series operation-data table on the slide.

    Builds a 13-row x 3-column pptx table: a header row plus twelve data
    rows added via add_table_row(). `props` is keyed by series number
    (as a string) and holds per-series financial attributes.
    """

    def __init__(self, inputs, slide_width, props, parent_row):
        super().__init__(
            inputs,
            {
                'width': slide_width,
                'x_offset': 0
            },
            'table', 0,
            parent_row
        )
        # Next data row to fill (header row 0 is written in render()).
        self.row_count = 0
        self.props = props

    def render(self, slide):
        """Create the table shape on *slide* and populate all rows."""
        table_width = Cm(13.06)
        table_height = Cm(11.18)
        # Nudges the vertically-centered table up by 1 cm.
        y_correction = Cm(-1)
        # The two series are consecutive numbers; props is keyed by string.
        primeira_serie = str(self.inputs.get('primeira-serie'))
        segunda_serie = str(self.inputs.get('primeira-serie') + 1)
        saldo_primeira = self.props[primeira_serie]['saldo-devedor']
        saldo_segunda = self.props[segunda_serie]['saldo-devedor']
        # Center the table horizontally in the cell and vertically in the row.
        self.table = slide.shapes.add_table(
            13, 3,
            self.x_offset + self.width / 2 - table_width / 2,
            self.parent_row.y_offset +\
            self.parent_row.height / 2 -\
            table_height / 2 +\
            y_correction,
            int(table_width), int(table_height)
        ).table
        # Header row: dark-blue fill, white bold Calibri 12pt.
        header_cell = self.table.cell(0, 0)
        self.set_text(
            header_cell,
            'Dados',
            alignment=PP_ALIGN.CENTER,
            font_family='Calibri',
            font_size=Pt(12),
            bold=True,
            color=RGBColor(255, 255, 255)
        )
        self.set_fill_color(header_cell, RGBColor(0x16, 0x36, 0x5C))
        primeira_serie_cell = self.table.cell(0, 1)
        self.set_text(
            primeira_serie_cell,
            '{}ª Série'.format(primeira_serie),
            alignment=PP_ALIGN.CENTER,
            font_family='Calibri',
            font_size=Pt(12),
            bold=True,
            color=RGBColor(255, 255, 255)
        )
        self.set_fill_color(primeira_serie_cell, RGBColor(0x16, 0x36, 0x5C))
        segunda_serie_cell = self.table.cell(0, 2)
        self.set_text(
            segunda_serie_cell,
            '{}ª Série'.format(segunda_serie),
            alignment=PP_ALIGN.CENTER,
            font_family='Calibri',
            font_size=Pt(12),
            bold=True,
            color=RGBColor(255, 255, 255)
        )
        self.set_fill_color(segunda_serie_cell, RGBColor(0x16, 0x36, 0x5C))
        # Data rows, one attribute per row, values for each series.
        self.add_table_row(
            'IF',
            self.props[primeira_serie]['instrumento-financeiro'],
            self.props[segunda_serie]['instrumento-financeiro']
        )
        self.add_table_row(
            'ISIN',
            self.props[primeira_serie]['isin'],
            self.props[segunda_serie]['isin']
        )
        self.add_table_row('Série', primeira_serie, segunda_serie)
        self.add_table_row(
            'Cedente',
            self.props[primeira_serie]['cedente'],
            self.props[segunda_serie]['cedente']
        )
        self.add_table_row(
            'Correção',
            self.props[primeira_serie]['correcao'],
            self.props[segunda_serie]['correcao']
        )
        # Percentages and currency amounts use Brazilian decimal commas,
        # hence the '.' -> ',' replacement on the formatted strings.
        self.add_table_row(
            'Juros',
            '{}%'.format(
                self.props[primeira_serie]['juros'] * 100
            ).replace('.', ','),
            '{}%'.format(
                self.props[segunda_serie]['juros'] * 100
            ).replace('.', ',')
        )
        self.add_table_row(
            'Data de Emissão',
            self.props[primeira_serie]['data-emissao'],
            self.props[segunda_serie]['data-emissao']
        )
        self.add_table_row(
            'Vencimento',
            self.props[primeira_serie]['vencimento'],
            self.props[segunda_serie]['vencimento']
        )
        self.add_table_row('Subordinação', 'Sênior', 'Subordinada')
        # Amounts are shown in millions of BRL ("MM").
        self.add_table_row(
            'Valor de Emissão',
            'R$ {:.2f} MM'.format(
                self.props[primeira_serie]['valor-emissao'] / 1e+6
            ).replace('.', ','),
            'R$ {:.2f} MM'.format(
                self.props[segunda_serie]['valor-emissao'] / 1e+6
            ).replace('.', ',')
        )
        self.add_table_row(
            'Saldo Devedor do CRI',
            'R$ {:.2f} MM'.format(
                saldo_primeira / 1e+6
            ).replace('.', ','),
            'R$ {:.2f} MM'.format(
                saldo_segunda / 1e+6
            ).replace('.', ',')
        )
        # Final row spans both value columns (merge=True); 'saldo-cri' is the
        # combined balance stored on inputs by the Slide constructor.
        self.add_table_row(
            'Saldo dos CRI',
            'R$ {:.2f} MM'.format(
                self.inputs.get('saldo-cri') / 1e+6
            ).replace('.', ','),
            merge=True
        )

    def add_table_row(self, header, value_1, value_2=None, merge=False):
        """Append one data row (header + up to two values) to the table.

        When merge=True the two value cells are merged into one.
        Header and first value are rendered bold.
        """
        header_cell = self.table.cell(self.row_count + 1, 0)
        self.set_text(
            header_cell,
            str(header),
            alignment=PP_ALIGN.LEFT,
            font_family='Calibri',
            font_size=Pt(12),
            color=RGBColor(0, 0, 0)
        )
        value_1_cell = self.table.cell(self.row_count + 1, 1)
        self.set_text(
            value_1_cell,
            str(value_1),
            alignment=PP_ALIGN.CENTER,
            font_family='Calibri',
            font_size=Pt(12),
            color=RGBColor(0, 0, 0)
        )
        value_2_cell = self.table.cell(self.row_count + 1, 2)
        if value_2 != None:
            self.set_text(
                value_2_cell,
                str(value_2),
                alignment=PP_ALIGN.CENTER,
                font_family='Calibri',
                font_size=Pt(12),
                color=RGBColor(0, 0, 0)
            )
        if merge:
            value_1_cell.merge(value_2_cell)
        header_cell.text_frame.paragraphs[0].runs[0].font.bold = True
        value_1_cell.text_frame.paragraphs[0].runs[0].font.bold = True
        self.row_count += 1
class Slide(FSlide):
    """'Características da operação' slide: header, data table and footnote.

    Lays out three stacked rows -- a header (25% of the slide height), the
    operation-data table, and a date footnote -- and stores the combined
    CRI balance on `inputs` for the TableCell to read.
    """

    def __init__(self, inputs, index, props, table_of_contents_slide, parent_presentation):
        super().__init__(
            inputs,
            'dados-operacao', 6,
            index,
            None,
            parent_presentation,
            'Características da operação',
            table_of_contents_slide
        )
        # Two consecutive series; props is keyed by the series number string.
        primeira_serie = str(self.inputs.get('primeira-serie'))
        segunda_serie = str(self.inputs.get('primeira-serie') + 1)
        saldo_primeira = props[primeira_serie]['saldo-devedor']
        saldo_segunda = props[segunda_serie]['saldo-devedor']
        # Cache the balances (and their sum) on inputs for downstream cells.
        self.inputs.update('saldo-primeira', saldo_primeira)
        self.inputs.update('saldo-segunda', saldo_segunda)
        self.inputs.update('saldo-cri', saldo_primeira + saldo_segunda)
        slide_height = parent_presentation.presentation.slide_height
        slide_width = parent_presentation.presentation.slide_width
        note_height = Cm(2.04)
        # Row 0: standard header band.
        header_row = HeaderRow(
            inputs,
            {
                'height': .25 * slide_height,
                'y_offset': Cm(0)
            }, 0,
            self.title,
            slide_width, slide_height,
            self
        )
        self.add_row(header_row)
        # Row 1: the data table fills the remainder minus the footnote band.
        table_row = Row(
            inputs,
            {
                'height': .75 * slide_height - note_height,
                'y_offset': header_row.y_offset + header_row.height
            },
            'table', 1,
            self
        )
        table_cell = TableCell(inputs, slide_width, props, table_row)
        table_row.add_cell(table_cell)
        self.add_row(table_row)
        # Row 2: footnote with the data reference date.
        note_row = Row(
            inputs,
            {
                'height': note_height,
                'y_offset': table_row.y_offset + table_row.height
            },
            'note', 2,
            self
        )
        note_cell = NoteCell(
            inputs,
            slide_width,
            NOTE.format(props['date']),
            note_row
        )
        note_row.add_cell(note_cell)
        self.add_row(note_row)
| true
|
42c9f91b2c32d9cc14ca80bc51e7b3456c8b1cf5
|
Python
|
scarecrow1123/Advent-Of-Code
|
/2015/Day13/13.py
|
UTF-8
| 1,014
| 3.125
| 3
|
[] |
no_license
|
# Advent of Code 2015, day 13 (part 2): seating-happiness optimisation.
# Python 2 script (print statements); brute-forces all seat permutations.
import re
ip = open("13.txt").read()
lines = ip.splitlines()
# Happiness matrix arr[person][neighbour] -> points. "me" is the part-2
# extra guest with 0 happiness in both directions.
arr = {"Alice": {"me": 0}, "Bob": {"me": 0}, "Carol": {"me": 0}, "David": {"me": 0}, "Eric": {"me": 0}, "Frank": {"me": 0}, "George": {"me": 0}, "Mallory": {"me": 0}, "me": {"Alice": 0, "Bob": 0, "Carol": 0, "David": 0, "Eric": 0, "Frank": 0, "George": 0, "Mallory": 0}}
for line in lines:
    tokens = line.split(" ")
    # First number in the line is the happiness delta.
    # NOTE(review): pattern should be a raw string r"\d+"; works as-is
    # because "\d" is not a recognised string escape.
    point = re.findall("\d+", line)[0]
    if line.find("gain") != -1:
        point = int(point)
    elif line.find("lose") != -1:
        point = 0 - int(point)
    # Subject is the first token, object is the last word minus the period.
    arr[tokens[0]][tokens[-1].strip(".")] = point
lst = ["Alice", "Bob", "Carol", "David", "Eric", "Frank", "George", "Mallory", "me"]
from itertools import permutations
perm = permutations(lst)
arrangements = []
max_happ = 0
# Try every circular arrangement of the 9 guests; the %9 wraps the table.
for idx, j in enumerate(perm):
    arrangements.append(j)
    happiness = 0
    for i in j:
        left = j[(j.index(i)-1)%9]
        right = j[(j.index(i)+1)%9]
        happiness += arr[i][left] + arr[i][right]
    if happiness > max_happ:
        max_happ = happiness
print max_happ
| true
|
0922a86c318b54376b9fc4576c816b8eba9dc419
|
Python
|
sysboy/aoc
|
/2020/day12/part01.py
|
UTF-8
| 874
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
""" AOC day 12 """
from collections import deque
import sys

# Parse instructions: one action letter followed by an integer amount.
ins = []
with open(sys.argv[1]) as fd:
    for line in fd:
        ins.append([line[0], int(line[1:])])

# Ship heading as a rotating deque; the current heading is facing[0].
# L/R turns (multiples of 90 degrees) rotate the deque.
facing = deque(['E', 'S', 'W', 'N'], maxlen=4)
# location = [east-west, north-south]; x/y are the axis indices.
location = [0, 0]
x, y = 0, 1
# Map each compass heading to (axis index, sign of the displacement).
MOVES = {'N': (y, 1), 'S': (y, -1), 'E': (x, 1), 'W': (x, -1)}

for action, amount in ins:
    if action == 'L':
        facing.rotate(amount // 90)
    elif action == 'R':
        facing.rotate(-(amount // 90))
    # 'F' moves along the current heading; N/S/E/W move directly.
    heading = facing[0] if action != 'F' else facing[0]
    heading = facing[0] if action == 'F' else action
    if heading in MOVES:
        axis, sign = MOVES[heading]
        location[axis] += sign * amount
print(f'location = {location}, result = {abs(location[0]) + abs(location[1])}')
| true
|
96bf7e02291bc8ce157c88a487bb644437480d3d
|
Python
|
ImNaman/CL3
|
/A2/a2.py
|
UTF-8
| 896
| 3.21875
| 3
|
[] |
no_license
|
import threading
import xml.etree.ElementTree as et
def getdata(filename):
    """Parse the XML file *filename* and return its child texts as ints.

    Prints the root tag, each child tag/text pair, and the resulting list
    (Python 2 print statements). Assumes every direct child of the root
    holds an integer in its text.
    """
    xmltree=et.parse(filename)
    root= xmltree.getroot()
    print root.tag
    a=[]
    for child in root:
        a.append(int(child.text))
        print child.tag, child.text
    print a
    return a
def partition(first, last):
    """Partition the GLOBAL list `a` around a[first]; return the pivot's
    final index.

    Elements <= pivot end up left of the returned index, larger elements
    to the right. Mutates `a` in place.
    """
    pivot=a[first]
    i=first+1
    j=last
    while i<=j:
        # Scan right past elements <= pivot (the i<len(a) guard stops a
        # run-off when every remaining element is <= pivot).
        while i<len(a) and a[i]<=pivot:
            i+=1
        # Scan left past elements > pivot.
        while a[j]>pivot:
            j-=1
        if i<j:
            a[i], a[j]=a[j], a[i]
    # Drop the pivot into its final slot.
    a[first], a[j]=a[j], pivot
    return j
def quicksort(first, last):
    """Quicksort a[first..last] (the GLOBAL list `a`) in place.

    NOTE(review): after the plain recursive calls complete, this also
    spawns two threads that re-run quicksort on the same (already sorted)
    halves -- redundant work that grows exponentially with depth, and the
    threads mutate the shared global `a` without any locking. Looks like
    homework scaffolding; confirm intent before relying on it.
    """
    if first<=last:
        mid=partition(first, last)
        print threading.current_thread().getName(), "found mid at ", mid
        # Sequential recursion sorts both halves...
        quicksort(first, mid-1)
        quicksort(mid+1, last)
        # ...then threads repeat the same work on the sorted halves.
        t1=threading.Thread(target=quicksort, args=(first, mid-1))
        t2=threading.Thread(target=quicksort, args=(mid+1, last))
        t1.start()
        t2.start()
        t1.join()
        t2.join()
a=getdata("input.xml")
quicksort(0, len(a)-1)
print a
| true
|
c9396fe1e482df4ca62fbfbaa994d862376330dd
|
Python
|
sheh2431/CV_hw-2
|
/hw2_4/new_baseline.py
|
UTF-8
| 3,693
| 2.75
| 3
|
[] |
no_license
|
import os
import sys
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import torchvision.models as models
from torchvision import transforms
from torchvision.datasets import ImageFolder
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from torchvision.models import resnet18
def get_dataloader(folder,batch_size=32):
    """Build train/validation DataLoaders from an ImageFolder layout.

    Expects *folder* to contain 'train' and 'valid' subdirectories, one
    class per subfolder. Images are converted to tensors and normalized
    with the standard ImageNet mean/std. Training batches are shuffled;
    validation batches are not.

    Returns:
        (train_loader, test_loader) tuple of torch DataLoaders.
    """
    # Data preprocessing
    trans = transforms.Compose([
        transforms.ToTensor(), # [0, 255] -> [0.0, 1.0]
        # ImageNet normalization constants (matches the pretrained resnet18).
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    train_path, test_path = os.path.join(folder,'train'), os.path.join(folder,'valid')
    # Get dataset using pytorch functions
    train_set = ImageFolder(train_path, transform=trans)
    test_set = ImageFolder(test_path, transform=trans)
    train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False)
    print ('==>>> total trainning batch number: {}'.format(len(train_loader)))
    print ('==>>> total testing batch number: {}'.format(len(test_loader)))
    return train_loader, test_loader
if __name__ == "__main__":
# TODO
folder = sys.argv[1]
train_loader, val_loader = get_dataloader(folder, batch_size=32)
use_cuda = torch.cuda.is_available()
#### resnet16
extractor = resnet18(pretrained=True)
num_ftrs = extractor.fc.in_features
extractor.fc = nn.Linear(num_ftrs,100)
extractor.load_state_dict(torch.load('./checkpoint/resnet.pth'))
#extractor.cuda()
extractor.eval()
features = []
labels = []
features_valid = []
labels_valid = []
labels_val_num = np.zeros(10)
with torch.no_grad():
for batch, (img, label) in enumerate(train_loader,1):
#img.cuda()
#feat = extractor(img.cuda())
feat = extractor(img)
feat = feat.view(img.size(0),100,-1)
feat = torch.mean(feat,2)
feat = feat.cpu().numpy()
label = label.numpy()
for f, l in zip(feat, label):
features.append(f)
labels.append(l)
#### Validation
with torch.no_grad():
for batch, (img, label) in enumerate(val_loader,1):
#img.cuda()
#feat = extractor(img.cuda())
feat = extractor(img)
feat = feat.view(img.size(0),100,-1)
feat = torch.mean(feat,2)
feat = feat.cpu().numpy()
label = label.numpy()
for f, l in zip(feat, label):
features_valid.append(f)
labels_valid.append(l)
#### t-SNE
tsne_own = TSNE(n_components=2).fit_transform(features, labels)
tsne_own_vali = TSNE(n_components=2).fit_transform(features_valid, labels_valid)
colors = ['#0072BD', '#D95319', '#EDB120', '#7E2F8E', '#77AC30', '#4DBEEE', '#A2142F', '#7FFFAA', '#2F4F4F', '#FF1493']#9400D3
plt.title("T-SNE of RES-NET in Training")
for i, class_num in enumerate(labels):
if class_num<10:
plt.scatter(tsne_own[i,0],tsne_own[i,1], c=colors[class_num])
plt.savefig("t-sne_resnet_train.png")
plt.show()
plt.close()
plt.title("T-SNE of RES-NET in Validation")
for i, class_num in enumerate(labels_valid):
if class_num<10:
plt.scatter(tsne_own_vali[i,0],tsne_own_vali[i,1], c=colors[class_num])
plt.savefig("t-sne_resnet_validation.png")
plt.show()
plt.close()
| true
|
25b1e0bb8a702c955f5d09b4b4c6d54b73057c22
|
Python
|
taiswelling/ifpi_algoritimos
|
/SEMANA 3 E 4-questões URI/URI_1019.py
|
UTF-8
| 146
| 3.296875
| 3
|
[] |
no_license
|
# URI 1019: convert a number of seconds into hours:minutes:seconds.
N = int(input(''))
hora, resto = divmod(N, 3600)      # whole hours and leftover seconds
minutos, resto2 = divmod(resto, 60)  # whole minutes and remaining seconds
print('{}:{}:{}'.format(hora, minutos, resto2))
| true
|
d06687af82d5cd910207fcbde9f142af900d20be
|
Python
|
FranArleynDynamicDuo/problemas_modelos_lineales
|
/Problema_2/main.py
|
UTF-8
| 5,132
| 2.890625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
from simulacion import iniciar_simulacion
from commons.estadistica import error_95_prcnt
def problema(numero_simulaciones):
    """Run the bank-teller simulation *numero_simulaciones* times and print
    averages with 95% confidence intervals (Python 2 prints).

    For each run, iniciar_simulacion returns a 6-tuple:
    (decline %, expected time in system, idle % of tellers 1-4).
    Per-run values are accumulated both as lists (for the CI computation
    via error_95_prcnt) and as running sums (for the means).
    """
    print ""
    print "********************************************************************************"
    print "********************************** Problema 2 **********************************"
    print "********************************************************************************"
    print ""
    # Simulation parameters: horizon and number of tellers.
    maximo_de_tiempo = 200
    maximo_servidores = 4
    lista_porcentaje_declinaron = []
    lista_esperanza_cliente = []
    promedio_total_porcentaje_declinaron = 0
    promedio_total_esperanza_cliente = 0
    promedio_total_cajeros1_lista = 0
    promedio_total_cajeros2_lista = 0
    promedio_total_cajeros3_lista = 0
    promedio_total_cajeros4_lista = 0
    lista_cajeros1 = []
    lista_cajeros2 = []
    lista_cajeros3 = []
    lista_cajeros4 =[]
    print "----------------------------------------------------------------"
    print "------------------- Preparando la simulacion! ------------------"
    print "----------------------------------------------------------------"
    print "Parametros: "
    print "----------------------------------------------------------------"
    print "(a) tiempo_maximo %d" % (maximo_de_tiempo)
    print "(b) maximo_servidores %d" % (maximo_servidores)
    print "----------------------------------------------------------------"
    print ""
    # Collect per-run statistics.
    for i in range(numero_simulaciones):
        x = iniciar_simulacion(maximo_de_tiempo, maximo_servidores)
        lista_porcentaje_declinaron.append(x[0])
        lista_esperanza_cliente.append(x[1])
        lista_cajeros1.append(x[2])
        lista_cajeros2.append(x[3])
        lista_cajeros3.append(x[4])
        lista_cajeros4.append(x[5])
        promedio_total_porcentaje_declinaron += x[0]
        promedio_total_esperanza_cliente += x[1]
        promedio_total_cajeros1_lista += x[2]
        promedio_total_cajeros2_lista += x[3]
        promedio_total_cajeros3_lista += x[4]
        promedio_total_cajeros4_lista += x[5]
    # Turn running sums into means.
    promedio_total_porcentaje_declinaron /= numero_simulaciones
    promedio_total_esperanza_cliente /= numero_simulaciones
    promedio_total_cajeros1_lista /= numero_simulaciones
    promedio_total_cajeros2_lista /= numero_simulaciones
    promedio_total_cajeros3_lista /= numero_simulaciones
    promedio_total_cajeros4_lista /= numero_simulaciones
    # 95% CI half-width for the decline percentage.
    m_error_95_decl = error_95_prcnt(
        lista_porcentaje_declinaron,
        promedio_total_porcentaje_declinaron)
    print ""
    print "----------------------------------------------------------------------"
    print "El promedio de porcentaje de declinación TOTAL es: %0.2f" % (promedio_total_porcentaje_declinaron)
    print "----------------------------------------------------------------------"
    print "El intervalo de confianza de 95 por ciento de la declinación esta entre %f y %f" % (promedio_total_porcentaje_declinaron - m_error_95_decl, promedio_total_porcentaje_declinaron + m_error_95_decl)
    print "----------------------------------------------------------------------"
    print ""
    # 95% CI half-width for the expected time in system.
    m_error_95_esp = error_95_prcnt(
        lista_esperanza_cliente,
        promedio_total_esperanza_cliente)
    print ""
    print "----------------------------------------------------------------------"
    print "El tiempo esperado que un cliente pasa en el sistema TOTAL es: %0.2f" % (promedio_total_esperanza_cliente)
    print "----------------------------------------------------------------------"
    print "El intervalo de confianza de 95 por ciento de la esperanza esta entre %0.4f y %0.4f" % (promedio_total_esperanza_cliente - m_error_95_esp, promedio_total_esperanza_cliente + m_error_95_esp)
    print "----------------------------------------------------------------------"
    print ""
    # Per-teller means and sample lists, indexed 0..3 for the report loop.
    promedio_total_cajerosd_lista = [promedio_total_cajeros1_lista,
                                     promedio_total_cajeros2_lista,
                                     promedio_total_cajeros3_lista,
                                     promedio_total_cajeros4_lista]
    lista_cajeros = [lista_cajeros1,
                     lista_cajeros2,
                     lista_cajeros3,
                     lista_cajeros4]
    for i in range(maximo_servidores):
        m_error_95_esp = error_95_prcnt(
            lista_cajeros[i],
            promedio_total_cajerosd_lista[i])
        print ""
        print "----------------------------------------------------------------------"
        print "El tiempo esperado de porcentaje de desocupacion del cajero %d TOTAL es: %0.2f" % (i,promedio_total_cajerosd_lista[i])
        print "----------------------------------------------------------------------"
        print "El intervalo de confianza de 95 por ciento de este cajero esta entre %0.4f y %0.4f" % (promedio_total_cajerosd_lista[i] - m_error_95_esp, promedio_total_cajerosd_lista[i] + m_error_95_esp)
        print "----------------------------------------------------------------------"
        print ""
| true
|
59ded83b07f2bff2cc27082e25fcc4fa85e10925
|
Python
|
kuznesashka/dlcourse.ai
|
/Assignment 2/model.py
|
UTF-8
| 4,408
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from layers import FullyConnectedLayer, ReLULayer, softmax_with_cross_entropy, l2_regularization
class TwoLayerNet:
    """ Neural network with two fully connected layers """
    # Architecture: FC1 -> ReLU -> FC2 -> ReLU, softmax cross-entropy loss.
    # NOTE(review): a ReLU after the final FC before softmax is unusual
    # (it zeroes negative logits) -- confirm this matches the assignment.

    def __init__(self, n_input, n_output, hidden_layer_size, reg):
        """
        Initializes the neural network
        Arguments:
        n_input, int - dimension of the model input
        n_output, int - number of classes to predict
        hidden_layer_size, int - number of neurons in the hidden layer
        reg, float - L2 regularization strength
        """
        self.reg = reg
        self.RELU_1 = ReLULayer()
        self.RELU_2 = ReLULayer()
        self.FullyConnected_1 = FullyConnectedLayer(n_input, hidden_layer_size)
        self.FullyConnected_2 = FullyConnectedLayer(hidden_layer_size, n_output)

    def compute_loss_and_gradients(self, X, y):
        """
        Computes total loss and updates parameter gradients
        on a batch of training examples
        Arguments:
        X, np array (batch_size, input_features) - input data
        y, np array of int (batch_size) - classes
        """
        # Before running forward and backward pass through the model,
        # clear parameter gradients aggregated from the previous pass
        # TODO Set parameter gradient to zeros
        # Hint: using self.params() might be useful!
        # NOTE(review): the zeroing TODO above is still unimplemented --
        # grads are overwritten, not accumulated, so it may be benign here.
        # TODO Compute loss and fill param gradients
        # by running forward and backward passes through the model
        # Forward pass through all four layers.
        y1 = self.FullyConnected_1.forward(X)
        y2 = self.RELU_1.forward(y1)
        y3 = self.FullyConnected_2.forward(y2)
        y_result = self.RELU_2.forward(y3)
        loss, d_out1 = softmax_with_cross_entropy(y_result, y)
        # Backward pass, propagating the loss gradient layer by layer.
        d_out2 = self.RELU_2.backward(d_out1)
        d_out3 = self.FullyConnected_2.backward(d_out2)
        dW2 = self.FullyConnected_2.params()['W'].grad
        dB2 = self.FullyConnected_2.params()['B'].grad
        d_out4 = self.RELU_1.backward(d_out3)
        d_out_result = self.FullyConnected_1.backward(d_out4)
        dW1 = self.FullyConnected_1.params()['W'].grad
        dB1 = self.FullyConnected_1.params()['B'].grad
        # After that, implement l2 regularization on all params
        # Hint: self.params() is useful again!
        loss_l1, dW1_l = l2_regularization(self.FullyConnected_1.params()['W'].value, self.reg)
        loss_l2, dW2_l = l2_regularization(self.FullyConnected_2.params()['W'].value, self.reg)
        loss_l3, dB1_l = l2_regularization(self.FullyConnected_1.params()['B'].value, self.reg)
        loss_l4, dB2_l = l2_regularization(self.FullyConnected_2.params()['B'].value, self.reg)
        # Add the regularization gradients onto the data gradients.
        self.FullyConnected_1.params()['W'].grad = dW1 + dW1_l
        self.FullyConnected_2.params()['W'].grad = dW2 + dW2_l
        self.FullyConnected_1.params()['B'].grad = dB1 + dB1_l
        self.FullyConnected_2.params()['B'].grad = dB2 + dB2_l
        return loss+loss_l1+loss_l2+loss_l3+loss_l4

    def predict(self, X):
        """
        Produces classifier predictions on the set
        Arguments:
        X, np array (test_samples, num_features)
        Returns:
        y_pred, np.array of int (test_samples)
        """
        # TODO: Implement predict
        # Hint: some of the code of the compute_loss_and_gradients
        # can be reused
        y1 = self.FullyConnected_1.forward(X)
        y2 = self.RELU_1.forward(y1)
        y3 = self.FullyConnected_2.forward(y2)
        predictions = self.RELU_2.forward(y3)
        # Numerically stable softmax: subtract the row-wise max first.
        if predictions.ndim == 1:
            predictions_new = predictions - np.max(predictions)
        else:
            maximum = np.max(predictions, axis=1)
            predictions_new = predictions - maximum[:, np.newaxis]
        predictions_new = np.exp(predictions_new)
        predictions_sum = np.sum(predictions_new, axis=(predictions.ndim - 1))
        if predictions.ndim == 1:
            probabilities = predictions_new / predictions_sum
        else:
            probabilities = predictions_new / predictions_sum[:, np.newaxis]
        # Predicted class is the argmax of the softmax probabilities.
        pred = np.argmax(probabilities, axis=1)
        return pred

    def params(self):
        # TODO Implement aggregating all of the params
        # Expose both layers' Param objects under flat keys for the trainer.
        return {'W1': self.FullyConnected_1.params()['W'], 'W2': self.FullyConnected_2.params()['W'],
                'B1': self.FullyConnected_1.params()['B'], 'B2': self.FullyConnected_2.params()['B']}
| true
|
8ea9969315e91d256302085ba735891034d67bc3
|
Python
|
jeffreyjxh/dse14-finding-venusian-volcanoes
|
/ballooncalc.py
|
UTF-8
| 1,952
| 2.796875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 03 11:29:28 2016
@author: Chaggai
"""
def ballooncalc(mpayload, hbal, molarmgas, buoyancyperc):
    """Iteratively size a balloon carried at altitude *hbal* (km) on Venus.

    Fixed-point iteration: total mass is assumed to be 5x the payload+gas
    mass (the /0.2 factor), and the balloon volume is sized so buoyancy
    covers *buoyancyperc* percent of that total mass.

    Returns (total mass, balloon volume, lift-gas mass); units follow
    VenusAtmosphere30latitude's outputs -- TODO confirm (SI assumed).
    """
    import VenusAtmosphere as atm
    #get atmospheric contstants
    Tatm, Patm, rhoatm, GravAcc=atm.VenusAtmosphere30latitude(hbal)
    Patm=Patm*10**5  # bar -> Pa, presumably; confirm against atm module
    # Ideal-gas density of the lift gas at ambient T and P.
    rhogas = Patm/((8314.4598/molarmgas)*Tatm)
    Vbal= mpayload/(rhoatm-rhogas)*(buoyancyperc/100.)
    mtot=(rhogas*Vbal+mpayload)/0.2
    found=False
    # Iterate volume <-> total mass until the mass converges (<1e-3).
    while found==False:
        Vbalnew=mtot/(rhoatm-rhogas)*(buoyancyperc/100.)
        mtotnew=(rhogas*Vbalnew+mpayload)/0.2
        if abs(mtotnew-mtot)<1e-3:
            found=True
        else:
            mtot=mtotnew
    mgas=rhogas*Vbalnew
    return(mtotnew, Vbalnew,mgas)
def fullbuoyancy(mgas, mtot, Vbal, expancruise):
    """Find the altitude (km) where the shrunken balloon lifts *mtot*.

    The balloon volume is reduced by *expancruise* percent; the gas mass
    stays fixed, so its density rises. Scans altitude upward in 0.01 km
    steps until lift minus mass first drops below the 1e-1 tolerance.
    NOTE(review): the comparison omits GravAcc, so 'Lift' here is a mass
    balance, not a force -- confirm consistency with the caller.
    """
    import VenusAtmosphere as atm
    Vbalnew=Vbal*(100.-expancruise)/100.
    rhogasnew=mgas/Vbalnew
    #find what alt buoyancy is 100%
    #assumign the voume can increase by 10% but not decrease
    found=False
    altbuoy=0
    while not found:
        Tatm, Patm, rhoatm, GravAcc=atm.VenusAtmosphere30latitude(altbuoy)
        Lift=Vbalnew*(rhoatm-rhogasnew)
        if (Lift-mtot)<1e-1:
            found=True
        else:
            altbuoy+=1e-2
    return (altbuoy)
# Script: find the altitude where balloon lift equals the lift a wing of
# aspect ratio A would generate at cruise speed V (Python 2 print at end).
import VenusAtmosphere as atm
hcruise = 50000 #km
Cl =1.5          # wing lift coefficient
mpayload = 90.   # payload mass
V=40.            # cruise speed
hforce=hcruise/1000.  # altitude in km for the atmosphere model
# Size the balloon for 100% buoyancy with hydrogen (molar mass 2.016).
mtotnew, Vbalnew,mgas=ballooncalc(mpayload, hforce, 2.016, 100)
A=14             # wing aspect ratio
found=False
# Step altitude down by 0.01 km until balloon lift matches wing lift.
while not found:
    Tatm, Patm, rhoatm, GravAcc=atm.VenusAtmosphere30latitude(hforce)
    Patm=Patm*10**5
    rhogas = Patm/((8314.4598/2.016)*Tatm)
    Lift = GravAcc*Vbalnew*(rhoatm-rhogas)
    altbuoy=fullbuoyancy(mgas,mtotnew,Vbalnew,10.)
    # Wing chord from balloon volume; planform area S = c * (A*c).
    cord=(Vbalnew/2.4)**(1./3.)
    S=cord*A*cord
    LiftWing=1/2.*S*Cl*V**2*rhoatm
    if abs(Lift-LiftWing)<1e-1:
        found=True
    else:
        hforce-=1e-2
print hforce
| true
|
49763d334f29dfbc162cb1dd0e26792d8dcd7beb
|
Python
|
TariqAHassan/EasyMoney
|
/tests/easy_tests.py
|
UTF-8
| 14,074
| 2.828125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env python3
"""
Public API Unit Testing for EasyMoney
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
# Imports
import os
import sys
import unittest
import pandas as pd
# Allow access to modules
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
# Import the tool
from easymoney.money import EasyPeasy
from easymoney.easy_pandas import items_null
# Set the Data Path
# Assumes tests are launched from a path containing "/tests" so the split
# yields the repository root -- TODO confirm for CI environments.
data_path = str(os.getcwd()).split("/tests")[0] + "/easymoney/sources/data"
# Create an instance of the tool
# Shared module-level fixture used by every test class below.
ep = EasyPeasy(fuzzy_threshold=85, data_path=data_path)
class OptionTests(unittest.TestCase):
    """
    Test Battery for EasyMoney/money"s EasyPeasy().options() method.
    """
    # All tests use the module-level `ep` EasyPeasy fixture.

    def test_options_list(self):
        """
        General: test the EasyPeasy().options() method.
        Specific: test rformat = "list".
        """
        # Request a list of nations for which there is exchange rate information.
        exchange_options_list = ep.options(info="exchange", rformat="list", pretty_print=False)
        # Assert exchange_options_list is, in fact, a list
        self.assertEqual(isinstance(exchange_options_list, list), True)
        # Assert that the length of exchange_options_list is nontrivial.
        self.assertEqual(len(exchange_options_list) > 0, True)
        # Request a list of nations for which there is inflation information.
        inflation_options_list = ep.options(info="inflation", rformat="list", pretty_print=False)
        # Assert inflation_options_list is, in fact, a list
        self.assertEqual(isinstance(inflation_options_list, list), True)
        # Assert that the length of inflation_options_list is nontrivial.
        self.assertEqual(len(inflation_options_list) > 0, True)

    def test_options_df_exchange(self):
        """
        General: test the EasyPeasy().options() method.
        Specific: test rformat = "table".
        """
        exchange_options_df = ep.options(info="exchange", rformat="table", pretty_print=False)
        # Assert exchange_options_df is a Pandas DataFrame
        self.assertEqual(isinstance(exchange_options_df, pd.DataFrame), True)
        # Assert the number of rows in exchange_options_df is > 0.
        self.assertEqual(exchange_options_df.shape[0] > 0, True)

    def test_options_df_inflation(self):
        """
        General: test the EasyPeasy().options() method.
        Specific: test rformat = "table".
        """
        # Request a Pandas DataFrame with all information information.
        inflation_options_df = ep.options(info="inflation", rformat="table", pretty_print=False)
        # Assert inflation_options_list is a Pandas DataFrame
        self.assertEqual(isinstance(inflation_options_df, pd.DataFrame), True)
        # Assert the number of rows in inflation_options_list is > 0.
        self.assertEqual(inflation_options_df.shape[0] > 0, True)

    def test_options_df_overlap(self):
        """
        General: test the EasyPeasy().options() method.
        Specific: test rformat = "table".
        """
        # Request a Pandas DataFrame with all overlap information (between exchange rate and inflation).
        overlap_options_df = ep.options(rformat='table', table_overlap_only=True, pretty_print=False)
        # Assert overlap_options_df is a Pandas DataFrame
        self.assertEqual(isinstance(overlap_options_df, pd.DataFrame), True)
        # Assert the number of rows in overlap_options_df is > 0.
        self.assertEqual(overlap_options_df.shape[0] > 0, True)

    def test_options_df_all_dates(self):
        """
        General: test the EasyPeasy().options() method.
        Specific: test range_table_dates = False.
        """
        # Request a Pandas DataFrame with all data information
        all_dates_options_df = ep.options(pretty_print=False, range_table_dates=False)
        # With range_table_dates=False the date columns hold the full list of
        # dates, not just [min, max]; the lists should exceed length 2.
        # Assert there are more than lists of two (i.e, [min, max] in the InflationRange column
        self.assertEqual(max([len(l) for l in all_dates_options_df["InflationDates"] if isinstance(l, list)]) > 2, True)
        # Assert there are more than lists of two (i.e, [min, max] in the ExchangeRange column
        self.assertEqual(max([len(l) for l in all_dates_options_df["ExchangeDates"] if isinstance(l, list)]) > 2, True)
class FunctionalityTests(unittest.TestCase):
"""
Test Battery for all Public Methods in EasyMoney/money"s EasyPeasy() Class, excluding options().
"""
def __init__(self, *args, **kwargs):
"""
Thanks to karthikr on SO: http://stackoverflow.com/a/17353262/4898004.
Very helpful workaround.
"""
super(FunctionalityTests, self).__init__(*args, **kwargs)
# Request a list of nations for which there is exchange rate information.
self.exchange_options_list = ep.options(info="exchange", rformat="list", pretty_print=False)
# Request a list of nations for which there is inflation information.
self.inflation_options_list = ep.options(info="inflation", rformat="list", pretty_print=False)
# Request a Pandas DataFrame with all inflation information.
self.inflation_options_df = ep.options(info="inflation", rformat="table", pretty_print=False)
# Construct a {Alpha2: [min(inflation_data), max(inflation_data)]} dict w.r.t. the inflation_options_df.
self.inflation_dict = dict(zip(self.inflation_options_df["Alpha2"], self.inflation_options_df["InflationDates"]))
# Request a Pandas DataFrame with all overlap information (between exchange rate and inflation).
self.overlap_options_df = ep.options(info="all", rformat="table", table_overlap_only=True, pretty_print=False)
# Construct a {Alpha2: [min(inflation_data), max(inflation_data)]} dict w.r.t. the overlap_options_df.
self.overlap_dict = dict(zip(self.overlap_options_df["Alpha2"], self.overlap_options_df["InflationDates"]))
def test_region_map(self):
"""
General: Test the EasyPeasy().region_map() method.
Specific:
(a) CAD --> Alpha2
(b) CAD --> Alpha3
(c) FR --> Currency
(c) FR --> Natural
"""
# (1) Request "CAN" be mapped to its ISO ALpha2 Code.
CAD_alpha2 = ep.region_map(region="CAN", map_to="alpha_2")
# Assert (1) is "CA"
self.assertEqual(CAD_alpha2, "CA")
# (2) Request "CAN" be mapped to its ISO ALpha3 Code.
CAD_alpah3 = ep.region_map(region="CAN", map_to="alpha_3")
# Assert (2) is "CAN"
self.assertEqual(CAD_alpah3, "CAN")
# (3) Request "FR" be mapped to its Currency Code.
FR_currency = ep.region_map(region="FR", map_to="name")
# Assert (3) is "France"
self.assertEqual(FR_currency, "France")
def test_currency_converter_all(self):
"""
General: Test the EasyPeasy().currency_converter() method.
Specific: testing that all curriency1 --> curriency2 possibilities return numeric results.
"""
# LCU --> EUR and EUR --> LCU
for c in self.exchange_options_list:
lcu_to_eur = ep.currency_converter(100, c, "EUR", date="latest")
eur_to_lcu = ep.currency_converter(100, "EUR", c, date="latest")
def test_currency_converter_EUR_USD(self):
"""
General: Test the EasyPeasy().currency_converter() method.
Specific: Test Converting between USD and EUR against a known value.
"""
# (1) On Sept 2, 2016, 100 EUR = 111.93 USD (based on ECB data).
sept2_2016_eur_to_usd = ep.currency_converter(100, "EUR", "USD", date="02/09/2016")
# Assert (1) is True.
self.assertEqual(sept2_2016_eur_to_usd, 111.93)
# (2) On Sept 2, 2016, 100 USD = 89.34 EUR (based on ECB data).
sept2_2016_usd_to_eur = ep.currency_converter(100, "USD", "EUR", date="02/09/2016")
# Assert (2) is True.
self.assertEqual(sept2_2016_usd_to_eur, 89.34)
def test_inflation_rate(self):
"""
General: test the EasyPeasy().inflation_rate() method.
Specific: test that all region referenced in options have:
(a) (numeric) inflation rate information.
(b) A dictionary of CPI information for each region can be returned.
(i) that this dictionary has the same number of keys as years provided.
(ii) that all values in this dictionary are numeric.
"""
# Initialize
rate = None
rate_dict = None
# Iterate though the inflation_dict dict.
for region, drange in self.inflation_dict.items():
if not items_null(drange):
# Reqest the inflation rate for all possible regions.
rate = ep.inflation(region, int(drange[0]), int(drange[1]))
# Assert the returned result is numeric.
self.assertEqual(isinstance(rate, (int, float)), True)
# Request a dictionary of CPI information.
rate_dict = ep.inflation(region, int(drange[0]), int(drange[1]), return_raw_cpi_dict=True)
# Asser rate_dict is, in fact, a dictionary.
self.assertEqual(isinstance(rate_dict, dict), True)
# Assert the number of keys is equal to the number of years provided.
self.assertEqual(len(rate_dict.keys()), len(set(drange)))
# Assert that the values in rate_dict are numeric.
self.assertEqual(all([isinstance(i, (float, int)) for i in rate_dict.values()]), True)
def test_inflation_calculator(self):
"""
General: Test the EasyPeasy().inflation_calculator() method.
Specific: Check that 100 dollars can be adjusted for inflation on the range indicated in options().
"""
# Initialize
real_dollars = None
# Iterate though the inflation_dict dict.
for region, drange in self.inflation_dict.items():
if not items_null(drange):
real_dollars = ep.inflation_calculator(100, region, int(drange[0]), int(drange[1]))
self.assertEqual(isinstance(real_dollars, (int, float)), True)
# (1) 100 (1990 USD) =~= 181.40 (2015 USD).
# Similar result obtained at: http://www.bls.gov/data/inflation_calculator.htm).
US_inflation_1990_to_2015 = ep.inflation_calculator(100, "US", 1990, 2015)
# Assert (1) is True.
self.assertEqual(US_inflation_1990_to_2015, 181.4)
# (2) 100 (1990 CAD) =~= 161.56 (2015 CAD).
# Similar answer obtained at: http://www.bankofcanada.ca/rates/related/inflation-calculator/.
CA_inflation_1990_to_2015 = ep.inflation_calculator(100, "CA", 1990, 2015)
# Assert (2) is True.
self.assertEqual(CA_inflation_1990_to_2015, 161.56)
def test_normalize(self):
"""
General: test the EasyPeasy().normalize() method.
Specific:
(a) many different combinations of region, from_year and base requests.
(b) USD to EUR; inlfation: 2010 --> 2015; exchange_date = 2015-12-01.
(c) CAD to USD; inlfation: 2005 --> 2012; exchange_date = 30/11/2012.
"""
# Initialize
normalized_amount = None
# Systematic attempt to break the normalize() method.
for base in self.overlap_options_df.Alpha3.tolist():
for region, drange in self.overlap_dict.items():
if not items_null(drange):
for d in drange:
normalized_amount = ep.normalize(100, region, int(d), base_currency=base)
# Assert normalized_amount is numeric.
self.assertEqual(isinstance(normalized_amount, (float, int)), True)
# (1) 100 (2010 USD) =~= 108.70 (2015 USD) =~= 102.55 (01/12/2015 EUR).
norm_USD_to_EUR = ep.normalize(100, region="US"
, from_year=2010
, to_year=2015
, base_currency="EUR"
, exchange_date="01/12/2015")
# Assert (1) is True.
self.assertEqual(norm_USD_to_EUR, 102.55)
# (2) 100 (2005 CAD) =~= 113.74 (2012 CAD) =~= 114.46 (30/11/2012 USD).
norm_CAD_to_USD = ep.normalize(100, region="CA"
, from_year=2005
, to_year=2012
, base_currency="USD"
, exchange_date="30/11/2012")
# Assert (2) is True.
self.assertEqual(norm_CAD_to_USD, 114.46)
# Notes:
# For (1), a similar exchange rate calculation can be obtained at (i); similarly for (2) at (ii).
# For both (1) and (2) a similar exchange rate calculation result can be obtained at (iii).
# (i) http://www.bls.gov/data/inflation_calculator.htm
# (ii) http://www.bankofcanada.ca/rates/related/inflation-calculator/
# (iii) http://www.bankofcanada.ca/rates/exchange/10-year-converter/
def test_fuzzy_search(self):
"""
General: Test Fuzzy Search instance of EasyPeasy().
Specific: test EasyPeasy().normalize() method.
"""
# (2) 100 (2005 CAD) =~= 113.74 (2012 CAD) =~= 114.46 (2012-11-30 USD).
# Use "Canadian" as a subsitue for "CAD".
norm_CAD_to_USD = ep.normalize(100, region="Canadian"
, from_year=2005
, to_year=2012
, base_currency="USD"
, exchange_date="30/11/2012")
# Assert (2) is True.
self.assertEqual(norm_CAD_to_USD, 114.46)
# Run Tests
if __name__ == "__main__":
    # Guard the runner so importing this module does not execute the suite.
    unittest.main()
| true
|
425b99d2126835db78b63125d6f2769108c8437e
|
Python
|
christopherlorenz/CS420_project
|
/test_codes/lu_row_pivot.py
|
UTF-8
| 1,401
| 2.640625
| 3
|
[] |
no_license
|
# LU factorization with partial (row) pivoting, written in Python 2.
# Produces L (unit lower-triangular), U (upper-triangular) and a permutation
# matrix P such that P^T . L . U reconstructs the random matrix A.
import numpy as np
size = 4
A = np.random.rand(size, size)
U = np.array(A)   # working copy of A; reduced to upper-triangular form
L = np.eye(size)  # accumulates the elimination multipliers
P = np.eye(size)  # accumulated permutation matrix
P_new = np.eye(size)
for i in range(size):
    # if need to permute => permute and store permutation on P
    # (the triple-quoted block below is an earlier, conditional-pivoting
    # variant kept for reference; it is a no-op string statement)
    """
    if U[i,i] < 1e-5:
        max_pivot = i
        for j in range(i+1,size):
            if abs(U[j,i]) > abs(U[max_pivot,i]): max_pivot = j
        temp_row = np.array(U[i])
        U[i] = U[max_pivot]
        U[max_pivot] = temp_row
        P[i,i] = 0
        P[max_pivot,max_pivot] = 0
        P[i,max_pivot] = 1
        P[max_pivot,i] = 1
    """
    # Finding maximum value of column i (at or below the diagonal)
    max_pivot = i
    for j in range(i+1,size):
        if abs(U[j,i]) > abs(U[max_pivot,i]): max_pivot = j
    # Permuting: swap rows i and max_pivot in U, and the already-computed
    # multiplier columns (0..i-1) in L.
    temp_row = np.array(U[i])
    U[i] = U[max_pivot]
    U[max_pivot] = temp_row
    temp_row = np.array(L[i,0:i])
    L[i,0:i] = L[max_pivot,0:i]
    L[max_pivot,0:i] = temp_row
    # Computing new combined permutation matrix (elementary swap of i and max_pivot)
    P_new = np.eye(size)
    P_new[i,i] = 0
    P_new[max_pivot,max_pivot] = 0
    P_new[i,max_pivot] = 1
    P_new[max_pivot,i] = 1
    print 'P_new =\n', P_new
    P = np.dot(P_new, P)
    # once permuted, continue with elimination: zero out column i below the
    # diagonal, storing the multiplier in L.
    for j in range(i+1,size):
        L[j,i] = U[j,i] / U[i,i]
        for k in range(i,size):
            if k==i:
                U[j,k] = 0.0
            else:
                U[j,k] -= L[j,i] * U[i,k]
# Sanity output: the last two prints should reconstruct A (difference ~ 0).
print A
print U
print L
print P
print np.dot(np.transpose(P),np.dot(L,U))
print np.dot(np.transpose(P),np.dot(L,U)) - A
| true
|
916999fefde9da2ebfd49e57ab7f1aef36a2650e
|
Python
|
anis-campos/macgyver
|
/gui/level/level.py
|
UTF-8
| 2,532
| 3.234375
| 3
|
[] |
no_license
|
from typing import List, Optional

import pygame
from pygame.sprite import Group

from gui import BLUE
from gui.tiles.floor import Floor
from gui.tiles.floor_type import FloorType
from gui.tiles.wall import Wall
from model.labyrinth import Labyrinth, TileType
class Level:
    """Generic super-class used to define a level.

    Create a child class for each level with level-specific info.
    Loads a labyrinth description from a file and builds the sprite
    group used for rendering and the wall/floor lookup lists.
    """
    _labyrinth: Labyrinth

    @property
    def guardian(self):
        """Guardian sprite of the level; must be provided by subclasses."""
        # BUG FIX: the original `raise NotImplemented` raised a TypeError
        # (NotImplemented is a constant, not an exception class).
        raise NotImplementedError

    # Lists of sprites used in all levels.
    tile_list: Group

    def __init__(self, level_file_name: Optional[str] = None):
        """Constructor: build the level from *level_file_name*."""
        self.tile_list = pygame.sprite.Group()
        # Loading level file
        self._labyrinth = Labyrinth()
        self._labyrinth.load(level_file_name)
        self.width = self._labyrinth.nb_col
        self.height = self._labyrinth.nb_row
        # Map each labyrinth tile to its GUI sprite; remember start/end tiles.
        for tile in self._labyrinth.mazeMap:
            if tile.type == TileType.WALL:
                tile_gui = Wall(tile)
            elif tile.type == TileType.HALL:
                tile_gui = Floor(tile)
            elif tile.type == TileType.START:
                tile_gui = Floor(tile, floor_type=FloorType.START)
                self.start = tile_gui
            elif tile.type == TileType.END:
                tile_gui = Floor(tile, floor_type=FloorType.END)
                self.end = tile_gui
            else:
                # Unknown tile types are skipped entirely.
                continue
            self.tile_list.add(tile_gui)

    def update(self):
        """ Update everything in this level."""
        self.tile_list.update()

    def draw(self, screen):
        """ Draw everything on this level. """
        # Draw the background, then every tile sprite on top of it.
        screen.fill(BLUE)
        self.tile_list.draw(screen)

    _walls: Optional[List[Wall]] = None

    @property
    def walls(self) -> List[Wall]:
        """Wall sprites, computed lazily on first access and then cached."""
        if self._walls is None:
            self._walls = [tile for tile in self.tile_list if isinstance(tile, Wall)]
        return self._walls

    _floors: Optional[List[Floor]] = None

    @property
    def floors(self) -> List[Floor]:
        """Floor sprites, computed lazily on first access and then cached."""
        if self._floors is None:
            self._floors = [tile for tile in self.tile_list if isinstance(tile, Floor)]
        return self._floors
| true
|
dccec16f525a3d380fc0c0fac207938476913047
|
Python
|
duyminhnguyen97/GenePrioritization
|
/SNP_finder/tool.py
|
UTF-8
| 3,955
| 2.84375
| 3
|
[] |
no_license
|
import pandas as pd
# function to get gene
def get_gene(input_set, data_set):
    """For each SNP in *input_set*, find the nearest gene in *data_set*.

    For every trait column (columns 1..7 of the input file) the SNPs and
    training genes flagged for that trait are compared by chromosome
    position: a SNP inside a gene's radius scores 100, otherwise it scores
    (radius / distance) * 100 for the closer of the two flanking genes.

    Returns a DataFrame with columns ['snp_gap_id', 'gene_name', 'score'].

    NOTE(review): `result_1.gene_name[i] = ...` is chained assignment and
    may trigger pandas SettingWithCopyWarning; `result.append(...)` was
    removed in pandas 2.0 (use pd.concat) — confirm the pinned pandas
    version before upgrading.
    NOTE(review): assumes both CSVs are sorted by position and share the
    trait columns in positions 1..7 — verify against the data files.
    """
    snp_input = pd.read_csv('../SNP_finder/input/split_by_chr/' + input_set, engine = 'python')
    data_input = pd.read_csv('../SNP_finder/train_data/' + data_set, engine = 'python')
    result = pd.DataFrame(data = None, columns = ['snp_gap_id', 'gene_name', 'score'])
    header = list(snp_input.columns.values)
    # get gene + score, one trait column at a time
    for k in range (1,8):
        trait_input = snp_input[snp_input[header[k]] == 1].reset_index(drop = True)
        train_data = data_input[data_input[header[k]] == 1].reset_index(drop = True)
        result_1 = pd.DataFrame(data = None, columns = ['snp_gap_id', 'gene_name', 'score'])
        result_1['snp_gap_id'] = trait_input['snp_gap_id']
        for i in range(len(trait_input['chr'])):
            for j in range(len(train_data['chr'])):
                # Case 1: SNP lies beyond gene j's midpoint.
                if trait_input.chr_pos[i] > train_data.midpoint[j]:
                    # If j is the last gene, score against it (the gene
                    # before the SNP); otherwise advance to the next gene.
                    if j == (len(train_data['chr']) - 1):
                        dis1 = trait_input.chr_pos[i] - train_data.midpoint[(j-1)]
                        if dis1 < train_data.radius[(j-1)]:
                            result_1.gene_name[i] = train_data.gene_name[(j-1)]
                            result_1.score[i] = 100
                            break
                        else:
                            score1 = (train_data.radius[(j-1)] / dis1)*100
                            result_1.gene_name[i] = train_data.gene_name[(j-1)]
                            result_1.score[i] = score1
                            break
                    else:
                        continue
                # Case 2: SNP lies before the very first gene midpoint.
                if j == 0:
                    dis2 = train_data.midpoint[j] - trait_input.chr_pos[i]
                    if dis2 < train_data.radius[j]:
                        result_1.gene_name[i] = train_data.gene_name[j]
                        result_1.score[i] = 100
                        break
                    else:
                        score2 = (train_data.radius[j] / dis2)*100
                        result_1.gene_name[i] = train_data.gene_name[j]
                        result_1.score[i] = score2
                        break
                # Case 3: SNP lies between gene j-1 and gene j; pick the
                # gene it falls inside, or the one with the higher score.
                dis1 = trait_input.chr_pos[i] - train_data.midpoint[(j-1)]
                dis2 = train_data.midpoint[j] - trait_input.chr_pos[i]
                if dis1 < train_data.radius[(j-1)]:
                    result_1.gene_name[i] = train_data.gene_name[(j-1)]
                    result_1.score[i] = 100
                    break
                if dis2 < train_data.radius[j]:
                    result_1.gene_name[i] = train_data.gene_name[j]
                    result_1.score[i] = 100
                    break
                else:
                    score1 = (train_data.radius[(j-1)] / dis1)*100
                    score2 = (train_data.radius[j] / dis2)*100
                    if score1 > score2:
                        result_1.gene_name[i] = train_data.gene_name[(j-1)]
                        result_1.score[i] = score1
                        break
                    else:
                        result_1.gene_name[i] = train_data.gene_name[j]
                        result_1.score[i] = score2
                        break
        result = result.append(result_1)
    return result
# call function
# Build and display the per-chromosome results.  The original twelve
# chr1 ... chr12 assignments and twelve print() calls followed exactly the
# same pattern, so they are generated in a loop to remove the repetition.
per_chromosome = []
for i in range(1, 13):
    chr_result = get_gene('input_{}.csv'.format(i),
                          'training{}.csv'.format(i)).sort_values(by=['snp_gap_id'])
    per_chromosome.append(chr_result)
for chr_result in per_chromosome:
    print(chr_result)
# Combine all chromosomes, sort by SNP id, and export to CSV.
final = pd.concat(per_chromosome).reset_index(drop=True)
final = final.sort_values(by=['snp_gap_id'])
print(final)
export_csv = final.to_csv('result.csv', index=None, header=True)
| true
|
f7c615252dfb39a549d618067b066f9c4a99314f
|
Python
|
cold-turkey-developer/refactoring_2nd_ed_example
|
/Ch1/test/test_statement.py
|
UTF-8
| 748
| 3
| 3
|
[] |
no_license
|
import unittest
from utils import import_json_file
from statement import statement
class TestStatement(unittest.TestCase):
    """Checks that statement() renders the sample invoice correctly."""

    def setUp(self):
        # Fresh fixtures for every test case.
        self.invoice = import_json_file("invoices.json")
        self.plays = import_json_file("plays.json")

    def test_statement_correct(self):
        # Render the first invoice and compare it to the known-good text.
        actual = statement(self.invoice[0], self.plays)
        expected = (
            "청구 내역 (고객명: BigCo)\n"
            "\tHamlet: $650.0 (55석)\n"
            "\tAs You Like It: $580.0 (35석)\n"
            "\tOthello: $500.0 (40석)\n"
            "총액: $1,730.0\n"
            "적립 포인트: 47점"
        )
        self.assertMultiLineEqual(expected, actual)
# Allow running the suite directly: python test_statement.py
if __name__ == "__main__":
    unittest.main()
| true
|
840313531a84e497e851ed4560d842ac51a04ad7
|
Python
|
Rodolfo-SFA/FirstCodes
|
/ex048.py
|
UTF-8
| 108
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
# Print every odd multiple of 3 below 500, then print their sum.
# Odd multiples of 3 form the arithmetic sequence 3, 9, 15, ... (step 6),
# so the divisibility tests of the original loop are not needed.
total = 0
for value in range(3, 500, 6):
    print(value)
    total += value
print(total)
| true
|
2a707957c4ad38620afc051dd5cabe1bd34be5d6
|
Python
|
abrehamgezahegn/somehow-I-python
|
/oop/operator_overloading.py
|
UTF-8
| 1,029
| 3.984375
| 4
|
[] |
no_license
|
class Vector:
    """2-D vector supporting element-wise + and * via operator overloading."""

    def __init__(self, a, b):
        self.a = a
        self.b = b

    def __add__(self, other):
        # Component-wise sum.
        return Vector(self.a + other.a, self.b + other.b)

    def __mul__(self, other):
        # Component-wise (Hadamard) product, not a dot product.
        return Vector(self.a * other.a, self.b * other.b)

    def get_vector(self):
        # Print the vector as "(a,b)".
        print(f"({self.a},{self.b})")
# Demo: exercise the overloaded operators.
v1 = Vector(1,4)
v2 = Vector(3,5)
# addition operator -> prints (4,9)
v3 = v1 + v2
v3.get_vector()
# multiply operator -> prints (3,20)
v4 = v1 * v2
v4.get_vector()
# Addition p1 + p2 p1.__add__(p2)
# Subtraction p1 - p2 p1.__sub__(p2)
# Multiplication p1 * p2 p1.__mul__(p2)
# Power p1 ** p2 p1.__pow__(p2)
# Division p1 / p2 p1.__truediv__(p2)
# Floor Division p1 // p2 p1.__floordiv__(p2)
# Remainder (modulo) p1 % p2 p1.__mod__(p2)
# Bitwise Left Shift p1 << p2 p1.__lshift__(p2)
# Bitwise Right Shift p1 >> p2 p1.__rshift__(p2)
# Bitwise AND p1 & p2 p1.__and__(p2)
# Bitwise OR p1 | p2 p1.__or__(p2)
# Bitwise XOR p1 ^ p2 p1.__xor__(p2)
# Bitwise NOT ~p1 p1.__invert__()
| true
|
77c149a646f358236dcd349ab5f95ac81e5133c6
|
Python
|
Xiang-Gu/Should-ALL-Temporal-Difference-Learning-Use-Emphasis
|
/MountainCar_Control/Tile_Coding/Tilecoder.py
|
UTF-8
| 3,620
| 3.515625
| 4
|
[] |
no_license
|
# 2-D tile coding
# It is used to construct (binary) features for 2-D continuous state/input space + 1-D discrete action spacce
# for linear methods to do function approximation
import math
import numpy as np
# The following parameters are subject to change according to user need
# -- numTilings, x_start, x_range, y_start, y_range, num_actions
# Number of total tilings
numTilings = 8
# The starting point of each component of input and its range
# Please enter FLOATS (e.g use 0.0 for 0)
x_start, x_range = -1.2, 1.7
y_start, y_range = -0.07, 0.14
# Number of tiles per tiling in each dimension
# (e.g. 8 means each tile covers 1/8 of the bounded distance in x-dimension)
x_num_partition = 7
y_num_partition = 7
# Number of actions available for each state
num_actions = 3
# Number of total tiles per tiling
# Notice we need one more in each dimension to ensure all tilings cover the input space
num_tiles_per_tiling = (x_num_partition + 1) * (y_num_partition + 1)
# Find the active tile indices of input (in1, in2) and store it to tileIndices
def tilecode(in1, in2, tileIndices):
    """Write one active tile index per tiling into *tileIndices* (in place).

    in1, in2:    continuous input coordinates (x, y) inside the bounded space.
    tileIndices: pre-allocated list of length numTilings; overwritten.
    """
    assert len(tileIndices) == numTilings
    # Change coordinate of input to be based on the first tiling, where the origin is set to
    # the left-bottom point of the first tiling
    in1 = in1 - x_start
    in2 = in2 - y_start
    # Compute the offset in each dimension per tiling
    x_offset_per_tiling = x_range / x_num_partition / numTilings
    y_offset_per_tiling = y_range / y_num_partition / numTilings
    # The height and width of each tile
    y_tile = y_range / y_num_partition
    x_tile = x_range / x_num_partition
    # Compute active tile indices for each tiling
    for idx in range(numTilings):
        # Compute the coordiate of input under the new tiling coordinate system
        x_offset, y_offset = idx * x_offset_per_tiling, idx * y_offset_per_tiling
        x, y = in1 + x_offset, in2 + y_offset
        # index = tiling base + row offset + column offset
        index = idx * num_tiles_per_tiling + math.floor(y / y_tile) * (x_num_partition + 1) + math.floor(x / x_tile)
        # A sanity check: the index of tile ranges from 0 (the first tile in the first tiling)
        # to numTilings * num_tiles_per_tiling - 1 (the last tile in the last tiling)
        assert 0 <= index <= numTilings * num_tiles_per_tiling - 1
        # Write result back to tileIndices
        tileIndices[idx] = int(index)
# Get the feature vector of a single input=(in1, in2, action)
def feature(state, action):
    """Return the binary feature vector for (state, action).

    state:  sequence (x, y) of continuous coordinates.
    action: one of {-1, 0, 1}; action+1 selects which third of the vector
            receives the active tiles.
    """
    # Precondition check
    assert x_start <= state[0] <= (x_start + x_range) and y_start <= state[1] <= (y_start + y_range) and action in {-1, 0, 1}, 'input out of space'
    in1, in2 = state[0], state[1]
    tileIndices = [-1] * numTilings
    tilecode(in1, in2, tileIndices)
    # One slot per tile per action; set the active tiles for this action.
    result = np.zeros(num_tiles_per_tiling * numTilings * num_actions)
    for index in tileIndices:
        result[index + num_tiles_per_tiling * numTilings * (action+1)] += 1
    return result
# Test Code
def printTileCoderIndices(in1, in2, action):
    """Debug helper: print the active tile indices and feature vector.

    Runs the tile coder on (in1, in2) and the feature constructor on
    ((in1, in2), action), echoing both to stdout.
    """
    tileIndices = [-1] * numTilings
    tilecode(in1, in2, tileIndices)
    print('Active tile indices for input (' + str(in1) + ',' + str(in2) + ', ' + str(action) + ') are : ' + str(tileIndices))
    # BUG FIX: feature() takes (state, action) as two separate arguments;
    # the previous call feature([in1, in2, action]) raised a TypeError.
    print('feature for this input is: ' + str(feature([in1, in2], action)))
if __name__ == '__main__':
    # Smoke-test the coder on a few hand-picked (position, velocity, action) inputs.
    printTileCoderIndices(0.2, 0.01, 0)
    printTileCoderIndices(0.0, 0.07, 0)
    printTileCoderIndices(-0.5, 0.03, 1)
    printTileCoderIndices(-0.25, -0.01, 0)
| true
|
7beb0ad3ebbe8026973409862812fe054b1cbcde
|
Python
|
suxin1/image_processing
|
/image_derivatives.py
|
UTF-8
| 860
| 2.6875
| 3
|
[] |
no_license
|
from PIL import Image
import numpy as np
import pylab as plt
from scipy.ndimage import filters
import os
from utils.imtools import get_imlist
# Collect the image paths from ./images once at import time.
imlist = get_imlist(os.getcwd() + "/images")
def derivatives(image):
    """Show an image alongside its Gaussian x/y derivatives and gradient magnitude.

    image: a PIL image; it is converted to greyscale before filtering.
    Displays a four-panel matplotlib figure (original, d/dx, d/dy, magnitude);
    plt.show() blocks until the window is closed.
    """
    im = np.array(image.convert('L'))
    sigma = 1  # standard deviation of the Gaussian derivative filter
    imx = np.zeros(im.shape)
    # filters.sobel(im, 1, imx)
    # Gaussian derivative along x: derivative order (0, 1).
    filters.gaussian_filter(im, (sigma, sigma), (0, 1), imx)
    imy = np.zeros(im.shape)
    # filters.sobel(im, 0, imy)
    # Gaussian derivative along y: derivative order (1, 0).
    filters.gaussian_filter(im, (sigma, sigma), (1, 0), imy)
    # Gradient magnitude from the two directional derivatives.
    magnitude = np.sqrt(imx**2 + imy**2)
    # magnitude = 255 - magnitude
    plt.figure()
    plt.gray()
    plt.subplot(141)
    plt.imshow(im)
    plt.subplot(142)
    plt.imshow(imx)
    plt.subplot(143)
    plt.imshow(imy)
    plt.subplot(144)
    plt.imshow(magnitude)
    plt.show()
# Demo: run the derivative visualisation on the fourth image in the folder.
image = Image.open(imlist[3])
derivatives(image)
| true
|
ed8167d4924bb178d7f3b1123b4d74b7e7a8f4e8
|
Python
|
jeisson-Tellez/ciclo_1
|
/Actividades echas clase 3/archivo_clase_3.py
|
UTF-8
| 5,337
| 3.859375
| 4
|
[] |
no_license
|
#___________________CAPTURA DE DATOS________________________________________________
# # primera forma de captura de datos en python
# # se utilizas un print y luego el imput guardandolo en una variable
# #print("Escriba su nombre")
# #Nombre=input()
# #segunda forma, se guarda todo en una sola linea
# #nombre=input("Escriba su nombre: ")
# # # Ejercicio 4 de la clase 2, modificado para la clase 3 con la funcion input
# # # primero debo identificar el problema, nunca comienzo a programar sin saber el problema
# # # problema: cuantas cajas de refresco van a sobrar si se comprar 9 cajas y cada caja trae 24 refredco
# # # y quiero que todos lo invitados tomer la misma cantidad de refresco
# # #defino el algoritmo o instruccion
# # # 1. debo declarar las variables
# # Numero_Cajas_Refresco_adquiridas=9
# # Numero__refresco_por_Caja=24
# # Numero_Invitados=56
# # # 2. Realizo las ecuaciones pertinentes
# # Total_resfresco_adquiridos=Numero_Cajas_Refresco_adquiridas*Numero__refresco_por_Caja
# # print(Total_resfresco_adquiridos/Numero_Invitados)
# # print(Total_resfresco_adquiridos//Numero_Invitados)
# # print(Total_resfresco_adquiridos%Numero_Invitados)
# # ejercicio con Input
# Numero_Cajas_Refresco_adquiridas= int( input("numero de cajas adquiridas: "))
# Numero__refresco_por_Caja= int( input("numero de refrescos por caja: "))
# Numero_Invitados= int( input("numero de invitados: "))
# # 2. Realizo las ecuaciones pertinentes
# Total_resfresco_adquiridos=Numero_Cajas_Refresco_adquiridas*Numero__refresco_por_Caja
# print(Total_resfresco_adquiridos/Numero_Invitados)
# print(Total_resfresco_adquiridos//Numero_Invitados)
# print(Total_resfresco_adquiridos%Numero_Invitados)
#_____________________DEFINIR FUNCIONES_________________________________________
# #se utiliza para reutilizar un bloque de codigo, un script, formando una funcion, se usa "def"
# #lo que hace es ejecutar todas las tareas predefinidas en un codigo
# def refrescos_sobrantes(): #se pone el nombre de la funcion, los argumentos todavia no se cuales son?
# # es necesario que se marque una identacion, puesto que no ejecuta si no la tiene
# Numero_Cajas_Refresco_adquiridas= int( input("numero de cajas adquiridas: "))
# Numero__refresco_por_Caja= int( input("numero de refrescos por caja: "))
# Numero_Invitados= int( input("numero de invitados: "))
# # # 2. Realizo las ecuaciones pertinentes
# Total_resfresco_adquiridos=Numero_Cajas_Refresco_adquiridas*Numero__refresco_por_Caja
# print(Total_resfresco_adquiridos/Numero_Invitados)
# print(Total_resfresco_adquiridos//Numero_Invitados)
# print(Total_resfresco_adquiridos%Numero_Invitados)
# # MUY IMPORTANTE!! la identacion ayuda a python a definir el bloque de codigo de la funcion
# # solo se ejecuta dentro del codigo lo que esta identado, en el caso anterior una vez
# # al llamar la funcion, esta se pone sin identar y ejecuta el codigo identado.
# refrescos_sobrantes()
# # esto ayuda a optimizar el codigo fuente
#___________________ARGUMENTOS DE UNA FUNCION__________________________________
# # Los argumentos son las entradas que estan predefinidas en una funcion
# #estas son necesarias para desarrollar las instrucciones o el bloque de codigo
# # por lo cual, al llamar la funcion esta pedira los datos de entrada
# def refrescos_sobrantes(Numero_Cajas_Refresco_adquiridas,Numero__refresco_por_Caja,Numero_Invitados ):
# Total_resfresco_adquiridos=Numero_Cajas_Refresco_adquiridas*Numero__refresco_por_Caja
# print(Total_resfresco_adquiridos/Numero_Invitados)
# print(Total_resfresco_adquiridos//Numero_Invitados)
# print(Total_resfresco_adquiridos%Numero_Invitados)
# #lo que vienen acontinuacion es un Ejemplo de la funcion con los argumentos definidos
# refrescos_sobrantes(9,12,25)
#_____________________________RETURN______________________________________________________
# seguimos con la funcion, en esta parte lo que hacemos es lo siguiente#
# la funcion print() nos ayuda imprimiendo en consola los elementos que yo quiera
#Sin embargo, es necesario sacarla de mi funcion, dado que limita a solo impriir en consla la funcion
# entonces, para sacar el print() y obtener igualmente datos al correr el programa
# uso la palabra "Return", esta lo que hace es que al final de realizar todas las instrucciones
#instrucciones obtenidas por la funcion que hicimos, me devuelva un valor que yo defino
def refrescos_sobrantes(Numero_Cajas_Refresco_adquiridas,Numero__refresco_por_Caja,Numero_Invitados ):
    """Return how many drinks are left over after sharing them equally.

    Args:
        Numero_Cajas_Refresco_adquiridas: number of cases bought.
        Numero__refresco_por_Caja: drinks per case.
        Numero_Invitados: number of guests sharing the drinks.

    Returns:
        The remainder after each guest receives the same whole number
        of drinks.
    """
    Total_resfresco_adquiridos = Numero_Cajas_Refresco_adquiridas * Numero__refresco_por_Caja
    # The original computed the exact (/) and integer (//) shares here but
    # discarded both results; only the remainder is needed.
    return Total_resfresco_adquiridos % Numero_Invitados
print(refrescos_sobrantes(4,6,2))
# Note: calling the function on its own prints nothing; `return` only tells
# the function which value to hand back, so the call above is wrapped in
# print() to make the result visible in the console.
# NOTE(review): the bare name below just evaluates the function object and
# discards it — it is a no-op and could be removed.
refrescos_sobrantes
| true
|
1a05b78b47e53370747ea2e29f9179388649c95e
|
Python
|
OCM-Lab-PUC/switch-chile
|
/python_utility_scripts/db_timepoints_sampler.py
|
UTF-8
| 2,170
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
# Copyright 2016 The Switch-Chile Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
# Operations, Control and Markets laboratory at Pontificia Universidad
# Católica de Chile.
"""
Script to populate the timescales_population_timepoints table in the
switch_chile OCM database. The only inputs required are the connection
parameters and the timeseries scenario id for which the timepoints want to be
populated.
"""
import psycopg2, sys
from getpass import getpass
# The timeseries scenario id must be inputted to generate the timepoints
# corresponding to that set
sample_ts_scenario_id = 4
username = 'bmaluenda'
passw = getpass('Enter database password for user %s' % username)
try:
    # Remember to enter and/or modify connection parameters accordingly to your
    # setup
    con = psycopg2.connect(database='switch_chile', user=username,
                           host='localhost', port='5915',
                           password=passw)
    print "Connection to database established..."
# NOTE(review): bare except hides the real connection error; narrowing to
# psycopg2.OperationalError would preserve diagnostics.
except:
    sys.exit("Error connecting to the switch_chile database...")
cur = con.cursor()
# NOTE(review): SQL built with %-interpolation; the values are local
# integers here, but parameterized queries (cur.execute(sql, params))
# would be the safer pattern.
cur.execute("SELECT * FROM chile_new.timescales_sample_timeseries WHERE sample_ts_scenario_id = %s ORDER BY 2" % sample_ts_scenario_id)
timeseries_table = cur.fetchall()
# For every sample timeseries, copy the first num_tps population
# timepoints into the sample-timepoints table.
for row in timeseries_table:
    initial_timestamp = row[6]
    sample_ts_id = row[1]
    population_ts_id = row[3]
    num_tps = row[4]
    rows_to_insert = []
    # Populate timepoints
    cur.execute("SELECT * FROM chile_new.timescales_population_timepoints WHERE population_ts_id = %s ORDER BY 2" % population_ts_id)
    all_tps = cur.fetchall()
    # mogrify renders each VALUES tuple; they are joined into one INSERT.
    values_str = ','.join(cur.mogrify("(%s,%s,DEFAULT,%s)", (all_tps[i][0],sample_ts_id,population_ts_id)) for i in range(num_tps))
    try:
        query = "INSERT INTO chile_new.timescales_sample_timepoints VALUES "+values_str+";"
        cur.execute(query)
        con.commit()
    except psycopg2.DatabaseError, e:
        # Roll back the failed batch but keep processing the remaining rows.
        if con:
            con.rollback()
        print 'Error %s' % e
print "success"
if cur:
    cur.close()
if con:
    con.close()
| true
|
56e0f5bac2e764890fabf7746f8f8a83c255044e
|
Python
|
alexandrosanat/graph-neural-networks
|
/zacharys_karate_club/model.py
|
UTF-8
| 9,366
| 2.65625
| 3
|
[] |
no_license
|
from collections import namedtuple
import networkx as nx
from networkx import read_edgelist, set_node_attributes, to_numpy_matrix
import pandas as pd
from numpy import array
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
# Simple container for the train/test split plus the underlying graph.
DataSet = namedtuple(
    'DataSet',
    field_names=['X_train', 'y_train', 'X_test', 'y_test', 'network']
)
def load_karate_club():
    """Load Zachary's karate club graph and build a train/test split.

    Reads the edge list and per-node attributes from ./data, attaches each
    attribute column to the graph nodes, then splits:
      - train: the labelled 'Administrator'/'Instructor' nodes,
      - test:  the remaining 'Member' nodes, labelled by 'community'.

    Returns a DataSet of numpy arrays plus the networkx graph.
    """
    network = read_edgelist(
        'data/zkc.edgelist',
        nodetype=int)
    attributes = pd.read_csv(
        'data/features.csv',
        index_col=['node'])
    for attribute in attributes.columns.values:
        set_node_attributes(
            network,
            values=pd.Series(
                attributes[attribute],
                index=attributes.index).to_dict(),
            name=attribute
        )
    # Training rows: label is True when role == 'Administrator'.
    X_train, y_train = map(array, zip(*[
        ([node], data['role'] == 'Administrator')
        for node, data in network.nodes(data=True)
        if data['role'] in {'Administrator', 'Instructor'}
    ]))
    # Test rows: ground truth comes from the 'community' attribute.
    # NOTE(review): the label compares data['community'] to 'Administrator';
    # confirm the community column actually uses that value.
    X_test, y_test = map(array, zip(*[
        ([node], data['community'] == 'Administrator')
        for node, data in network.nodes(data=True)
        if data['role'] == 'Member'
    ]))
    return DataSet(
        X_train, y_train,
        X_test, y_test,
        network)
class SpektralRule(nn.Module):
    """Graph-convolution layer: normalizes the adjacency matrix once with
    the spectral rule and then applies aggregate -> linear -> activation."""

    def __init__(self, A, input_units, output_units, activation='tanh'):
        super(SpektralRule, self).__init__()
        self.linear_layer = nn.Linear(input_units, output_units)
        nn.init.xavier_normal_(self.linear_layer.weight)
        # Pick the nonlinearity; anything unrecognized falls back to identity.
        self.activation = {'relu': nn.ReLU(), 'tanh': nn.Tanh()}.get(activation, nn.Identity())
        # One-off spectral normalization: A_hat = D^-1/2 (A + I) D^-1/2,
        # where I adds self loops and D is the degree matrix of A + I.
        with_self_loops = (A + torch.eye(A.shape[1])).to(torch.double)
        inv_sqrt_degree = torch.diag(torch.pow(torch.sum(with_self_loops, dim=0), -0.5), 0)
        self.A_hat = torch.matmul(torch.matmul(inv_sqrt_degree, with_self_loops), inv_sqrt_degree)
        self.A_hat.requires_grad = False  # fixed, non-trainable constant

    def forward(self, X):
        """Aggregate neighbour features, project, apply the nonlinearity."""
        neighbourhood = torch.matmul(self.A_hat, X)
        projected = self.linear_layer(neighbourhood.to(torch.float))
        return self.activation(projected).to(torch.double)
class FeatureModel(nn.Module):
    """Class is used to apply the spektral rule and calculate the convolutions."""
    # NOTE: after __init__ runs, self.initial_input_size no longer holds the
    # constructor argument -- it is mutated inside the loop and ends up as
    # the width of the LAST hidden layer.  HybridModel reads it to size the
    # classifier head, so do not "fix" this without updating HybridModel.
    def __init__(self, A, hidden_layer_config, initial_input_size):
        super(FeatureModel, self).__init__()
        # self.hidden_layer_config = hidden_layer_config
        self.moduleList = list() # List to keep track of convolutional layers
        self.initial_input_size = initial_input_size # Define this here so it can be changed downstream
        for input_size, activation in hidden_layer_config:
            # Define the requested number of convolutions
            self.moduleList.append(SpektralRule(A, self.initial_input_size, input_size, activation))
            # Change the input size to the previous layer's input size for the next iteration
            self.initial_input_size = input_size
        # Create a sequential model from the input hidden layer configuration
        self.sequentialModule = nn.Sequential(*self.moduleList)
    def forward(self, X):
        feature_output = self.sequentialModule(X) # Apply the sequential model
        return feature_output
class LogisticRegressor(nn.Module):
    """Single linear layer followed by a sigmoid: a binary classifier head."""

    def __init__(self, input_units, output_units):
        super(LogisticRegressor, self).__init__()
        # Attribute names are kept (state_dict compatibility).
        self.Linear = nn.Linear(input_units, output_units, bias=True)
        nn.init.xavier_normal_(self.Linear.weight)
        self.sigmoid = nn.Sigmoid()

    def forward(self, X):
        # Cast to float32 for the linear layer, then squash to (0, 1).
        return self.sigmoid(self.Linear(X.to(torch.float)))
class ClassifierModel(nn.Module):
    """Thin wrapper that builds the final LogisticRegressor head."""

    def __init__(self, input_size, output_size):
        super(ClassifierModel, self).__init__()
        # Attribute name kept for state_dict compatibility.
        self.classifier = LogisticRegressor(input_units=input_size, output_units=output_size)

    def forward(self, X):
        return self.classifier(X)
class HybridModel(nn.Module):
    """
    Final model used to train and predict.
    """
    # Combines the graph-convolution feature extractor with the logistic
    # classifier head.  NOTE: FeatureModel mutates its initial_input_size
    # attribute during construction, so after building it the attribute
    # holds the width of the LAST hidden layer -- that is the value used to
    # size the classifier below.
    def __init__(self, A, hidden_layer_config, initial_input_size, output_nodes):
        super(HybridModel, self).__init__()
        self.featureModel = FeatureModel(A, hidden_layer_config, initial_input_size)
        # This parameter will be updated with the last layer's input size
        self.featureModelOutputSize = self.featureModel.initial_input_size
        self.classifier = ClassifierModel(self.featureModelOutputSize, output_nodes)
        self.featureModelOutput = None
    def forward(self, X):
        # Cache the node embedding so callers can inspect it after a pass.
        outputFeature = self.featureModel(X)
        classified = self.classifier(outputFeature)
        self.featureModelOutput = outputFeature
        return classified
def train(model, epochs, criterion, optimizer, features):
    """Train *model* full-batch, one node at a time, for *epochs* epochs.

    Prints the average per-epoch loss, saves the weights to ./gcn.pth and
    plots the loss curve.
    NOTE(review): reads the module-level globals X_train_flattened and
    y_train rather than taking them as parameters -- only callable from
    this script's __main__ block.
    """
    cumLoss = 0
    losses = list()
    for j in range(epochs):
        two_loss = 0
        for i, node in enumerate(X_train_flattened):
            # Forward pass - get prediction for relevant node only
            output = model(features)[node]
            # Get the label for the node
            ground_truth = torch.reshape(y_train[i], output.shape)
            # For every mini-batch during training we need to explicitly set the gradients to zero
            # before backpropagation because PyTorch accumulates gradients on subsequent backward passes
            optimizer.zero_grad()
            # Calculate loss
            loss = criterion(output, ground_truth)
            # print("loss: ", loss.data)
            two_loss += loss.item()
            # Backpropagation
            loss.backward()
            # Perform parameter update based on the current gradient
            optimizer.step()
        losses.append(two_loss)
        cumLoss += two_loss
    print('avg loss: ', cumLoss / epochs)
    # Save model
    torch.save(model.state_dict(), "./gcn.pth")
    plt.plot(losses)
def test(model, features, X_test_flattened):
    """Run the trained model on the test nodes.

    Returns a list of boolean tensors (one per test node); True means the
    sigmoid output was >= 0.5.
    NOTE(review): `correct` is never used and the commented-out reload
    lines could be removed.
    """
    # model = HybridModel(A, hidden_layer_config, identity.shape[1])
    # model.load_state_dict(torch.load("./gcn.pth"))
    model.eval()
    correct = 0
    masked_output = list()
    for i, node in enumerate(X_test_flattened):
        output = model(features)[node]
        masked_output.append(output.ge(0.5))
    return masked_output
if __name__ == '__main__':
    # Load data
    zkc = load_karate_club()
    X_train_flattened = torch.flatten(torch.from_numpy(zkc.X_train))
    X_test_flattened = torch.flatten(torch.from_numpy(zkc.X_test))
    y_train = torch.from_numpy(zkc.y_train).to(torch.float)
    # Initial Transformation
    A = to_numpy_matrix(zkc.network)
    A = torch.from_numpy(np.array(A))
    identity = torch.eye(A.shape[1])
    identity = identity.to(torch.double)
    identity.requires_grad = False
    # Use node distances as features additional to the identity matrix
    # (distance to node 0, the administrator, and node 33, the instructor).
    X_2 = np.zeros((A.shape[0], 2))
    node_distance_instructor = nx.shortest_path_length(zkc.network, target=33)
    node_distance_administrator = nx.shortest_path_length(zkc.network, target=0)
    for node in zkc.network.nodes():
        X_2[node][0] = node_distance_administrator[node]
        X_2[node][1] = node_distance_instructor[node]
    X_2 = torch.cat((identity, torch.from_numpy(X_2)), 1)
    X_2.requires_grad = False
    # Model configuration
    hidden_layer_config = [(4, 'tanh'),
                           (2, 'tanh')]
    output_nodes = 1 # We're only trying to predict between 2 classes
    model = HybridModel(A, hidden_layer_config, X_2.shape[1], output_nodes)
    output = model(X_2)
    # print(zkc.y_test)
    # print(output)
    criterion = nn.BCELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    # featureoutput = None
    train(model, 10000, criterion, optimizer, X_2)
    after = None
    masked = test(model, X_2, X_test_flattened)
    masked = [i.item() for i in masked]
    print(masked)
    test_gt = torch.from_numpy(zkc.y_test)
    test_gt = [i.item() for i in test_gt]
    counter = 0
    tp = 0
    fp = 0
    fn = 0
    tn = 0
    # Confusion-matrix tallies over the test nodes.
    # NOTE(review): the loop variable below rebinds `masked`, and the
    # `is True` / `is False` checks rely on .item() yielding Python bools.
    correct = zip(masked, test_gt)
    for (masked, gt) in list(correct):
        if masked == gt and masked is True:
            tp += 1
        if masked == gt and masked is False:
            tn += 1
        if masked is False and gt is True:
            fn += 1
        if masked is True and gt is False:
            fp += 1
    accuracy = (tp + tn) / (tp+fp+fn+tn)
    precision = tp/(tp+fp)
    recall = tp/(tp+fn)
    print('accuracy ', accuracy)
    print('precision ', precision)
    print('recall ', recall)
| true
|