blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
cc001ebeea64f8ffee05e0218af1dcca0d8603ed | Python | Seetha1231/datastucture | /queue.py | UTF-8 | 218 | 3.375 | 3 | [] | no_license | class queue :
    def __init__(self):
        # Backing list; index 0 holds the newest element.
        self.ar=[]
    def enqueue(self,n):
        # Insert at the front so the oldest item drifts to the end.
        # Returns the queue length after insertion.
        self.ar.insert(0,n)
        return len(self.ar)
    def dequeue(self):
        # FIFO: pop() removes the last element, i.e. the oldest one.
        # Returns (value, remaining length), or (-1, 0) when empty.
        try:
            return (self.ar.pop(),len(self.ar))
        except IndexError:
            # Empty queue: signal with a sentinel instead of raising.
            return (-1,0)
| true |
25a521c324fe0f7d47e09f57cd879a31dc0a8218 | Python | Denpeer/TweetMatch | /WebServer/App/data/tweet_mining.py | UTF-8 | 5,005 | 2.953125 | 3 | [] | no_license | import tweepy
from tweepy import Cursor
import unicodecsv
from unidecode import unidecode
import sys
import os
# The following 3 functions simulate a progress bar in the terminal output
# title = string shown before progress bar
# 0 <= x <= 100 amount of progress made
def startProgress(title):
    """Print the header of a 40-character progress bar and reset the global cursor."""
    global progress_x
    # Draw the empty bar, then backspace (chr(8)) to just inside the '['.
    sys.stdout.write("%s: [%s]%s" % (title, "-" * 40, chr(8) * 41))
    sys.stdout.flush()
    progress_x = 0
def progress(x):
    """Advance the bar to x percent (0 <= x <= 100) by printing only the delta."""
    global progress_x
    filled = int(x * 40 // 100)
    delta = filled - progress_x
    sys.stdout.write("#" * delta)
    sys.stdout.flush()
    progress_x = filled
def endProgress():
    """Fill the remainder of the bar and close it with ']' plus a newline."""
    remaining = 40 - progress_x
    sys.stdout.write("#" * remaining + "]\n")
    sys.stdout.flush()
def downloadTweets(tweetData):
    """Download up to 1000 recent tweets for each hard-coded politician and
    write one CSV row per non-retweet tweet to the file named by tweetData.

    Requires a keys.dat file in the current working directory holding four
    lines: consumer key, consumer secret, access key, access secret.
    """
    # Read authentication keys from .dat file
    print("trying to find twitter authentication keys from: "+ os.getcwd() + "/keys.dat")
    keys = open("keys.dat","r")
    # Authentication and connection to Twitter API.
    # [:-1] strips the trailing newline from each key line.
    consumer_key = keys.readline()[:-1]
    consumer_secret = keys.readline()[:-1]
    access_key = keys.readline()[:-1]
    access_secret = keys.readline()[:-1]
    # Close authentication file
    keys.close()
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # Usernames whose tweets we want to gather.
    users = ["realDonaldTrump","tedcruz","LindseyGrahamSC","SpeakerRyan","BarackObama","GovGaryJohnson","BernieSanders","HillaryClinton","DrJillStein"]
    with open(tweetData, 'wb') as file:
        writer = unicodecsv.writer(file, delimiter = ',', quotechar = '"')
        # Write header row.
        writer.writerow(["politician_name",
                         "politician_username",
                         "tweet_text",
                         "tweet_retweet_count",
                         "tweet_favorite_count",
                         "tweet_hashtags",
                         "tweet_hashtags_count",
                         "tweet_urls",
                         "tweet_urls_count",
                         "tweet_user_mentions",
                         "tweet_user_mentions_count",
                         "tweet_by_trump"])
        # For each Twitter username in the users array
        for user in users:
            # Gather info specific to the current user.
            user_obj = api.get_user(user)
            user_info = [user_obj.name,
                         user_obj.screen_name]
            startProgress("Downloading tweets from: " + user)
            # Maximum amounts of tweets to retrieve
            max_tweets = 1000;
            # Count the amount of tweets retrieved
            count = 0;
            # Get 1000 most recent tweets for the current user.
            for tweet in Cursor(api.user_timeline, screen_name = user).items(max_tweets):
                # Show progress
                progress(count/(max_tweets/100))
                # Increase count for tweets
                count += 1
                # Remove all retweets.
                if tweet.text[0:3] == "RT ":
                    continue
                # Get info specific to the current tweet of the current user.
                tweet_info = [unidecode(tweet.text),
                              tweet.retweet_count,
                              tweet.favorite_count]
                # Below entities are stored as variable-length dictionaries, if present.
                hashtags = []
                hashtags_data = tweet.entities.get('hashtags', None)
                if(hashtags_data != None):
                    for i in range(len(hashtags_data)):
                        hashtags.append(unidecode(hashtags_data[i]['text']))
                urls = []
                urls_data = tweet.entities.get('urls', None)
                if(urls_data != None):
                    for i in range(len(urls_data)):
                        urls.append(unidecode(urls_data[i]['url']))
                user_mentions = []
                user_mentions_data = tweet.entities.get('user_mentions', None)
                if(user_mentions_data != None):
                    for i in range(len(user_mentions_data)):
                        user_mentions.append(unidecode(user_mentions_data[i]['screen_name']))
                # Label column: 1 when the tweet was authored by Trump.
                tweet_by_trump = 0
                if(user_obj.screen_name=='realDonaldTrump'):
                    tweet_by_trump = 1
                more_tweet_info = [', '.join(hashtags),
                                   len(hashtags),
                                   ', '.join(urls),
                                   len(urls),
                                   ', '.join(user_mentions),
                                   len(user_mentions),tweet_by_trump]
                # Write data to CSV.
                writer.writerow(user_info + tweet_info + more_tweet_info)
            endProgress()
            print("Wrote tweets by %s to CSV." % user)
if __name__ == "__main__":
downloadTweets('tweets.csv') | true |
1954245c80d333896cd285713521a9c6a28882ec | Python | cse031sust02/my-python-playground | /oop/polymorphism.py | UTF-8 | 3,861 | 4.53125 | 5 | [] | no_license | # What is Polymorphism? :
# =============================
#
# Polymorphism is derived from two Greek words: poly(many) and
# morphs(forms). So polymorphism means "many forms".
# Polymorphism gives a way to use a class exactly like its parent so
# there’s no confusion with mixing types. But each child class can
# define its own unique behavior and still share the same behavior
# of its parent class.
# More details/discussions :
# =============================
#
# - https://www.youtube.com/watch?v=P1vH3Pfw6BI
# - https://stackoverflow.com/a/11502482/3158021
# - https://www.edureka.co/blog/polymorphism-in-python/
# - https://www.programiz.com/python-programming/polymorphism
# Example :
# ===========
#
# The classic example is the Shape class and all the classes that can
# inherit from it (square, circle, etc). With polymorphism, each of
# these classes will have different underlying data. By making the
# class responsible for its code as well as its data, we can achieve
# polymorphism. Every class would have its own Draw() function and
# the client code could simply do:
# >> shape.Draw()
# src : https://stackoverflow.com/a/1031385/3158021
# Polymorphism with Functions and Objects:
# ===========================================
#
# We can create a function that can take any object, allowing for
# polymorphism.
# Let’s take an example by creating a function which will take an
# object and will do something to do that object. Here we are passing
# objects of diferrent classes but still able to call the same method
# as the Classes have that method.
class Dog:
    """Toy class used to demonstrate duck-typed polymorphism."""
    _DESCRIPTION = "I am a Dog!"
    def desc(self):
        # Same announcement as before, sourced from a class constant.
        print(self._DESCRIPTION)
class Cat:
    """Toy class used to demonstrate duck-typed polymorphism."""
    _DESCRIPTION = "I am a Cat!"
    def desc(self):
        # Same announcement as before, sourced from a class constant.
        print(self._DESCRIPTION)
def announce(animal_obj):
    """Invoke the object's own desc(); any class defining desc() works."""
    getattr(animal_obj, "desc")()
dog1 = Dog()
cat1 = Cat()
announce(dog1)
announce(cat1)
# Polymorphism with class methods:
# =================================
# We can use the concept of polymorphism while creating class methods.
# Python allows different classes to have methods with the same name.
#
# The below example shows that, Python is using these class methods in
# a way without knowing exactly what class type each of these objects
# is. That is, Python is using these methods in a polymorphic way.
class Bangladesh:
    """Country demo class; shares method names with Qatar to show polymorphism."""
    _NAME_MSG = "Hi! I am bangladesh"
    _CAPITAL_MSG = "My capital is Dhaka"
    def say_name(self):
        print(self._NAME_MSG)
    def capital(self):
        print(self._CAPITAL_MSG)
class Qatar:
    """Country demo class; shares method names with Bangladesh to show polymorphism."""
    _NAME_MSG = "Hi! I am Qatar"
    _CAPITAL_MSG = "My capital is Doha"
    def say_name(self):
        print(self._NAME_MSG)
    def capital(self):
        print(self._CAPITAL_MSG)
bd = Bangladesh()
qt = Qatar()
for country in (bd, qt):
country.say_name()
country.capital()
# Polymorphism with Inheritance:
# ===================================
# Polymorphism can be carried out through inheritance, with subclasses
# making use of base class methods or overriding them.
# Method Overriding :
# ----------------------
# In OOP, Method overriding is the ability to allows a subclass to
# provide a specific implementation of a method that is already
# provided by its parent class.
# Polymorphism allows us to access these overridden methods that have
# the same name as the parent class.
class Parent:
    """Base class for the method-overriding demo."""
    def __init__(self, name):
        self.name = name
    def say_name(self):
        print("Hi! I am the {}".format(self.name))
    def show(self):
        # Overridden by Child to demonstrate polymorphic dispatch.
        print("I am the parent")
class Child(Parent):
    """Subclass overriding show() while inheriting say_name() unchanged."""
    def show(self): # <- method overriding
        print("I am the child")
p = Parent("Abu Hasan")
p.say_name()
p.show()
c = Child('Hasan Ahmed')
c.say_name()
c.show()
# Method Overloading :
# -----------------------
# Some other OOP languages have the Method Overloading feature, where
# two or more methods in one class have the same method name but
# different parameters. Python does not support method overloading.
# We can however try to achieve similar feature using *args or
# optional arguments. | true |
b4c76c0b05ec1aea23c122d6b2f1b148ef0f8d49 | Python | AartiBhagtani/Algorithms | /ctci/trees/Least_Common_Ancestor.py | UTF-8 | 637 | 3.78125 | 4 | [] | no_license | # Least common ancestor
# problem 4.8
class Node:
    """Minimal binary-tree node: a value plus left/right child links."""
    def __init__(self, data):
        self.val = data
        self.left = self.right = None
def LCA(node, data1, data2):
    """Return the lowest common ancestor of data1 and data2 in the tree
    rooted at node, or None when neither value occurs.

    Standard single-pass LCA: if only one of the values is present, the
    node holding it is returned.
    """
    if node is None:
        return None
    # A node matching either target is the LCA candidate for its subtree.
    if node.val == data1 or node.val == data2:
        return node
    left = LCA(node.left, data1, data2)
    right = LCA(node.right, data1, data2)
    if left and right:
        # Targets found in different subtrees: this node is the ancestor.
        return node
    # Otherwise propagate whichever side (if any) found a target.
    return left if left is not None else right
root = Node(1)
root.left = Node(2)
root.left.left = Node(3)
# root.left.right = Node(7)
# root.right = Node(3)
print(LCA(root, 1, 2).val)
| true |
1248c203cc7292631559b564bd11ca06aa83884c | Python | vgswn/AI | /ai/Codes/Project/first/mlhello.py | UTF-8 | 178 | 2.515625 | 3 | [] | no_license | from sklearn import tree
# Toy scikit-learn example: fit a decision tree on four samples and
# predict the class of one unseen sample.
# NOTE(review): feature columns look like [weight, texture-code] -- confirm.
features=[[140,1],[130,1],[150,0],[170,0]]
labels=[0,0,1,1]
clf=tree.DecisionTreeClassifier()
clf=clf.fit(features,labels)
# Predict and print the class of an unseen sample.
print(clf.predict([[120, 0]]))
ebabb3b644cd7f4adf7eeaad49133c9d5bbd89b3 | Python | Sniper970119/MemoryAssistInPython | /src/Server/SystemTools/ConfFileRead/configFileRead.py | UTF-8 | 1,252 | 2.5625 | 3 | [
"MIT",
"ICU"
] | permissive | # -*- coding:utf-8 -*-
from src.Server.Conf.config import *
from src.Server.SystemTools.ConfFileRead.Tools import readConfigFile
from src.Server.SystemTools.ConfFileRead.Tools import saveConfigFile
class ConfigFileRead():
    """Facade bundling read and write access to one INI-style config file."""
    def __init__(self, fileName='./conf/server.ini'):
        # One reader tool and one writer tool, both bound to the same file.
        self.readConfigFileTools = readConfigFile.ReadConfigFile(fileName=fileName)
        self.saveConfigFileTools = saveConfigFile.SaveConfigFile(fileName=fileName)
        pass
    def readFile(self, configMainName, configSubName):
        """
        Read a value from the config file.
        :param configMainName: config section (main) name
        :param configSubName: config option (sub) name
        :return: the value of the config entry
        """
        message = self.readConfigFileTools.readConfigFile(configMainName=configMainName, configSubName=configSubName)
        return message
    def saveFile(self, configMainName, configSubName, value):
        """
        Save a value to the config file.
        :param configMainName: config section (main) name
        :param configSubName: config option (sub) name
        :param value: the value to store
        :return:
        """
        self.saveConfigFileTools.saveConfigFile(configMainName=configMainName, configSubName=configSubName, value=value)
| true |
8f25326be0b603ab2cfa97f3c8f12b7982863f67 | Python | clovery410/mycode | /python/chapter-2/lab8-rlist-1.py | UTF-8 | 856 | 3.53125 | 4 | [] | no_license | class Rlist(object):
    class EmptyList(object):
        # Sentinel type representing the empty list; its length is 0.
        def __len__(self):
            return 0
    # Shared singleton sentinel marking the end of every Rlist.
    empty = EmptyList()
    def __init__(self, first, rest=empty):
        # first: value stored at this node; rest: another Rlist or Rlist.empty.
        self.first = first
        self.rest = rest
def rlist_to_list(rlist):
    """Take an RLIST and returns a Python list with the same elements.
    >>> rlist = Rlist(1, Rlist(2, Rlist(3, Rlist(4))))
    >>> rlist_to_list(rlist)
    [1, 2, 3, 4]
    >>> rlist_to_list(Rlist.empty)
    []
    """
    # Base case: the shared sentinel marks the end of the chain.
    if rlist is Rlist.empty:
        return []
    # Prepend the head to the recursively converted tail.
    return [rlist.first] + rlist_to_list(rlist.rest)
if __name__ == "__main__":
import doctest
doctest.testmod()
| true |
544e4608219caabd9d33f1947787949764c389fd | Python | nolleh/leetcode | /algorithm/l2/17.letter-combinations-of-a-phone-number.py | UTF-8 | 841 | 3.25 | 3 | [] | no_license | class Solution:
def permu(self, ind, digits, ans, ds, mapping):
    """Depth-first backtracking: extend the partial string ds with every
    letter mapped to digits[ind]; completed strings are appended to ans."""
    if len(ds) == len(digits):
        ans.append(ds)
        return
    # Strings are immutable, so passing ds + letter is equivalent to the
    # original append-then-truncate dance.
    for letter in mapping[digits[ind]]:
        self.permu(ind + 1, digits, ans, ds + letter, mapping)
def letterCombinations(self, digits: str) -> "List[str]":
    """Return all letter strings spelled by a phone-keypad digit string
    (LeetCode 17); empty input yields an empty list.

    Fixes: the original annotation referenced `List` without importing it
    (NameError at definition time) -- quoting the annotation defers it.
    The redundant len(digits) == 1 special case is gone: the product
    handles it identically.
    """
    if not digits:
        return []
    mapping = {
        "2": "abc",
        "3": "def",
        "4": "ghi",
        "5": "jkl",
        "6": "mno",
        "7": "pqrs",
        "8": "tuv",
        "9": "wxyz",
    }
    from itertools import product  # local import keeps module deps unchanged
    # Cartesian product over each digit's letters reproduces the DFS order
    # of the recursive backtracking version without recursion overhead.
    return ["".join(combo) for combo in product(*(mapping[d] for d in digits))]
| true |
c1c96ec0c396544695181a0fb50a390866ede067 | Python | WaugZ/opencv_py_tutorial | /hello.py | UTF-8 | 579 | 2.625 | 3 | [] | no_license | import numpy as np
import cv2
import matplotlib.pyplot as plt
m = cv2.imread("C:\\Users\\38345\\Desktop\\map.jpg")
h, w = m.shape[:2]
# Downscale the image by half in each dimension before transforming.
m = cv2.resize(m, (int(w / 2), int(h / 2)))
h, w = m.shape[:2]
# plt.imshow(m)
# plt.show()
# Corner correspondences: map the 100x100 source square slightly inward.
src_pt = np.float32([[0, 0], [100, 0], [0, 100], [100, 100]])
# src_pt1 = np.array([[0, 0], [100, 0], [0, 100], [100, 100]])
dst_pt = np.float32([[5, 5], [90, 5], [5, 90], [90, 90]])
# dst_pt = src_pt
# Build the 3x3 perspective matrix and warp the whole image with it.
t = cv2.getPerspectiveTransform(src_pt, dst_pt)
dst = cv2.warpPerspective(m, t, (w, h))
cv2.imshow("dst", dst)
cv2.waitKey()
# print("Hello World.")
| true |
33d6dae491c3f96420da9cbf536846d61fd77935 | Python | ValeraB1100/lesson_for_python | /lwsson3/task5.py | UTF-8 | 1,391 | 4.03125 | 4 | [] | no_license | # 5. Программа запрашивает у пользователя строку чисел, разделенных пробелом. При нажатии Enter должна
# выводиться сумма чисел. Пользователь может продолжить ввод чисел, разделенных пробелом и снова нажать Enter.
# Сумма вновь введенных чисел будет добавляться к уже подсчитанной сумме. Но если вместо числа вводится специальный
# символ, выполнение программы завершается. Если специальный символ введен после нескольких чисел, то вначале нужно
# добавить сумму этих чисел к полученной ранее сумме и после этого завершить программу.
def func(sum, lst):
for i in range(len(lst)):
sum += int(lst[i])
return sum
sm = 0
a = input("введите числа через пробел: ").split()
while a != "q":
sm = func(sm, a)
print(sm)
a = input("введите числа через пробел или q для выхода: ").split()
if "q" in a:
a.remove("q")
sm = func(sm, a)
print(sm)
break | true |
75e1cde011aaa22700d67abe7fef4accd6c19f49 | Python | eleleung/automating-simulations | /gpar_tinker.py | UTF-8 | 838 | 2.515625 | 3 | [] | no_license | import sys
import random
dir_name = "/Users/EleanorLeung/Documents/thesis"
x_pos = 8
def change_velocity(particle_num: int, new_x: float, new_y: float, new_z: float):
    """Rewrite lines 2-3 of the environment's gpar.para with a new companion
    velocity and zeroed disk inclination, then append both lines to the
    run's params.txt log (under results_{x_pos})."""
    with open(f'{dir_name}/week9/envs/{particle_num}/gpar.para', 'r') as file:
        data = file.readlines()
    data[1] = f'{new_x} {new_y} {new_z} ### 3D velocity of a companion galaxy\n'
    data[2] = '0.0 0.0 ### Disk inclination angles (theta,phai)\n'
    # Write the edited parameter file back in place.
    with open(f'{dir_name}/week9/envs/{particle_num}/gpar.para', 'w') as file:
        file.writelines(data)
    # Keep an audit trail of every parameter change for this run.
    with open(f'{dir_name}/week9/envs/{particle_num}/results_{x_pos}/params.txt', 'a+') as file:
        file.writelines(data)
if __name__ == "__main__":
    # CLI: gpar_tinker.py <particle_num> <vx> <vy> <vz>
    # NOTE(review): values are passed through as strings; change_velocity
    # interpolates them into the file verbatim.
    change_velocity(
        particle_num=sys.argv[1],
        new_x=sys.argv[2],
        new_y=sys.argv[3],
        new_z=sys.argv[4]
    )
17bd1c3e8ce85c6f3caa9263feb11b023e416eed | Python | xuxingtiancai/python | /util/LinkedList.py | UTF-8 | 1,445 | 3.421875 | 3 | [] | no_license | __author__ = 'xuxing'
import unittest
#node
class ListNode:
def __init__(self, val):
self.val = val
self.next = None
def __str__(self):
return str(self.val)
#iter
class ListNodeIter():
def __init__(self, node):
self.node = node
def __iter__(self):
return self
def next(self):
current = self.node
try:
self.node = self.node.next
except:
raise StopIteration
return current
class LinkedList:
def __init__(self, values = []):
nodes = [ListNode(val) for val in values]
if len(nodes) == 0:
self.head = None
self.tail = None
self.len = 0
return
def connect(x, y):
x.next = y
return y
reduce(connect, nodes).next = None
self.head = nodes[0]
self.tail = nodes[-1]
self.len = len(nodes)
def __str__(self):
return ','.join(str(node) for node in iter(self))
def __len__(self):
return self.len
def __iter__(self):
return ListNodeIter(self.head)
def append(self, node):
if not node:
return
node.next = None
if not self.head:
self.head = node
self.tail = node
self.len = 1
else:
self.tail.next = node
self.len += 1
def remove(self, node):
pass
| true |
0c6f1e1d02ce7483578d8fd9f78f3739a2c083cb | Python | aitorlomu/SistemasGestores | /EjerciciosPython2_AitorLopez/1.py | UTF-8 | 581 | 3.640625 | 4 | [] | no_license | a=float(input('Introduce el valor de a '))
b=float(input('Introduce el valor de b '))
c=float(input('Introduce el valor de c '))
d=float(input('Introduce el valor de d '))
e=float(input('Introduce el valor de e '))
f=float(input('Introduce el valor de f '))
comp = a * e - b * d
if comp != 0 :
x = (e * c - b * f) / comp
y = (a * f - d * c) / comp
print('La solucion al sistema es x= %d e y= %d' % (x, y))
else :
m = d / a
if m * c == f :
print('El sistema tiene infinitas soluciones')
else:
print('El sistema no tiene soluciones')
| true |
a5b373a99b5a7cffdaf9eefd39b92e6aafc56917 | Python | jklynch/mothur-evaluate-ml | /evaluate_svm.py | UTF-8 | 7,769 | 2.6875 | 3 | [] | no_license | """
usage: python evaluate_svm.py shared-file-path design-file-path
"""
import argparse
import numpy as np
import matplotlib.pylab as pylab
import sklearn.svm
import sklearn.preprocessing
import sklearn.grid_search
import sklearn.cross_validation
import sklearn.metrics
import mothur_files
def evaluate_svm():
    """Entry point: load the mothur shared/design files named on the command
    line, standardize OTU frequencies, and grid-search four SVM kernels."""
    argparser = argparse.ArgumentParser()
    argparser.add_argument("shared_file_path", help="<path to shared file>")
    argparser.add_argument("design_file_path", help="<path to design file>")
    args = argparser.parse_args()
    print("shared file path: {0.shared_file_path}".format(args))
    print("design file path: {0.design_file_path}".format(args))
    shared_data = mothur_files.load_shared_file(args.shared_file_path)
    design_data = mothur_files.load_design_file(args.design_file_path)
    scaler = sklearn.preprocessing.StandardScaler()
    # the scaler returns a copy by default
    X = scaler.fit_transform(shared_data.otu_frequency)
    y = design_data.class_number_for_row[:,0]
    # Human-readable class labels ordered by class number.
    y_labels = [design_data.class_number_to_name[n] for n in sorted(design_data.class_number_to_name.keys())]
    # Hyper-parameter grids shared by the kernel evaluations below.
    C_range = 10.0 ** np.arange(-3, 3)
    gamma_range = 10.0 ** np.arange(-5, -3)
    degree_range = np.arange(1, 5)
    coef0_range = np.arange(-3.0, 3.0)
    support_vector_machine(X, y, y_labels, "linear", dict(C=C_range), shared_data)
    support_vector_machine(X, y, y_labels, "rbf", dict(gamma=gamma_range, C=C_range), shared_data)
    support_vector_machine(X, y, y_labels, "poly", dict(C=C_range, degree=degree_range, coef0=coef0_range), shared_data)
    support_vector_machine(X, y, y_labels, "sigmoid", dict(C=C_range, coef0=coef0_range), shared_data)
"""
This function fits a SVM model but no feature selection is done here. This
is really just to determine the classification performance.
"""
def support_vector_machine(X, y, y_labels, kernel, param_grid, shared_data):
    """Grid-search an SVC with the given kernel on a stratified half of the
    data, report classification metrics on the held-out half, and (for the
    linear kernel only) run SVM-RFE feature ranking."""
    # Hold out half the samples (stratified) for the final report.
    sss = sklearn.cross_validation.StratifiedShuffleSplit(
        y, test_size=0.5
    )
    train_index, test_index = next(iter(sss))
    X_train = X[train_index, :]
    X_test = X[test_index, :]
    y_train = y[train_index]
    y_test = y[test_index]
    # 10-fold CV grid search over the kernel's hyper-parameters.
    clf = sklearn.grid_search.GridSearchCV(
        sklearn.svm.SVC(kernel=kernel),
        param_grid=param_grid,
        verbose=False
    )
    clf.fit(
        X_train,
        y_train,
        #cv = sklearn.cross_validation.LeaveOneOut(len(train_index))
        cv=10
    )
    print("Best parameters set found on development set:")
    print('')
    print(clf.best_estimator_)
    print('')
    print("Detailed classification report for kernel {}:".format(kernel))
    print('')
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print('')
    y_true, y_pred = y_test, clf.predict(X_test)
    print(sklearn.metrics.classification_report(y_true, y_pred, target_names=y_labels))
    print('')
    # RFE needs linear weights (coef_), so only the linear kernel qualifies.
    if kernel == 'linear':
        rfe(clf.best_estimator_, X_test, y_test, shared_data.otu_column_names)
def evaluate_linear_svm(X, y):
    """Score a linear SVM over a grid of C values with k-fold CV and plot
    score versus C.

    BUG FIX: fold membership is now taken from k_fold_indices (row index
    mod k) instead of observation_indices; the original comparison
    `observation_indices != fold` made every "test fold" a single sample.
    """
    print("y.shape {}".format(y.shape))
    # k-fold cross validation (k = 5)
    k = 5
    # use random permutations of indices to select training and test sets
    observation_indices = np.array(np.arange(y.shape[0]))
    permuted_observation_indices = np.random.permutation(y.shape[0])
    print("observation_indices.shape {}".format(observation_indices.shape))
    test_set_size = int(y.shape[0] / k)
    print("test set size {}".format(test_set_size))
    # fold labels, eg 3-fold labels for 12 elements: [0 1 2 0 1 2 0 1 2 0 1 2]
    k_fold_indices = np.mod(observation_indices, k)
    print("k_fold_indices {} {}".format(k_fold_indices.shape, k_fold_indices))
    # here is the list of Cs we will try
    C_list = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
    score_for_C = np.zeros((k * len(C_list), 2))
    n = -1
    for C in C_list:
        for fold in np.arange(k):
            training_indices = permuted_observation_indices[k_fold_indices != fold]
            testing_indices = permuted_observation_indices[k_fold_indices == fold]
            svc = sklearn.svm.SVC(C=C, kernel='linear')
            svc.fit(X[training_indices, :], y[training_indices])
            score = svc.score(X[testing_indices, :], y[testing_indices])
            print('C:{} linear svm score: {}'.format(C, score))
            n += 1
            score_for_C[n, 0] = C
            score_for_C[n, 1] = score
    # plot results
    pylab.plot(score_for_C[:, 0], score_for_C[:, 1])
    pylab.show()
def rfe(trained_svm, X, y, otu_column_names):
    """Recursive feature elimination for a fitted *linear* SVM: repeatedly
    refit and drop the OTU with the smallest summed squared weight, then
    print the surviving features in reverse elimination order (the last
    feature eliminated is the most important).

    NOTE(review): the classifier is refit in-place on shrinking feature
    sets, so the caller's model coefficients are clobbered.
    """
    remaining_otu_list = np.arange(len(otu_column_names))
    removed_feature_list = []
    while len(remaining_otu_list) > 0:
        trained_svm.fit(X[:, remaining_otu_list], y)
        # Rank features by the sum over classes of squared weights.
        w_squared = (trained_svm.coef_**2).sum(axis=0)
        w_squared_min_ndx = np.argmin(w_squared)
        otu_to_remove_ndx = remaining_otu_list[w_squared_min_ndx]
        otu_to_remove = otu_column_names[otu_to_remove_ndx]
        remaining_otu_list = np.delete(remaining_otu_list, w_squared_min_ndx)
        removed_feature_list.append(otu_to_remove)
    # Last-eliminated feature first: strongest features lead the report.
    removed_feature_list.reverse()
    print('features ranked by linear SVM-RFE:')
    print(' n OTU')
    # Only the top 50 features are reported.
    for n, otu_name in enumerate(removed_feature_list[:50]):
        print('{:2d} {}'.format(n, otu_name))
def rfe_(X, y):
    """Fit a single RBF SVM and dump its attributes (exploratory stub).

    NOTE(review): `cv` is computed but never used, and sklearn's SVC has no
    `score_` attribute, so the 'classifier score' line would raise
    AttributeError -- confirm intent before relying on this function.
    """
    cv = sklearn.cross_validation.StratifiedKFold(y=y, n_folds=10)
    rfesvm = sklearn.svm.SVC(
        kernel='rbf',
        C=100.0,
        gamma=1e-5,
    )
    rfesvm.fit(X, y)
    print("SVM classifier: {}".format(rfesvm))
    print('classifier score: {}'.format(rfesvm.score_))
    print("support_vectors_.shape: {}".format(rfesvm.support_vectors_.shape))
    print("support_.shape: {}".format(rfesvm.support_.shape))
    print("n_support_: {}".format(rfesvm.n_support_))
    print("dual_coef_.shape: {}".format(rfesvm.dual_coef_.shape))
    print("coef_.shape: {}".format(rfesvm.coef_.shape))
if __name__ == '__main__':
evaluate_svm()
| true |
406966b45e33e0908ef84983f4f67ade0a64a7a7 | Python | Manaether/AdventOfCode | /2020/04/main.py | UTF-8 | 2,472 | 2.875 | 3 | [] | no_license | import time
import re
ecl_value = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
def validateECL(value):
    """Return True if value is one of the seven allowed eye colours."""
    # Set literal: O(1) membership and no dependency on the mutable
    # module-level ecl_value list.
    return value in {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}
def validatePID(value):
    """Return True if value is exactly nine ASCII digits.

    fullmatch replaces the manual ^...$ anchors, and the explicit bool
    keeps the return type consistent with the other validators instead of
    leaking a Match object.
    """
    return re.fullmatch(r"[0-9]{9}", value) is not None
def validateEYR(value):
    """Expiration year: 2020-2030 inclusive."""
    return 2020 <= int(value) <= 2030
def validateHCL(value):
    """Hair colour: '#' followed by exactly six lowercase hex digits.

    BUG FIX: the original used .match(), which anchors only at the start
    and therefore accepted trailing garbage such as '#123abcf'; fullmatch
    requires the whole string to match.
    """
    return re.fullmatch(r"#[0-9a-f]{6}", value) is not None
def validateBYR(value):
    """Birth year: 1920-2002 inclusive."""
    return 1920 <= int(value) <= 2002
def validateIYR(value):
    """Issue year: 2010-2020 inclusive."""
    return 2010 <= int(value) <= 2020
def validateHGT(value):
    """Height: 150-193 cm or 59-76 in; any other suffix is invalid."""
    if value.endswith("cm"):
        return 150 <= int(value.split("cm")[0]) <= 193
    if value.endswith("in"):
        return 59 <= int(value.split("in")[0]) <= 76
    return False
required_params = {
"ecl": validateECL,
"pid": validatePID,
"eyr": validateEYR,
"hcl": validateHCL,
"byr": validateBYR,
"iyr": validateIYR,
"hgt": validateHGT
}
def parsePassports(inputs):
    """Split raw input lines into passport dicts; a blank line ("\n")
    separates consecutive records. Mirrors the original behaviour of
    appending the (possibly empty) trailing record at end of input."""
    passports = []
    record = {}
    for line in inputs:
        if line == "\n":
            # Blank line closes the current record.
            passports.append(record)
            record = {}
            continue
        for field in line.split(" "):
            pieces = field.split(":")
            record[pieces[0]] = pieces[1].strip()
    passports.append(record)
    return passports
def step1(inputs):
    """Count passports that contain every required field (values unchecked)."""
    passports = parsePassports(inputs)
    return sum(
        1 for passport in passports
        if all(field in passport for field in required_params)
    )
def step2(inputs):
    """Count passports where every required field is present AND valid."""
    total = 0
    for passport in parsePassports(inputs):
        checks = (
            field in passport and required_params[field](passport[field])
            for field in required_params
        )
        if all(checks):
            total += 1
    return total
steps = [step1, step2]
def printResult(step, inputs):
    """Run step (1-based index into the module-level steps list) and print
    its result, or a failure message when inputs is None."""
    if inputs is None:
        print('Step{} failed !'.format(step))
        return
    print('Step{} result is {} !'.format(
        step,
        steps[step-1](inputs)
    ))
start = time.time()
input = open('./input.txt', 'r')
inputs = []
for line in input.readlines():
inputs.append(line)
input.close()
printResult(1, inputs)
printResult(2, inputs)
print("time : {} ms".format((time.time() - start)*1000))
| true |
37010e9ac49dc7c5a44540e4575af6b1f62eb129 | Python | WisChang005/technews_tw | /tests/crawlers/test_inside.py | UTF-8 | 961 | 2.75 | 3 | [
"MIT"
] | permissive | import json
import logging
import pytest
from technews.crawlers import inside
crawlers = inside.Inside()
@pytest.mark.parametrize("browser_page", [0, 1, 3, 5])
def test_inside_page_response(browser_page):
    # Live request against the Inside crawler at several page depths;
    # the payload must always expose these three top-level keys.
    news_data = crawlers.get_news(browser_page)
    _print_first_news_data(news_data)
    assert "timestamp" in news_data
    assert "news_page_title" in news_data
    assert "news_contents" in news_data
def test_inside_page_load_verification():
    # Requesting more pages must yield strictly more news items than page 1.
    page1_data = crawlers.get_news()
    page_more_data = crawlers.get_news(5)
    assert len(page1_data["news_contents"]) < len(page_more_data["news_contents"])
def _print_first_news_data(news_data):
    # Debug helper: log only the first news item, then stop.
    for news_i in news_data["news_contents"]:
        logging.debug(news_data["news_contents"][news_i])
        break
def test_news_content_to_json_file():
    # Refresh the on-disk sample fixture from a live crawl of 3 pages.
    news_data = crawlers.get_news(3)
    with open("tests/samples/inside_samples.json", "w") as f:
        json.dump(news_data, f, indent=2)
| true |
6746f49daedbe3f2ffe823f6ab7964e6cfc547b9 | Python | gothack329/sirius.py | /create_dict.py | UTF-8 | 294 | 2.71875 | 3 | [] | no_license | from itertools import product
import os
keywords = ['121','0']
lens = len(keywords)
dic = open('password.txt','a+')
for i in range(2,6):
outlist = list(product(keywords[:lens],repeat=i))
for j in outlist:
result = ''.join([v for v in j])
dic.write(result+'\n')
dic.close()
os._exit(1)
| true |
b86f32604e1297571f9673e248b911bb92215b3a | Python | mluzarow/lotto | /lotto.py | UTF-8 | 36,045 | 3.09375 | 3 | [] | no_license | from bs4 import BeautifulSoup # For making the soup
import urllib2 # For comms
import turtle # For drawing
import os # Dealing with the OS
import sys # Exit
import logging # Error logging
import re # Minor text parsing
from colorama import init
from colorama import Fore, Back, Style
#region Defines
VERSION_MAJOR = 0
VERSION_MINOR = 1
VERSION_SMALL = 0
STATES = list ()
WEB_PATH = "http://www.lottonumbers.com/%s-results-%d.asp"
APP_DATA_FOLDER = "Lotto Data"
APP_DATA_MASTER = "Master"
#endregion Defines
# Print help text
def printHelp ():
    """Write the command-line argument menu to stdout."""
    menu = (
        "Arguments:",
        " --ss Show all Downloaded sets.",
        " --ls Show all data in all sets.",
        " --ds Download more data sets.",
        " --ff Find Frequency of numbers in sets.",
        " --fa Find average of numbers in sets.",
        " --fw Find coupling data about numbers in sets.",
        "-h, --help Show this help message.",
        "-q, --quit Quit the program.\n\n",
    )
    for line in menu:
        print (line)
#region Classes
class LottoSource (object):
    """Describes one state's lotto archive: display name, URL code, and the
    list of years with available drawings."""
    def __init__ (self, state, code, years):
        self.State, self.Code, self.Years = state, code, years
# Class for storing data on a set of lotto data
class LottoSet (object):
    """One year of lottery drawings for a single state.

    NOTE(review): elsewhere in this module self.numbers holds LottoNumber
    objects, yet the min/max helpers compare elements directly -- confirm
    the intended element type before relying on them.
    """
    def __init__ (self, state=None, year=None, numbers=None):
        self.state = state
        self.year = year
        self.numbers = numbers
    def getLargestNumber (self):
        """Largest value in self.numbers, or None when the list is empty."""
        if len (self.numbers) <= 0:
            return None
        # Seeding with 0 reproduces the original accumulator's behaviour
        # (values below 0 can never win).
        return max ([0] + list (self.numbers))
    def getSmallestNumber (self):
        """Smallest value in self.numbers, or None when the list is empty."""
        if len (self.numbers) <= 0:
            return None
        # 100 is the original sentinel: values above it can never win.
        return min ([100] + list (self.numbers))
# Class for storing data on specific numbers draw on a day
class LottoNumber (object):
    """A single drawing: calendar position plus the drawn numbers.

    extra holds the bonus draw value when one exists for the game.
    """
    def __init__ (self, date=None, month=None, numbers=None, extra=None):
        self.date, self.month = date, month
        self.numbers, self.extra = numbers, extra
#endregion Classes
#region Analysis
#####################################################################################################################
## Makes a dictionary of all valid lotto numbers paired with their frequency of occurance. ##
## ##
## -> List lotto : The list of all LottoSet classes from the save file and from page requests this session. ##
## -> Bool extra : Flag for including extra shot numbers in the analysis. Default False. ##
## ##
## <- Dict dic : Dictionary of lotto numbers and their frequency of occurance. ##
#####################################################################################################################
def findNumberFrequency (lotto, extra=False):
    """Map each drawn lotto number to how often it occurs across all sets.

    lotto : list of LottoSet objects; each set's .numbers is a list of
            draws whose .numbers attribute holds the drawn integers.
    extra : documented as "include extra shot numbers", but NOTE(review)
            the original never implemented it, so it is still ignored here.

    dict.get replaces the Python-2-only dict.has_key(), which makes the
    function runnable under Python 3 as well.
    """
    dic = dict ()
    for lotto_set in lotto:
        for draw in lotto_set.numbers:
            for num in draw.numbers:
                dic[num] = dic.get (num, 0) + 1
    return (dic)
#####################################################################################################################
## Prints number frequencies in a small horizontal list. ##
## ##
## -> Dict dic : Dictionary of lotto numbers and their frequency of occurance. ##
## ##
## <- None ##
#####################################################################################################################
def printNumberFrequency (dic):
    """Pretty-print number:frequency pairs in a grid of 10-row columns.

    Python 2 only: relies on print statements (trailing-comma form) and
    dict.iteritems().
    """
    print "Displaying frequency of numbers in the current data sets.\n"
    l = list ()
    i = 0
    t = list ()
    # Chunk the formatted "[num : freq]" cells into columns of ten.
    for (key, value) in dic.iteritems ():
        t.append ("[%2.d : %2.d]" % (key, value))
        i += 1
        if i == 10:
            l.append (t)
            t = list ()
            i = 0
    size = len (t)
    # Fill in the rest of the spots with none
    # (pads the final partial column so the grid below stays rectangular).
    for i in range (0, 10 - size):
        t.append (None)
    l.append (t)
    size = len (l)
    # Emit row-major across the column lists; None cells print as blanks.
    for j in range (0, 10):
        for i in range (0, size):
            if l[i][j] == None:
                print "",
            else:
                print l[i][j] + " ",
        print "\n"
#####################################################################################################################
## Makes a dictionary of all lotto dates paired with the average value of that date. ##
## ##
## -> List lotto : The list of all LottoSet classes from the save file and from page requests this session. ##
## ##
## <- Dict dic : Dictionary of lotto dates and their number averages. ##
#####################################################################################################################
def findAverageValues (lotto):
    """Map each draw date to the integer mean of its drawn numbers.

    lotto : list of LottoSet objects loaded from the save file / web.

    Returns a dict keyed by "<state> <month> <date> <year>" whose value
    is the floor of the draw's average number.
    """
    dic = dict ()
    for lotto_set in lotto:
        for nums in lotto_set.numbers:
            # Floor division keeps the original Python 2 integer-average
            # behaviour; len() generalises the previously hard-coded 6.
            total = sum (nums.numbers) // len (nums.numbers)
            key = lotto_set.state + " " + nums.month + " " + str (nums.date) + " " + str (lotto_set.year)
            dic[key] = total
    return (dic)
#####################################################################################################################
## Makes a large list regarding how different numbers are linked together. ##
## ##
## -> List lotto : The list of all LottoSet classes from the save file and from page requests this session. ##
## ##
## <- Dict dic : Dictionary of number pairs and their frequency of occurance. ##
#####################################################################################################################
def findWebbing (lotto):
    """Tally how often pairs of distinct numbers were drawn together.

    lotto : list of LottoSet objects.

    Returns a dict keyed by (low, high) number pairs with occurrence
    counts.  NOTE: every unordered pair within one draw is counted twice
    (once per ordering of the inner loops), matching the original
    behaviour that printWebbing expects.
    """
    dic = dict ()
    maxNumber = 53 # Biggest possible number (exclusive range bound)
    # Pre-seed every possible (low, high) pair so missing pairs read 0.
    for n1 in range (1, maxNumber):
        for n2 in range (n1 + 1, maxNumber):
            dic[(n1, n2)] = 0
    for lotto_set in lotto:
        for nums in lotto_set.numbers:
            for num in nums.numbers:
                ## For each number, check it against every other number in the numbers
                for checknum in nums.numbers:
                    # Normalise to (low val, high val)
                    comp = sorted ([num, checknum])
                    tup = (comp[0], comp[1])
                    # Same-number pairs (n, n) are absent from dic and skipped;
                    # "in" replaces the deprecated dict.has_key().
                    if tup in dic:
                        dic[tup] += 1
    return (dic)
#####################################################################################################################
## Prints a long list of the number pair and it's frequency of occurance. List is split in order to fit into ##
## the Windows console buffer. ==More== displays on the screen when the buffer is full. Pressing any button ##
## will continue printing and freely remove past values from the buffer. ##
## ##
## -> Dict dic : Dictionary of number pairs and their frequency of occurance. ##
## ##
## <- None ##
#####################################################################################################################
def printWebbing (dic):
    """Print each number pair and its frequency in console-sized columns.

    dic : dict mapping (low, high) pairs -> counts, from findWebbing().
    Pauses for a key press at the halfway point so the Windows console
    buffer can scroll.
    """
    iter = 0
    l = list ()   # completed columns
    t = list ()   # column currently being filled
    MAX_VAL = 270 # rows per printed column
    maxNumber = 53 # Biggest possible number
    for n1 in range (1, maxNumber):
        for n2 in range (n1 + 1, maxNumber):
            if dic.has_key ((n1, n2)):
                t.append ("(%2.d - %2.d) : %2.d" % (n1, n2, dic[(n1, n2)]))
                iter += 1
            # If iter is >= MAX_VAL, a new column should be started
            if iter >= MAX_VAL:
                # If column is not full, pad with Nones
                # NOTE(review): range(MAX_VAL - len(t), MAX_VAL) appends
                # len(t) Nones, not MAX_VAL - len(t); the counts happen to
                # keep the print loop in bounds -- confirm the padding math.
                if len(t) < MAX_VAL:
                    for i in range (MAX_VAL - len(t), MAX_VAL):
                        t.append (None)
                l.append (t)
                t = list ()
                iter = 0
    # Make sure the last non-filled column is appended to l and filled
    if len(t) < MAX_VAL:
        for i in range (MAX_VAL - len(t), MAX_VAL):
            t.append (None)
    l.append (t)
    # Print out columns
    for i in range (0, MAX_VAL):
        for j in range (0, len (l)):
            if l[j][i] == None:
                print "              ",
            else:
                print l[j][i] + "  ",
        #print ""
        # More at halfway point
        if i == MAX_VAL / 2:
            print "===================================== More ====================================="
            while (1):
                # Ask for the input and throw it away
                raw_input ()
                break
    print "\n"
#####################################################################################################################
## Uses findWeb data in order to draw relationships on a Turtle canvas. ##
## ##
## -> Dict dic : Dictionary of number pairs and their frequency of occurance. ##
## ##
## <- None ##
#####################################################################################################################
def drawWebbing (dic):
    """Draw a 10x5 grid of numbers on a Turtle canvas.

    dic : pair-frequency dict from findWebbing().
          NOTE(review): currently unused -- relationship lines are not
          drawn yet; only the number grid is rendered.
    """
    t = initTurtle (400, 400)
    xDiv = 200 / 10
    yDiv = 200 / 5
    # BUG FIX: the original initialised x and y to 200, so the "< 200"
    # loops never executed and nothing was drawn.  Start at the origin
    # (the inner loop already resets x to 0 after each row).
    x = 0
    y = 0
    n = 1
    while (y < 200):
        while (x < 200):
            t.penup ()
            t.setposition (x, y)
            t.pendown ()
            t.write (str (n), ("Arial", 12, "normal"))
            n += 1
            x += xDiv
        x = 0
        y += yDiv
    turtle.done ()
#endregion Analysis
#region File IO
#####################################################################################################################
## Check that save directory and master file exits. If not, make them. ##
## ##
## -> None ##
## ##
## <- Bool : If master file exists, return True. ##
## Bool : If master file does not exist, return False ##
#####################################################################################################################
def checkDatafile ():
    """Ensure the save directory and master file exist, creating them if needed.

    Returns True when the master file was already present, False when a
    fresh (empty) master had to be created.
    """
    master_path = APP_DATA_FOLDER + "/" + APP_DATA_MASTER
    # First or clean run: the data directory itself may be missing.
    if not os.path.exists (APP_DATA_FOLDER):
        os.makedirs (APP_DATA_FOLDER)
    if os.path.isfile (master_path):
        return (True)
    # Seed a brand new master; the "1234" tag marks it as holding no data.
    with open (master_path, 'w') as f:
        f.write ("1234") # Future writes will overwrite this data
    return (False)
#####################################################################################################################
## Read data in the master file into LottoSets and place into mew lotto list. ##
## ##
## -> None ##
## ##
## <- List lotto : The list of all LottoSet classes from this save file. ##
## NoneType : If master has tag "1234", returns None. ##
#####################################################################################################################
def readMaster ():
    """Parse the master save file into a list of LottoSet objects.

    Returns the list of LottoSets, or None when the master carries the
    fresh-file tag "1234" (i.e. no data has ever been saved).
    """
    first_line_check = True
    lotto = list ()
    line = "\n"
    with open (APP_DATA_FOLDER + "/" + APP_DATA_MASTER, 'r') as f:
        while line != "":
            lotto_set = LottoSet ()
            lotto_set.numbers = list ()
            # Read the set header ("<state> <year>") unless it is already buffered.
            if line == "\n":
                line = f.readline ()
            # The very first line of the file may carry the fresh-file tag.
            if first_line_check:
                first_line_check = False
                if line == "1234":
                    return (None)
            line = line.split (' ')
            lotto_set.state = line[0]
            lotto_set.year = int (line[1])
            line = f.readline ()
            # Draw lines run until the blank separator line.
            while line != '\n':
                num = LottoNumber ()
                # Format: "<date>|<month>:<n1>;<n2>;<n3>;<n4>;<n5>;<n6>;<extra>"
                line = line.split (':')
                top = line[0].split ('|')
                num.date = int (top[0].strip (' '))
                num.month = top[1].strip (' ')
                bottom = line[1].split (';')
                num.numbers = [int (bottom[0]), int (bottom[1]), int (bottom[2]), int (bottom[3]), int (bottom[4]), int (bottom[5])]
                # BUG FIX: the original tested `num.numbers.count < 7` (a bound
                # method compared to an int -- always False in Python 2) and
                # then crashed on int("None\n") for draws without an extra
                # shot.  Parse the seventh field explicitly instead.
                extra_field = bottom[6].strip () if len (bottom) > 6 else ""
                if extra_field == "" or extra_field == "None":
                    num.extra = None
                else:
                    num.extra = int (extra_field)
                # Add current numbers to set list
                lotto_set.numbers.append (num)
                # Read next line
                line = f.readline ()
            lotto.append (lotto_set)
            line = f.readline ()
    return (lotto)
#####################################################################################################################
## Overwrite the master file with all data currently in the lotto list. ##
## ##
## -> List lotto : The list of all LottoSet classes from the save file and from page requests this session. ##
## ##
## <- None ##
#####################################################################################################################
def writeMaster (lotto):
    """Overwrite the master save file with every LottoSet in memory.

    lotto : list of LottoSet objects (from the save file plus any page
            requests made this session) to persist.
    """
    with open (APP_DATA_FOLDER + "/" + APP_DATA_MASTER, 'w') as f:
        for lotto_set in lotto:
            # One header line per set: "<state> <year>"
            f.write ("%s %d\n" % (lotto_set.state, lotto_set.year))
            for num in lotto_set.numbers:
                # "<date>|<month>:" prefix, then each drawn number with a
                # trailing semicolon, then the extra-shot value.
                f.write ("%d|%s:" % (num.date, num.month))
                for drawn in num.numbers:
                    f.write (str (drawn) + ";")
                # The extra may be None; str() writes the literal "None".
                f.write (str (num.extra) + "\n")
            # Blank line separates sets.
            f.write ('\n')
#endregion File IO
#region Web
#####################################################################################################################
## Read spruced soup and place data into LottoSets, which are added to the lotto list. ##
## ##
## -> BeautifulSoup parsedHTML : Spruced soup. ##
## -> List lotto : The list of all LottoSet classes from the save file and for future data from this page. ##
## -> LottoSet set : A LottoSet set of lotto data that the numbers will go into. ##
## ##
## <- List lotto: The updated lotto list. ##
#####################################################################################################################
def parsePageData (parsedHTML, lotto, lottoSet):
try:
# Find all sets of lotto results
data = parsedHTML.find_all ("div", "results")
# Find all lotto result headers
# NTS I don't understand why Beautiful Soup cannot find tags with links in them; it's the only thing
# it seems to have trouble with. Using regex instead.
header_comp = re.compile (".*\-lotto\-result\-[0-9]+\-[0-9]+\-[0-9]+.asp")
header = parsedHTML.find_all (href=header_comp)
# Make a list for stroing all the lotto numbers
lottoNumberList = list ()
lottoSet.numbers = list ()
# Go through each set of lotto results
for set in data:
num = LottoNumber ()
num.numbers = list ()
# Find each value
set_r = set.find_all ("div", "result")
# Go through each value, parse it, add it to list
for s in set_r:
num.numbers.append (int (s.string))
if len (num.numbers) > 6:
# Move extra shot to extra slot and remove from numbers
num.extra = num.numbers[6]
num.numbers.remove (num.numbers[6])
lottoNumberList.append (num)
i = 0
for head in header:
# Split word byt spaces
head = head.text.split (' ')
# Iterate through all possible numeric superscripts and remove them
for s in ["st", "nd", "rd", "th"]:
head[1] = head[1].replace (s, "")
# Add date to numbers
lottoNumberList[i].date = int (head[1])
# Add month to numbers
lottoNumberList[i].month = head[2]
# Add numbers to set
lottoSet.numbers.append (lottoNumberList[i])
i += 1
# Add data set tp lotteSet
lotto.append (lottoSet)
return lotto
except Exception, e:
logging.warning (str(e.code))
#####################################################################################################################
## Gets the spruced up page data from the given link, sends to parsePageData. ##
## ##
## -> String url : The URL that will be followed. ##
## -> List lotto : The list of all LottoSet classes from the save file and for future data from this page. ##
## -> LottoSet set : A LottoSet set of lotto data that the numbers will go into. ##
## ##
## <- List : The updated lotto list. ##
#####################################################################################################################
def getPageData (url, lotto, set):
try:
# Follow URL
response = urllib2.urlopen (url)
# GET HTML page data
html = response.read ()
# Spruce up the soup
parsedHTML = BeautifulSoup (html, "lxml")
return (parsePageData (parsedHTML, lotto, set))
except urllib2.HTTPError, e: # HTTP Error
logging.warning ("HTTPError = %s" % str(e.code))
return (None)
except urllib2.URLError, e: # URL Error
logging.waring ("URLError = %s" % str(e.code))
return (None)
except Exception, e:
logging.warning ("Something happened: %s" % e)
return (None)
#endregion Web
#region Console Control
#region DS
#####################################################################################################################
## Asks the user which of the state's valid years they would like to download lotto data from. ##
## ##
## -> LottoSource ls : Data regarding the chosen state (name, link, year list). ##
## ##
## <- Tuple (String WEB_PATH, : Constructed string link for connecting to the requested page. ##
## LottoSet) : Lotto set data to be sent to be filled with number data ##
#####################################################################################################################
def dsGetYear (ls):
numYears = len (ls.Years)
print "The following arae all the valid years with lotta data for the state [%s]. Select a year from the list by entering the number to the left of the entry. You can also select %d in order to download all available data.\n" % (ls.State, numYears)
i = 0
for year in ls.Years:
print "[%d] %d" % (i, year)
i += 1
print "[%d] Download all\n" % numYears
while (1):
arg = getInput ()
# If input is not a digit, try again
if not arg.isdigit ():
continue
# If input is "download all"
if arg == str (numYears):
#download evertthing loop
pass
# Check if input is out of bounds
if int (arg) >= numYears or int (arg) < 0:
continue
return ((WEB_PATH % (ls.Code, ls.Years[int (arg)]), LottoSet (ls.State, ls.Years[int (arg)])))
#lotto = getPageData (WEB_PATH % (ls.Code, ls.Years[int (arg)]), lotto, LottoSet (ls.State, ls.Years[int (arg)]))
#####################################################################################################################
## Asks the user which of state from the STATES list they would like to download lotto data for. ##
## ##
## -> None ##
## ##
## <- Tuple (from dsGetYear) ##
#####################################################################################################################
def dsGetState ():
numStates = len (STATES)
print "The following are all the valid states with lotto data. Select a state from the list by entering the number to the left of the entry. You can also select %d in order to download all available data.\n" % numStates
i = 0
for state in STATES:
print "[%d] %s" % (i, state.State)
i += 1
print "[%d] Download all\n" % i
while (1):
arg = getInput ()
# If input is not a digit, try again
if not arg.isdigit ():
continue
# If input is "download all"
if arg == str (numStates):
#download evertthing loop
pass
# Check if input is out of bounds
if int (arg) >= numStates or int (arg) < 0:
continue
return (dsGetYear (STATES[int (arg)]))
#####################################################################################################################
## Asks the user if they wish the download data, getting input from getInput (). ##
## ##
## -> None ##
## ##
## <- Tuple (from dsGetState, dsGetYear) ##
#####################################################################################################################
def dsGetDownloadInfo ():
    """Ask whether to download more data; if yes, walk the state/year prompts.

    Returns the (url, LottoSet) tuple from dsGetState/dsGetYear, or None
    (implicit) when the user declines.
    """
    print "\nDownload additional data sets? [y/n]"
    while (1):
        # getInput(True) upper-cases the token, so y/yes match below.
        arg = getInput (True)
        # Check for Y or N
        if arg == "Y" or arg == "YES":
            return (dsGetState ())
        elif arg == "N" or arg == "NO":
            print "Not downloading...\n"
            return
        else:
            continue
#endregion DE
def listSets (lotto):
    """Print every loaded data set with all of its draws and numbers."""
    for set in lotto:
        print "%s %d" % (set.state, set.year)
        for nums in set.numbers:
            print nums.month + " " + str (nums.date) + " : " + str (nums.numbers) + " E " + str (nums.extra)
def showSets (lotto):
    """Print a numbered one-line summary ("<state> <year>") per data set."""
    i = 1
    for set in lotto:
        print "[%d] %s %d" % (i, set.state, set.year)
        i += 1
    print ""
#####################################################################################################################
## Gets data from the user via the console. ##
## ##
## -> Bool upperize : Flag for changing the input text to upper case before returning. Default to False. ##
## ##
## <- String args[0] : The input data from the user. ##
## NoneType : If argument is -h or --help, returns None. ##
#####################################################################################################################
def getInput (upperize = False):
    """Read one token of user input from the console.

    upperize : when True, upper-case the token before returning it.

    Returns the first whitespace-separated token, or None after the user
    asked for help.  -q or "quit" exits the program immediately.
    """
    # Get user input and split it by ' '
    args = raw_input (">")
    args = args.split(' ')
    # If the args list is empty, return None
    # NOTE(review): str.split(' ') never yields an empty list (empty input
    # gives ['']), so this guard cannot trigger.
    if len(args) == 0:
        return (None)
    # [-h] | [--help]
    if args[0] == "-h" or args[0] == "--help":
        printHelp ()
        return (None)
    # [-q] | [--quit]
    # NOTE(review): this matches the bare word "quit", not "--quit",
    # despite the comment above -- confirm the intended flag.
    elif args[0] == "-q" or args[0] == "quit":
        sys.exit (0)
    if upperize:
        args[0] = args[0].upper ()
    # Return the first phrase and throw out the rest
    return (args[0])
#endregion Console Control
#region Initialization
#####################################################################################################################
## Initialize Turtle for turtle drawing ##
## ##
## -> Int x : The x-ax-s size of the graphics window / canvas. ##
## -> Int y : The y-axis size of the graphics window / canvas. ##
## ##
## <- Turtle t : Graphics object storing graphics settings. ##
#####################################################################################################################
def initTurtle (x, y):
    """Create and configure a Turtle for drawing.

    x : width of the graphics window in pixels.
    y : height of the graphics window in pixels.

    Returns the configured Turtle object (fastest speed, hidden cursor).
    """
    # Wake the turtle
    t = turtle.Turtle ()
    # Max turtle speed
    t.speed (0)
    turtle.setup (x, y, 0, 0)
    # Create turtle playground
    #t.screen.screensize (x, y)
    #turtle.setworldcoordinates(0, y, x, 0)
    # Hide turtle head
    t.hideturtle ()
    # Hide turtle animations
    t.tracer ()
    return (t)
#####################################################################################################################
## Initializes state informations regarding links to lottonumbers.com using global variable STATES. ##
## ##
## -> None ##
## ##
## <- None ##
#####################################################################################################################
def initStates ():
    """Populate the global STATES list with every supported lotto source.

    Each LottoSource carries the display name, the lottonumbers.com URL
    code, and the list of years that site has data for.
    """
    STATES.append (LottoSource ("Illinois", "illinois-lotto", [2015, 2014, 2013, 2012, 2011, 2010, 2009]))
    STATES.append (LottoSource ("New-York", "new-york-lotto", [2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007,
                                                               2006, 2005, 2004, 2003, 2002, 2001, 2000, 1999, 1998,
                                                               1997, 1996, 1995, 1994, 1993, 1992, 1991, 1990, 1989,
                                                               1988, 1987, 1986, 1985, 1984, 1983, 1982, 1981, 1980,
                                                               1979, 1978]))
    STATES.append (LottoSource ("Texas", "lotto-texas", [2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007, 2006,
                                                         2005, 2004, 2003, 2002, 2001, 2000, 1999, 1998, 1997, 1996,
                                                         1995, 1994, 1993, 1992]))
    STATES.append (LottoSource ("Florida", "florida-lotto", [2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007,
                                                             2006, 2005, 2004, 2003, 2002, 2001, 2000, 1999, 1998,
                                                             1997, 1996, 1995, 1994, 1993, 1992, 1991, 1990, 1989,
                                                             1988]))
#####################################################################################################################
## Initializes important constants and makes sure the save file exists. ##
## ##
## -> None ##
## ##
## <- List lotto : The list of all LottoSet classes from the save file. ##
#####################################################################################################################
def initLotto ():
    """Load saved lotto data, creating the save file on a first run.

    Returns the list of LottoSet objects read from the master file
    (an empty list when no data has been saved yet).
    """
    # Check that the data file exists
    if not checkDatafile ():
        print "A valid master has not been found."
        print "Master file has been created.\n"
    # Check if master has data
    print "Master file found. Checking...\n"
    # Read master file
    lotto = readMaster ()
    print "Master has the following data sets:"
    # Master has no data, do nothing
    if lotto == None:
        lotto = list ()
        print "No data sets found.\n"
    # Master has data, print it
    else:
        i = 1
        for set in lotto:
            print "[%d] %s %d" % (i, set.state, set.year)
            i += 1
    return (lotto)
#endregion Initialization
# Main entry
if __name__ == "__main__":
init (autoreset=True) # Colorama init
# Print program version
print "Lotto thing - V%d.%d.%d\n" % (VERSION_MAJOR, VERSION_MINOR, VERSION_SMALL)
# Print the help menu
printHelp ()
# Initialize the States
initStates ()
# Read the saved data into memory and show user what exists
lotto = initLotto ()
# watchdog
while (1):
# Catch weird errors by printing help text
try:
# Get the user's input
arg = getInput ()
# Check which flag the user has entered.
# [-h] | [--help]
if arg == "-h" or arg == "--help":
printHelp ()
continue
# [-q] | [--quit]
elif arg == "-q" or arg == "quit":
sys.exit (0)
# [--ls] List Sets
elif arg == "--ls":
listSets (lotto)
elif arg == "--ss":
showSets (lotto)
# [--ds] Download Sets
elif arg == "--ds":
(path, set) = dsGetDownloadInfo ()
lotto = getPageData (path, lotto, set)
writeMaster (lotto)
elif arg == "--ff":
dic = findNumberFrequency (lotto)
printNumberFrequency (dic)
elif arg == "--fa":
dic = findAverageValues (lotto)
# NTS add better printing function later
for (key, value) in dic.iteritems ():
print "%s : %d" % (key, value)
elif arg == "--fw":
dic = findWebbing (lotto)
printWebbing (dic)
elif arg == "--drawWeb":
dic = findWebbing (lotto)
drawWebbing (dic)
else:
continue
except Exception, e:
print e | true |
52f6ea64a2941db45e3d40173a943797f4704e9b | Python | UBC-MDS/522-Workflows-Group-414 | /src/split_and_clean.py | UTF-8 | 4,922 | 3.4375 | 3 | [
"CC-BY-2.5",
"MIT"
] | permissive | # authors: Tejas Phaterpekar
# Date written: 01-25-2020
# This script takes in the ASD adults dataset.It then splits the data into training and test sets,
# before proceeding to clean missing values and erroneous column/values.
'''This script takes in the ASD adults dataset.
It then splits the data into training and test sets, before proceeding to clean missing values and erroneous column/values.
Usage: split_and_clean.py --adult_path=<adult_path>
Options:
--adult_path=<adult_path> : Relative file path for the adult_autism csv
'''
# import libraries/packages
from docopt import docopt
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from scipy.io import arff
# parse/define command line arguments here
opt = docopt(__doc__)
# define main function
def main(adult_path):
    """Load, split, clean and persist the ASD adult screening data.

    Reads the raw CSV at adult_path, drops missing/nonsense rows, splits
    80/20 into train and test, cleans features and target separately,
    and writes the four cleaned CSVs under data/clean-data/.

    Arguments:
        adult_path - (str) Relative file path for the adult_autism csv
    """
    print("working")
    autism_df = pd.read_csv(adult_path)
    # Introduce nan values for any nonsense values; then remove any rows containing these
    autism_df = autism_df.replace("?", np.nan)
    autism_df = autism_df.replace("[Oo]thers", np.nan, regex = True)
    autism_df = autism_df.dropna(axis = 0)
    # we filter out the 1 row that has an extreme age value of 380
    autism_df = autism_df.query("age < 120")
    # split the data (used random state for reproducibility)
    X_train, X_test, y_train, y_test = train_test_split(autism_df.drop(columns = ['austim']), autism_df['austim'], test_size = 0.2, random_state = 55)
    # Clean train and test independently to avoid leaking information.
    X_train = clean_feature_data(X_train)
    X_test = clean_feature_data(X_test)
    y_train = clean_target_data(pd.DataFrame(y_train))
    y_test = clean_target_data(pd.DataFrame(y_test))
    check_compatability(X_train, y_train)
    check_compatability(X_test, y_test)
    X_train.to_csv("data/clean-data/Xtrain-clean-autism-screening.csv", index = True)
    y_train.to_csv("data/clean-data/ytrain-clean-autism-screening.csv", index = True, header = True)
    X_test.to_csv("data/clean-data/Xtest-clean-autism-screening.csv", index = True)
    y_test.to_csv("data/clean-data/ytest-clean-autism-screening.csv", index = True, header = True)
# code for other functions & tests goes here
def clean_feature_data(feature_df):
    '''
    Takes in a feature df and cleans column names, value types, and corrects erroneous values

    Arguments:
        feature_df - (DataFrame) Feature dataframe

    Returns Cleaned Dataframe
    '''
    # Clean up column names
    feature_df.rename(columns = {'jundice':'jaundice', 'austim': 'autism', 'contry_of_res':"country_of_res"}, inplace = True)
    # Drop unecessary columns (will move this to a future script)
    # NOTE(review): the drop() result is discarded (no assignment, no
    # inplace=True), so these columns are NOT actually removed -- confirm intent.
    feature_df.drop(columns = ['age_desc', 'result', 'Class/ASD'])
    # Convert the AQ-score columns from strings to numbers.
    # NOTE(review): this loop only converts to numeric and prints the value
    # counts for inspection; it does not restrict values to 0/1.
    for column in feature_df:
        if "Score" in column:
            feature_df[column] = pd.to_numeric(feature_df[column])
            print(feature_df[column].value_counts())
            print("")
    # Changing appropriate columns from strings to numeric form
    feature_df['age'] = pd.to_numeric(feature_df['age'], downcast = 'integer')
    feature_df['result'] = pd.to_numeric(feature_df['result'], downcast = 'integer')
    # Correcting any string errors
    feature_df['relation'] = feature_df['relation'].replace("self", "Self")
    feature_df['relation'] = feature_df['relation'].str.replace("'","")
    feature_df['ethnicity'] = feature_df['ethnicity'].replace("'Middle Eastern '", "Middle Eastern")
    feature_df['ethnicity'] = feature_df['ethnicity'].replace("'South Asian'", "South Asian")
    feature_df['age_desc'] = feature_df['age_desc'].replace("'4-11 years'", "4-11 years")
    feature_df['age_desc'] = feature_df['age_desc'].replace("12-15 years", "12-16 years")
    feature_df['country_of_res'] = feature_df['country_of_res'].str.replace("'","")
    feature_df['country_of_res'] = feature_df['country_of_res'].replace("Viet Nam", "Vietnam")
    # (The age-outlier row is filtered before the split, in main.)
    return feature_df
def clean_target_data(target_df):
    '''
    Takes in a target df and fixes the dataset's misspelled column name in place

    Arguments:
        target_df - (DataFrame) Target dataframe

    Returns the same DataFrame with 'austim' renamed to 'autism'
    '''
    # Mutate in place, matching the original behaviour.
    column_fixes = {'austim': 'autism'}
    target_df.rename(columns = column_fixes, inplace = True)
    return target_df
def check_compatability(feature_df, target_df):
    '''
    Verifies that X and y have the same number of rows

    Arguments:
        feature_df - (DataFrame) Feature dataframe
        target_df - (DataFrame) Target dataframe

    Raises an AssertionError if the row counts differ
    '''
    n_feature_rows = feature_df.shape[0]
    n_target_rows = target_df.shape[0]
    assert n_feature_rows == n_target_rows, "X and y shapes don't match!"
    return
# call main function
if __name__ == "__main__":
main(opt["--adult_path"]) | true |
0fb2cec5b4c856028921de68155613b31edf1d21 | Python | greg008/PythonEPS | /W3R/String/4.py | UTF-8 | 483 | 4.28125 | 4 | [] | no_license | """
4. TO DO Write a Python program to get a string from a given string where all
occurrences of its first char have been changed to '$',
except the first char itself.
Sample String : 'restart'
Expected Result : 'resta$t'
"""
def change_str(string):
new_string = ""
for i in range(len(string)):
if i != 0 and string[i] == string[0]:
new_string += "$"
else:
new_string += string[i]
return new_string
print(change_str("restart")) | true |
7735980bbebbe524335fdfee3b902e885718fd81 | Python | AnkitKumar82/CodeCloneDetection | /GetFiles.py | UTF-8 | 473 | 2.765625 | 3 | [] | no_license | import os
import sys
import Config
def getAllFilesUsingFolderPath():
    """Return the full paths of every .java file in the configured folder.

    Reads the directory from Config.dirPath; returns an empty list when
    the folder does not exist.
    """
    folder_path = Config.dirPath
    if not os.path.exists(folder_path):
        return []
    return [
        os.path.join(folder_path, file_name)
        for file_name in os.listdir(folder_path)
        # Same extension test as before: the last dot-separated segment.
        if file_name.split(".")[-1] == "java"
    ]
| true |
dd0457808c44a3a163d63d93f8465f28d38cde8e | Python | anuragcs/PythonRailwayReservation | /tripdetail.py | UTF-8 | 2,992 | 2.75 | 3 | [] | no_license | def tripint():
## sizeofrec=20
## with open("C:\\datafile\\cba.dat",'wb') as file:
print("Ale Destinations")
print("1 for Dehradun to Amritsar")
print("2 for Delhi to Bengaluru")
print("3 for Jammu to Lucknow")
print("4 for Jaipur to Alwar")
print("5 for Amritsar to Delhi")
print("6 for Alwar to Bombay")
print("7 for Bengaluru to Delhi")
print("8 for Lucknow to Jammu")
option=int(input("Enter your choice of number for desired trip"))
if option==1:
print("Train name:Shatabdi EXPRESS")
print("cost for this trip is Rs1000")
print("Duration-21:00-6:00")
print("Available seats=60/120")
elif option==2:
print("Train name:Gatiman EXPRESS")
print("cost for this trip is Rs1200")
print("Duration-23:00-9:00")
print("Available seats=62/120")
elif option==3:
print("Train name:Rajdhani EXPRESS")
print("cost for this trip is Rs1500")
print("Duration-21:00-11:00")
print("Available seats=50/120")
elif option==4:
print("Train name:Garibrath EXPRESS")
print("cost for this trip is Rs1250")
print("Duration-20:00-4:00")
print("Available seats=64/120")
elif option==5:
print("Train name:Shatabdi EXPRESS")
print("cost for this trip is Rs1101")
print("Duration-21:30-6:30")
print("Available seats=80/120")
elif option==6:
print("Train name:Shatabdi EXPRESS")
print("cost for this trip is Rs1000")
print("Duration-16:00-2:00")
print("Available seats=36/120")
elif option==7:
print("Train name:Gatiman EXPRESS")
print("cost for this trip is Rs1800")
print("Duration-21:00-4:30")
print("Available seats=70/120")
elif option==8:
print("Train name:Pooja EXPRESS")
print("cost for this trip is Rs1600")
print("Duration-12:00-21:00")
print("Available seats=10/120")
with open("C:\\datafile\\cba.dat",'wb') as file:
file.write(option.encode())
##
## ans='y'
## while ans=='y' or ans=='Y':
## name=input("Enter your first name")
## name1=input("Ënter your last name")
## age=input("Enter your age")
## gen=input("Enter your gender")
## l=len(name)
## j=len(name1)
## h=len(gen)
## name=name+(sizeofrec-1)*' '
## name1=name1+(sizeofrec-1)*' '
## gen=gen+(sizeofrec-1)*' '
## age=age+(sizeofrec-1)*' '
## file.write(name.encode())
## file.write(name1.encode())
## file.write(age.encode())
## file.write(gen.encode())
## ans=input("Add more")
| true |
33fa5ec2fb3b20604d8383aadb2a48c11cb603c2 | Python | niejn/selenium_test | /re_help/lxml_parse1.py | UTF-8 | 222 | 2.515625 | 3 | [] | no_license | import requests
import lxml
from lxml import html
# Fetch the page and parse the HTML into an lxml element tree.
r = requests.get('http://gun.io')
tree = lxml.html.fromstring(r.content)
# NOTE(review): get_element_by_id returns a single element, so the loop
# below iterates that element's *children* -- confirm this is intended.
elements = tree.get_element_by_id('frontsubtext')
for el in elements:
    print(el.text_content())
7c49aae6e146897e498c73e89cdae01644ba0514 | Python | ap632638/python_codes | /listsort.py | UTF-8 | 636 | 3.34375 | 3 | [] | no_license | n=int(input("Enter total no. of elements for main list:"))
# Read the main list of n integers, then sort it ascending.
print("Enter ",n," elements in list:")
lmain=[]
lsub=[]
lsm=[]   # elements smaller than the pivot
lgr=[]   # elements greater than or equal to the pivot
lfinal=[]
for i in range(n):
    lmain.append(int(input()))
lmain.sort()
# Read the sub list used to partition the main list.
s=int(input("Enter total no. of elements for sub list:"))
print("Enter ",s," elements in sublist:")
for i in range(s):
    lsub.append(int(input()))
# Partition the sorted main list around the sublist's SECOND element.
# NOTE(review): only lsub[1] is used, so this raises IndexError when the
# sublist has fewer than two elements -- confirm the intended pivot.
for i in range(n):
    if lmain[i]<lsub[1]:
        lsm.append(lmain[i])
    else:
        lgr.append(lmain[i])
# Concatenate: smaller-than-pivot elements first, the rest after.
for i in range(len(lsm)):
    lfinal.append(lsm[i])
for i in range(len(lgr)):
    lfinal.append(lgr[i])
print("Sorted list according to sublist:")
lmain=lfinal
print(lfinal)
| true |
a7bce49cf81421adadcc3b938f1375eaeab4ff22 | Python | iansedano/codingame_solutions | /techi.io_courses/genetic_algorithms.py | UTF-8 | 4,595 | 3.8125 | 4 | [] | no_license | # GENETIC ALGORITHMS
import random
import sys
from answer import is_answer, get_mean_score # this is the string that the program is searching to match.
alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ !'."
def get_letter():
    """Return one random character from the module-level alphabet string."""
    return random.choice(alphabet)
def create_chromosome(size):
    """Build a random chromosome string of the given length."""
    return ''.join(get_letter() for _ in range(size))
# SCORING - comparing to answer
def get_score(chrom):
    """Fitness of a chromosome: fraction of positions matching the answer.

    NOTE(review): get_answer() is never imported in this file (only
    is_answer and get_mean_score are), so calling this raises NameError --
    confirm the intended import.  The divisor 20 also hard-codes a
    20-character answer length.
    """
    key = get_answer()
    # * compare the chromosome with the solution (how many character are in the correct position?)
    score = 0
    counter = 0
    for i in key:
        if key[counter] == chrom[counter]:
            score += 1
        counter += 1
    return (score/20)
def make_score_list(population):
    """Pair every chromosome with its fitness score, best first.

    Returns a list of [score, chromosome] pairs sorted by descending score.
    """
    scored = [[get_score(chrom), chrom] for chrom in population]
    return sorted(scored, key=lambda pair: pair[0], reverse=True)
# Selecting the chromosomes to pass on to next gen.
def selection(population):
    """Choose survivors: the top-scoring slice plus a random slice of the rest.

    population : list of chromosome strings.

    Returns the selected chromosomes (fitness scores stripped off).
    """
    GRADED_RETAIN_PERCENT = 0.3     # percentage of retained best fitting individuals
    NONGRADED_RETAIN_PERCENT = 0.2  # percentage of retained remaining individuals (randomly selected)
    selection = []
    # Individuals sorted best-first as [score, chromosome] pairs.
    scores = make_score_list(population)
    # Keep the best individuals.
    # FIX: use the named constants instead of re-duplicating the literals.
    graded_int_to_retain = int(len(scores) * GRADED_RETAIN_PERCENT)
    while graded_int_to_retain > 0:
        selected = scores.pop(0)
        selection.append(selected)
        graded_int_to_retain -= 1
    # Randomly keep some of the remaining individuals.
    nongraded_int_to_retain = int(len(scores) * NONGRADED_RETAIN_PERCENT)
    while nongraded_int_to_retain > 0:
        rand_int = random.randint(0, len(scores)-1)
        selection.append(scores.pop(rand_int))
        nongraded_int_to_retain -= 1
    # Strip the scores, keeping only the chromosomes.
    selected_c = list(map(lambda chromosome: chromosome[1], selection))
    return selected_c
# Chromosone Crossover
def crossover(parent1, parent2):
    """Breed a child from the first half of parent1 and the last half of parent2.

    Genes keep their positions; the child is simply the concatenation of
    the two halves.
    """
    cut1 = len(parent1) // 2
    cut2 = len(parent2) // 2
    return parent1[:cut1] + parent2[cut2:]
# Mutation
def mutation(chromosome):
    """Return the chromosome with one randomly chosen position replaced."""
    pos = random.randrange(len(chromosome))
    return chromosome[:pos] + get_letter() + chromosome[pos + 1:]
def create_population(pop_size, chrom_size):
    """Build the initial population: pop_size random chromosomes of
    chrom_size characters each."""
    return [create_chromosome(chrom_size) for _ in range(pop_size)]
def generation(population):
    """Produce the next generation from the current population.

    The fittest individuals survive selection and are bred (crossover +
    mutation) until the population regains its original size.

    BUG FIX: the original referenced undefined names (`children`,
    `population_size`), never used the selection result, and printed
    debug output -- it crashed with NameError before producing anything.
    """
    target_size = len(population)
    survivors = selection(population)
    next_population = list(survivors)
    # Fill the remaining slots with children bred from the survivors.
    while len(next_population) < target_size:
        ## crossover of two randomly selected parents
        parent1 = random.choice(survivors)
        parent2 = random.choice(survivors)
        child = crossover(parent1, parent2)
        ## mutation
        child = mutation(child)
        next_population.append(child)
    return next_population
def algorithm():
    """Run the GA until a chromosome satisfies is_answer(); logs mean score per generation."""
    # Chromosome length is read from stdin.
    chrom_size = int(input())
    population_size = 30
    # NOTE(review): population_size is local here, but generation() reads a
    # same-named variable from its own (module) scope — confirm a module-level
    # population_size exists, otherwise generation() raises NameError.
    # create the base population
    population = create_population(population_size, chrom_size)
    answers = []
    # while a solution has not been found :
    while not answers:
        ## create the next generation
        population = generation(population)
        score_list = make_score_list(population)
        # Mean fitness, written to stderr so stdout stays clean for the answer.
        mean = sum([s[0] for s in score_list])/len(score_list)
        print(mean, file=sys.stderr)
        ## check if a solution has been found
        for chrom in population:
            if is_answer(chrom):
                answers.append(chrom)
    print(answers[0])
| true |
cd185529b3ad31dbd992ec94d1c890def87e6741 | Python | ridho9/crypto-cli | /crypto_cli/util/filter_running.py | UTF-8 | 113 | 3.46875 | 3 | [] | no_license | from sys import stdin
# Echo stdin with alphabetic characters lowercased; everything else is
# passed through unchanged (no trailing newline is added).
for ch in stdin.read():
    print(ch.lower() if ch.isalpha() else ch, end="")
| true |
23ea4b775a740c756da6851920b868e259e0ce6e | Python | franklsf95/spider | /parsing/parser.py | UTF-8 | 7,355 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
from bs4 import BeautifulSoup
import dateutil.parser
from db.models import *
import logging
from urllib.parse import urlparse
# Set up logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
class LinkedInParser(object):
    """
    A LinkedIn HTML parser.

    Extracts a person's overview, experiences, educations, certifications and
    skills from a profile page and persists them through the given SQLAlchemy
    session (objects are added/flushed, not committed).
    """
    def __init__(self, session, html):
        """
        :param session: an active SQLAlchemy session
        :param html: string
        """
        self.session = session
        self.soup = BeautifulSoup(html, 'lxml')
    def extract_person_overview(self):
        """
        Extracts a person's overview information from HTML.
        :return: a Person object
        """
        person = Person()
        # NOTE(review): assumes a #topcard section exists; sec.find raises
        # AttributeError when it is missing — confirm all scraped pages have it.
        sec = self.soup.find(id='topcard')
        # Get the name
        name_tag = sec.find(id='name')
        if name_tag is not None:
            person.name = name_tag.text.strip()
        # Get the headline
        headline_tag = sec.find(class_='headline')
        if headline_tag is not None:
            person.headline = headline_tag.text.strip()
        # Get the locality
        dm = sec.find(id='demographics')
        if dm is not None:
            locality_tag = dm.find(class_='locality')
            if locality_tag is not None:
                person.locality = locality_tag.text.strip()
        # Done.  (Added to the session but not flushed here.)
        self.session.add(person)
        return person
    def extract_person_experiences(self, person):
        """
        Extracts a person's experiences information from HTML.
        :param person: a Person object
        :return: None
        """
        sec = self.soup.find(id='experience')
        if sec is None:
            return
        items = sec.find_all('li', class_='position')
        for li in items:
            # Create a position object
            exp = PersonExperience()
            exp.person = person
            # Get title
            title = self._extract_li_subitem(li, Title, 'item-title')
            exp.title = title
            # Get company
            company = self._extract_li_subitem(li, Company, 'item-subtitle')
            exp.company = company
            # Get date range
            start, end = self._extract_date_range(li)
            exp.start_date = start
            exp.end_date = end
            # Get description
            exp.description = self._extract_description(li)
            # Done.
            self.session.add(exp)
            self.session.flush()
    def extract_person_educations(self, person):
        """
        Extracts a person's education information from HTML.
        :param person: a Person object
        :return: None
        """
        sec = self.soup.find(id='education')
        if sec is None:
            return
        items = sec.find_all('li', class_='school')
        for li in items:
            # Create an education object
            edu = PersonEducation()
            edu.person = person
            # Get school
            school = self._extract_li_subitem(li, School, 'item-title')
            edu.school = school
            # Get degree (plain text, not a linked entity like school/company)
            degree_tag = li.find(class_='item-subtitle')
            if degree_tag is not None:
                edu.degree = degree_tag.text.strip()
            # Get date range
            start, end = self._extract_date_range(li)
            edu.start_date = start
            edu.end_date = end
            # Get description
            edu.description = self._extract_description(li)
            # Done.
            self.session.add(edu)
            self.session.flush()
    def extract_person_certifications(self, person):
        """
        Extracts a person's certifications information from HTML.
        :param person: a Person object
        :return: None
        """
        sec = self.soup.find(id='certifications')
        if sec is None:
            return
        items = sec.find_all('li', class_='certification')
        for li in items:
            # Create a person-certification object
            pc = PersonCertification()
            pc.person = person
            # Get certification
            cert = self._extract_li_subitem(li, Certification, 'item-title')
            pc.certification = cert
            # Get company
            company = self._extract_li_subitem(li, Company, 'item-subtitle')
            pc.company = company
            # Get date range
            start, end = self._extract_date_range(li)
            pc.start_date = start
            pc.end_date = end
            # Get description
            pc.description = self._extract_description(li)
            # Done.
            self.session.add(pc)
            self.session.flush()
    def extract_person_skills(self, person):
        """
        Extracts a person's skills information from HTML.
        :param person: a Person object
        :return: None
        """
        sec = self.soup.find(id='skills')
        if sec is None:
            return
        items = sec.find_all('li', class_='skill')
        for li in items:
            # Create a person-skill object
            ps = PersonSkill()
            ps.person = person
            # Get skill (the whole <li> is the skill element, hence class_=None)
            skill = self._extract_li_subitem(li, Skill, None)
            ps.skill = skill
            # Done.
            self.session.add(ps)
            self.session.flush()
    def _extract_li_subitem(self, li, model, class_):
        # TODO: Account for class_='external-link'
        """
        Find or create a <model> instance from given HTML element.
        :param li: HTML element
        :param model: Class of model (Title, Company)
        :param class_: CSS class, or None for the li element itself.
        :return: a <model> object
        """
        if class_ is None:
            h = li
        else:
            h = li.find(class_=class_)
        if h is None:
            return None
        a = h.find('a')
        if a is None:
            # No link: plain-text name, no canonical URL.
            name = h.text.strip()
            url = None
        else:
            name = a.text.strip()
            url = a['href']
            # Keep only the path component so the URL is host-independent.
            url = urlparse(url).path
        # Find or create <instance>
        instance = model.get_or_create(self.session, name=name, url=url)
        return instance
    @staticmethod
    def _extract_date_range(li):
        """
        Extract the start date and end date from given HTML element.
        :param li: HTML element
        :return: (date, date), date can be None
        """
        s = li.find(class_='date-range')
        if s is None:
            return None, None
        times = s.find_all('time')
        if times is None:
            return None, None
        def parse_date(i):
            # Returns None when the i-th <time> tag is absent or unparseable.
            if len(times) <= i:
                return None
            t = times[i].text.strip()
            try:
                result = dateutil.parser.parse(t)
            except ValueError as e:
                result = None
                log.error('[Date range] {}'.format(e))
            return result
        start = parse_date(0)
        end = parse_date(1)
        return start, end
    @staticmethod
    def _extract_description(li):
        """
        Extract the description field from given HTML element.
        :param li: HTML element
        :return: string
        """
        p = li.find(class_='description')
        if p is None:
            return None
        return p.text.strip()
| true |
c8ac272a2da0047b45c690d0228f68b8e750edb4 | Python | LewPeng97/NLP-Daily | /word2vec.py | UTF-8 | 850 | 2.515625 | 3 | [] | no_license | import warnings
# Silence gensim's UserWarnings before it is imported below.
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
import logging
import gensim
from gensim.models import word2vec
from gensim import models
def main():
    """Train a 250-dimensional word2vec model on the legal-dictionary corpus and save it."""
    data_path_1 = './data/dict/法律通用词典.txt'
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    # One sentence per line of the input file.
    sentences = word2vec.LineSentence(data_path_1)
    model = word2vec.Word2Vec(sentences, size=250,min_count=1)
    # Save the model for later reuse.
    model.save('./data/word2vec.model')
def load(path):
    """Load a saved word2vec model and print the similarity of two sample words."""
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    model = models.Word2Vec.load(path)
    # Cosine similarity between the two sample (Chinese) words.
    res = model.similarity('守法','法院')
    print(res)
if __name__ == "__main__":
    # main()
    # Only run the demo lookup by default; uncomment main() above to retrain.
    load('./data/word2vec.model')
ee61dd2fc2ccb52f6fa9f9021910a421ebb5591d | Python | mliu420/abides | /cli/ticker_plot.py | UTF-8 | 6,955 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | import ast
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd
import os
import sys
from joblib import Memory
# Auto-detect terminal width.
pd.options.display.width = None
pd.options.display.max_rows = 1000
pd.options.display.max_colwidth = 200
# Initialize a persistent memcache.
mem_hist = Memory(cachedir='./.cached_plot_hist', verbose=0)
mem_sim = Memory(cachedir='./.cached_plot_sim', verbose=0)
# We could use some good argparse parameters here instead of
# a bunch of constants to fiddle with.
PRINT_HISTORICAL = False
PRINT_BASELINE = False
PRINT_VOLUME = False
# Time-of-day plotting window; note it spans only one microsecond after the open.
BETWEEN_START = pd.to_datetime('09:30').time()
BETWEEN_END = pd.to_datetime('09:30:00.000001').time()
# Linewidth for plots.
LW = 2
# Used to read and cache real historical trades.
#@mem_hist.cache
def read_historical_trades(file, symbol):
    """Load real historical trades for one symbol, restricted to regular market hours.

    The pickle is expected to be indexed by symbol (outer level) with a
    datetime index underneath, as the .loc/between_time chain requires.
    """
    print ("Historical trades were not cached. This will take a minute.")
    trades = pd.read_pickle(file, compression='bz2')
    return trades.loc[symbol].between_time('9:30', '16:00')
# Used to read and cache simulated trades.
# Doesn't actually pay attention to symbols yet.
#@mem_sim.cache
def read_simulated_trades(file, symbol):
    """Load simulated trades from a pickled DataFrame, keeping only LAST_TRADE events.

    The Event column holds 'SIZE,$PRICE' strings which are split into float
    PRICE and SIZE columns.  (The symbol argument is unused, as before.)
    """
    print ("Simulated trades were not cached. This will take a minute.")
    trades = pd.read_pickle(file, compression='bz2')
    trades = trades[trades['EventType'] == 'LAST_TRADE']
    if len(trades) <= 0:
        print ("There appear to be no simulated trades.")
        sys.exit()
    parts = trades['Event'].str.split(',')
    trades['PRICE'] = [price for size, price in parts]
    trades['SIZE'] = [size for size, price in parts]
    trades['PRICE'] = trades['PRICE'].str.replace('$','').astype('float64')
    trades['SIZE'] = trades['SIZE'].astype('float64')
    return trades
# Main program starts here.
# Usage: ticker_plot.py <symbol> <simulated DataFrame pickle> [agent trade log]
if len(sys.argv) < 3:
    print ("Usage: python ticker_plot.py <Ticker symbol> <Simulator DataFrame file> [agent trade log]")
    sys.exit()
# TODO: only really works for one symbol right now.
symbol = sys.argv[1]
sim_file = sys.argv[2]
agent_log = None
if len(sys.argv) >= 4: agent_log = sys.argv[3]
print ("Visualizing simulated {} from {}".format(symbol, sim_file))
df_sim = read_simulated_trades(sim_file, symbol)
if PRINT_BASELINE:
    # Baseline run is expected next to the simulated one, in a '<dir>_baseline' folder.
    baseline_file = os.path.join(os.path.dirname(sim_file) + '_baseline', os.path.basename(sim_file))
    print (baseline_file)
    df_baseline = read_simulated_trades(baseline_file, symbol)
# Take the date from the first index and use that to pick the correct historical date for comparison.
if PRINT_HISTORICAL:
    hist_date = pd.to_datetime(df_sim.index[0])
    hist_year = hist_date.strftime('%Y')
    hist_date = hist_date.strftime('%Y%m%d')
    # Trade files after 2014 carry an extra 'm' in their name.
    hist_file = "/nethome/cb107/emh/data/trades/trades_{}/ct{}_{}.bgz".format(hist_year, 'm' if int(hist_year) > 2014 else '', hist_date)
    print ("Visualizing historical {} from {}".format(symbol, hist_file))
    df_hist = read_historical_trades(hist_file, symbol)
plt.rcParams.update({'font.size': 12})
# Use to restrict time to plot.
df_sim = df_sim.between_time(BETWEEN_START, BETWEEN_END)
print ("Total simulated volume:", df_sim['SIZE'].sum())
if PRINT_BASELINE:
    df_baseline = df_baseline.between_time(BETWEEN_START, BETWEEN_END)
    print ("Total baseline volume:", df_baseline['SIZE'].sum())
if PRINT_VOLUME:
    # Two stacked panels: price on top, volume below.
    fig,axes = plt.subplots(figsize=(12,9), nrows=2, ncols=1)
else:
    fig,ax = plt.subplots(figsize=(12,9), nrows=1, ncols=1)
    axes = [ax]
# Crop figures to desired times and price scales.
#df_hist = df_hist.between_time('9:46', '13:30')
# For smoothing...
#hist_window = 100
#sim_window = 100
hist_window = 1
sim_window = 1
if PRINT_HISTORICAL:
    df_hist = df_hist.between_time(BETWEEN_START, BETWEEN_END)
    print ("Total historical volume:", df_hist['SIZE'].sum())
    df_hist['PRICE'] = df_hist['PRICE'].rolling(window=hist_window).mean()
    df_sim['PRICE'] = df_sim['PRICE'].rolling(window=sim_window).mean()
    df_hist['PRICE'].plot(color='C0', grid=True, linewidth=LW, ax=axes[0])
    df_sim['PRICE'].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
    axes[0].legend(['Historical', 'Simulated'])
    if PRINT_VOLUME:
        df_hist['SIZE'].plot(color='C0', linewidth=LW, ax=axes[1])
        df_sim['SIZE'].plot(color='C1', linewidth=LW, alpha=0.9, ax=axes[1])
        axes[1].legend(['Historical Vol', 'Simulated Vol'])
elif PRINT_BASELINE:
    # For nanosecond experiments, turn it into int index.  Pandas gets weird if all
    # the times vary only by a few nanoseconds.
    rng = pd.date_range(start=df_sim.index[0], end=df_sim.index[-1], freq='1N')
    df_baseline = df_baseline[~df_baseline.index.duplicated(keep='last')]
    df_baseline = df_baseline.reindex(rng,method='ffill')
    df_baseline = df_baseline.reset_index(drop=True)
    df_sim = df_sim[~df_sim.index.duplicated(keep='last')]
    df_sim = df_sim.reindex(rng,method='ffill')
    df_sim = df_sim.reset_index(drop=True)
    df_baseline['PRICE'].plot(color='C0', grid=True, linewidth=LW, ax=axes[0])
    df_sim['PRICE'].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
    axes[0].legend(['Baseline', 'With Impact'])
else:
    #df_sim['PRICE'] = df_sim['PRICE'].rolling(window=sim_window).mean()
    # For nanosecond experiments, turn it into int index. Pandas gets weird if all
    # the times vary only by a few nanoseconds.
    rng = pd.date_range(start=df_sim.index[0], end=df_sim.index[-1], freq='1N')
    df_sim = df_sim[~df_sim.index.duplicated(keep='last')]
    df_sim = df_sim.reindex(rng,method='ffill')
    df_sim = df_sim.reset_index(drop=True)
    df_sim['PRICE'].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])
    axes[0].legend(['Simulated'])
    if PRINT_VOLUME:
        df_sim['SIZE'].plot(color='C1', linewidth=LW, alpha=0.9, ax=axes[1])
        axes[1].legend(['Simulated Vol'])
# Superimpose a particular trading agent's trade decisions on top of the ticker
# plot to make it easy to visually see if it is making sensible choices.
if agent_log:
    df_agent = pd.read_pickle(agent_log, compression='bz2')
    df_agent = df_agent.between_time(BETWEEN_START, BETWEEN_END)
    df_agent = df_agent[df_agent.EventType == 'HOLDINGS_UPDATED']
    # NOTE(review): `first` is assigned but never used below.
    first = True
    for idx in df_agent.index:
        event = df_agent.loc[idx,'Event']
        if symbol in event:
            shares = event[symbol]
            # Green = long entry, red = short entry, black = exit/flat.
            if shares > 0:
                print ("LONG at {}".format(idx))
                axes[0].axvline(x=idx, linewidth=LW, color='g')
            elif shares < 0:
                print ("SHORT at {}".format(idx))
                axes[0].axvline(x=idx, linewidth=LW, color='r')
            else:
                print ("EXIT at {}".format(idx))
                axes[0].axvline(x=idx, linewidth=LW, color='k')
        else:
            print ("EXIT at {}".format(idx))
            axes[0].axvline(x=idx, linewidth=LW, color='k')
plt.suptitle('Execution Price/Volume: {}'.format(symbol))
axes[0].set_ylabel('Executed Price')
if PRINT_VOLUME:
    axes[1].set_xlabel('Execution Time')
    axes[1].set_ylabel('Executed Volume')
    axes[0].get_xaxis().set_visible(False)
else:
    axes[0].set_xlabel('Execution Time')
#plt.savefig('background_{}.png'.format(b))
plt.show()
| true |
e9849df8c6217a1965fed1fc9be7dec55133d78c | Python | davehedengren/exercism_python | /anagram/anagram.py | UTF-8 | 225 | 3.140625 | 3 | [] | no_license | def detect_anagrams(word,a_list):
anagrams = []
for a in a_list:
if ((sorted(word.lower())) == sorted(a.lower())
and word.lower() != a.lower()):
anagrams.append(a)
return anagrams
| true |
5a4f20b383fc5de4b2dad2f59ab2914fbd0157e7 | Python | 4d4c/web_enumeration | /resolve_domains.py | UTF-8 | 3,544 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
import ipaddress
import os
import sys
from argparse import ArgumentParser
import dns.resolver
from lumberjack.lumberjack import Lumberjack
class ResolveDomains():
    """Resolve a list of domains to IPv4 addresses and write report files."""

    # Output file names, created inside output_path.
    IP_FILENAME = "ips.txt"
    DOMAIN_IP_FILENAME = "domains_ips.csv"
    UNRESOLVED_FILENAME = "unresolved.txt"

    def __init__(self, input_file, output_path, verbose):
        """
        :param input_file: path to a file with one domain per line
        :param output_path: existing directory for the output files
        :param verbose: enable verbose logging
        """
        self.input_file = input_file
        self.output_path = output_path
        self.log = Lumberjack(False, verbose)
        if not os.path.exists(self.output_path):
            self.log.error("Folder {} not found".format(self.output_path))
            sys.exit(1)
        self.domains = {
            "resolved": {},    # domain -> list of unique A-record IPs
            "unresolved": []   # domains that failed to resolve
        }
        self.log.info("Starting resolving domains...")

    def main(self):
        """Resolve every domain in the input file, then write the report files."""
        with open(self.input_file, "r") as domain_file:
            for domain in domain_file:
                self.check_domain(domain.strip())
        self.log.debug(self.domains)
        self.create_files()

    def check_domain(self, domain):
        """Resolve one domain's A records, recording it as resolved or unresolved."""
        resolver = dns.resolver.Resolver()
        resolver.nameservers = ["4.2.2.1", "8.8.8.8"]
        try:
            ips = resolver.resolve(domain, "A")
        except Exception:
            # Narrowed from the original bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; dns.resolver failures
            # (NXDOMAIN, NoAnswer, Timeout, ...) are all Exception subclasses.
            ips = None
        if ips:
            self.log.info("Resolved " + domain)
            # De-duplicate the A records for this domain.
            self.domains["resolved"][domain] = list({ip.to_text() for ip in ips})
            for ip in self.domains["resolved"][domain]:
                self.log.debug("%s: %s" % (domain, ip))
        else:
            self.domains["unresolved"] += [domain]
            self.log.warning("Could not resolve " + domain)

    def create_files(self):
        """Write ips.txt (sorted unique IPs), domains_ips.csv and unresolved.txt."""
        with open(os.path.join(self.output_path, self.IP_FILENAME), "w") as ip_file:
            all_ips = set()
            for ips in self.domains["resolved"].values():
                all_ips.update(ips)
            for ip in sorted(all_ips, key=ipaddress.IPv4Address):
                ip_file.write(ip + "\n")
        with open(os.path.join(self.output_path, self.DOMAIN_IP_FILENAME), "w") as domain_ip_file:
            for domain, ips in self.domains["resolved"].items():
                domain_ip_file.write("{},{}\n".format(domain, "/".join(ips)))
        with open(os.path.join(self.output_path, self.UNRESOLVED_FILENAME), "w") as unresolved_file:
            for domain in self.domains["unresolved"]:
                unresolved_file.write(domain + "\n")
if __name__ == "__main__":
    # Command-line entry point: -i input file, -o output directory, -v verbose.
    parser = ArgumentParser(description="Resolve list of domains")
    parser.add_argument("-i", "--input", required=True, type=str, action="store", dest="input_file", help="Input file with domains")
    parser.add_argument("-o", "--output", required=True, type=str, action="store", dest="output_path", help="Output path")
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", help="Verbose output")
    args = parser.parse_args()
    rd = ResolveDomains(args.input_file, args.output_path, args.verbose)
    rd.main()
| true |
d20c232413c2de9e121af4d753c48faed2c501b2 | Python | abhijithneilabraham/kodekochikode-hackathon | /Google_MapsAPI/Distance_between_coordinates.py | UTF-8 | 410 | 2.984375 | 3 | [] | no_license | import googlemaps
gmaps = googlemaps.Client(key='********************') #API_Key
LatO = input("Origin Latitude :")
LongO = input("Origin Longitude :")
LatD = input("Destination Latitude :")
LongD = input("Destination Longitude :")
distance = gmaps.distance_matrix([str(LatO) + " " + str(LongO)], [str(LatD) + " " + str(LongD)], mode='walking')['rows'][0]['elements'][0]
print(distance["distance"]["text"])
| true |
7d0a69a0a947764a2acf1a486901bf6e5de6c39f | Python | badbye/ORM | /myorm/test.py | UTF-8 | 896 | 2.65625 | 3 | [] | no_license | # !/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 16/1/19 上午10:45
@author: yalei
'''
from field import *
from model import Model
from expr import Expr
class User(Model):
    # Maps to the `user` table; one field attribute per column.
    __table__ = 'user'
    id = IntegerField()
    char = CharField()
    date = DateField()
    def __repr__(self):
        # Python 2 style %-formatting showing all three fields.
        return '<[id]: %s; [char]: %s; [date]: %s>' %(self.id, self.char, self.date)
class Hi(Model):
    # Minimal single-column model mapped to the `hi` table.
    __table__ = 'hi'
    hi = IntegerField()
    def __repr__(self):
        return '<hi: %s>' %self.hi
# NOTE(review): Python 2 print statements — this demo script will not run
# under Python 3 as-is.
# hi = Hi(hi = 123)
# hi_expr = Expr(hi)
# print hi_expr.insert()
# print hi_expr.update(hi = 456)
#
# print '-' * 100
#
# Exercise Expr against an empty User instance.
user = User()
print '-' * 100
user_expr = Expr(user)
print user_expr.insert()
print user_expr.update(id = 456)
print '-' * 100
print user.date
user.date = None
print user.date
user_expr = Expr(user)
print user_expr.insert()
print user_expr.update(id = 456)
fe35c3b5551973a1fb1ecca67f56f195886124a3 | Python | amlalejini/lalejini_checkio_ws | /home/median/median.py | UTF-8 | 1,252 | 4.0625 | 4 | [] | no_license | #!/usr/bin/python
'''
Task:
For this mission, you are given a non-empty array of natural numbers (X).
With it, you must separate the upper half of the numbers from the lower half
and find the median.
Input: An array as a list of integers.
Output: The median as a float or an integer.
'''
def verbose_checkio(data):
    '''
    Return the median of a non-empty list of numbers (sorts data in place).

    Fix: the original indexed the list with `len(data)/2`, which is a float
    in Python 3 and raises TypeError on even-length input; floor division
    is used instead.
    '''
    data.sort()
    n = len(data)
    mid = n // 2
    if n % 2 == 0:
        # even: average of the two middle elements
        median = (data[mid - 1] + data[mid]) / 2.0
    else:
        # odd: the single middle element
        median = data[mid]
    return median
def checkio(data):
    '''
    Return the median of a non-empty list of numbers (sorts data in place).
    '''
    data.sort()
    mid = len(data) // 2
    if len(data) % 2:
        return data[mid]
    return (data[mid - 1] + data[mid]) / 2.0
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
    # Self-checks mirroring the checkio platform examples.
    assert checkio([1, 2, 3, 4, 5]) == 3, "Sorted list"
    assert checkio([3, 1, 2, 5, 3]) == 3, "Not sorted list"
    assert checkio([1, 300, 2, 200, 1]) == 2, "It's not an average"
    assert checkio([3, 6, 20, 99, 10, 15]) == 12.5, "Even length"
    print("Start the long test")
    assert checkio(list(range(1000000))) == 499999.5, "Long."
    print("The local tests are done.")
    # NOTE(review): stray trailing call, result discarded.
    checkio([3,6,20,99,10,15])
758c44827e0dfc1f3608f38bad0fcada19fc359e | Python | TengounPlan/Sr.Cotorre-discord-edition | /e_cards/search.py | UTF-8 | 1,354 | 3.09375 | 3 | [] | no_license | from core.utils import is_lvl
def use_ec_keywords(cards: list, key_list: str):
    """
    Filter encounter cards according to the characters of the given string.

    Each character of ``key_list`` narrows the result further: a digit keeps
    cards of that level (via ``is_lvl``), while letters select a
    ``type_code`` (e/a/p/t/s/l) or a ``faction_code`` (i/j).

    :param cards: list of card dicts
    :param key_list: filter characters (case-insensitive)
    :return: the filtered list of cards
    """
    # Lookup tables replace the original run of eight near-identical ifs.
    type_codes = {
        "e": "enemy",
        "a": "act",
        "p": "agenda",
        "t": "treachery",
        "s": "scenario",
        "l": "location",
    }
    faction_codes = {
        "i": "investigator",
        "j": "neutral",
    }
    filtered_cards = cards
    for char in key_list.lower():
        if char.isdigit():
            filtered_cards = [c for c in filtered_cards if is_lvl(c, int(char))]
        elif char in type_codes:
            filtered_cards = [c for c in filtered_cards if c['type_code'] == type_codes[char]]
        elif char in faction_codes:
            filtered_cards = [c for c in filtered_cards if c['faction_code'] == faction_codes[char]]
    return filtered_cards
| true |
2ab97cadaadcb6a0019408157e17a6bc2f2c56d7 | Python | Nabellaleen/dosye | /dosye/files.py | UTF-8 | 1,567 | 3.15625 | 3 | [] | no_license | # Import from external libraries
from path import Path
from werkzeug.utils import secure_filename
class FilesManagerException(Exception):
    # Base class for all FilesManager errors.
    pass
class FolderConfigException(FilesManagerException):
    """Raised when the configured UPLOAD_FOLDER is missing or unusable."""

    def __init__(self):
        self.message = (
            "UPLOAD_FOLDER not found - Please check "
            "server configuration: UPLOAD_FOLDER must "
            "exists and have correct rights")
        # Pass the message to Exception so str(exc) and tracebacks show it
        # (the original left the exception args empty).
        super().__init__(self.message)
class ForbiddenFileAccessException(FilesManagerException):
    """Raised when a requested path escapes the managed folder."""

    def __init__(self, file):
        self.file = file
        self.message = (
            f"The targeted file is outside the configured folder: {file}")
        # Pass the message to Exception so str(exc) and tracebacks show it.
        super().__init__(self.message)
class FilesManager:
    """Manage uploaded files inside a single configured folder."""
    # NOTE(review): class-level attribute appears unused by the methods below.
    files = []
    def __init__(self, folder):
        # Root folder all managed files must live under.
        self.folder = Path(folder)
    def __contains__(self, item):
        # True when `item` resolves inside self.folder (relative path does not
        # start with '..'), i.e. no directory-escape.
        file = self.folder / item
        return not self.folder.relpathto(file).startswith('..')
    def save(self, file):
        """Save an uploaded file (werkzeug FileStorage) under a sanitized name; return that name."""
        file_name = secure_filename(file.filename)
        file_path = self.folder / file_name
        try:
            file.save(file_path)
        except FileNotFoundError:
            # The configured folder does not exist / is not writable.
            raise FolderConfigException
        return file_name
    def get_files(self):
        """Return the list of files currently in the managed folder."""
        try:
            return self.folder.files()
        except FileNotFoundError:
            raise FolderConfigException
    def delete(self, filename):
        """Delete `filename` from the folder; refuse paths that escape it."""
        if filename not in self:
            raise ForbiddenFileAccessException(filename)
        file = self.folder / filename
        if not file.exists():
            raise FileNotFoundError(2, 'File not found', str(file))
        file.remove()
| true |
dc0ce6a20059b009ff77d7eab4714715130dc7ae | Python | nacro711072/smooth_location_trajectory | /algorithm/__init__.py | UTF-8 | 1,386 | 3.40625 | 3 | [] | no_license | import util
import math
def min_distance(data_list, max_d=30.0):
    """Thin out a trajectory by distance.

    Keeps the first point, then keeps each subsequent point only when it is
    at least max_d (meters, per util.latlon_distance) away from the last
    kept point.
    """
    anchor = data_list[0]
    kept = [anchor]
    for point in data_list[1:]:
        gap = util.latlon_distance(anchor[0], anchor[1], point[0], point[1])
        if gap >= max_d:
            kept.append(point)
            anchor = point
    return kept
def distance_and_triangle(data_list, max_dis=30.0, max_angle=30.0):
    """Thin a trajectory by minimum distance AND minimum turning angle.

    A candidate must be at least max_dis from the previous candidate; it is
    kept only when the turn it forms with its neighbours (util.radian over a
    3-point window, converted to degrees) is at least max_angle.  The first
    and last input points are always kept.
    """
    new_points = [data_list[0]]
    buffer1 = data_list[0]      # last point that passed the distance test
    buffer2 = [data_list[0]]    # sliding window of up to 3 candidates for the angle test
    for i in range(1, len(data_list)):
        target = data_list[i]
        distance = util.latlon_distance(buffer1[0], buffer1[1], target[0], target[1])
        if distance < max_dis:
            continue
        buffer1 = target
        buffer2.append(target)
        if len(buffer2) < 3:
            continue
        elif len(buffer2) > 3:
            # Invariant guard: the window is always trimmed back to 3 below.
            raise Exception("len(buffer2) > 3 !!!")
        # util.radian presumably returns the turn at the middle point, in radians — TODO confirm.
        angle = util.radian(buffer2) * 180 / math.pi
        if angle < max_angle:
            # Nearly straight: drop the oldest candidate and keep sliding.
            buffer2 = buffer2[1:]
            continue
        new_points.append(buffer2[1])
        buffer2 = buffer2[1:]
    new_points.append(data_list[-1])
    return new_points
if __name__ == "__main__":
    # Smoke test: duplicate/near-duplicate points collapse, distant ones survive.
    test1 = [(0, 0), (0, 0), (0.0000001, 0.0000001), (1, 1), (2, 2)]
    assert (distance_and_triangle(test1) == [(0, 0), (1, 1), (2, 2)])
| true |
d0ef0502ac1458b1ca1494f5ee70e51c27137f36 | Python | raymondem1/hot-or-not-ai | /roastScript.py | UTF-8 | 896 | 2.515625 | 3 | [] | no_license | import requests
import pyttsx3
from better_profanity import profanity
# Text-to-speech engine and the module-level roast text filled in by roast().
engine = pyttsx3.init()
eb =""
def roast():
    """Fetch a generated roast line from the remote GPT endpoint into the global `eb`."""
    global eb
    # Few-shot prompt: example insults that prime the generator's style.
    context = "Your mom is a hoe.\nYou are the proof that God makes mistakes\nYou make me want to go blind\nDo us a favor and stay inside\nYou look like an anti cigaretts commercial\n"
    payload = {
        "context": context,
        "token_max_length": 64,
        "temperature": 0.6,
        "stop_sequence": "\n"
    }
    response = requests.post("http://api.vicgalle.net:5000/generate", params=payload).json()
    eb =response["text"]
if __name__=='__main__':
    # Fix: the original never called roast(), so `eb` was always the empty
    # string and nothing was ever generated or spoken.
    roast()
    eb = profanity.censor(eb)
    if("****" in eb):
        # Replace a censored roast with a safe fallback line.
        eb = "saying this instead of what he wanted to say so nobody gets cancelled"
    print(eb)
    # Speak the roast a bit slower and slightly below full volume.
    engine.setProperty('rate', 120)
    engine.setProperty('volume', 0.9)
    engine.say(eb)
    engine.runAndWait()
b4970a5e5c6badcfedea39593b06d2ab1b50fb0e | Python | nxhuy-github/code_20201031 | /code_20201031/streamlit_20200308/first_app.py | UTF-8 | 1,079 | 3.625 | 4 | [] | no_license | import streamlit as st
import numpy as np
import pandas as pd
import time
st.title('My first app')
st.write("Here's our first attempt at using data to create a table:")
st.write(pd.DataFrame({
    'first_column': [1,2,3,4],
    'second_column': [10,20,30,40]
}))
df = pd.DataFrame({
    'first_column': [1,2,3,4],
    'second_column': [10,20,30,40]
})
# Streamlit "magic": a bare expression at module level is rendered in the app.
df
chart_data = pd.DataFrame(
    np.random.randn(20,3),
    columns=['a','b','c'])
st.line_chart(chart_data)
if st.checkbox('Show dataframe'):
    chart_data = pd.DataFrame(
        np.random.randn(20,3),
        columns=['a','b','c'])
    st.line_chart(chart_data)
"""
option = st.selectbox(
    'Which number do you like best?',
    df['first_column'])
'You selected: ', option
"""
option = st.sidebar.selectbox(
    'Which number do you like best?',
    df['first_column'])
'You selected: ', option
# Simulated long-running task with a live progress bar.
latest_iteration = st.empty()
bar = st.progress(0)
for i in range(100):
    latest_iteration.text(f'Iteration{i+1}')
    bar.progress(i+1)
    time.sleep(0.1)
"...and now we'are done!"
| true |
46ee4e758882b88e29e0fbd80840fd2754f218d5 | Python | sy1wi4/ASD-2020 | /searching/binary_search_recursive.py | UTF-8 | 222 | 3.578125 | 4 | [] | no_license | def binSearch(tab,val,p,k):
if(p<=k):
s=(p+k)//2
if(tab[s]==val): return s
elif(tab[s]>val): return binSearch(tab,val,p,s-1)
else: return binSearch(tab,val,s+1,k)
else: return None
| true |
09ec0c830590c3c5fdbaf0da409def02c22fde6e | Python | jacquerie/leetcode | /leetcode/0077_combinations.py | UTF-8 | 365 | 3.265625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import itertools
class Solution:
    def combine(self, n, k):
        """Return every k-element combination of the integers 1..n, as lists."""
        return list(map(list, itertools.combinations(range(1, n + 1), k)))
if __name__ == "__main__":
    # Self-check: all 2-combinations of 1..4 in lexicographic order.
    solution = Solution()
    assert [
        [1, 2],
        [1, 3],
        [1, 4],
        [2, 3],
        [2, 4],
        [3, 4],
    ] == solution.combine(4, 2)
| true |
161012239a7ee36dc986922e62737d624def594e | Python | mmreis/pdTransformers | /pdTransformers/lags.py | UTF-8 | 4,915 | 2.828125 | 3 | [] | no_license | from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
import warnings
class InsertLags(BaseEstimator, TransformerMixin):
    """
    Insert lags using shift method
    :param lags: : dict, dictionary with reference of the columns and number of lags for each column
        (when transforming a plain Series, an iterable of lag offsets instead)
    Example:
        from WWIR.pd_transformers.datasets import generate_ts
        dataset = generate_ts(n_samples=1000, n_features=2, n_targets=1,
                      split_X_y=False, start_date='2016-01-03 00:00',
                      freq='1H')
        from WWIR.pd_transformers.lags import InsertLags
        nlags = 3
        IL = InsertLags(lags={0: np.arange(1, nlags + 1), })
    """
    def __init__(self, lags):
        self.lags = lags
    def fit(self, X, y=None):
        # Stateless transformer: nothing to fit.
        return self
    def transform(self, X, y=None):
        # sort_index() returns a copy, so the caller's frame is not mutated.
        # Cleanup vs. the original: dead locals (original_cols, subs,
        # new_dict) removed; `type(X) is` replaced with isinstance.
        X = X.sort_index()
        if isinstance(X, pd.DataFrame):
            for predictor, all_lags in self.lags.items():
                if predictor not in X.columns:
                    warnings.warn(' ## Lags from \'{}\' were excluded, since the dataset wasn\'t loaded.'.format(
                        predictor))
                    continue
                for l in all_lags:
                    X['%s_lag%d' % (predictor, l)] = X[predictor].shift(l)
            return X
        elif isinstance(X, pd.Series):
            # For a Series, self.lags is treated as a plain iterable of lags.
            return pd.concat([X.shift(i) for i in self.lags], axis=1)
    def inverse_transform(self, X, y=None):
        # Lag columns are purely additive; the original data is untouched.
        return X
class InsertAggByTime(BaseEstimator, TransformerMixin):
    """
    Inserts aggregated features (p.e. averages)
    :param agg: dict
        dictionary with variable name (column) as key, key-item as tuple (aggregator, by)
        or (aggregator, by, kwargs-dict)
    :param timev: default='index'
        column translating the time.
    :param dropna: default=False,
        flag indicating if the NaN are to be dropped
    Example
        from WWIR.pd_transformers.datasets import generate_ts
        dataset = generate_ts(n_samples=1000, n_features=2, n_targets=1,
                      split_X_y=False, start_date='2016-01-03 00:00',
                      freq='1H')
        from WWIR.pd_transformers.ts_transformers import InsertAggByTimeLags as IATL
        c = IATL(agg_lags={'target': [('mean', '5min')]})
        c.fit_transform(dataset)
    """
    def __init__(self, agg, timev='index', dropna=False):
        self.agg = agg
        self.timev = timev
        self.dropna = dropna
    def fit(self, X, y=None):
        # Stateless transformer: nothing to fit.
        return self
    def transform(self, X, y=None):
        # @todo use re-sample function instead
        XX = X.copy()
        XX.reset_index(inplace=True)
        if self.timev == 'index':
            self.timev = X.index.name
        for predictor, all_lags in self.agg.items():
            if predictor not in X.columns:
                warnings.warn(
                    ' ## ERROR! Lags from \'{}\' were excluded, since the dataset wasn\'t loaded.'.format(predictor))
                continue
            # .copy() avoids chained-assignment issues when adding 'ts' below.
            df = XX[[self.timev, predictor]].copy()
            for tuple_ in all_lags:
                if len(tuple_) == 2:
                    agg, by = tuple_
                    ar = {}
                elif len(tuple_) == 3:
                    agg, by, ar = tuple_
                else:
                    # The original printed a message and then crashed with a
                    # NameError on `agg`; fail fast with a clear error instead.
                    raise ValueError('parameters not well defined')
                # Truncate timestamps to the 'by' resolution (numpy datetime64 unit).
                XX['ts'] = XX[self.timev].values.astype('<M8[' + by + ']')
                df['ts'] = XX['ts']
                # getattr dispatch replaces the original eval()-built string:
                # same aggregation call, without evaluating code from strings.
                df_agg = getattr(df.groupby(['ts']), agg)(**ar)
                if isinstance(df_agg.index, pd.MultiIndex):
                    # pd.core.index.MultiIndex (used originally) was removed in
                    # modern pandas; pd.MultiIndex is the public name.
                    df_agg = df_agg.unstack(None)
                XX = XX.merge(df_agg, left_on='ts', right_index=True, how='left',
                              suffixes=('', '_%s_%s' % (agg.lower(), by)))
        if 'ts' in XX.columns:
            XX.drop('ts', axis=1, inplace=True)
        # NOTE(review): relies on agg/by surviving from the last loop iteration,
        # exactly as the original did; only safe when self.agg is non-empty.
        if ('%s_%s_%s' % (self.timev, agg.lower(), by)) in XX.columns:
            XX.drop('%s_%s_%s' % (self.timev, agg.lower(), by), axis=1, inplace=True)
        XX.set_index(self.timev, inplace=True)
        if self.dropna:
            XX.dropna(inplace=True)
        return XX
    def inverse_transform(self, X, y=None):
        pass
| true |
5e3aa86d130f52be1e3d91eff069ea0631d175c5 | Python | oceanbei333/leetcode | /1394.找出数组中的幸运数.py | UTF-8 | 406 | 2.96875 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=1394 lang=python3
#
# [1394] 找出数组中的幸运数
#
# @lc code=start
class Solution:
def findLucky(self, arr: List[int]) -> int:
counter = collections.Counter(arr)
if any(counter[val]==val for val in counter):
return int( max(not counter[val]==val or val for val in counter) )
else:
return -1
# @lc code=end
| true |
12bd4f20b6754a0fe61361af4b3aa5db082dac68 | Python | to-yuki/pythonLab-v3 | /sample/8/GmailAT.py | UTF-8 | 1,177 | 2.953125 | 3 | [] | no_license | import gmail # GMail簡易送信モジュール
import getpass
# Gmail account credentials (filled in interactively below).
sendUsername = None #'from_user@gmail.com'
sendUserPassword = None #'from_user_password'
# Mail parameters (defaults; overwritten by user input below).
subject = '件名'
toAddr = None #'to_user@gmail.com'
body = '本文'
# Connect to the mail server, log in and send the message.
try:
    print('メール送信開始')
    # Prompt for the account name and password (password input is not echoed).
    print('MailAccount: ',end=' ')
    sendUsername = input()
    sendUserPassword = getpass.getpass()
    # Prompt for the recipient address.
    print('宛先アドレス: ',end=' ')
    toAddr = input()
    # Prompt for the subject and the message body.
    print('件名: ',end=' ')
    subject = input()
    print('メール本文: ',end=' ')
    body = input()
    # Log in to Gmail and send the message.
    client = gmail.GMail(sendUsername, sendUserPassword)
    message = gmail.Message(subject=subject,to=toAddr,text=body)
    client.send(message)
    client.close()
    print('メール送信完了!')
except Exception:
    # Bug fix: the original used a bare "except:", which also swallowed
    # SystemExit and KeyboardInterrupt (e.g. Ctrl+C during input()).
    # Best-effort cleanup: the connection may never have been opened, in
    # which case the NameError on `client` is deliberately ignored.
    try:
        client.close()
    except Exception:
        pass
    print('メール送信エラーです。')
| true |
14744acaece5efc82528cd736f654d693cc5f7b2 | Python | largomst/HackerRank-Algorithms | /Week of Code/Repeated String.py | UTF-8 | 199 | 3.046875 | 3 | [] | no_license | #!/bin/python3
import sys
def count_a_in_prefix(s, n):
    """Return how many 'a' characters appear in the first *n* characters
    of the string *s* repeated infinitely (HackerRank "Repeated String").

    Uses str.count instead of the original sum([1 for ...]) anti-idiom,
    and divmod to split n into whole repeats plus a partial prefix.
    """
    full_repeats, remainder = divmod(n, len(s))
    return s.count('a') * full_repeats + s[:remainder].count('a')


if __name__ == "__main__":
    # Same stdin/stdout contract as the original top-level script.
    s = input().strip()
    n = int(input().strip())
    print(count_a_in_prefix(s, n))
| true |
f3a67b210cd21d1d9b11ef07b18b821faa888a68 | Python | MacHu-GWU/angora-project | /angora/filesystem/winzip.py | UTF-8 | 5,390 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module description
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A data/file compress utility module. You can easily programmatically add files
and directorys to zip archives. And compress arbitrary binary content.
- :func:`zip_a_folder`: add folder to archive.
- :func:`zip_everything_in_a_folder`: add everything in a folder to archive.
- :func:`zip_many_files`: Add many files to a zip archive.
- :func:`write_gzip`: Write binary content to gzip file.
- :func:`read_gzip`: Read binary content from gzip file.
**中文文档**
提供了若干个文件和数据压缩的快捷函数。
- :func:`zip_a_folder`: 将目录添加到压缩包。
- :func:`zip_everything_in_a_folder`: 将目录内的所有文件添加到压缩包。
- :func:`zip_many_files`: 将多个文件添加到压缩包。
- :func:`write_gzip`: 将二进制数据写入文件, 例如python pickle, bytes string。
- :func:`read_gzip`: 读取解压后的二进制数据内容。
Compatibility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Python2: Yes
- Python3: Yes
Prerequisites
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- None
Class, method, function, exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function
from zipfile import ZipFile
import gzip
import os
"""
注: python中zipfile包自带的ZipFile方法的用法如下::
基本用法:
with ZipFile("filename.zip", "w") as f:
f.write(path)
其中path是文件路径。 如果path是文件夹, 并不会将文件夹内所有的文件添加到压缩包中。
相对路径压缩:
比如你有一个路径C:\download\readme.txt
如果当前路径是C:\, 而此时你将readme.txt添加到压缩包时则是在压缩包内添加一个:
download\readme.txt
如果当前路径是C:\download\, 则在压缩包内添加的路径则是:
readme.txt
"""
def zip_a_folder(src, dst):
    """Add a folder and everything inside it to a zip archive.

    Entries are stored relative to the parent of *src*, so the root folder
    itself appears as the top-level entry inside the archive::

        zip_a_folder("paper", "paper.zip")   # archive contains paper/...

    :param src: path of the folder to archive.
    :param dst: path of the zip file to create (overwritten if it exists).
    """
    src, dst = os.path.abspath(src), os.path.abspath(dst)
    cwd = os.getcwd()
    dirname, basename = os.path.split(src)
    os.chdir(dirname)
    try:
        # Collect paths relative to the parent of src so the archive keeps
        # the root folder as its top-level entry.
        todo = []
        for dirname, _, fnamelist in os.walk(basename):
            for fname in fnamelist:
                todo.append(os.path.join(dirname, fname))
        with ZipFile(dst, "w") as f:
            for newname in todo:
                f.write(newname)
    finally:
        # Bug fix: always restore the original working directory, even when
        # walking or writing raises (the original leaked the chdir on error).
        os.chdir(cwd)
def zip_everything_in_a_folder(src, dst):
    """Add everything inside a folder — but not the root folder itself —
    to a zip archive::

        zip_everything_in_a_folder("paper", "paper.zip")  # archive has
                                                          # algorithm.pdf, images/1.jpg

    :param src: path of the folder whose contents are archived.
    :param dst: path of the zip file to create (overwritten if it exists).
    """
    src, dst = os.path.abspath(src), os.path.abspath(dst)
    cwd = os.getcwd()
    os.chdir(src)
    try:
        # Collect paths relative to src so the root folder is not stored.
        todo = []
        for dirname, _, fnamelist in os.walk(src):
            for fname in fnamelist:
                todo.append(os.path.relpath(os.path.join(dirname, fname), src))
        with ZipFile(dst, "w") as f:
            for newname in todo:
                f.write(newname)
    finally:
        # Bug fix: always restore the original working directory, even when
        # walking or writing raises (the original leaked the chdir on error).
        os.chdir(cwd)
def zip_many_files(list_of_abspath, dst):
    """Add many files to a zip archive, each stored under its base name.

    Files with duplicate base names are all kept in the archive.

    :param list_of_abspath: iterable of absolute file paths to archive.
    :param dst: path of the zip file to create (overwritten if it exists).
    """
    with ZipFile(dst, "w") as f:
        for abspath in list_of_abspath:
            # Store only the base name via arcname. This replaces the
            # original per-file os.chdir() trick, which mutated process
            # state and leaked the cwd change if write() raised.
            f.write(abspath, arcname=os.path.basename(abspath))
def write_gzip(content, abspath):
    """Compress *content* (bytes) and write it to the gzip file *abspath*."""
    handle = gzip.open(abspath, "wb")
    try:
        handle.write(content)
    finally:
        handle.close()
def read_gzip(abspath):
    """Read and return the decompressed bytes stored in the gzip file *abspath*."""
    handle = gzip.open(abspath, "rb")
    try:
        return handle.read()
    finally:
        handle.close()
#--- Unittest ---
if __name__ == "__main__":
    # Ad-hoc manual smoke tests. The calls are intentionally commented out,
    # so running this module has no side effects by default; uncomment a
    # call to exercise it. Both tests write files into the current directory.
    def test_zip():
        # Archive the current directory in all three supported ways.
        zip_a_folder(os.getcwd(), "1.zip")
        zip_everything_in_a_folder(os.getcwd(), "2.zip")
        zip_many_files([os.path.abspath("winzip.py")], "3.zip")
    # test_zip()
    def test_gzip():
        # Round-trip this source file through gzip and print it back.
        abspath = "winzip.py"
        with open(abspath, "rb") as f:
            content = f.read()
        write_gzip(content, "winzip.gz")
        text = read_gzip("winzip.gz").decode("utf-8")
        print(text)
    # test_gzip()
| true |
bf4b9d3b15f8105ae4bab4ab6a42db2f71bfaf16 | Python | KimMinjun7/Work | /data_functions_solar.py | UTF-8 | 2,790 | 2.6875 | 3 | [] | no_license | import pvlib
import pymysql
import numpy as np
import pandas as pd
class Data:
    """Helper around a MySQL site table and pvlib for solar-position and
    clear-sky irradiance lookups.

    NOTE(review): several methods call ``self._load_site(site_id)``, which
    is not defined in this class — presumably provided elsewhere; confirm.
    The database credentials below are placeholders and must be filled in.
    """
    def __init__(self):
        print('Start: Data\n')
    def _print_start(self, content):
        # Progress marker printed before a long-running step.
        print(f'Start: {content}')
    def _print_end(self, content):
        # Progress marker printed after a long-running step.
        print(f'End: {content}\n')
    def _load_table(self, table, sql):
        """Run *sql* against MySQL and store the result DataFrame in ``self.df``.

        NOTE(review): user/passwd/db/port below are placeholder values
        (``포트번호`` is not even a defined name); this cannot run as-is.
        """
        content = f'Load {table}'
        self._print_start(content)
        db = pymysql.connect(user = "사용자",\
                             passwd = "비번",\
                             host = "192.168.xx.xxx",\
                             db = "db명",\
                             charset = "utf8",\
                             port = 포트번호)
        cursor = db.cursor(pymysql.cursors.DictCursor)
        cursor.execute(sql)
        result = pd.DataFrame(cursor.fetchall())
        db.close()
        self._print_end(content)
        self.df = result.copy()
    # Build the pvlib Location for the currently loaded site.
    def _set_location(self, altitude = 0):
        # altitude = 0
        # NOTE(review): assumes self.df holds exactly one site row, so that
        # float(...) on the LAT/LON column succeeds — TODO confirm.
        self.lat = float(self.df['LAT'].values)
        self.lon = float(self.df['LON'].values)
        return pvlib.location.Location(self.lat, self.lon, altitude = altitude)
    # Solar-position data (elevation / zenith / azimuth).
    def load_solar_elev(self, site_id, times, tz = 'Asia/Seoul'):
        # site_id = 40009
        # times = datetime type
        # tz = Asia/Seoul
        # Localize the naive timestamps, query pvlib, then strip the tz
        # again so the caller gets naive timestamps back.
        self._load_site(site_id)
        loc = self._set_location()
        elv = loc.get_solarposition(times.tz_localize(tz))[['elevation', 'zenith', 'azimuth']]
        elv.index = elv.index.tz_localize(None)
        return elv
    # Clear-sky irradiance data.
    def load_clearsky(self, site_id, times, model, tz = 'Asia/Seoul', linke_turbidity = False):
        # site_id = 40009
        # times = datetime type
        # model = ineichen, haurwitz
        # tz = Asia/Seoul
        # linke_turbidity = True, False
        self._load_site(site_id)
        loc = self._set_location()
        loc_times = times.tz_localize(tz)
        if model == 'haurwitz':
            rad = loc.get_clearsky(loc_times, model = model)
        elif model == 'ineichen':
            if linke_turbidity: # Use the monthly-average Linke turbidity lookup.
                rad = loc.get_clearsky(loc_times, model = model,
                                       linke_turbidity = pvlib.clearsky.lookup_linke_turbidity(loc_times, self.lat, self.lon))
            if not linke_turbidity: # Fix Linke turbidity to 1 (very clear sky).
                rad = loc.get_clearsky(loc_times, model = model, linke_turbidity = 1)
        rad.index = rad.index.tz_localize(None)
        return rad
15662f7f62b063903c2f9eefcea956e805977ced | Python | littlecoon/EffectivePython | /article8.py | UTF-8 | 1,608 | 4.4375 | 4 | [] | no_license | 编写高质量python代码的方法8:不要使用含有两个以上表达式的列表推导
除了基本的用法之外,列表推导也支持多重循环。例如,要把矩阵(也就是二维列表)简化成一维列表,使原来的每个单元格都成为新列表中的普通元素。这个功能采用包含两个for表达式的列表推导即可实现,这些for表达式会按照从左至右的顺序来评估。
上面这个例子简单易易懂,这就是多重循环的合理用法。还有一种包含多重循环的合理用法,那就是根据输入列表来创建有两层深度的新列表。例如,我们要对二维矩阵的每个单元格取平方,然后用这些平方值构建新的矩阵。由于要多使用一对中括号,所以实现该功能的代码会经上例稍微复杂一点,但依然不难理解。
如果表达式里还有一层循环,那么列表推导就会变得很长,这时必须把它分成多行来写,才能看得清楚一些
可以看出此时列表推导并不比普通写法更加简单,如果用普通写法的话:
#4
列表推导也支持多个if条件。处在同一循环级别中的多项条件,彼此之间默认形成and表达式。例如,要从数字列表中选出大于4的偶数,那么下面这两种列表推导方式是等效的。
#5
每一级循环的for表达式后面都可以指定条件。例如,要从原矩阵中把那些本身能为3所整除,且其所在行的各元素之和又大于等于10的单元格挑出来。我们只需编写很简短的代码,就可用列表推导来实现此功能,但是,这样的代码非常难懂。
| true |
2b935e5adaff4b4a32970b170b39816e99663da1 | Python | svvchen/siteblocker | /media_blocker.py | UTF-8 | 2,342 | 3 | 3 | [] | no_license | import time
from datetime import datetime
import os
import fileinput
import re
import secrets
class Blocker:
def __init__(self):
# starting with an initial set of sites that the user can modify
self.blocked_sites = ["www.youtube.com", "www.facebook.com", "www.reddit.com", "www.linkedin.com"]
def current_sites(self):
print("Current sites include:")
print(self.blocked_sites)
def add_sites(self, new_site):
# basic website format validation
if re.match(r"www\..*.com", new_site):
self.blocked_sites.append(new_site)
print("Added " + new_site + " to blocked sites.")
else:
print("Invalid site format. Please input sites as 'www.insertsitename.com.'")
def action_block(self, status):
# block
if status == False:
with open('/etc/hosts', 'rt') as original_host_file:
copy_host_file = original_host_file.read()
with open('/tmp/etc_hosts.tmp', 'a+') as outf:
outf.write(copy_host_file)
for site in self.blocked_sites:
# print(site)
if site in copy_host_file:
# print(site)
pass
else:
# print(site)
outf.write('\n' + '127.0.0.1' + " " + site)
os.system('sudo mv /tmp/etc_hosts.tmp /etc/hosts')
# unblock
else:
with open('/etc/hosts', 'rt') as original_host_file:
copy_host_file = original_host_file.read()
lines = copy_host_file.splitlines()
with open('/tmp/etc_hosts.tmp', 'a+') as outf:
for line in lines:
if not any(site in line for site in self.blocked_sites):
if line != '':
outf.write(line + '\n')
else:
# print(line)
pass
outf.close()
os.system('sudo mv /tmp/etc_hosts.tmp /etc/hosts')
if __name__ == "__main__":
new_blocker = Blocker()
# tests
# -----
# new_blocker.current_sites()
# new_blocker.add_sites('www.9gag.com')
# new_blocker.action_block(True)
# -----
| true |
604d830f1b8e2f8fca5eebdde7b8958f791b7e9b | Python | moxy37/NodePiAlpha | /TestListen.py | UTF-8 | 462 | 3.28125 | 3 | [] | no_license | #!/usr/bin/python
import serial, string
output = " "
ser = serial.Serial('/dev/ttyACM0', 9600)
print("Starting")
while True:
while output != "":
output = ser.readline()
output = output[:-2]
o = output.split(' ')
lat = ''
lon = ''
#print(o)
for x in range(0, len(o)):
if o[x] != '':
if lat == '':
lat = o[x]
else:
lon = o[x]
latitude = float(lat)/100
longitude = float(lon)/-100
print(latitude)
print(longitude) | true |
1b1d83708786e53a5e2252d212d401e7c34185ff | Python | lxjack/Python_Code | /data_struct/Llist/link_list_test.py | UTF-8 | 15,664 | 3.84375 | 4 | [] | no_license | #_*_ coding: utf-8 _*_
import unittest
from link_list import Llist
from link_list import LinkedListOperateError
class LlistTest(unittest.TestCase):
'''my linked list unittest'''
def setUp(self):
pass
def tearDown(self):
pass
def test_is_empty_list(self):
"case1:test is_empty_list function"
'''
1、当链表为空时返回True
2、当链表不为空时返回False
'''
temp_list=Llist()
value=temp_list.is_empty_list()
self.assertEqual(value,True,"value_info "+str(value))
temp_list.append(1)
value = temp_list.is_empty_list()
self.assertEqual(value,False,"value_info "+str(value))
def test_delete_list(self):
'''
case2:测试函数delete_list功能
1、删除空链表,链表仍为空
2、删除非空链表,链表为空
'''
temp_list = Llist()
temp_list.delete_list()
value = temp_list.is_empty_list()
self.assertEqual(value, True, "value_info " + str(value))
temp_list.append(1)
temp_list.delete_list()
value = temp_list.is_empty_list()
self.assertEqual(value, True, "value_info " + str(value))
def test_prepend(self):
'''
case3:测试prepend函数
1、使用prepend添加1个元素'a',索引0为元素'a'
2、使用prepend再添加1个元素'b',索引0为元素'b';索引1为元素'a'
'''
temp_list = Llist()
temp_list.prepend('a')
value=temp_list.index(0)
self.assertEqual(value, 'a', "value_info " + str(value))
temp_list.prepend('b')
value = temp_list.index(0)
self.assertEqual(value, 'b', "value_info " + str(value))
value = temp_list.index(1)
self.assertEqual(value, 'a', "value_info " + str(value))
def test_append(self):
'''
case4:测试append函数
1、使用append添加1个元素'a',索引0为元素'a'
2、使用append再添加1个元素'b',索引0为元素'a';索引1为元素'b'
3、使用append再添加1个元素'c',索引2为元素'c'
'''
temp_list = Llist()
temp_list.append('a')
value = temp_list.index(0)
self.assertEqual(value, 'a', "value_info " + str(value))
temp_list.append('b')
value = temp_list.index(0)
self.assertEqual(value, 'a', "value_info " + str(value))
value = temp_list.index(1)
self.assertEqual(value, 'b', "value_info " + str(value))
temp_list.append('c')
value = temp_list.index(2)
self.assertEqual(value, 'c', "value_info " + str(value))
def test_middle_pend(self):
'''
case5:测试middle_pend函数
1、当被插入元素链表length<2,抛出指定异常
2、原链表存在2个元素[1,2],使用middle_pend在索引1添加元素'a',索引1为元素'a',索引0为元素1,索引2为元素2
3、原链表存在3个元素[1,'a',2] , 使用middle_pend在索引2添加元素'b',索引1为元素'a',索引2为元素'b',索引3为元素2
4、原链表存在4个元素[1,'a','b',2],指定插入index=0或4,抛出指定异常
'''
'''step 1'''
temp_list = Llist()
temp_list.append(1)
with self.assertRaisesRegexp(LinkedListOperateError,'the length of list is less than 2,can not execute middle_pend'):
temp_list.middle_pend(0,'aaa')
'''step 2'''
temp_list.append(2)
temp_list.middle_pend(1,'a')
value = temp_list.index(1)
self.assertEqual(value, 'a', "value_info " + str(value))
value = temp_list.index(0)
self.assertEqual(value, 1, "value_info " + str(value))
value = temp_list.index(2)
self.assertEqual(value, 2, "value_info " + str(value))
'''step 3'''
temp_list.middle_pend(2, 'b')
value = temp_list.index(1)
self.assertEqual(value, 'a', "value_info " + str(value))
value = temp_list.index(2)
self.assertEqual(value, 'b', "value_info " + str(value))
value = temp_list.index(3)
self.assertEqual(value, 2, "value_info " + str(value))
'''step 4'''
with self.assertRaisesRegexp(LinkedListOperateError,'the input index can not execute middle_pend'):
temp_list.middle_pend(0,'aaa')
with self.assertRaisesRegexp(LinkedListOperateError,'the input index can not execute middle_pend'):
temp_list.middle_pend(4,'aaa')
def test_prepop(self):
'''
case6:测试prepop函数
1、链表存在2个元素[1,2],使用prepop函数,删除元素,被删除元素为1
2、使用prepop函数,再次删除元素,被删除元素为2
3、使用prepop函数,再次删除元素,抛出指定异常
'''
temp_list = Llist()
temp_list.append(1)
temp_list.append(2)
value = temp_list.prepop()
self.assertEqual(value, 1, "value_info " + str(value))
value = temp_list.prepop()
self.assertEqual(value, 2, "value_info " + str(value))
with self.assertRaisesRegexp(LinkedListOperateError, 'list is empty'):
temp_list.prepop()
def test_pop(self):
'''
case7:测试pop函数
1、链表存在2个元素[1,2,'a'],使用pop函数,删除元素,被删除元素为'a'
2、使用pop函数,再次删除元素,被删除元素为2
3、使用pop函数,再次删除元素,被删除元素为1
4、使用pop函数,再次删除元素,抛出指定异常
'''
temp_list = Llist()
temp_list.append(1)
temp_list.append(2)
temp_list.append('a')
value = temp_list.pop()
self.assertEqual(value,'a', "value_info " + str(value))
value = temp_list.pop()
self.assertEqual(value,2, "value_info " + str(value))
value = temp_list.pop()
self.assertEqual(value,1, "value_info " + str(value))
with self.assertRaisesRegexp(LinkedListOperateError, 'list is empty'):
temp_list.pop()
def test_middle_pop(self):
'''
case8:测试middle_pop函数
1、当被插入元素链表length=2,抛出指定异常
2、原链表存在4个元素[1,2,'a','b'],指定删除index=0或3元素,抛出指定异常
3、原链表存在4个元素[1,2,'a','b'],删除index=2元素,删除元素为'a'
4、原链表存在3个元素[1,2,'b'],删除index=1元素,删除元素为2
'''
'''step 1'''
temp_list = Llist()
temp_list.append(1)
temp_list.append(2)
with self.assertRaisesRegexp(LinkedListOperateError,'the length of list is less than 3,can not execute middle_pop'):
temp_list.middle_pop(1)
'''step 2'''
temp_list.append('a')
temp_list.append('b')
with self.assertRaisesRegexp(LinkedListOperateError, 'the input index can not execute middle_pop'):
temp_list.middle_pop(0)
with self.assertRaisesRegexp(LinkedListOperateError, 'the input index can not execute middle_pop'):
temp_list.middle_pop(3)
'''step 3'''
value = temp_list.middle_pop(2)
self.assertEqual(value, 'a', "value_info " + str(value))
'''step 4'''
value = temp_list.middle_pop(1)
self.assertEqual(value, 2, "value_info " + str(value))
def test_find_node(self):
'''
case9:测试find_node函数
1、当链表为空时,调用find_node函数,抛出指定异常
2、原链表存在4个元素[1,2,'a','b'],指定索引index=-1或4元素,抛出指定异常
3、原链表存在4个元素[1,2,'a','b'],指定索引index=0,找到节点0
4、原链表存在4个元素[1,2,'a','b'],指定索引index=3,找到节点3
'''
'''step 1'''
temp_list = Llist()
with self.assertRaisesRegexp(LinkedListOperateError,'list is empty'):
temp_list.find_node(1)
'''step 2'''
temp_list.append(1)
temp_list.append(2)
temp_list.append('a')
temp_list.append('b')
with self.assertRaisesRegexp(LinkedListOperateError, 'list index out of range'):
temp_list.find_node(-1)
with self.assertRaisesRegexp(LinkedListOperateError, 'list index out of range'):
temp_list.find_node(4)
'''step 3'''
p=temp_list.find_node(0)
value = p.element
self.assertEqual(value, 1, "value_info " + str(value))
'''step 4'''
p = temp_list.find_node(3)
value = p.element
self.assertEqual(value, 'b', "value_info " + str(value))
def test_index(self):
'''
case10:测试index函数
1、当链表为空时,调用index函数,抛出指定异常
2、原链表存在4个元素[1,2,'a','b'],指定索引index=-1或4元素,抛出指定异常
3、原链表存在4个元素[1,2,'a','b'],指定索引index=0,找到节点0
4、原链表存在4个元素[1,2,'a','b'],指定索引index=3,找到节点3
'''
'''step 1'''
temp_list = Llist()
with self.assertRaisesRegexp(LinkedListOperateError, 'list is empty'):
temp_list.index(1)
'''step 2'''
temp_list.append(1)
temp_list.append(2)
temp_list.append('a')
temp_list.append('b')
with self.assertRaisesRegexp(LinkedListOperateError, 'list index out of range'):
temp_list.index(-1)
with self.assertRaisesRegexp(LinkedListOperateError, 'list index out of range'):
temp_list.index(4)
'''step 3'''
value = temp_list.index(0)
self.assertEqual(value, 1, "value_info " + str(value))
'''step 4'''
value = temp_list.index(3)
self.assertEqual(value, 'b', "value_info " + str(value))
def test_modify_element(self):
'''
case11:测试modify_element函数
1、当链表为空时,调用modify_element函数,抛出指定异常
2、原链表存在4个元素[1,2,'a','b'],指定索引index=-1或4元素,抛出指定异常
3、原链表存在4个元素[1,2,'a','b'],修改索引index=0元素为'aa'
4、原链表存在4个元素[1,2,'a','b'],修改索引index=3元素为'cc'
'''
'''step 1'''
temp_list = Llist()
with self.assertRaisesRegexp(LinkedListOperateError, 'list is empty'):
temp_list.modify_element(1,'aa')
'''step 2'''
temp_list.append(1)
temp_list.append(2)
temp_list.append('a')
temp_list.append('b')
with self.assertRaisesRegexp(LinkedListOperateError, 'list index out of range'):
temp_list.modify_element(-1,'aa')
with self.assertRaisesRegexp(LinkedListOperateError, 'list index out of range'):
temp_list.modify_element(4,'aa')
'''step 3'''
temp_list.modify_element(0, 'aa')
value = temp_list.index(0)
self.assertEqual(value, 'aa', "value_info " + str(value))
'''step 4'''
temp_list.modify_element(3, 'cc')
value = temp_list.index(3)
self.assertEqual(value, 'cc', "value_info " + str(value))
def test_length(self):
'''
case12:测试length函数
1、当链表为空时,调用length函数,返回链表长度为0
2、原链表存在4个元素[1,2,'a','b'],调用length函数,返回链表长度为4
'''
'''step 1'''
temp_list = Llist()
value = temp_list.length()
self.assertEqual(value, 0, "value_info " + str(value))
'''step 2'''
temp_list.append(1)
temp_list.append(2)
temp_list.append('a')
temp_list.append('b')
value = temp_list.length()
self.assertEqual(value, 4, "value_info " + str(value))
def test_index_insert(self):
'''
case13:测试index_insert函数
1、当链表为空时,在索引0插入元素'a',索引index=0元素为'a'
2、当链表为['a']时,在索引1插入元素'b',索引index=1元素为'b'
3、当链表为['a','b']时,在索引1插入元素'c',索引index=1元素为'c',索引index=2元素为'b'
4、当链表为['a','c','b']时,传入索引>=4或<=-1或非数字,抛出指定异常
'''
'''step 1'''
temp_list = Llist()
temp_list.index_insert(0,'a')
value =temp_list.index(0)
self.assertEqual(value, 'a', "value_info " + str(value))
'''step 2'''
temp_list.index_insert(1, 'b')
value = temp_list.index(1)
self.assertEqual(value, 'b', "value_info " + str(value))
'''step 3'''
temp_list.index_insert(1, 'c')
value = temp_list.index(1)
self.assertEqual(value, 'c', "value_info " + str(value))
value = temp_list.index(2)
self.assertEqual(value, 'b', "value_info " + str(value))
'''step 4'''
with self.assertRaisesRegexp(LinkedListOperateError, 'index is out of range or index not a integer'):
temp_list.index_insert(4, 'ccc')
with self.assertRaisesRegexp(LinkedListOperateError, 'index is out of range or index not a integer'):
temp_list.index_insert(-1, 'ccc')
with self.assertRaisesRegexp(LinkedListOperateError, 'index is out of range or index not a integer'):
temp_list.index_insert('aaa', 'ccc')
def test_index_pop(self):
'''
case14:测试index_pop函数
1、当链表为空时,删除空链表中元素,抛出指定异常
2、当链表为[0,1,2,3,4,5,6]时,删除index=-1或7的元素,抛出指定异常
3、当链表为[0,1,2,3,4,5,6]时,删除index=5和index=1的元素,元素删除正确,删除后链表长度为5
4、当链表为[0,2,3,4,6]时,删除index=0,元素删除正确
5、当链表为[2,3,4,6]时,删除index=3,元素删除正确,删除后链表长度为3
'''
'''step 1'''
temp_list = Llist()
with self.assertRaisesRegexp(LinkedListOperateError, 'list is empty'):
temp_list.index_pop(0)
'''step 2'''
for i in xrange(7):
temp_list.append(i)
with self.assertRaisesRegexp(LinkedListOperateError, 'index is out of range'):
temp_list.index_pop(-1)
with self.assertRaisesRegexp(LinkedListOperateError, 'index is out of range'):
temp_list.index_pop(7)
'''step 3'''
value = temp_list.index_pop(5)
self.assertEqual(value, 5, "value_info " + str(value))
value = temp_list.index_pop(1)
self.assertEqual(value, 1, "value_info " + str(value))
value = temp_list.length()
self.assertEqual(value, 5, "value_info " + str(value))
'''step 4'''
value = temp_list.index_pop(0)
self.assertEqual(value, 0, "value_info " + str(value))
'''step 5'''
value = temp_list.index_pop(3)
self.assertEqual(value, 6, "value_info " + str(value))
value = temp_list.length()
self.assertEqual(value, 3, "value_info " + str(value))
if __name__=="__main__":
unittest.main() | true |
e70499e70ef2ffb505795f703964c821776c0987 | Python | olivierdarchy/python_lab | /pgcd.py | UTF-8 | 1,888 | 3.765625 | 4 | [] | no_license | # Made to stay sharp in python:
#
# Exercice : design an algorithm to evaluate the pgcd of two natural integers
# - input: a, b for a, b € N*
# - output: pgcd
#
# by Olivier Darchy
# created the 23th of September,2017
def eval_pgcd(a, b) :
"""
Evaluate and return the greatest comon divisor of two number.
Use Euclide algorithm to do so
"""
# verify if a and/or b are not equal to zero (bit shift trick)
if a << b == a :
raise ValueError("given number must be defined on N*")
divisor, divided = (a, b) if a <= b else (b, a)
carry = -1
pgcd = 0
while carry != 0 :
pgcd = divisor
carry = divided % divisor
divided = divisor
divisor = carry
return pgcd
# testing
# TODO: encapsulation of testing tools
class Static_int :
"""
Represent a static incrementable integer
"""
def __init__(self, val=0, step=1) :
self.val = val
self.step = step
def inc(self) :
val = self.val
self.val += self.step
return val
test_number = Static_int(1)
MSG = "Dump data {} : {}"
# function decorator
def test_formating(func) :
def wrapper(*args, **kwargs) :
assert func(*args, **kwargs), MSG.format(args[0], args[1])
print("TEST NUMBER {}: OK".format(test_number.inc()))
return wrapper
@test_formating
def equal_relation_test(known, actual) :
return known == actual
if __name__ == "__main__" :
equal_relation_test(7, eval_pgcd(14, 21))
equal_relation_test(7, eval_pgcd(21, 14))
equal_relation_test(21, eval_pgcd(1071, 1029))
print("PGCD({}, {}) = {}".format(21, 21, eval_pgcd(21, 21)))
print("PGCD({}, {}) = {}".format(4620, 6625, eval_pgcd(4620, 6625)))
print("PGCD({}, {}) = {}".format(10884, 8058, eval_pgcd(10884, 8058)))
try :
eval_pgcd(5, 0)
except ValueError as e:
print(e)
| true |
64db57507d2128bd79a12548f97b4fb1cbde0fdd | Python | TimP4w/dva | /Exercise 1/dva_hs19_ex1.py | UTF-8 | 9,596 | 3.140625 | 3 | [] | no_license | import numpy as np
import numpy.polynomial.polynomial as poly
import pandas as pd
import os
from scipy import interpolate
from bokeh.layouts import layout
from bokeh.io import show
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.plotting import figure
from bokeh.palettes import RdYlBu
from bokeh.transform import dodge
from numpy.polynomial.polynomial import polyvander
# CONSTANTS
MAXIMUM_YEAR = 2016
MINIMUM_VALUE = 10
# read data from .csv file by using absolute path
__file__ = "20151001_hundenamen.csv"
data_absolute_dirpath = os.path.abspath(os.path.dirname(__file__))
try:
df1 = pd.read_csv(os.path.join(data_absolute_dirpath, __file__))
except FileNotFoundError:
print(
"Couldn't find the dataset file, please check that you have the file in the same folder as the script"
)
exit()
except:
print("Something went wrong while opening the dataset file...")
exit()
# rename the columns of the data frame
df1.rename(
columns={
"HUNDENAME": "name",
"GEBURTSJAHR_HUND": "birth_year",
"GESCHLECHT_HUND": "gender",
},
inplace=True,
)
# Count the nr of births per year
nr_of_births_per_year = df1.groupby("birth_year").size()
# ====================================================================
# =============== 1. data cleaning and basic plotting ================
# ====================================================================
# task 1.1: Remove outliers and construct a ColumnDataSource from the clean DataFrame
# hint: use df.loc to remove the outliers as specified in the assignment document
# then reset the index of DataFrame before constructing ColumnDataSource
# Only use years from before 2016 and which have more than 10 births per year
# reference dataframe: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
# reference columndatasource: https://bokeh.pydata.org/en/latest/docs/reference/models/sources.html
df1 = df1.loc[df1["birth_year"] < MAXIMUM_YEAR]
nr_of_births_per_year = df1.groupby("birth_year").size()[lambda x: x > MINIMUM_VALUE]
df = pd.DataFrame(
{"Years": nr_of_births_per_year.index, "Numbers": nr_of_births_per_year.values}
)
clean_column_data_source = ColumnDataSource(
dict(x=df["Years"], y=df["Numbers"], sizes=df["Numbers"] / 20)
)
# task 1.2: construct an array of evenly spaced numbers with a stepsize of 0.1. Start and stop are the earliest and
# latest year contained in your cleaned source data. These will be your x values for the plotting of the interpolation.
# hint: use numpys linspace() function for this task
# the array should look similar to this: array([start, ... , 1999.1, 1999.2, ... , 2014.7, 2014.8, ... , end])
start_year = df["Years"].iloc[0]
end_year = df["Years"].iloc[-1] + 0.1
x_values = np.arange(start_year, end_year, 0.1, float)
# task 1.3: configure mouse hover tool
# reference: https://bokeh.pydata.org/en/latest/docs/user_guide/categorical.html#hover-tools
# your tooltip should contain 'Year' and 'Number' Take care, that only the diamond plot is affected by the hover tool.
tooltip = [("Year", "@x"), ("Number", "@y")]
# task 1.4: generate the figure for plot 1 and add a diamond glyph renderer with a size, color, and alpha value
# reference: https://bokeh.pydata.org/en/latest/docs/reference/plotting.html
# examples: https://bokeh.pydata.org/en/latest/docs/user_guide/plotting.html
# hint: For the figure, set proper values for x_range and y_range, x and y labels, plot_height, plot_width and
# title and remember to add the hovertool. For the diamond glyphs, set preferred values for size, color and alpha
# optional task: set the size of the glyphs such that they adapt it according to their 'Numbers' value
plot_1 = figure(
plot_width=1500,
plot_height=500,
title="Number of dog births per year",
tools="hover",
tooltips=tooltip,
)
plot_1.xaxis.axis_label = "Year"
plot_1.yaxis.axis_label = "Number of Dogs"
plot_1.diamond(
x="x",
y="y",
source=clean_column_data_source,
size="sizes",
color="navy",
alpha=0.8,
legend="Number of dog births per year",
)
# task 1.5: generate the figure for plot 2 with proper settings for x_range, x and y axis label, plot_height,
# plot_width and title
plot_2 = figure(plot_width=1500, plot_height=200, title="Error bars")
plot_2.yaxis.axis_label = "Fitting Error"
# ======================================================================
# ============= 2. Plotting fitting curves and error bars ==============
# ======================================================================
# task 2.1: Perform a piecewise linear interpolation
# hint: this can be achieved in two ways: either with scipys interp1d solution or with a bokeh line plot
plot_1.line(
x="x",
y="y",
source=clean_column_data_source,
line_width=2,
color="green",
alpha=0.8,
legend="Piecewise linear interpolation",
)
# task 2.2: draw fitting lines in plot 1 and error bars in plot 2 using the following for loop. You should fit curves
# of degree 2, 4 and 6 to the points. The range of the for loop is already configured this way.
for i in range(2, 7, 2):
# degree of each polynomial fitting
degree = i
# color used in both fitting curves and error bars
color = RdYlBu[11][4 + degree]
# hint: use numpys polyfit() to calculate fitting coefficients and polyval() to calculate estimated y values for
# the x values you constructed before.
fitting_coefficient = np.polyfit(df["Years"], df["Numbers"], i)
fitting_curve = np.polyval(fitting_coefficient, x_values)
# hint: construct new ColumnDataSource for fitting curve, x should be the constructed x values and
# y should be the estimated y. Then draw the fitting line into plot 1, add proper legend, color, line_width and
# line_alpha
fitting_curve_column_data_source = ColumnDataSource(
dict(x=x_values, y=fitting_curve)
)
legend = "Polynomial Least-Squares Interpolation: Fitting Degree = " + str(i)
plot_1.line(
x="x",
y="y",
source=fitting_curve_column_data_source,
line_width=2,
line_alpha=0.8,
color=color,
legend=legend,
)
# draw the error bars into plot 2
# hint: calculate the fitting error for each year by subtracting the original 'Numbers' value off your cleaned
# source from the estimated y values. Be careful to match the correct y estimation to the respective 'Numbers'
# value! For the subsampling look up array slicing for numpy arrays. Use the absolute values of the errors to only
# get error bars above the baseline.
y_estimate = fitting_curve[0 : len(x_values) : 10]
error = abs(df["Numbers"] - y_estimate)
error_data_source = ColumnDataSource(dict(x=df["Years"], y=error))
# hint: before plotting, make sure the bars don't overlap each other, i.e. slightly adjust the x position for each
# bar within each loop cycle
x_pos = 0.1 * (i / 2 - 1)
plot_2.vbar(
x=dodge("x", x_pos),
top="y",
source=error_data_source,
width=0.09,
alpha=0.8,
color=color,
)
# task 2.3: draw a 6th degree smooth polynomial interpolation into plot 1
# hint 1: since most good math libraries use the least square method or similarly stable interpolation approaches you
# have to do this manually. Have a look at the lecture slides DVA 02 page 22 and use numpys (multi dimensional) array
# functions to solve the equations. However, if you find a library/package that calculates the interpolation with the
# method shown in the lecture, you are welcome to use it.
# hint 2: Use the entries 0, 3, 6, 9, 12, 15 of the original column data source
# start by constructing your x and y values from the source
degree = 6
x_val = clean_column_data_source.data["x"][0:16:3]
y_val = clean_column_data_source.data["y"][0:16:3]
# generate a 6x6 matrix filled with 1
V = np.ones((6, 6))
# use array slicing and the power function to fill columns 2-6 of the matrix with the correct values
for i in range(0, degree - 1):
V[:, i] = np.power(x_val, degree - 1 - i)
# solve the equation from the slides with the correct numpy functions
coefficients = np.linalg.solve(V, y_val)
# use polyval and the x values from task 2.1 together with the calculated coefficients to estimate y values and then
# plot the result into plot 1
smooth_fitting_curve_y = np.polyval(coefficients, x_values)
smooth_fitting_curve = ColumnDataSource(dict(x=x_values, y=smooth_fitting_curve_y))
plot_1.line(
x="x",
y="y",
source=smooth_fitting_curve,
legend="Smooth Polynomial Interpolation, Fitting Degree = 6",
color="blue",
)
# draw the error bars for this polynomial again into plot 2
y_estimate = smooth_fitting_curve_y[0 : len(x_values) : 10]
error = abs(df["Numbers"] - y_estimate)
error_data_source = ColumnDataSource(dict(x=df["Years"], y=error))
# move the x positions such that the new bars are to the right of the previous ones
x_pos = 0.1 * (8 / 2 - 1)
plot_2.vbar(
x=dodge("x", x_pos),
top="y",
source=error_data_source,
width=0.09,
alpha=0.8,
color="blue",
)
# set up the position of legend in plot 1 as you like
plot_1.legend.location = "top_left"
# ==============================================
# ================= dashboard ==================
# ==============================================
# put all the plots into one layout
# reference: https://bokeh.pydata.org/en/latest/docs/user_guide/layout.html
# fill in the function
dashboard = layout([[plot_1], [plot_2]])
show(dashboard)
| true |
4d9a33d8ada159a001db08b084de3f9452e70f90 | Python | xiewendan/algorithm | /leetcode/00 common.py | UTF-8 | 197 | 2.703125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# __author__ = xiaobao
# __date__ = 2019/11/14 12:01:42
# desc: 用于描述常用模块
import sys
# NOTE: sys.maxsize is the largest value a Py_ssize_t can hold - it is NOT
# infinity (Python ints are unbounded; use float('inf') for a true infinity).
print(sys.maxsize) # largest Py_ssize_t value (not "positive infinity")
print(-sys.maxsize) # symmetric lower bound (not "negative infinity")
24ef6f6a988dc81cbeb6f828f34dee12ebc42b40 | Python | salimregragui/python_poker | /player.py | UTF-8 | 11,489 | 3.65625 | 4 | [] | no_license | import deck
class Player:
    """A poker player: two-card hand, chip stack, and betting actions.

    ``hand`` holds card objects exposing ``.value`` (1-13, ace = 1) and
    ``.suit``; ``state``/``status`` are plain strings used by the game loop.
    """
    def __init__(self, money, name):
        self.hand = []
        self.money = money
        self.name = name
        self.state = "Playing"
        self.status = ""
        self.current_call = 0
    def calling(self, money_to_call):
        """Pay a call; returns "Called", "All in" (stack exactly spent) or
        "Invalid" (cannot afford it - stack left untouched)."""
        if self.money - money_to_call > 0:
            self.money = self.money - money_to_call
            return "Called"
        elif self.money - money_to_call == 0:
            self.money = self.money - money_to_call
            return "All in"
        else:
            return "Invalid"
    def raising(self, money_bet, money_to_raise):
        """Raise by money_to_raise when affordable, else fall back to calling.

        NOTE(review): the fallback branch does not return the result of
        calling(), so the caller receives None there - confirm intended.
        """
        if self.money - money_to_raise >= 0:
            self.money = self.money - money_to_raise
            return ("Raised", money_to_raise)
        else:
            self.calling(money_bet)
    def folding(self):
        """Discard the hand and mark the player as folded."""
        self.hand = []
        self.state = "Folded"
    def checking(self):
        """No-op action; just reports "Checked"."""
        return "Checked"
    def pay_small_blind(self, value):
        """Pay the small blind; folds instead when the stack cannot cover it
        (returns None in that case)."""
        if self.money - value >= 0:
            self.money -= value
            return value
        else:
            self.folding()
    def pay_big_blind(self, value):
        """Pay the big blind; folds instead when the stack cannot cover it
        (returns None in that case)."""
        if self.money - value >= 0:
            self.money -= value
            return value
        else:
            self.folding()
    def show_hand(self, state):
        """Print an ASCII rendering of the player's cards plus name/money/status.

        ``state`` is "Hidden" (card backs) or "Shown" (face up); folded
        players always get card backs.
        """
        status = "NONE" if self.status == "" else self.status
        if state == "Hidden" or status == "Folded": #if we are not in a showdown or the player has folded we don't show his cards
            print("------- -------")
            print(f"| | | | PLAYER : {self.name}")
            print(f"| | | | MONEY : {self.money}")
            print(f"| | | | STATUS : {status}")
            print("------- -------")
        elif state == "Shown" and status != "Folded": #if we are in a showdown and the player hasn't folded we show his cards
            card1 = f"{self.hand[0].value} " if self.hand[0].value < 10 else f"{self.hand[0].value}"
            card2 = f"{self.hand[1].value} " if self.hand[1].value < 10 else f"{self.hand[1].value}"
            cards = [card1, card2]
            for i in range(2): #for each card from the two if it's an 11 a 12 a 13 or a 1 we turn it to it's showing value
                if(cards[i] == "11"):
                    cards[i] = "J "
                elif(cards[i] == "12"):
                    cards[i] = "Q "
                elif(cards[i] == "13"):
                    cards[i] = "K "
                elif(cards[i] == "1 "):
                    cards[i] = "A "
            print("------- -------")
            print(f"|{cards[0]} | |{cards[1]} | PLAYER : {self.name}")
            print(f"| {self.hand[0].suit[0]} | | {self.hand[1].suit[0]} | MONEY : {self.money}")
            print(f"| {cards[0]}| | {cards[1]}| STATUS : {status}")
            print("------- -------")
    def hand_power(self, table):
        """Evaluate the player's best hand against the community *table* cards.

        Returns ("<hand name>", strength 10.0-100.0, best_hand value list with
        aces remapped to 14), or 0 when the player has folded.
        NOTE(review): flush detection tests suits.count(...) == 5 exactly, so
        six or seven same-suit cards make the flush go undetected - confirm.
        """
        if self.status != "Folded":
            list_of_cards = self.hand + table
            suits = [suit.suit for suit in list_of_cards] #we get all the suits of the cards
            values = [value.value for value in list_of_cards] #we get all the values of the card
            best_hand = [] #best hand of the player
            values.sort() #we sort the values
            #booleans to check wich hand the player has
            check_brelan = False
            check_pair = False
            check_four_kind = False
            check_double_pair = False
            check_full_house = False
            check_flush = False
            check_straight = False
            check_royal_straight = False
            check_straight_flush = False
            check_royal_flush = False
            check_pair_value = 0
            straight_list = list(dict.fromkeys(values)) #removing duplicates from the values
            if straight_list[0] == 1: #as the As can be both one and 14 we add it to the list
                straight_list.append(14)
            straight_counter = 1 #the counter of followed numbers
            current_number = straight_list[0] #the number that we check
            best_hand.append(current_number)
            # Scan the de-duplicated values for a run of 5 consecutive numbers.
            for i in range(len(straight_list)):
                if straight_counter == 5 and straight_list[i] != current_number + 1: #if we found 5 followed numbers
                    break
                elif straight_list[i] == current_number + 1 and i != 0: #if the number is equal to +1 the number before (exp 2=> 3)
                    straight_counter += 1
                    current_number = straight_list[i]
                    best_hand.append(current_number)
                elif straight_list[i] != current_number + 1:#if the number is different from the number before +1 exp(2=>4)
                    straight_counter = 1
                    current_number = straight_list[i]
                    best_hand.clear()
                    best_hand.append(current_number)
            if straight_counter >= 5 and current_number != 14:
                check_straight = True
            elif straight_counter >= 5 and current_number == 14:
                check_royal_straight = True
            elif straight_counter < 5:
                best_hand = []
            if check_royal_straight: #if we have a possible royal straight we check the color of the suit
                royal_flush_check = [card for card in list_of_cards]
                royal_check_counter = 1
                color_check = max(suits, key=suits.count) #checking the most predominant color
                for card in reversed(royal_flush_check):
                    if card.value == 10 and card.suit == color_check or card.value == 11 and card.suit == color_check or card.value == 12 and card.suit == color_check or card.value == 13 and card.suit == color_check:
                        royal_check_counter += 1
                if royal_check_counter >= 5:
                    check_royal_flush = True
                else:
                    check_royal_flush = False
                    check_straight = True
            #checking if it's a straight flush
            if check_straight:
                straight_flush_check = [card for card in list_of_cards]
                straight_flush_check = sorted(straight_flush_check, key=lambda card: card.value)
                straight_flush_counter = 0
                current_straight_num = straight_list[0]
                color_check = max(suits, key=suits.count) #checking the most predominant color
                for i in range(len(straight_flush_check)):
                    if straight_flush_counter == 5 and straight_flush_check[i].value != current_straight_num + 1: #if we found 5 followed numbers
                        break
                    elif straight_flush_check[i].value == current_straight_num + 1 and straight_flush_check[i].suit == color_check and i != 0: #if the number is equal to +1 the number before (exp 2=> 3)
                        straight_flush_counter += 1
                        current_straight_num = straight_flush_check[i].value
                    elif straight_flush_check[i].value != current_straight_num + 1:
                        straight_flush_counter = 0
                        current_straight_num = straight_flush_check[i].value
                if straight_flush_counter >= 5:
                    check_straight_flush = True
            #checking if it's a flush
            for j in range(len(suits)):
                if suits.count(suits[j]) == 5:
                    check_flush = True
                    flush_color = suits[j]
            if check_flush:
                for i,card in enumerate(list_of_cards):
                    if(card.suit == flush_color):
                        best_hand.append(card.value)
            # check_hand marks that best_hand is still empty, i.e. the pair /
            # trips / quads scan below is allowed to fill it.
            if not best_hand:
                check_hand = True
            else:
                check_hand = False
            for i in range(len(values)):
                if values.count(values[i]) == 2 and check_pair_value == 0:
                    check_pair = True
                    check_pair_value = values[i]
                    if check_hand and not best_hand:
                        best_hand.append(values[i])
                        best_hand.append(values[i])
                elif values.count(values[i]) == 2 and check_pair_value != values[i]:
                    check_double_pair = True
                    if check_hand and len(best_hand) == 2:
                        best_hand.append(values[i])
                        best_hand.append(values[i])
                elif values.count(values[i]) == 3 and not check_pair:
                    check_brelan = True
                    if check_hand and not best_hand:
                        best_hand.append(values[i])
                        best_hand.append(values[i])
                        best_hand.append(values[i])
                elif values.count(values[i]) == 4:
                    check_four_kind = True
                    if check_hand and not best_hand:
                        best_hand.append(values[i])
                        best_hand.append(values[i])
                        best_hand.append(values[i])
                        best_hand.append(values[i])
                elif values.count(values[i]) == 3 and check_pair:
                    check_full_house = True
                    if check_hand and len(best_hand) == 2:
                        best_hand.append(values[i])
                        best_hand.append(values[i])
                        best_hand.append(values[i])
                elif values.count(values[i]) == 2 and check_brelan:
                    check_full_house = True
                    if check_hand and len(best_hand) == 3:
                        best_hand.append(values[i])
                        best_hand.append(values[i])
            # Always include the player's own hole cards in the reported hand.
            if self.hand[0].value not in best_hand:
                best_hand.append(self.hand[0].value)
            if self.hand[1].value not in best_hand:
                best_hand.append(self.hand[1].value)
            best_hand = [14 if x == 1 else x for x in best_hand]
            if check_royal_flush:
                return ("Royal Flush", 1.0 * 100, best_hand)
            elif check_straight_flush:
                return ("Straight Flush", 0.9 * 100, best_hand)
            elif check_four_kind:
                return ("Four of a kind", 0.8 * 100, best_hand)
            elif check_full_house:
                return ("Full House", 0.7 * 100, best_hand)
            elif check_flush:
                return ("Flush", 0.6 * 100, best_hand)
            elif check_straight:
                return ("Straight", 0.5 * 100, best_hand)
            elif check_brelan:
                return ("Three of a kind", 0.4 * 100, best_hand)
            elif check_double_pair:
                return ("Double Pair", 0.3 * 100, best_hand)
            elif check_pair:
                return ("Pair", 0.2 * 100, best_hand)
            else:
                best = [14 if x.value == 1 else x.value for x in self.hand]
                best_hand.clear()
                best_hand.append(max(best))
                return ("High Card", 0.1 * 100, best_hand)
        else:
            return 0
| true |
d175fc282eeb2cf90d03af361cb4942a448d738d | Python | masatomix/ai-samples | /samples/nlp/MecabFacade.py | UTF-8 | 2,604 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import MeCab
from logging import getLogger
log = getLogger(__name__)
class MecabFacade(object):
    """Facade over MeCab that returns morphological-analysis results as rows.

    Each row is ``[surface, feature_0, feature_1, ...]``; when padding is
    enabled every feature list is normalized to exactly 9 entries.
    """
    def __init__(self, options='-Ochasen', padding=True):
        """
        :param options: option string handed to MeCab.Tagger (ChaSen format by default)
        :param padding: pad/truncate each feature row to 9 entries with ''
        """
        self.__options = options
        self.__results = []
        self.__padding = padding
    def parse(self, text):
        """Run MeCab over *text*, store the rows internally, and return self
        so calls can be chained (``facade.parse(t).results()``).

        :param text: the sentence to analyze
        :return: this
        """
        # e.g. MeCab.Tagger("-Ochasen -d /usr/lib/mecab/dic/mecab-ipadic-neologd")
        # would switch to the neologd dictionary.
        tagger = MeCab.Tagger(self.__options)
        tagger.parse('')  # workaround for a known mecab-python surface bug
        node = tagger.parseToNode(text)
        node = node.next
        rows = []
        while node:
            features = node.feature.split(",")
            if self.__padding:
                # Normalize to 9 columns, filling missing ones with ''.
                features = self.padding(features, 9, '')
            row = [node.surface]
            row.extend(features)
            rows.append(row)
            log.debug(f'{node.surface}, {node.feature}')
            node = node.next
        rows.pop()  # drop the trailing EOS node
        self.__results = rows
        return self
    def results(self):
        """Return the rows produced by the last parse() call."""
        return self.__results
    def padding(self, inputArray, length, padding):
        """Return a list of exactly *length* items: the leading items of
        *inputArray*, with *padding* filling any missing tail slots.

        :param inputArray: source list (may be shorter or longer than length)
        :param length: desired result length
        :param padding: filler value for missing positions
        :return: the padded (or truncated) list
        """
        padded = []
        for idx in range(length):
            padded.append(inputArray[idx] if idx < len(inputArray) else padding)
        return padded
| true |
624fff85e6872f08a56420423bb8b2f71d6c22a3 | Python | majorbriggs/python_training | /decorators_tags.py | UTF-8 | 1,026 | 4 | 4 | [] | no_license | from functools import wraps
def tags(tag_name):
    """Decorator factory: wrap the decorated function's return value in an
    opening/closing <tag_name> pair."""
    def tags_decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            inner = func(*args, **kwargs)
            return "<{tag_name}>{result}</{tag_name}>".format(tag_name=tag_name, result=inner)
        return wrapper
    return tags_decorator
@tags('div')
@tags('p')
@tags('span')
@tags('strong')
def greet(name):
    """Return a greeting wrapped in strong/span/p/div (innermost applied first)."""
    return "Hello " + name


print(greet("My Friend"))
def outer_decorator(some_parameter):
    """Parameterized decorator that logs around the wrapped call.

    Prints the configured parameter plus start/finish markers, then returns
    whatever the wrapped function returned.
    """
    def real_decorator(func):
        # FIX: preserve the wrapped function's __name__/__doc__ (consistent
        # with tags() above, which already uses functools.wraps).
        @wraps(func)
        def wrapper(*args, **kwargs):
            print("Decorator got the parameter: {}".format(some_parameter))
            print("Decorated function starts")
            result = func(*args, **kwargs)
            print("Decorated function finished.")
            return result
        # inner function uses the parameter from the outer-most function
        return wrapper
    return real_decorator
@outer_decorator("Some parameter")
def test_step():
    """Demo step whose body just prints a message."""
    print("Do something")


test_step()
| true |
0717b3e13c1d80dbbd2397fce002c89536d6f1d1 | Python | ctensmeyer/formCluster | /preprocessing/image/line_detect_lib.py | UTF-8 | 7,371 | 2.890625 | 3 | [] | no_license |
import Image
from collections import deque
import sys
# Enum
START = 0
BETWEEN = 1
CONSEC = 2
GAP = 3
LABELING = 4
END = 5
# Constants
BLACK = 0
WHITE = 255
# Configurations
CONSEC_THRESHOLD = 70
MAX_GAP = 3
COLOR_LABEL = (0, 255, 0)
SMOOTH_KERNEL = 7
class CC:
	'''A connected component: the label image it belongs to, its integer
	label, and the list of (x, y) pixel coordinates that form it.'''
	def __init__(self, im, label, coords):
		self.im = im
		self.label = label
		self.coords = coords
		self.bounding_box = self.calc_bb()
	def calc_bb(self):
		'''Return the bounding box ((minx, miny), (maxx, maxy)) of coords.'''
		maxx = 0
		minx = 10e5
		maxy = 0
		miny = 10e5
		# NOTE(review): an empty coords list yields ((10e5, 10e5), (0, 0)).
		for p in self.coords:
			maxx = max(maxx, p[0])
			minx = min(minx, p[0])
			maxy = max(maxy, p[1])
			miny = min(miny, p[1])
		ul = (minx, miny)
		lr = (maxx, maxy)
		return (ul, lr)
	def get_size(self):
		'''Return the (width, height) of the bounding box, inclusive.'''
		ul, lr = self.bounding_box
		return lr[0] - ul[0] + 1, lr[1] - ul[1] + 1
	def make_mask(self):
		'''Build a mode-"L" mask (255 = member pixel) and store it on self.mask.'''
		size = self.get_size()
		ul, lr = self.bounding_box
		im = Image.new("L", size, "black")
		pix = im.load()
		for p in self.coords:
			#print p, ul, size
			pix[p[0] - ul[0], p[1] - ul[1]] = 255
		self.mask = im
	def display(self):
		'''Print a one-line summary of the component (Python 2 print).'''
		print "CC %s: size %d\tbounding box %s" % (self.label, len(self.coords), self.bounding_box)
def label_component(x, y, pix, mask, label):
	'''Flood-fill (8-connected BFS) the BLACK region of *mask* starting at
	(x, y), writing *label* into *pix* for each visited pixel.

	Returns the list of labeled coordinates; the seed itself is labeled
	before the loop so it is not included in the returned list.
	Out-of-range neighbour lookups are swallowed by the bare except.'''
	pix_queue = deque([(x, y)])
	pix[x, y] = label
	cc_coords = []
	while pix_queue:
		x, y = pix_queue.popleft()
		for _x in [-1, 0, 1]:
			for _y in [-1, 0, 1]:
				__x = x + _x
				__y = y + _y
				try:
					# Diagnostic: a thresholded image should only hold BLACK/WHITE.
					if mask[__x, __y] not in [BLACK, WHITE]:
						print __x, __y, mask[__x, __y]
					if pix[__x , __y] != label and mask[__x, __y] == BLACK:
						pix[__x, __y] = label
						cc_coords.append( (__x, __y) )
						pix_queue.append( (__x, __y) )
				except:
					pass
	return cc_coords
def find_ccs(im):
	'''Find all BLACK 8-connected components of *im*.

	Returns (label_image, cc_list): a mode-"I" image whose pixels carry the
	component label (labels start at 1, 0 = background) plus one CC object
	per component.'''
	pix = im.load()
	mut = Image.new('I', im.size, color='black') # assign all 0s
	mut_pix = mut.load()
	cur_idx = 1
	ccs = list()
	for x in xrange(im.size[0]):
		for y in xrange(im.size[1]):
			if pix[x, y] == BLACK and mut_pix[x, y] == BLACK:
				coords = label_component(x, y, mut_pix, pix, cur_idx)
				cc = CC(mut, cur_idx, coords)
				ccs.append(cc)
				cur_idx += 1
	return mut, ccs
def color_components(orig, ccs, colors):
	'''Return an RGB copy of *orig* with every labeled pixel of the label
	image *ccs* recolored; label value v gets colors[v % len(colors)].'''
	n = len(colors)
	#mut = Image.new('RGB', o.size, color='white')
	mut = orig.convert('RGB')
	pix = ccs.load()
	mut_pix = mut.load()
	for x in xrange(orig.size[0]):
		for y in xrange(orig.size[1]):
			if pix[x, y] != 0:
				mut_pix[x, y] = colors[ pix[x, y] % n]
	return mut
def get_value(pix, x, y):
	'''Return pix[x, y], treating any failed lookup (e.g. coordinates
	outside the pixel buffer) as background 0.'''
	try:
		return pix[x, y]
	except Exception:
		# Was a bare "except:": keep the best-effort fallback but stop
		# swallowing SystemExit/KeyboardInterrupt too.
		return 0
def neighbors(pix, x, y):
	'''Values of the 3x3 window centred on (x, y), centre included;
	out-of-range positions contribute 0 via get_value().'''
	window = []
	for dx in (-1, 0, 1):
		for dy in (-1, 0, 1):
			window.append(get_value(pix, x + dx, y + dy))
	return window
def nebs(x, y, x_size, y_size, pix, mask):
	'''Collect pix values in an x_size-by-y_size window centred on (x, y),
	keeping only positions where mask is BLACK; out-of-range coordinates
	are silently skipped by the bare except.

	NOTE(review): the window bounds rely on Python 2 floor division of
	negative integers (-1 * x_size / 2); under Python 3 these become
	floats and break xrange/range - confirm before porting.'''
	vals = []
	for _x in xrange(-1 * x_size / 2, (x_size + 1) / 2):
		for _y in xrange(-1 * y_size / 2, (y_size + 1) / 2):
			__x = x + _x
			__y = y + _y
			try:
				if mask[__x, __y] == BLACK:
					vals.append(pix[__x, __y])
			except:
				pass
	return vals
def smooth_line_data(im, horz_lines):
	'''
	Propagates line data to adjacent pixels

	Any BLACK pixel of *im* whose run value in *horz_lines* is at or below
	CONSEC_THRESHOLD is promoted to CONSEC_THRESHOLD + 1 when one of its
	8 neighbours already exceeds the threshold. *horz_lines* is modified
	in place; reads go through an unmodified copy so updates within one
	pass do not cascade.
	'''
	copy = horz_lines.copy()
	pix = im.load()
	line_pix = horz_lines.load()
	copy_pix = copy.load()
	for x in xrange(im.size[0]):
		for y in xrange(im.size[1]):
			if pix[x, y] == BLACK and copy_pix[x, y] <= CONSEC_THRESHOLD:
				if max(neighbors(copy_pix, x, y)) > CONSEC_THRESHOLD:
					line_pix[x, y] = CONSEC_THRESHOLD + 1
def median(arr):
	'''
	returns the median of the list (mean of the two middle values when
	the length is even)
	'''
	ordered = sorted(arr)
	size = len(ordered)
	middle = size // 2
	if size % 2 != 0:
		return ordered[middle]
	return (ordered[middle] + ordered[middle - 1]) / 2.0
def smooth_median(im, lines):
	'''
	smooths the line data by performing a median filter over it
	using im as a mask

	Each sub-threshold BLACK pixel of *lines* is replaced by the median of
	the masked 20x7 window around it; reads come from an unmodified copy,
	writes go to *lines* in place.
	'''
	copy = lines.copy()
	pix = im.load()
	line_pix = lines.load()
	copy_pix = copy.load()
	for x in xrange(im.size[0]):
		for y in xrange(im.size[1]):
			if pix[x, y] == BLACK and copy_pix[x, y] <= CONSEC_THRESHOLD:
				neb_vals = nebs(x, y, 20, 7, copy_pix, pix)
				new_val = median(neb_vals)
				line_pix[x, y] = new_val
def label_horz_lines(im):
	'''
	Takes an Image (thresholded) and returns an 'I' image with the horizontal
	consecutive regions labeled

	Each row is scanned left-to-right by a small state machine
	(START/BETWEEN/CONSEC/GAP/LABELING/END). Runs of black pixels may
	bridge up to MAX_GAP white pixels; every black pixel of a finished run
	is written to the output image with the run's total black-pixel count.
	'''
	im = im.convert('1')
	width, height = im.size
	mut = Image.new('I', im.size, color='black') # assign all 0s
	pix_orig = im.load()
	pix_mut = mut.load()
	x = 0 # iterator index
	_x = 0 # marks start of consecutive region
	y = 0 # iterator index
	gapped = 0
	consec = 0
	for y in xrange(height):
		state = START
		x = 0
		while state != END:
			#print "(%d, %d)" % (x, y)
			if state == START:
				# decide which state to go to
				if pix_orig[x, y] == BLACK:
					consec = 0
					state = CONSEC
					_x = x
				elif pix_orig[x, y] == WHITE:
					state = BETWEEN
				else:
					print "pix_orig[%d, %d] = %s" % (x, y, pix_orig[x, y])
					assert(False)
			elif state == BETWEEN:
				# burn through pixels
				if pix_orig[x, y] == BLACK:
					state = CONSEC
					consec = 0
					_x = x # record the start of a new consecutive block
				elif pix_orig[x, y] == WHITE:
					x += 1
					if x >= width:
						state = END
				else:
					print "pix_orig[%d, %d] = %s" % (x, y, pix_orig[x, y])
					assert(False)
			elif state == CONSEC:
				# burn through pixels, detect gap and end
				if pix_orig[x, y] == BLACK:
					consec += 1
					x += 1
					if x >= width:
						state = LABELING
				elif pix_orig[x, y] == WHITE:
					gapped = 0
					state = GAP
				else:
					print "pix_orig[%d, %d] = %s" % (x, y, pix_orig[x, y])
					assert(False)
			elif state == GAP:
				# go for a few pixels to see if we get BLACK again
				if pix_orig[x, y] == BLACK:
					state = CONSEC
				elif pix_orig[x, y] == WHITE:
					x += 1
					gapped += 1
					if x >= width or gapped > MAX_GAP:
						state = LABELING
				else:
					print "pix_orig[%d, %d] = %s" % (x, y, pix_orig[x, y])
					assert(False)
			elif state == LABELING:
				# Write the run length into every black pixel of the run [_x, x).
				for __x in xrange(_x, x):
					if pix_orig[__x, y] == BLACK:
						pix_mut[__x, y] = consec
				if x >= width:
					state = END
				else:
					state = START
			elif state == END:
				continue # exit condition
			else:
				print "State is %d" % state
				assert(False)
	# end loops
	return mut
def paint_lines(canvas, lines):
	'''
	:param canvas: Image 'RGB' to paint on. This is modified
	:param lines: Image 'I' line data. This is not modified

	Every pixel whose run length exceeds CONSEC_THRESHOLD is painted in
	COLOR_LABEL on the canvas.
	'''
	pix_canvas = canvas.load()
	pix_lines = lines.load()
	for x in xrange(lines.size[0]):
		for y in xrange(lines.size[1]):
			if pix_lines[x, y] > CONSEC_THRESHOLD:
				pix_canvas[x, y] = COLOR_LABEL
def line_detect(im, thresh=None, noise_gap=None, color=None):
'''
Performs rudimentary line detection
:param im: binary Image(L), not modified
:param thresh: int minimum line length
:param noise_gap: int minimum number of pixels between separate contiguous regions
:param color: color to mark the lines in the returned Image
'''
if thresh is not None:
CONSEC_THRESHOLD = thresh
if noise_gap is not None:
MAX_GAP = noise_gap
if color is not None:
COLOR_LABEL = color
im = im.copy()
rotated = im.rotate(90)
horz_lines = label_horz_lines(im)
vert_lines = label_horz_lines(rotated)
print "raw lines done"
#for x in xrange(1):
# smooth_median(rotated, vert_lines)
# smooth_median(im, horz_lines)
# print "done with round of median filter"
vert_lines = vert_lines.rotate(270)
canvas = im.convert('RGB')
paint_lines(canvas, horz_lines)
paint_lines(canvas, vert_lines)
return canvas
| true |
c06a32dcb3e5d91f4aa722270de0cd6829a80e7d | Python | ozkriff/misery | /misery/ast.py | UTF-8 | 3,127 | 2.5625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # -*- coding: utf-8 -*-
# See LICENSE file for copyright and license details
'''
Asbtract Syntax Tree
'''
from misery import (
misc,
)
class Module(object):
    """Root AST node: one translation unit (imports plus declarations)."""

    def __init__(self, import_list=None, decl_list=None):
        self.import_list = misc.tolist(import_list)
        self.decl_list = misc.tolist(decl_list)
        # Populated by a later analysis pass.
        self.ident_list = None
class Ident(object):
    """A bare identifier reference."""

    def __init__(self, name):
        self.name = name
class Field(object):
    """A named, typed field of a class declaration."""

    def __init__(self, name, datatype):
        self.name = name
        self.datatype = datatype
class ConstDecl(object):
    """A named constant: its type and initializer expression."""

    def __init__(self, name, datatype, expr):
        self.name = name
        self.datatype = datatype
        self.expr = expr
class ClassDecl(object):
    """A class declaration: its fields plus nested declarations."""

    def __init__(self, name, field_list=None, decl_list=None):
        self.name = name
        self.field_list = misc.tolist(field_list)
        self.decl_list = misc.tolist(decl_list)
class Number(object):
    """A numeric literal."""

    def __init__(self, value):
        self.value = value
        # Name of the temporary the code generator binds this literal to.
        self.binded_var_name = None
class String(object):
    """A string literal."""

    def __init__(self, value):
        self.value = value
        # Name of the temporary the code generator binds this literal to.
        self.binded_var_name = None
class FuncDecl(object):
    """A function declaration: name, signature and statement body."""

    def __init__(self, name, signature, body=None):
        self.name = name
        self.signature = signature
        self.body = misc.tolist(body)
        # TODO: move to ast.Block ...
        self.vars = {}
        self.tmp_vars = {}
        self.constants = {}
class FuncSignature(object):
    """Parameter list, generic parameters and return type of a function."""

    def __init__(self, param_list=None, generic_param_list=None, return_type=None):
        self.param_list = misc.tolist(param_list)
        self.generic_param_list = misc.tolist(generic_param_list)
        self.return_type = return_type
class Param(object):
    """A single typed function parameter."""

    def __init__(self, name, datatype):
        self.name = name
        self.datatype = datatype
class FuncCall(object):
    """A call expression: the callee expression plus its arguments."""

    def __init__(self, expr, arg_list=None):
        self.called_expr = expr
        self.arg_list = misc.tolist(arg_list)
        # Name of the temporary the code generator binds the result to.
        self.binded_var_name = None
class VarDecl(object):
    """A variable declaration, optionally initialized and stack-allocated."""

    def __init__(self, name, expr=None, datatype=None, allocate_memory_on_stack=False):
        self.name = name
        self.rvalue_expr = expr
        self.datatype = datatype
        self.allocate_memory_on_stack = allocate_memory_on_stack
class Assign(object):
    """Assignment of an expression to the variable *name*."""

    def __init__(self, name, expr=None, datatype=None):
        self.name = name
        self.rvalue_expr = expr
        self.datatype = datatype
class If(object):
    """Conditional with a mandatory if-branch and an optional else-branch."""

    def __init__(self, condition, branch_if, branch_else=None):
        self.condition = condition
        self.branch_if = branch_if
        self.branch_else = branch_else
class For(object):
    """A loop node: repeat *branch* while *condition* holds."""

    def __init__(self, condition, branch):
        self.condition = condition
        self.branch = branch
class Return(object):
    """A return statement with an optional value expression."""

    def __init__(self, expr=None):
        self.expr = expr
# vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
| true |
62ae332a7b51185f9b995d8f2ff5a54e6513b14b | Python | kautsiitd/Competitive_Programming | /CodeChef/Long/August 2017/HILLJUMP.py | UTF-8 | 576 | 2.609375 | 3 | [] | no_license | n,q = map(int,raw_input().split())
a = map(int,raw_input().split())  # hill heights (Python 2: map() returns a list, so in-place += below works)
for _ in range(q):
    Q = map(int,raw_input().split())
    if Q[0] == 1:
        # Query "1 i k": starting at hill i, move right taking up to k jumps
        # to strictly higher hills; print the 1-based index of the last hill reached.
        current,k = Q[1]-1,Q[2]
        lastBig = a[current]
        lastBigIndex = current
        # NOTE(review): the scan gives up once no higher hill appears within
        # 100 positions of the last jump target - a heuristic window,
        # presumably tied to the contest's constraints; confirm against the
        # HILLJUMP problem statement.
        while(current<n and k>0 and current - lastBigIndex <= 100):
            if a[current] > lastBig:
                lastBig = a[current]
                k-=1
                lastBigIndex = current
            current += 1
        print lastBigIndex+1
    else:
        # Query "2 l r x": add x to every hill in the inclusive range [l, r].
        l,r,x = Q[1:]
        for i in range(l-1,r):
            a[i] += x
| true |
4c757a960ce6ff776062bf6d969b8dd0a55c823d | Python | manxisuo/MachineLearningLibDIY | /algo.py | UTF-8 | 1,637 | 2.875 | 3 | [] | no_license | # encoding: utf-8
from typing import Tuple
from time import time
import numpy as np
from numpy import ndarray
from tool import CachedFunc
class History:
    """Record of one model-training run: per-iteration losses and wall time."""
    def __init__(self, loss_list, consuming_time):
        # Loss value observed after each iteration.
        self.loss_list = loss_list
        # Total training time in seconds.
        self.consuming_time = consuming_time
def bgd(X: ndarray, y: ndarray, _loss, _gradient_of_loss,
        alpha=0.01, num_iteration: int = None, epsilon: float = None,
        show_process=False, save_history=False) -> Tuple[ndarray, History]:
    """Batch gradient descent.

    :param X: design matrix, shape (n_samples, n_features)
    :param y: target vector
    :param _loss: callable (theta, X, y) -> scalar loss
    :param _gradient_of_loss: callable (theta, X, y) -> gradient vector
    :param alpha: learning rate
    :param num_iteration: stop after this many iterations (None/0 = unlimited)
    :param epsilon: stop when the loss change drops to <= epsilon (None/0 = unused)
    :param show_process: print epoch/loss/theta after every iteration
    :param save_history: record the loss after every iteration
    :return: (theta, History)

    NOTE(review): if neither num_iteration nor epsilon is set (positive),
    the loop never terminates - confirm callers always pass one of them.
    """
    start = time()
    theta = np.zeros(X.shape[1])  # initialize the model parameters to zeros
    loss_list = []  # loss recorded after each iteration (only when save_history)
    k = 0  # iteration counter
    previous_loss = _loss(theta, X, y)  # loss of the previous iteration
    func_loss = CachedFunc(_loss)  # cache so the loss is computed at most once per iteration
    while True:
        theta = theta - _gradient_of_loss(theta, X, y) * alpha
        # stop when the iteration budget is exhausted
        k += 1
        if num_iteration and 0 < num_iteration < k:
            break
        # stop once the loss has converged (change <= epsilon)
        if epsilon and 0 < epsilon:
            if abs(func_loss(theta, X, y) - previous_loss) <= epsilon:
                break
            previous_loss = func_loss(theta, X, y)
        if show_process:
            print(f'epoch: {k}, loss: {func_loss(theta, X, y)}, theta: {theta}')
        if save_history:
            loss_list.append(func_loss(theta, X, y))
        func_loss.reset()
    return theta, History(loss_list, time() - start)
| true |
01193520f4ca41afc35c5bd4e34b2f1ccaf42527 | Python | VireshDoshi/pd_test | /PerspectumDiagnostics.py | UTF-8 | 2,721 | 3.953125 | 4 | [] | no_license | #!/usr/bin/env python
import itertools
def strings_appear_in_multiple_lists(list_in):
    """Return a comma-separated string of the strings that appear in at
    least two of the given lists.

    Input: list of lists of strings (1..n lists)
    Output: str, e.g. "'a', 'b','c'" (chunks keep Python repr quoting)
    """
    shared_chunks = []
    # Intersect every unordered pair of lists, in the same order as the
    # original nested index loops.
    for first, second in itertools.combinations(range(len(list_in)), 2):
        overlap = [item for item in list_in[first] if item in list_in[second]]
        # Skip pairs with nothing in common.
        if overlap:
            # str(list)[1:-1] reproduces the quoted, comma-separated repr.
            shared_chunks.append(str(overlap)[1:-1])
    return ','.join(shared_chunks)
def number_of_unique_strings(list_in):
    """
    Return how many distinct strings appear across all the given lists.
    :param list_in: list of lists of strings
    :return: int count of unique strings
    """
    distinct = set()
    for sub in list_in:
        distinct.update(sub)
    return len(distinct)
def total_number_of_strings_processed(list_in):
    """
    Return the total number of strings across all the given lists,
    duplicates included.
    :param list_in: list of lists of strings
    :return: int count of strings processed
    """
    return sum(len(sub) for sub in list_in)
def perspectum_diagnostics_test(list_in):
"""
This method displays the output to the screen based on the list
:param list_in:
:return: print output to the screen
"""
print "Strings appearing in multiple lists: {0}".format(strings_appear_in_multiple_lists(list_in))
print "number of unique strings: {0}".format(number_of_unique_strings(list_in))
print "Total number of strings processed {0}".format(total_number_of_strings_processed(list_in))
if __name__ == '__main__':
    # Ad-hoc manual smoke-test data; only test_list_3 is actually run below.
    test_list_1 = [['a','b','c','dh'],['a','d','ha','e'],['f','g','h'],['c'],['dh'],['h','ha'],['e'],['d']]
    test_list_2 = [['g', 'gh', 'ghj', 'g'], ['j', 'ju', 'gh', 'gk', 'gn']]
    test_list_3 = [['a'],['f'],['f']]
    perspectum_diagnostics_test(test_list_3)
| true |
7f9421337bb1cb24b9b79b9fe14e0621faf2d8ac | Python | Jalabre1995/PasswordChecker | /checkmypass.py | UTF-8 | 1,690 | 3.515625 | 4 | [] | no_license | import requests
import hashlib
import sys
#API key for pwnedpasswords.com. The api is working when it runs a 200 in the command line, but if not then raise a runtimeError
def request_api_data(query_char):
    """Query the Pwned Passwords range API for hashes starting with *query_char*.

    :param query_char: first five hex characters of a SHA-1 password hash
    :return: requests.Response whose body lists "HASH_SUFFIX:COUNT" lines
    :raises RuntimeError: when the API answers with a non-200 status
    """
    url = 'https://api.pwnedpasswords.com/range/' + query_char
    # FIX: without a timeout requests.get() can hang forever on a stalled
    # connection; 10s is generous for this API.
    res = requests.get(url, timeout=10)
    if res.status_code != 200:
        raise RuntimeError(f'Error fetching: {res.status_code}, check the api and try again')
    return res
#Create a function where we keep count of how many times the password has been leaked.
def get_password_leaks_count(hashes, hash_to_check):
    """Scan an API range response for *hash_to_check*.

    :param hashes: response object whose .text holds "SUFFIX:COUNT" lines
    :param hash_to_check: SHA-1 hash suffix (chars 6..40) to look for
    :return: the breach count as a string, or None when not present
    """
    for line in hashes.text.splitlines():
        tail_hash, count = line.split(':')
        if tail_hash == hash_to_check:
            return count
    return None
def read_res(response):
    """Dump the raw text body of an API response to stdout (debug helper)."""
    print(response.text)
#using hashlib module to make sure password is hash from the last 5 character to the first 5 characters.
def pwned_api_check(password):
    """Return how many times *password* appears in known breaches.

    Uses the k-anonymity model: only the first five characters of the SHA-1
    hash are sent to the API; the matching suffix is looked up locally in
    the response.

    :return: count string when the password was breached, otherwise None
    """
    sha1password = hashlib.sha1(password.encode('utf-8')).hexdigest().upper()
    first5_char, tail = sha1password[:5], sha1password[5:]
    response = request_api_data(first5_char)
    # FIX: removed a leftover debug print(response) that dumped the raw
    # Response object (e.g. "<Response [200]>") into the user-facing output.
    return get_password_leaks_count(response, tail)
#import sys to run the arguments above in a for loop and check the passwords and count. This will return an response if the count was successful or it needs to be changed.
def main(args):
    """Check every password given on the command line and report its
    breach status; returns the sentinel string 'done!' when finished."""
    for candidate in args:
        count = pwned_api_check(candidate)
        if count:
            print(f'{candidate} was found {count} times.... you should change it')
        else:
            print(f'{candidate} was not found. Great Security')
    return 'done!'
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | true |
3ad9a37e9e171f8ce39f37f8c3144b8607c507e5 | Python | Aasthaengg/IBMdataset | /Python_codes/p02743/s099779834.py | UTF-8 | 155 | 2.734375 | 3 | [] | no_license | from decimal import *
import math
getcontext().prec=1000
# Decide whether sqrt(a) + sqrt(b) < sqrt(c) without floating-point error:
# squaring twice reduces the comparison to a + b + 2*sqrt(a*b) < c, and the
# square root is evaluated with 1000-digit Decimal precision.
a,b,c=map(int,input().split())
if a+b+2*Decimal(a*b).sqrt()<c:
    print("Yes")
else:
    print("No")
| true |
6e6e31ae85c91b104c6398fe84aeedb99585c379 | Python | nuonuozi/Python- | /Python_Primer/LogisticRegressionModel.py | UTF-8 | 2,846 | 3.171875 | 3 | [] | no_license | #导入相关包
# Train two linear classifiers (LogisticRegression and SGDClassifier) on the
# UCI Wisconsin breast-cancer data set and report their test-set metrics.
import pandas as pd
import numpy as np
# Feature column names of the breast-cancer-wisconsin data set.
column_names=['Sample code number','Clump Thickness','Uniformity Cell Size','Uniformity of Cell Shape','Marginal Adhesion','Single Epithelial Cell Size','Bare Nuclei','Bland Chromatin','Normal Nucleoli','Mitoses','Class']
# Download the data set from the UCI repository.
data=pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data',names=column_names)
# '?' marks a missing value in this data set; map it to NaN.
data=data.replace(to_replace='?',value=np.nan)
# Drop every row that has at least one missing value.
data=data.dropna(how='any')
# Shape of the cleaned data (rows, columns).
data.shape
# Hold out a random 25% of the data for testing; the other 75% trains the models.
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test=train_test_split(data[column_names[1:10]],data[column_names[10]],test_size=0.25, random_state=33)
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
# Standardize each feature to zero mean / unit variance so no feature with a
# large numeric range dominates the linear models.
ss=StandardScaler()
X_train=ss.fit_transform(X_train)
# BUG FIX: the test set must be scaled with the statistics fitted on the
# training set (transform), not re-fitted on itself (fit_transform) - the
# latter leaks test-set information and distorts the evaluation.
X_test=ss.transform(X_test)
# Fit LogisticRegression and SGDClassifier on the training split.
lr=LogisticRegression()
sgdc=SGDClassifier()
lr.fit(X_train,y_train)
lr_y_predict=lr.predict(X_test)
sgdc.fit(X_train,y_train)
sgdc_y_predict=sgdc.predict(X_test)
# Report test-set accuracy of LogisticRegression (fixed typo in the label).
print('Accuracy of LR Classifier:',lr.score(X_test,y_test))
from sklearn.metrics import classification_report
#print(classification_report(y_test,lr_y_predict,target_names=['Benign','Malignant']))
# Report test-set accuracy of SGDClassifier (fixed typo in the label).
print('Accuracy of SGD Classifier:',sgdc.score(X_test,y_test))
# Precision/recall/F1 for the SGD classifier.
print(classification_report(y_test,sgdc_y_predict,target_names=['Benign','Malignant']))
| true |
ae8b2127a1eb26bc78f417d0a8ba665ce0776835 | Python | ericsperanza/ST | /WarmColdDiffBZ.py | UTF-8 | 6,053 | 2.671875 | 3 | [] | no_license | # WarmColdDiff.py
from __future__ import print_function
import openpyxl
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import matplotlib.dates as mdates
from scipy import interpolate
from scipy import signal
from scipy.stats import ttest_ind
from scipy.stats import pearsonr
import numpy as np
import pandas as pd
# Load the Excel data and copy its values into the array arnum.
libro = openpyxl.load_workbook('compWC.xlsx')
hoja = libro.get_sheet_by_name('h1')
ar = pd.read_excel('compWC.xlsx','h1', header=0, index_col=None, na_values=['NA'])
arnum=ar.values
# Count how many samples belong to stations BZ and N (column 0 = station).
sizeBZ=0
for a in arnum[:,0]:
    if a == 'BZ':
        sizeBZ+=1
sizeN=0
for a in arnum[:,0]:
    if a == 'N':
        sizeN+=1
# Split warm ('calido') and cold samples for BZ and N (WBZ, CBZ, WN, CN);
# columns 2:5 hold flux, coprostanol and total sterols.
WBZ = np.empty((0,3))
CBZ = np.empty((0,3))
for row in range(sizeBZ):
    if arnum[row,1] == 'calido':
        WBZ = np.append(WBZ,[arnum[row,2:5]], axis=0)
    else:
        CBZ= np.append(CBZ,[arnum[row,2:5]], axis=0)
WN = np.empty((0,3))
CN = np.empty((0,3))
for row in range(sizeBZ,(sizeBZ+sizeN)):
    if arnum[row,1] == 'calido':
        WN = np.append(WN,[arnum[row,2:5]],axis=0)
    else:
        CN = np.append(CN,[arnum[row,2:5]],axis=0)
# Convert BZ coprostanol to mg/g so the numbers stay readable.
rows, cols = WBZ.shape
for a in range(rows):
    WBZ[a,1] = WBZ[a,1]/1000
print (WBZ[:,1])
rows, cols = CBZ.shape
for a in range(rows):
    CBZ[a,1] = CBZ[a,1]/1000
print (CBZ[:,1])
# Print mean +/- sample standard deviation (ddof=1) per station and variable.
print ("Warm vs. Cold\nBZ:")
print ("Flux: %.2f %s %.2f vs. %.2f %s %.2f"%((np.mean(WBZ[:,0])),u"\u00b1",(np.std(WBZ[:,0],dtype=float,ddof=1)),(np.mean(CBZ[:,0])),u"\u00b1",(np.std(CBZ[:,0],dtype=float,ddof=1))))
print ("Coprostanol: %.2f %s %.2f vs. %.2f %s %.2f"%((np.mean(WBZ[:,1])),u"\u00b1",(np.std(WBZ[:,1],dtype=float,ddof=1)),(np.mean(CBZ[:,1])),u"\u00b1",(np.std(CBZ[:,1],dtype=float,ddof=1))))
print ("Total ST: %.2f %s %.2f vs. %.2f %s %.2f"%((np.mean(WBZ[:,2])),u"\u00b1",(np.std(WBZ[:,2],dtype=float,ddof=1)),(np.mean(CBZ[:,2])),u"\u00b1",(np.std(CBZ[:,2],dtype=float,ddof=1))))
print ("N:")
print ("Flux: %.2f %s %.2f vs. %.2f %s %.2f"%((np.mean(WN[:,0])),u"\u00b1",(np.std(WN[:,0],dtype=float,ddof=1)),(np.mean(CN[:,0])),u"\u00b1",(np.std(CN[:,0],dtype=float,ddof=1))))
print ("Coprostanol: %.2f %s %.2f vs. %.2f %s %.2f"%((np.mean(WN[:,1])),u"\u00b1",(np.std(WN[:,1],dtype=float,ddof=1)),(np.mean(CN[:,1])),u"\u00b1",(np.std(CN[:,1],dtype=float,ddof=1))))
print ("Total ST: %.2f %s %.2f vs. %.2f %s %.2f"%((np.mean(WN[:,2])),u"\u00b1",(np.std(WN[:,2],dtype=float,ddof=1)),(np.mean(CN[:,2])),u"\u00b1",(np.std(CN[:,2],dtype=float,ddof=1))))
# creo el fondo de la figura
fig=plt.figure(facecolor='white', figsize=(11,8))
csfont = {'fontname':'Liberation Sans'}
# agrego la serie temporal
time1 = fig.add_axes([0.1, 0.12, 0.82, 0.85])
date = arnum[0:24,5]
flux = arnum[0:24,2]
ndate = np.empty((0,1))
for i in date:
ndate = np.append(ndate, (mdates.date2num(i)))
#rep = interpolate.splrep(ndate, flux, s=0)
newx = np.linspace(ndate.min(), ndate.max(), 200)
#newy = interpolate.splev(newx,rep)
dd = mdates.num2date(newx)
akima1 = interpolate.Akima1DInterpolator(ndate, flux)
time1.plot(newx, akima1(newx), 'black', linewidth=3)
time1.plot(date, flux, 'o', color = 'black', markersize=10, markeredgecolor = 'black')
time1.xaxis.set_major_locator(mdates.MonthLocator(interval = 4))
time1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
#time1.plot(dd, newy)
plt.ylim(0,350)
plt.yticks(np.arange(0,351,100), size = 16, **csfont)
time1.set_ylabel('Vertical flux (g.$\mathregular{cm^{-2}.day^{-1}}$)', size = 22, **csfont)
fig.autofmt_xdate(rotation = 90)
time1.tick_params(axis='x', which='major', labelsize=14)
# ploteo el copr en timeserie
time2 = time1.twinx()
copr = arnum[0:24,3]
# paso el copro a mg/g en el array copr
for a in range (0,24):
copr[a] = copr[a]/1000
akima2 = interpolate.Akima1DInterpolator(ndate, copr)
time2.plot(newx, akima2(newx), 'black', linestyle='--',linewidth=3 )
time2.plot(date, copr, 'o', color = 'white', markersize=10,markeredgecolor = 'black')
plt.ylim(0,20)
plt.yticks(np.arange(0,21,8), size = 16, **csfont)
time2.set_ylabel('Coprostanol (mg.$\mathregular{g^{-1}}$)', size = 22, **csfont)
plt.xticks(size = 16, **csfont)
# agrego el boxplot
box1 = fig.add_axes([0.16, 0.60, 0.3, 0.35])
# elndarray tiene dtype=object y no sirve xa hacer bplot directo
WBZ = WBZ.astype(float)
CBZ = CBZ.astype(float)
# ploteo flujo
bprop = dict(linewidth = 2, color = 'black')
wprop = dict(linewidth = 2, linestyle = '-', color = 'black')
mprop = dict(linestyle = '-', linewidth = 2, color = 'white')
pprop = dict(marker = 'o', markeredgecolor = 'white', markerfacecolor = 'white')
cap = dict(linewidth = 2)
box1.boxplot([WBZ[:,0],CBZ[:,0]], vert=True, positions = (0.9,1.9), notch=False, patch_artist = True, showmeans = True, showfliers = False, boxprops = bprop, whiskerprops = wprop, medianprops = mprop, meanprops = pprop, capprops = cap)
plt.ylim(0,255)
plt.yticks(np.arange(0,255,125), size = 16, **csfont)
#box1.set_ylabel('Flux', size = 22)
# ploteo coprostanol
bprop2 = dict(linewidth = 2)
wprop2 = dict(linewidth = 2, linestyle = '-', color = 'black')
mprop2 = dict(linestyle = '-', linewidth = 2, color = 'black')
pprop2 = dict(marker = 'o', markeredgecolor = 'black', markerfacecolor = 'black')
cap2 = dict(linewidth = 2)
box2 = box1.twinx()
box2.boxplot([WBZ[:,1], CBZ[:,1]], notch=None, vert = True, positions = (0.9,2.2), patch_artist = True, showmeans = True, showfliers = False,boxprops = bprop2, whiskerprops = wprop, medianprops = mprop2, meanprops = pprop2, capprops = cap2)
plt.ylim(0,18)
plt.yticks(np.arange(0,18,8), size = 16, **csfont)
#box2.set_ylabel('Copr', size = 22)
plt.xticks(np.arange(4), ('','Warm','Cold',''), size =22, **csfont)
plt.setp(box1.get_xticklabels(), visible = 'True', fontsize = 22)
print("t-test BZ (flux, copr, total)")
for a in range(3):
print(ttest_ind(WBZ[:,a],CBZ[:,a]))
print("Pearson for Flux-Copr:")
print(pearsonr(flux,copr))
plt.show()
| true |
500d211b2130056f2e80339145858f5cb1aca547 | Python | tekemperor/snsync | /simplenote_sync/config.py | UTF-8 | 2,803 | 2.609375 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | """
Configuration settings for snsync
"""
# pylint: disable=W0702
# pylint: disable=C0301
import os
import collections
import configparser
class Config:
    """Load and expose snsync configuration settings.

    Settings are read from an INI-style file (``~/.snsync`` by default, or
    *custom_file* when supplied) under the ``[snsync]`` section.  Every key
    missing from the file falls back to a built-in default, so a Config
    object is always fully populated.
    """
    def __init__(self, custom_file=None):
        """Read the configuration file and build the settings table.

        :param custom_file: path to an alternative configuration file;
            when ``None``, ``~/.snsync`` in the user's home directory is used.
        """
        self.home = os.path.abspath(os.path.expanduser('~'))

        # Static defaults, applied for any key absent from the file.
        defaults = \
            {
                'cfg_sn_username' : '',
                'cfg_sn_password' : '',
                'cfg_nt_ext' : 'txt',
                'cfg_nt_path' : os.path.join(self.home, 'Simplenote'),
                'cfg_nt_trashpath' : '.trash',
                'cfg_nt_filenamelen' : '60',
                'cfg_log_level' : 'info'
            }

        # Fix: SafeConfigParser has been a deprecated alias of ConfigParser
        # since Python 3.2 and was removed in 3.12; ConfigParser is a
        # drop-in replacement with identical behaviour here.
        cp = configparser.ConfigParser(defaults)

        # ConfigParser.read() silently skips missing files and returns the
        # list of files actually parsed (possibly empty).
        if custom_file is not None:
            self.configs_read = cp.read([custom_file])
        else:
            self.configs_read = cp.read([os.path.join(self.home, '.snsync')])

        cfg_sec = 'snsync'

        if not cp.has_section(cfg_sec):
            # Guarantee the section exists so every get() below can fall
            # back on the defaults without raising NoSectionError.
            cp.add_section(cfg_sec)

        # Each entry maps setting name -> [value, human-readable description].
        self.configs = collections.OrderedDict()
        # raw=True: credentials may legitimately contain '%' characters,
        # which would otherwise trigger interpolation errors.
        self.configs['sn_username'] = [cp.get(cfg_sec, 'cfg_sn_username', raw=True), 'Simplenote Username']
        self.configs['sn_password'] = [cp.get(cfg_sec, 'cfg_sn_password', raw=True), 'Simplenote Password']
        self.configs['cfg_nt_ext'] = [cp.get(cfg_sec, 'cfg_nt_ext'), 'Note file extension']
        self.configs['cfg_nt_path'] = [cp.get(cfg_sec, 'cfg_nt_path'), 'Note storage path']
        self.configs['cfg_nt_trashpath'] = [cp.get(cfg_sec, 'cfg_nt_trashpath'), 'Note Trash Bin Folder for deleted notes']
        self.configs['cfg_nt_filenamelen'] = [cp.get(cfg_sec, 'cfg_nt_filenamelen'), 'Length of Filename']
        self.configs['cfg_log_level'] = [cp.get(cfg_sec, 'cfg_log_level'), 'snsync log level']

        # Dynamic defaults: these derive from cfg_nt_path unless set explicitly.
        if cp.has_option(cfg_sec, 'cfg_db_path'):
            self.configs['cfg_db_path'] = [cp.get(cfg_sec, 'cfg_db_path'), 'snsync database location']
        else:
            self.configs['cfg_db_path'] = [os.path.join(cp.get(cfg_sec, 'cfg_nt_path'), '.snsync.sqlite'), 'snsync database location']

        if cp.has_option(cfg_sec, 'cfg_log_path'):
            self.configs['cfg_log_path'] = [cp.get(cfg_sec, 'cfg_log_path'), 'snsync log location']
        else:
            self.configs['cfg_log_path'] = [os.path.join(cp.get(cfg_sec, 'cfg_nt_path'), '.snsync.log'), 'snsync log location']

    def get_config(self, name):
        """Return the value of the setting *name*."""
        return self.configs[name][0]

    def get_config_descr(self, name):
        """Return the human-readable description of the setting *name*."""
        return self.configs[name][1]
| true |
c1a524d57adab1f93d3e7c7264e74e99238803f2 | Python | najibelkihel/python-crash-course | /Chapter 4 Working with Lists - Lessons/magicians.py | UTF-8 | 1,081 | 4.21875 | 4 | [] | no_license | players = ['magic', 'lebron', 'kobe']
# Print every entry of the players list, one per line.
for player in players:
    print(player)

# By convention the loop variable is the singular of the list name
# (player / players); everything indented under the for header runs
# once per item.
for player in players:
    print(f"Dear {player.title()}, you've been invited to our dinner")

# A loop body may contain several statements -- both prints repeat for
# every player.  A statement written after the loop, at the outer
# indentation level, would run only once, after the loop finishes.
for player in players:
    print(f"Dear {player.title()}, you've been invited to our dinner")
    print(f"Looking forward to having you with us, {player.title()}.")
96c554bf88f06840ca97e905d0933f4ed198963e | Python | pylinx64/wen_python_16 | /wen_python_16/list_1.py | UTF-8 | 261 | 3.46875 | 3 | [] | no_license | colors=['red', 'purple', 'blue', 'orange', 'yellow']
print(colors)
# Individual elements are reached with zero-based indexing.
print(colors[0])
print(colors[1])
print(colors[2])
print(colors[3])
# Same traversal two ways: a manual index with a while loop ...
i = 0
list_len = len(colors)
while i < list_len:
    print(colors[i])
    i = i + 1
# ... and the idiomatic for-each loop.
for i in colors:
    print(i)
| true |
460b02658fd4daa49b589e472205c76c173eace4 | Python | DeyanGrigorov/Python-Advanced | /Multidimensional_lists./2x2.py | UTF-8 | 452 | 3.21875 | 3 | [] | no_license | m, n = list(map(int, input().split(' ')))
matrix = [[''] * n for i in range(m)]
num_squares = 0
# Read the m x n grid row by row from stdin; as each cell arrives,
# compare it with its left, upper and upper-left neighbours -- if all
# four match, one more 2x2 block of equal cells has been completed.
for i in range(m):
    row = input().split(' ')
    for j in range(n):
        matrix[i][j] = row[j]
        if i - 1 >= 0 and j - 1 >= 0:
            if matrix[i][j] == matrix[i][j - 1] and \
                    matrix[i][j] == matrix[i - 1][j] and \
                    matrix[i][j] == matrix[i - 1][j - 1]:
                num_squares += 1
print(num_squares)
a0d1497a9d31f1eaa704ea31e9477de9222470dc | Python | SomeoneSerge/cf | /345/b.py | UTF-8 | 351 | 2.90625 | 3 | [] | no_license | # ATTENTION:
# never attend codeforces if u're drunk
# Reads n and then n integers from stdin.  Each sweep removes one copy of
# every distinct value that still has copies left and adds
# (values removed in this sweep - 1) to the answer C.
n = int(input())
a = sorted([int(x) for x in input().split()])
# frequency table of the input values
c = dict()
for x in a: c[x] = c[x]+1 if x in c else 1
uniq = sorted(c.keys())
C=0
while n>0:
    t = 0
    # one sweep over the distinct values still present
    for u in uniq:
        if c[u] == 0:
            continue
        c[u] -= 1
        n -= 1
        t += 1
    C += t-1
print(C)
| true |
d8d5bdcd989aa05469402a1984955ec355908973 | Python | AngelSosaGonzalez/IntroduccionMachineLearning | /Machine Learning/IntroduccionML/Preprocesamiento/AgrupaDatos.py | UTF-8 | 1,864 | 4 | 4 | [] | no_license | """ Agrupacio de datos: El agrupamiento de datos o binning en ingles, es un método de preprocesamiento de datos y consiste en agrupar valores
en compartimientos. En ocasiones este agrupamiento puede mejorar la precisión de los modelos predictivos y, a su vez, puede mejorar la
comprensión de la distribución de los datos.
Antes de comenzar quiero aclarar que este proyecto se basa (o copia mas bien) del curso de Machine Learning del canal de: AprendeIA con Ligdi Gonzalez,
fuente: https://www.youtube.com/watch?v=Ij-j7XLVXCw&list=PLJjOveEiVE4BK9Vnnl99H2IlYGhmokn1V&index=5 """
# Start by importing the pandas module
import pandas as pd
# Load the Titanic data produced by the previous preprocessing step
DataFrameTitanic = pd.read_csv('IntroduccionML/Preprocesamiento/Data/DataFrameEdit.csv')
# Sanity-check that the file was read correctly
print(DataFrameTitanic.head())
""" Para este proyecto nos basaremos en los rangos dados por el tutorial que son:
- El primer grupo lo comprenda las personas con edades entre 0 a 5,
- El segundo grupo serán las personas con edades entre 6 a 12,
- El tercer grupo estarán las personas entre 13 a 18 años,
- El cuarto grupo estará formado por las personas con edades comprendidas entre 19 a 35 años,
- El quinto lo forman las personas entre 36 años a 60, y
- El último grupo esta comprendido por las personas entre 61 año a 100 años. """
""" Comenzamos definiendo los rangos, este arreglo inicia con el numero donde inicia el primer rango '0' y
los demas datos del arreglo con los numero que finalizan: '5, 12, 18, 35, 60, 100' """
# Bin edges: the first value opens the first range, the rest close each range
Agrupacio = [0, 5, 12, 18, 35, 60, 100]
# Name the bins; plain labels '1'..'6' are used (any names would do)
Nombre = ['1', '2', '3', '4', '5', '6']
# Bin the 'Edad' (age) column into the six groups and show the result
print(pd.cut(DataFrameTitanic['Edad'], Agrupacio, labels = Nombre))
| true |
40cdc642cc22d2dafc3d5121f6e06f0074dd035a | Python | Sitarweb/Python_study | /pythontutor_5/num_8.py | UTF-8 | 472 | 3.703125 | 4 | [] | no_license | #Дана строка, в которой буква h встречается как минимум два раза.
# Reverse the characters enclosed between the first and the last
# occurrence of the letter 'h' (the input contains at least two 'h').
s = input()
a = s[:s.find("h") + 1]  # prefix up to and including the first 'h'
b = s[s.find("h") + 1 : s.rfind("h")]  # part strictly between the two 'h'
c = s[s.rfind("h"):]  # suffix from the last 'h' onwards
s = a + b[::-1] + c
print(s)
c466a59dae7fd36e68e782b0e6e2826f22d37d99 | Python | busisd/PythonPractice | /ImageGradient.py | UTF-8 | 441 | 3.109375 | 3 | [] | no_license | from PIL import Image
MAX_X = 400
MAX_Y = 400
# Blank RGB canvas, filled pixel by pixel below.
pic = Image.new("RGB", (MAX_X, MAX_Y))
pic_pix = pic.load()
for i in range(0, MAX_X):
    for j in range(0, MAX_Y):
        # Red fades out left to right, green fades in top to bottom,
        # blue is the average of the two ramps.
        color_R = 255 - int(255*(i/(MAX_X-1)))
        color_G = int(255*(j/(MAX_Y-1)))
        color_B = int(255*(i/(MAX_X-1))/2) + int(255*(j/(MAX_Y-1))/2)
        pic_pix[i,j] = (color_R, color_G, color_B)
        #print(color_R, color_G, color_B)
pic.show()
| true |
8cc006c910d08a16b716929f7250137b36e1c152 | Python | daniel-reich/ubiquitous-fiesta | /jzCGNwLpmrHQKmtyJ_0.py | UTF-8 | 101 | 3.171875 | 3 | [] | no_license |
def parityAnalysis(num):
    """Return True when the digit sum of num has the same parity as num."""
    total = 0
    for digit_char in str(num):
        total += int(digit_char)
    # Same parity exactly when the difference is even.
    return (total - num) % 2 == 0
| true |
6b52c1116933018a1e200fc036c7acf7300971d7 | Python | weiaicunzai/blender_shapenet_render | /render_helper.py | UTF-8 | 6,938 | 3.375 | 3 | [] | no_license | """render_helper.py contains functions that processing
data to the format we want
Available functions:
- load_viewpoint: read viewpoint file, load viewpoints
- load_viewpoints: wrapper function for load_viewpoint
- load_object_lists: return a generator of object file pathes
- camera_location: return a tuple contains camera location (x, y, z)
in world coordinates system
- camera_rot_XYZEuler: return a tuple contains cmera ration
- random_sample_objs: sample obj files randomly
- random_sample_vps: sample vps from viewpoint file
- random_sample_objs_and_vps: wrapper function for sample obj
and viewpoints
author baiyu
"""
import os
import glob
import math
import random
from collections import namedtuple
from settings import *
# need to write outside the function, otherwise pickle can find
# where VP were defined
VP = namedtuple('VP',['azimuth', 'elevation', 'tilt', 'distance'])
Model = namedtuple('Model', ['path', 'vps'])
def load_viewpoint(viewpoint_file):
    """read viewpoints from a file, can only read one file at once

    Args:
        viewpoint_file: file path to viewpoint file, read only one file
        for each function call

    Returns:
        generator of viewpoint parameters(contains azimuth,elevation,tilt angles and distance)
    """
    # Each whitespace-separated line maps onto the VP namedtuple fields.
    # NOTE: the fields stay *strings*; consumers such as camera_location
    # convert them with float() themselves.
    with open(viewpoint_file) as viewpoints:
        for line in viewpoints.readlines():
            yield VP(*line.strip().split())
def load_viewpoints(viewpoint_file_list):
    """load multiple viewpoints file from given lists

    Wrapper for the load_viewpoint function.

    Args:
        viewpoint_file_list: a single viewpoint file path or an iterable
            of such paths

    Returns:
        a generator yielding one load_viewpoint generator per path

    Bug fixed: the original wrapped a ``str`` argument in a list but then
    unconditionally overwrote that list with ``iter(viewpoint_file_list)``,
    so a single path was iterated character by character; a non-iterable
    argument also left ``vp_file_list`` unbound after printing the warning.
    """
    if isinstance(viewpoint_file_list, str):
        viewpoint_file_list = [viewpoint_file_list]

    try:
        vp_file_list = iter(viewpoint_file_list)
    except TypeError:
        print("viewpoint_file_list is not an iterable object")
        # Nothing to iterate: end the generator instead of crashing below.
        return

    for vp_file in vp_file_list:
        yield load_viewpoint(vp_file)
def load_object_lists(category=None):
    """
    load object paths according to the given category

    Args:
        category: an iterable of category names (or a single name) to
            render; defaults to the g_render_objs list from settings

    Returns:
        generator of generators of obj file paths
    """
    #type checking
    if not category:
        category = g_render_objs
    elif isinstance(category, str):
        category = [category]
    else:
        try:
            iter(category)
        except TypeError:
            # NOTE(review): only a message is printed here; the loop below
            # will still raise on a non-iterable category -- confirm intent.
            print("category should be an iterable object")

    #load obj file path
    # g_shapenet_categlory_pair maps a category name to its ShapeNet
    # synset number; obj files are searched recursively under it.
    for cat in category:
        num = g_shapenet_categlory_pair[cat]
        search_path = os.path.join(g_shapenet_path, num, '**','*.obj')
        yield glob.iglob(search_path, recursive=True)
def camera_location(azimuth, elevation, dist):
    """Convert object-centered spherical coordinates to the camera's
    Cartesian (x, y, z) position in the Blender world frame.

    Args:
        azimuth: azimuth in degrees (numeric or numeric string)
        elevation: elevation in degrees (numeric or numeric string)
        dist: camera-to-object distance in meters

    Returns:
        (x, y, z) world coordinates in meters
    """
    # Degrees -> radians; inputs may arrive as strings from the vp files.
    phi = float(elevation) * math.pi / 180
    theta = float(azimuth) * math.pi / 180
    radius = float(dist)

    cos_phi = math.cos(phi)
    return (radius * cos_phi * math.cos(theta),
            radius * cos_phi * math.sin(theta),
            radius * math.sin(phi))
def camera_rot_XYZEuler(azimuth, elevation, tilt):
    """Compute the camera rotation as XYZ-ordered Euler angles in radians.

    The camera's base pose is (90, 0, 90) degrees, i.e. sitting on the x
    axis facing the object; elevation tips it along the latitude and
    azimuth spins it along the longitude.

    Args:
        azimuth: azimuth in degrees (object centered)
        elevation: elevation in degrees (object centered)
        tilt: twist in degrees; converted for validation but currently
            unused (the twist branch is disabled)

    Returns:
        (x, y, z) Euler rotation in radians
    """
    azimuth, elevation, tilt = float(azimuth), float(elevation), float(tilt)

    rot_x = 90 - elevation   # latitude
    rot_y = 0                # no twist applied
    rot_z = 90 + azimuth     # longitude

    return rot_x * math.pi / 180, rot_y * math.pi / 180, rot_z * math.pi / 180
def random_sample_objs(num_per_cat):
    """randomly sample object files from ShapeNet for each
    category in global variable g_render_objs

    Args:
        num_per_cat: how many obj files to sample per category

    Returns:
        a dictionary mapping each category name to its list of sampled
        obj file paths
    """
    obj_path_lists = load_object_lists(g_render_objs)
    obj_path_dict = {}

    for cat, pathes in zip(g_render_objs, obj_path_lists):
        pathes = list(pathes)
        # NOTE: random.sample already draws uniformly regardless of list
        # order, so this shuffle is redundant (but harmless).
        random.shuffle(pathes)
        samples = random.sample(pathes, num_per_cat)
        obj_path_dict[cat] = samples

    return obj_path_dict
def random_sample_vps(obj_path_dict, num_per_model):
    """randomly sample viewpoints for every sampled model

    Args:
        obj_path_dict: result of random_sample_objs, mapping category
            name to obj file paths
        num_per_model: how many viewpoints to sample per model

    Returns:
        result_dict: a dictionary mapping each category name to a list of
            Model namedtuples (obj path + its sampled viewpoints)
    """
    # One viewpoint file per category, looked up from settings.
    vp_file_lists = [g_view_point_file[name] for name in g_render_objs]
    viewpoint_lists = load_viewpoints(vp_file_lists)

    obj_file_pathes = [obj_path_dict[name] for name in g_render_objs]

    result_dict = {}
    for cat, pathes, vps in zip(g_render_objs, obj_file_pathes, viewpoint_lists):
        # vps is a generator; materialize it so it can be sampled repeatedly.
        vps = list(vps)
        random.shuffle(vps)
        models = []
        for p in pathes:
            # Each model gets its own independent draw of viewpoints.
            samples = random.sample(vps, num_per_model)
            models.append(Model(p, samples))

        result_dict[cat] = models

    return result_dict
def random_sample_objs_and_vps(model_num_per_cat, vp_num_per_model):
    """wrapper function for randomly sampling models and viewpoints

    Each category in g_render_objs maps to multiple Model objects; each
    Model has a path attribute (where the obj file is) and a vps
    attribute (viewpoints to render the obj file with).

    Args:
        model_num_per_cat: how many models to sample per category
        vp_num_per_model: how many viewpoints to sample per model

    Returns:
        a dict mapping category names to lists of Model objects
    """
    # Two-stage sampling: first the models, then viewpoints per model.
    obj_path_dict = random_sample_objs(model_num_per_cat)
    result_dict = random_sample_vps(obj_path_dict, vp_num_per_model)

    return result_dict
| true |
781ef8d3e860ed00e74e21feddc8660049f96006 | Python | sfade070/keras_min | /activations/activations.py | UTF-8 | 550 | 3.015625 | 3 | [] | no_license | import numpy as np
def relu(x):
    """Rectified linear unit: elementwise max(x, 0)."""
    floored = np.maximum(x, 0)
    return floored
def linear(x):
    """Identity activation: passes the input through unchanged."""
    return x
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^(-x)), elementwise."""
    return 1 / (1 + np.exp(-x))
def softmax_function(x):
    """Softmax over all elements of x (intended for a 1-D array).

    Fix: the exponentials are shifted by max(x) before normalization; this
    leaves the result mathematically unchanged but prevents overflow to
    inf/NaN for large inputs.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted)
def softplus_function(x):
    """Softplus log(1 + e^x), elementwise.

    Fix: computed as logaddexp(0, x) so that large x no longer overflows
    np.exp to inf (the original returned inf with a RuntimeWarning).
    """
    return np.logaddexp(0, x)
def softsign_function(x):
    """Softsign x / (1 + |x|), elementwise."""
    return x / (1 + np.abs(x))
def tanh_function(x):
    """Hyperbolic tangent activation, elementwise."""
    result = np.tanh(x)
    return result
def elu_function(x, alpha):
    """Exponential linear unit: x where x > 0, alpha*(e^x - 1) elsewhere.

    Fix: the original multiplied the negative-branch formula by the
    positive mask, i.e. alpha*(exp(x)-1)*(x > 0), which returned the wrong
    value for positive inputs and 0 for negative ones.
    """
    return np.where(x > 0, x, alpha * (np.exp(x) - 1))
| true |
d52d5520348aae755fd718313ea5e49aeb0a6fa3 | Python | tanujdhiman/OpenCV | /Click image and read data from Image/click_image.py | UTF-8 | 356 | 2.890625 | 3 | [] | no_license | import cv2
import matplotlib.pyplot as plt
# Open the default camera (device index 0).
cap = cv2.VideoCapture(0)
if cap.isOpened():
    # Grab one frame; ret reports whether the read succeeded.
    ret, frame = cap.read()
    print(ret)
    print(frame)
else:
    ret = False
    # NOTE(review): if the camera fails to open, `frame` is never
    # assigned, so cvtColor below raises NameError -- confirm intent.
# OpenCV returns BGR channel order; convert to RGB for matplotlib.
img1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
plt.imshow(img1)
plt.title('Image Camera-1')
plt.xticks([])
plt.yticks([])
plt.show()
cap.release()
print(frame)
c5def224e5d2e9fa2b3eaebf05bad467bfaf70f9 | Python | MegEngine/MegEngine | /lite/pylite/megenginelite/utils.py | UTF-8 | 8,702 | 2.71875 | 3 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
import threading
import warnings
import numpy as np
from .base import *
from .struct import *
from .tensor import *
class TensorBatchCollector:
    """
    A tensor utility that collects many single-batch tensors into one
    multi-batch tensor; when collection finishes, the result tensor can
    be fetched and fed to the model input for forwarding.

    A collected single-batch tensor does not need to share device_type or
    device_id with the result tensor, but its dtype must match and its
    shape must match except for the highest (batch) dimension.

    Args:
        shape: the multi-batch tensor shape; after collection, the result
            tensor shape.
        dtype(LiteDataType): datatype of the single-batch tensors and the
            result tensor, default value is LiteDataType.LITE_INT8.
        device_type(LiteDeviceType): the target device type the result
            tensor will allocate, default value is LiteDeviceType.LITE_CUDA.
        device_id: the device id the result tensor will allocate, default 0.
        is_pinned_host: whether the memory is pinned memory, refer to CUDA
            pinned memory, default False.
        tensor(LiteTensor): the result tensor; the user can create the
            multi-batch tensor first and pass it here, in which case shape,
            dtype, device_type, device_id and is_pinned_host are all taken
            from the tensor.  When None, the result tensor is created by
            the TensorBatchCollector itself.  Default None.

    Note:
        when collecting a tensor, the single-batch tensor or array shape
        must match the result tensor shape except the batch size dimension
        (the highest dimension)

    Examples:

        .. code-block:: python

            import numpy as np
            batch_tensor = TensorBatchCollector([4, 8, 8])
            arr = np.ones([8, 8], "int8")
            for i in range(4):
                batch_tensor.collect(arr)
                arr += 1
            data = batch_tensor.to_numpy()
            assert data.shape[0] == 4
            assert data.shape[1] == 8
            assert data.shape[2] == 8
            for i in range(4):
                for j in range(64):
                    assert data[i][j // 8][j % 8] == i + 1
    """

    def __init__(
        self,
        shape,
        dtype=LiteDataType.LITE_INT8,
        device_type=LiteDeviceType.LITE_CUDA,
        device_id=0,
        is_pinned_host=False,
        tensor=None,
    ):
        # Guards _free_list against concurrent collect/free calls.
        self._mutex = threading.Lock()
        self.dev_type = device_type
        self.is_pinned_host = is_pinned_host
        self.dev_id = device_id
        self.shape = shape
        self.dtype = LiteLayout(dtype=dtype).data_type
        # Batch ids that have not been filled yet.
        self._free_list = list(range(self.shape[0]))

        if tensor is not None:
            assert (
                tensor.layout.shapes[0 : tensor.layout.ndim] == shape
            ), "The tensor set to TensorBatchCollector is not right."
            self._tensor = tensor
            self.dtype = tensor.layout.data_type
            # Bug fix: the original assigned tensor.device_type to both a
            # device_type AND a device_id attribute (names that were never
            # read elsewhere), so a user-supplied tensor's placement never
            # reached the dev_type/dev_id attributes used below.
            self.dev_type = tensor.device_type
            self.dev_id = tensor.device_id
        else:
            self._tensor = LiteTensor(
                LiteLayout(shape, dtype), device_type, device_id, is_pinned_host
            )

    def collect_id(self, array, batch_id):
        """
        Collect a single batch through an array and store the array data to
        the specific batch_id.

        Args:
            array: an array, either LiteTensor or numpy ndarray; its shape
                must match the result tensor shape except the highest
                dimension.
            batch_id: the batch id to store the array data to; if batch_id
                has already been collected, a warning is generated (the
                data is still overwritten).
        """
        # Reserve the batch index under the lock.
        with self._mutex:
            if batch_id in self._free_list:
                self._free_list.remove(batch_id)
            else:
                warnings.warn(
                    "batch {} has been collected, please call free before collected it again.".format(
                        batch_id
                    )
                )
        self._collect_with_id(array, batch_id)

    def _collect_with_id(self, array, batch_id):
        # Copy `array` into the batch_id slice of the result tensor.
        if isinstance(array, np.ndarray):
            shape = array.shape
            assert list(shape) == self.shape[1:]
            in_dtype = ctype_to_lite_dtypes[np.ctypeslib.as_ctypes_type(array.dtype)]
            assert in_dtype == self.dtype
            # get the subtensor
            subtensor = self._tensor.slice([batch_id], [batch_id + 1])
            if subtensor.device_type == LiteDeviceType.LITE_CPU:
                subtensor.set_data_by_copy(array)
            else:
                # Stage through a pinned host tensor for the device copy.
                pinned_tensor = LiteTensor(
                    subtensor.layout, self.dev_type, self.dev_id, True
                )
                pinned_tensor.set_data_by_share(array)
                subtensor.copy_from(pinned_tensor)
        else:
            assert isinstance(array, LiteTensor)
            ndim = array.layout.ndim
            shape = list(array.layout.shapes)[0:ndim]
            assert list(shape) == self.shape[1:]
            in_dtype = array.layout.data_type
            assert in_dtype == self.dtype
            # get the subtensor
            subtensor = self._tensor.slice([batch_id], [batch_id + 1])
            subtensor.copy_from(array)
        return batch_id

    def collect(self, array):
        """
        Collect a single batch through an array and store its data to the
        next empty batch (the front batch id in the free list).

        Args:
            array: an array, either LiteTensor or numpy ndarray; its shape
                must match the result tensor shape except the highest
                dimension.

        Returns:
            the batch id used, or -1 when every batch is already collected.
        """
        with self._mutex:
            if len(self._free_list) == 0:
                warnings.warn(
                    "all batch has been collected, please call free before collect again."
                )
                return -1
            idx = self._free_list.pop(0)
        return self._collect_with_id(array, idx)

    def collect_by_ctypes(self, data, length):
        """
        Collect a single batch from a ctypes memory buffer and store it to
        the next empty batch (the front batch id in the free list).

        Args:
            data: ctypes memory pointer holding one batch of data
            length: length in bytes of the memory buffer

        Returns:
            -1 when every batch is already collected, otherwise None.
        """
        with self._mutex:
            if len(self._free_list) == 0:
                return -1
            idx = self._free_list.pop(0)
        # get the subtensor
        subtensor = self._tensor.slice([idx], [idx + 1])
        if subtensor.device_type == LiteDeviceType.LITE_CPU:
            subtensor.set_data_by_copy(data, length)
        else:
            # Stage through a pinned host tensor for the device copy.
            pinned_tensor = LiteTensor(
                subtensor.layout, self.dev_type, self.dev_id, True
            )
            pinned_tensor.set_data_by_share(data, length)
            subtensor.copy_from(pinned_tensor)

    def free(self, indexes):
        """
        Free the batch ids in indexes; after a batch id is freed it can be
        collected again without a warning.

        Args:
            indexes: a list of batch ids to free
        """
        with self._mutex:
            for i in indexes:
                if i in self._free_list:
                    warnings.warn(
                        "batch id {} has not collected before free it.".format(i)
                    )
                    self._free_list.remove(i)
            self._free_list.extend(indexes)

    def get_tensor_at(self, idx):
        """
        Get the sub-tensor at batch id idx from the internal result tensor.
        A warning is generated when idx has not been collected yet.

        Args:
            idx: the tensor index in the internal multi-batch tensor
        """
        assert idx < self.shape[0], "the idx specific the tensor is out of range."
        if idx in self._free_list:
            warnings.warn(
                "tensor with batch id {} has not collected before get it.".format(idx)
            )
        return self._tensor.slice([idx], [idx + 1])

    def get(self):
        """
        Return the multi-batch result tensor (call after collection is done).
        """
        return self._tensor

    def to_numpy(self):
        """
        Convert the result tensor to a numpy ndarray.
        """
        return self._tensor.to_numpy()
| true |
ea172dbb588454af7e81cb36c4edee39287c3aba | Python | suwendtc/dash-table-enhanced | /demo.py | UTF-8 | 1,835 | 2.59375 | 3 | [] | no_license | import sys
sys.path.insert(0, r"/mnt/c/Users/Super Bruce/Desktop/tornado/dash/test/dash_table_enhanced")
import dash
import dash_table
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
from collections import OrderedDict
from datetime import datetime as dt
import json
df = pd.DataFrame(OrderedDict([
('climate', ['Sunny', 'Snowy', 'Sunny', 'Rainy']),
('temperature', [13, 43, 50, 30]),
('city', [['NYC', 'Montreal'], ['Montreal'], ['Miami'], ['NYC']]),
('days in', [[dt.today()], [], [], []])
]))
app = dash.Dash(__name__)
app.layout = html.Div([
dash_table.DataTable(
id='table',
data=df.to_dict('records'),
columns=[
{'id': 'climate', 'name': 'climate', 'presentation': 'dropdown'},
{'id': 'temperature', 'name': 'temperature'},
{'id': 'city', 'name': 'city', 'presentation': 'multiValueDropdown'},
{'id': 'days in', 'name': 'days in', 'presentation': 'multiDatesPicker'}
],
editable=True,
dropdown={
'climate': {
'options': [
{'label': i, 'value': i}
for i in df['climate'].unique()
],
},
'city': {
'clearable': True,
'options': [
{'label': i, 'value': i}
for i in ['NYC', 'Montreal', 'Miami']
]
}
}
),
html.Pre(id="output")
])
@app.callback(Output('output', 'children'), [Input('table', 'data')])
def display_output(data):
    # Fires whenever the table's rows change; mirrors the current rows
    # below the table as pretty-printed JSON.
    return 'You have entered {}'.format(json.dumps(data, indent=2))
if __name__ == '__main__':
    # Start the Dash development server (hot reload + debugger enabled).
    app.run_server(debug=True)
| true |
0f6431dd74d067cd5f102ae829f85142d85fc58f | Python | jolugama/python3-tutorial-by-examples | /e08listas.py | UTF-8 | 1,054 | 3.9375 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
https://www.youtube.com/watch?v=I1a7piALq60&index=7&list=PLpOqH6AE0tNiK7QN6AJo_3nVGQPc8nLdM
codigo facilito
'''
class E8:
    '''
    Lists -- Python's counterpart to JavaScript arrays
    '''
    def __init__(self):
        mi_lista = ['una string', 4.2, 56, False]
        print(mi_lista)
        mi_lista.append('otro') # append a string at the end of the list
        print(mi_lista)
        mi_lista.insert(1, 'otro string') # insert a string at position 1
        print(mi_lista)
        mi_lista.remove(4.2) # remove the FIRST occurrence of the value 4.2
        print(mi_lista)
        ultimo_lista = mi_lista.pop() # remove and return the last element
        print(ultimo_lista)
        print(mi_lista)
        mi_lista2 = [5, 9, 44, 4, 2, 1, 5, 7, 4, 3, 23, 66]
        print(mi_lista2)
        mi_lista2.sort() # sort the list in place
        print(mi_lista2)
        mi_lista2.reverse() # reverse the list in place
        print(mi_lista2)
        mi_lista.extend(mi_lista2) # concatenate the two lists
        print(mi_lista)
E8()
daff705c6ebe4ae69980f28820fc869bd4b67446 | Python | LegenDu/MachineLearning-BasicAlgorithms | /Clustering/KMeans.py | UTF-8 | 6,340 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import random
import sys
np.random.seed(2)
# In[2]:
def _load_csv(filename):
    """Read a headerless CSV file into a NumPy array (shared loader)."""
    return pd.read_csv(filename, header=None).to_numpy()

def loadDermatologyDataset():
    """Load the dermatology dataset."""
    return _load_csv("Datasets/dermatologyData.csv")

def loadVowelDataset():
    """Load the vowels dataset."""
    return _load_csv("Datasets/vowelsData.csv")

def loadGlassDataset():
    """Load the glass dataset."""
    return _load_csv("Datasets/glassData.csv")

def loadEcoliDataset():
    """Load the ecoli dataset."""
    return _load_csv("Datasets/ecoliData.csv")

def loadYeastDataset():
    """Load the yeast dataset."""
    return _load_csv("Datasets/yeastData.csv")

def loadSoybeanDataset():
    """Load the soybean dataset."""
    return _load_csv("Datasets/soybeanData.csv")
# In[3]:
def calc_distance(a, b):
    """Euclidean distance between a and b, summed over every element (so
    it also measures displacement between whole centroid matrices)."""
    squared_diff = (a - b) ** 2
    return np.sqrt(squared_diff.sum())
# In[4]:
def kMeans(data_X, k, n_iters, tol=0):
    """Run Lloyd's k-means clustering on data_X.

    Args:
        data_X: (n_samples, n_features) array of points.
        k: number of clusters.
        n_iters: maximum number of iterations.
        tol: convergence threshold on the Euclidean displacement between
            consecutive centroid matrices.  Fix: this parameter was
            previously accepted but ignored; the default 0 reproduces the
            original exact-equality stop (distances are non-negative).

    Returns:
        (sse, clusters, centroids) where clusters is a list of k lists of
        sample indices and centroids is a (k, n_features) array.
    """
    n_samples, n_features = data_X.shape
    # Initialize centroids from k randomly chosen samples (may repeat).
    centroids = data_X[np.random.randint(0, n_samples - 1, size=k)]
    clusters = [[] for _ in range(k)]
    for _ in range(n_iters):
        clusters = create_clusters(centroids, data_X, k)
        centroids_old = centroids
        centroids = calc_centroids(data_X, clusters, k, n_samples, n_features)
        # Stop once the centroids have (essentially) stopped moving.
        if calc_distance(centroids_old, centroids) <= tol:
            break
    sse = calc_sse(data_X, centroids, clusters)
    return sse, clusters, centroids
# In[5]:
def create_clusters(centroids, X, k):
    """Group the sample indices of X by their nearest centroid."""
    clusters = [[] for _ in range(k)]
    for sample_idx, sample in enumerate(X):
        nearest = closest_centroid(sample, centroids)
        clusters[nearest].append(sample_idx)
    return clusters
# In[6]:
def closest_centroid(sample, centroids):
    """Return the index of the centroid nearest to sample (Euclidean)."""
    return np.argmin([calc_distance(sample, cen) for cen in centroids])
# In[7]:
def calc_centroids(X, clusters, k, n_samples, n_features):
    """Recompute each centroid as the mean of its members; an empty
    cluster is re-seeded from a random sample."""
    centroids = np.zeros((k, n_features))
    for c_idx, members in enumerate(clusters):
        if members:
            centroids[c_idx] = X[members].mean(axis=0)
        else:
            centroids[c_idx] = X[np.random.randint(0, n_samples - 1, 1)]
    return centroids
# In[8]:
def calc_cluster_label(clusters, n_samples):
    """Flatten a list-of-index-lists clustering into a per-sample label array."""
    labels = np.empty(n_samples)
    for cluster_idx, members in enumerate(clusters):
        labels[members] = cluster_idx
    return labels
# In[9]:
def calc_sse(X, centroids, clusters):
    """Sum of squared distances from each sample to its cluster centroid."""
    total = 0
    for c_idx, members in enumerate(clusters):
        for i in members:
            total += calc_distance(X[i], centroids[c_idx]) ** 2
    return total
# In[10]:
def calc_entropy(prob):
    """Shannon entropy contribution -p*log2(p) of a single probability."""
    return -prob * math.log(prob, 2)
# In[97]:
def calc_nmi(X, Y, clusters):
    """Normalized mutual information between the true labels Y and the
    clustering, computed as 2*I(Y;C) / (H(Y) + H(C))."""
    # H(Y): entropy of the true label distribution.
    H_Y = 0
    lbs, l_counts = np.unique(Y, return_counts=True)
    k = len(lbs)  # NOTE: unused below
    n_samples = X.shape[0]  # X is only used for its sample count
    for c in l_counts:
        H_Y += calc_entropy(c / n_samples)
    # H(C): entropy of the cluster-size distribution (empty clusters skipped).
    H_C = 0
    for cluster in clusters:
        c_len = len(cluster)
        if c_len == 0:
            continue
        H_C += calc_entropy(c_len / n_samples)
    # H(Y|C): conditional entropy of the labels given the cluster,
    # weighted by each cluster's share of the samples.
    H_YC = 0
    for cluster in clusters:
        c_len = len(cluster)
        data = Y[cluster]
        lbs, cnts = np.unique(data, return_counts=True)
        yc = 0
        for c in cnts:
            yc += calc_entropy(c / c_len)
        H_YC += yc * (c_len / n_samples)
    # Mutual information I(Y;C) = H(Y) - H(Y|C).
    I_YC = H_Y - H_YC
    return (2 * I_YC) / (H_Y + H_C)
# In[12]:
def plot_sse_vs_k(sses, k):
    """Line plot of SSE against the number of clusters (blocks on show)."""
    plt.figure()
    plt.xlabel('Number of Clusters')
    plt.ylabel('SSE')
    plt.xticks(np.arange(min(k), max(k) + 1, 1.0))
    plt.plot(k, sses, 'g-')
    plt.show()
# In[13]:
def plot_nmi_vs_k(nmis, k):
    """Line plot of NMI against the number of clusters (blocks on show)."""
    plt.figure()
    plt.xlabel('Number of Clusters')
    plt.ylabel('NMI')
    plt.xticks(np.arange(min(k), max(k) + 1, 1.0))
    plt.plot(k, nmis, 'g-')
    plt.show()
# In[14]:
def main(filename, k_pref):
    """Load the named dataset and run K-Means.

    k_pref 'c': run once with k = number of classes and scatter-plot the result.
    k_pref 'l': sweep k from 2 to classes + k_range, then plot SSE/NMI vs k.
    """
    dataset = None
    # Each dataset ships with its own sweep width for the 'l' mode.
    if filename == 'Dermatology':
        dataset = loadDermatologyDataset()
        k_range = 10
    elif filename == 'Vowels':
        dataset = loadVowelDataset()
        k_range = 10
    elif filename == 'Glass':
        dataset = loadGlassDataset()
        k_range = 6
    elif filename == 'Ecoli':
        dataset = loadEcoliDataset()
        k_range = 5
    elif filename =='Yeast':
        dataset = loadYeastDataset()
        k_range = 7
    elif filename == 'Soybean':
        dataset = loadSoybeanDataset()
        k_range = 5
    else:
        print('Please input correct dataset name.')
        return
    # Last column is the class label; the rest are features.
    data_X = dataset[:, 0: -1]
    data_Y = dataset[:, -1]
    classes = len(np.unique(data_Y))
    k_list = []
    if k_pref == 'c':
        k_list.append(classes)
    elif k_pref == 'l':
        k_list = [i for i in range(2, classes + k_range)]
        sses = []
        nmis = []
    else:
        print('Please input correct K preference.')
        return
    for k in k_list:
        sse, clusters, centroids = kMeans(data_X, k, 100)
        nmi = calc_nmi(data_X, data_Y, clusters)
        if k_pref == 'l':
            # Retry random-init runs until SSE is non-increasing versus the
            # previous k. NOTE(review): this can retry an unbounded number of
            # times — confirm that is acceptable.
            while len(sses) > 0 and sse > sses[-1]:
                sse, clusters, centroids = kMeans(data_X, k, 100)
                nmi = calc_nmi(data_X, data_Y, clusters)
            sses.append(sse)
            nmis.append(nmi)
        else:
            # Single-k mode: scatter the first two features plus centroids.
            plt.figure()
            plt_name = filename + 'Dataset(K-Means) K=' + str(k)
            plt.title(plt_name)
            plt.scatter(data_X[:, 0], data_X[:, 1], marker='o', c=data_Y)
            plt.scatter(centroids[:, 0], centroids[:, 1], marker='^', c='r')
            plt.show()
            print('No. of Cluster\t\t\tSSE\t\t\tNMI')
            print('\t', k, '\t\t', sse, '\t', nmi)
    if k_pref == 'l':
        plot_sse_vs_k(sses, k_list)
        plot_nmi_vs_k(nmis, k_list)
# In[15]:
if __name__ == '__main__':
    # Guard against missing CLI arguments with a usage message instead of
    # letting sys.argv[1]/[2] raise an IndexError.
    if len(sys.argv) < 3:
        print('Usage: python <script> <dataset-name> <k-preference (c|l)>')
        sys.exit(1)
    dataset = sys.argv[1]
    k_pref = sys.argv[2]
    main(dataset, k_pref)
| true |
df585a44013dd164b869c2bb1c0c14c1336dbffb | Python | alexandrucanavoiu/local_check_ssl_expiration_date | /check_ssl.py | UTF-8 | 5,625 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
#
# Local Check SSL expiration date
#
# Last Modified: 2020-11-10
#
# Usage: SSL Check [-h] [-v] -c CRITICAL -w WARNING -p PATH -e EXTENSION
#
# Outputs:
#
# CRITICAL: example.org expired on 2020-10-02, example2.org will expire on 2020-11-13 - 3 day(s) left
# WARNING: example2.org will expire on 2020-11-13 - 3 day(s) left
# OK: All certs are ok. Monitoring domain(s): example.org, example2.org
#
# Copyright (C) by Alexandru Canavoiu (alex.canavoiu@marketingromania.ro) used under MIT license
#
import sys
try:
    import OpenSSL.crypto
    import os
    import argparse
    import pathlib
    from pathlib import Path
    from datetime import datetime, timedelta, date
except ImportError as e:
    # Fix: ImportError has no `.message` attribute on Python 3, so the
    # original handler itself crashed with AttributeError; str(e) carries
    # the human-readable message.
    print("Missing python module: {}".format(e))
    sys.exit(255)
def dir_path(path):
    """argparse `type` callback: return `path` as a pathlib.Path if it is an
    existing directory, otherwise raise ArgumentTypeError."""
    candidate = pathlib.Path(path)
    if not candidate.is_dir():
        raise argparse.ArgumentTypeError(f"readable_dir:{path} is not a valid path")
    return candidate
parser = argparse.ArgumentParser(
description="""
Check SSL expiration date
-------------------------
This script searches in a folder for files with the extension provided.
Every file found needs to be a certificate, the script will decode the file and get the data when it expires.
You can use this script with SNMP Extended and Icinga/Nagios or Zabbix
""",
prog="SSL Check",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument("-v", "--version", action="version", version="%(prog)s version 0.1")
opts = parser.add_argument_group("Options")
opts.add_argument(
"-c",
"--critical",
type=int,
required=True,
help="""
Critical if cert expires in less than X days.
Example: 10
""",
)
opts.add_argument(
"-w",
"--warning",
type=int,
required=True,
help="""
Warning if cert expires in less than X days.
Example: 30
""",
)
opts.add_argument(
"-p",
"--path",
type=dir_path,
required=True,
help="""
Path where crts are located.
Example: /etc/nginx/ssl
""",
)
opts.add_argument(
"-e",
"--extension",
required=True,
help="""
File extension to check.
Example: .cer
""",
)
def main():
    """Scan the configured folder for certificates and report expiry status.

    Returns a Nagios-style exit code: 0 = OK, 1 = WARNING, 2 = CRITICAL.
    On an unparsable file it returns an error string instead (sys.exit()
    prints a string argument and exits with status 1).
    """
    args = parser.parse_args()
    warning_days = args.warning
    critical_days = args.critical
    path_folder = args.path
    extension_file = "*" + args.extension
    datetoday = date.today()
    current_date = datetoday.strftime("%Y-%m-%d")
    domains_name = []
    expire_list = []
    # Collect per-cert severities; max() at the end picks the worst one.
    exit_code = [0]
    path_count = 0
    for path in Path(path_folder).rglob(extension_file):
        try:
            cert = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_PEM, path.read_bytes()
            )
        except OpenSSL.crypto.Error as e:
            # NOTE(review): returning a string aborts the whole scan on the
            # first bad file — confirm that is intended.
            return f"whatever {path} was, it wasn't a cert"
        # Pull the CN value(s) out of the subject components.
        for subject_component in cert.get_subject().get_components():
            if b"CN" in subject_component:
                for CN_component in subject_component:
                    if not CN_component == b"CN":
                        domains_name.append(CN_component.decode("utf8"))
                        domain_name = CN_component.decode("utf8")
        # notAfter comes back as ASN.1 GENERALIZEDTIME, e.g. 20201002120000Z.
        certificate_expiration_date = datetime.strptime(
            cert.get_notAfter().decode("ascii"), "%Y%m%d%H%M%SZ"
        )
        certificate_expiration_date_warning = certificate_expiration_date - timedelta(
            days=warning_days
        )
        certificate_expiration_date_critical = certificate_expiration_date - timedelta(
            days=critical_days
        )
        days_until_expire = certificate_expiration_date.date() - datetoday
        path_count += 1
        # Dates compare correctly as "%Y-%m-%d" strings (lexicographic order).
        if certificate_expiration_date.strftime("%Y-%m-%d") < current_date:
            expire_list.append(
                "{} expired on {}".format(
                    domain_name, certificate_expiration_date.strftime("%Y-%m-%d")
                )
            )
            exit_code.append(2)
        elif certificate_expiration_date_critical.strftime("%Y-%m-%d") <= current_date:
            expire_list.append(
                "{} will expire on {} - {} day(s) left".format(
                    domain_name,
                    certificate_expiration_date.strftime("%Y-%m-%d"),
                    days_until_expire.days,
                )
            )
            exit_code.append(2)
        elif certificate_expiration_date_warning.strftime("%Y-%m-%d") < current_date:
            expire_list.append(
                "{} will expire on {} - {} day(s) left".format(
                    domain_name,
                    certificate_expiration_date.strftime("%Y-%m-%d"),
                    days_until_expire.days,
                )
            )
            exit_code.append(1)
    exit_code = max(exit_code)
    if path_count == 0:
        print("Error: No certificate found with extension '{}'".format(extension_file))
        exit_code = 2
    elif expire_list:
        if exit_code == 1:
            code = "WARNING: "
        elif exit_code == 2:
            code = "CRITICAL: "
        else:
            code = ""
        print(code + ", ".join([str(item) for item in expire_list]))
    elif not expire_list:
        # NOTE(review): [1::2] skips every other collected domain — looks like
        # a workaround for duplicated appends; verify against real cert data.
        print(
            "OK: All certs are ok. Monitoring domain(s): {}".format(
                ", ".join(domains_name[1::2])
            )
        )
    return exit_code
# Use main()'s return value (0/1/2, or an error string) as the process exit.
if __name__ == "__main__":
    sys.exit(main())
8a61e3c407a5ff13b531ba494ed4dac46f29b8f0 | Python | paalso/hse_python_course | /3/3-5.py | UTF-8 | 850 | 3.84375 | 4 | [] | no_license | # https://www.coursera.org/learn/python-osnovy-programmirovaniya/programming/HO43Q/okrughlieniie-po-rossiiskim-pravilam
# Округление по российским правилам
# По российский правилам числа округляются до ближайшего целого числа,
# а если дробная часть числа равна 0.5, то число округляется вверх.
# Дано неотрицательное число x, округлите его по этим правилам.
# Обратите внимание, что функция round не годится для этой задачи!
def russian_round(x):
    """Round a non-negative number to the nearest integer, with an exact
    fractional part of 0.5 always rounding up (Russian convention)."""
    floor_part = int(x)
    # frac == 1 - frac holds only when the fractional part is exactly 0.5;
    # every other value is handled by Python's round().
    return floor_part + 1 if x - floor_part == floor_part + 1 - x else round(x)
# Read one non-negative number from stdin and print its rounded value.
x = float(input())
print(russian_round(x))
| true |
a6051db2296640c832fa26e6c18f400e1c2ce503 | Python | gregone/collectives-flask2 | /collectives/utils/export.py | UTF-8 | 3,385 | 2.828125 | 3 | [] | no_license | from openpyxl import load_workbook
from openpyxl.writer.excel import save_virtual_workbook
from flask import current_app
import json
from ..models import Event
class DefaultLayout:
    """Cell addresses used when filling the default XLSX export template."""
    ACTIVITIES = 'A8'
    TITLE = 'D8'
    DESCRIPTION = 'A10'
    LEADER_NAME = 'E11'
    LEADER_PHONE = 'E12'
    LEADER_EMAIL = 'E13'
    START = 'E15'
    END = 'E16'
    REGISTRATION_START_ROW = 28
    REGISTRATION_ROW_COUNT = 12

    # Column letter for each registration field; unknown fields map to ''.
    _FIELD_COLUMNS = {
        'index': 'A',
        'license': 'B',
        'name': 'C',
        'phone': 'D',
        'email': 'E',
    }

    def registration_cell(self, index, field):
        """Cell address (e.g. 'B28') of `field` in the index-th registration row."""
        row = self.REGISTRATION_START_ROW + index
        return '{c}{r}'.format(r=row, c=self._FIELD_COLUMNS.get(field, ''))
def strip_tags(ops):
    """
    Very naive rendering of a Quill delta (JSON string): keep the plain text,
    prefix bullet-list chunks with "- ", and drop every other attribute.
    """
    delta = json.loads(ops)
    text = ''
    chunk = ''
    for op in delta['ops']:
        # An op's attributes format the chunk of text accumulated so far.
        fmt = '{}'
        for attr, value in op.get('attributes', {}).items():
            if attr == 'list' and value == 'bullet':
                fmt = '- {}'
        text += fmt.format(chunk)
        # Grab the next chunk (missing 'insert' yields an empty chunk).
        chunk = op.get('insert', '')
    return text + chunk
def to_xlsx(event, cells=DefaultLayout()):
    """Render `event` into the configured XLSX template and return the
    serialized workbook bytes.

    :param event: Event model instance (title, description, leaders, dates,
                  registrations).
    :param cells: layout object mapping logical fields to cell addresses;
                  the shared default is safe because DefaultLayout is stateless.
    """
    wb = load_workbook(filename=current_app.config['XLSX_TEMPLATE'])
    # grab the active worksheet
    ws = wb.active
    # Activity types
    activity_types = [at.name for at in event.activity_types]
    ws[cells.ACTIVITIES] = 'Collective de {}'.format(', '.join(activity_types))
    # Title, Description
    ws[cells.TITLE] = event.title
    ws[cells.DESCRIPTION] = strip_tags(event.description)
    # Leader(s)
    leader_names = [l.full_name() for l in event.leaders]
    leader_phones = [l.phone for l in event.leaders if l.phone]
    leader_emails = [l.mail for l in event.leaders]
    ws[cells.LEADER_NAME] = ', '.join(leader_names)
    ws[cells.LEADER_EMAIL] = ', '.join(leader_emails)
    ws[cells.LEADER_PHONE] = ', '.join(leader_phones)
    # Dates
    ws[cells.START] = 'Départ: {d}/{m}/{y}'.format(
        d=event.start.day, m=event.start.month, y=event.start.year)
    # Bug fix: the return ("Retour") date must come from event.end; the
    # original formatted event.start here as well.
    ws[cells.END] = 'Retour: {d}/{m}/{y}'.format(
        d=event.end.day, m=event.end.month, y=event.end.year)
    # Participants
    registrations = event.active_registrations()
    # Default template has a limited number of existing rows;
    # if we have more registrations, insert supplemental rows.
    reg_count = len(registrations)
    for i in range(cells.REGISTRATION_ROW_COUNT, reg_count):
        ws.insert_rows(cells.REGISTRATION_START_ROW + i)
        ws[cells.registration_cell(i, 'index')] = i + 1
    # Insert participants data
    for i, reg in enumerate(registrations):
        ws[cells.registration_cell(i, 'name')] = reg.user.full_name()
        ws[cells.registration_cell(i, 'license')] = reg.user.license
        ws[cells.registration_cell(i, 'email')] = reg.user.mail
        ws[cells.registration_cell(i, 'phone')] = reg.user.phone
    return save_virtual_workbook(wb)
| true |
ab9b5edddf3f3d9a8e09da729fe10abbe7e4fedb | Python | HoeYeon/Algorithm | /Python_Algorithm/codeforce/1181B.py | UTF-8 | 193 | 2.671875 | 3 | [] | no_license | l = int(input());
# Codeforces 1181B: split the l-digit number s (l read on the line above)
# into two non-empty parts whose integer sum is minimal. A part may not have
# a leading zero, so scan outward from the middle for the nearest valid
# split point on each side.
s = input()
j = l//2
i = j+1
# Move j left past zeros so the right part s[j:] has no leading zero.
while j>0 and s[j]=='0':j-=1
# Move i right past zeros so the right part s[i:] has no leading zero.
while i<l and s[i]=='0':i+=1
# NOTE(review): when a side has no valid split the slice indices degenerate
# (e.g. i >= l falls back to the whole string) — verify against the
# problem's guarantees before changing anything here.
print(min(int(s[0 if i>=l else i:])+int(s[:i]), int(s[j:])+int(s[: l if j==0 else j])))
| true |
af644c81b9b5f38f1a7d31824e7f30b370cfef01 | Python | kundor/canta | /canta/theme/ani_model.py | UTF-8 | 5,474 | 2.625 | 3 | [] | no_license | #! /usr/bin/python -O
# -*- coding: utf-8 -*-
#
# CANTA - A free entertaining educational software for singing
# Copyright (C) 2007 S. Huchler, A. Kattner, F. Lopez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import soya
from canta.theme.model import Model
class AniModel(Model):
    '''A class for animated Cal3D models rendered through soya.

    Wraps a soya.AnimatedModel in a soya.Body, starts a blend-cycle
    animation and applies position/scale/rotation via the Model base class.

    :param parent_world: soya world the body is attached to
    :param name: model resource name (also used as the body name)
    :param position: (x, y, z) translation
    :param scale: (x, y, z) scale factors
    :param rotation: (x, y, z) rotation
    :param shadow: whether the animated model casts a shadow
    :param action: name of the animation cycle to start
    :param debug: debug flag forwarded to the Model base class
    '''
    def __init__(self, parent_world=None, name='', position=(0.0, 0.0, 0.0),
                 scale=(1.0, 1.0, 1.0), rotation=(0.0, 0.0, 0.0), shadow=0,
                 action='', debug=0):
        self.debug = debug
        # call constructor of super class
        # (presumably stores name/position/scale/rotation on self — confirm
        # against Model's definition):
        Model.__init__(self, parent_world, name, position, scale,
                       rotation, self.debug)
        # set shadow:
        # TODO: get shadow state from config file.
        self.shadow = shadow
        # set action:
        self.action = action
        # create an animated model:
        animated_model = soya.AnimatedModel.get(self.name)
        # set shadow of the animated model:
        animated_model.shadow = self.shadow
        # create a body from the animated model:
        self.body = soya.Body(self.parent_world, animated_model)
        # start the animation cycle:
        self.body.animate_blend_cycle(self.action)
        # position, scale and rotate the body:
        self.set_position(self.position)
        self.set_scale(self.scale)
        self.set_rotation(self.rotation)
        # set name of the body:
        self.body.name = self.name
def main():
    """Demo: render the five animated CANTA logo letters in a soya scene.

    The original created each letter with a near-identical block of code
    (and reused the same throwaway variable names); the letters are now
    data-driven, which removes the duplication without changing the scene.
    """
    DEBUG = 1
    import sys
    import os
    # init soya in resizable window:
    soya.init('Canta', 1024, 768, 0)
    # append our data path:
    soya.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'data'))
    # disable soya's auto exporter:
    soya.AUTO_EXPORTERS_ENABLED = 0
    # set the root scene:
    scene = soya.World()
    # set up the light:
    light = soya.Light(scene)
    light.set_xyz(0.0, 0.7, 1.0)
    # set up the static camera:
    camera = soya.Camera(scene)
    camera.set_xyz(0.0, 0, 10.0)
    # One entry per animated CANTA letter:
    # (name, position, scale, rotation, action)
    letters = [
        ('Logo_0', (-4.0, 0.0, 0.0), (4.0, 3.0, 3.0), (0.0, 0.0, 0.0), 'Logo_0Damping'),
        ('Logo_1', (-3.0, -0.2, 0.0), (1.0, 1.0, 1.0), (0.0, 0.0, 0.0), 'Logo_1Damping'),
        ('Logo_2', (-1.5, 0.9, 0.0), (1.0, 1.0, 1.0), (0.0, 0.0, 0.0), 'Logo_2Damping'),
        ('Logo_3', (0.0, -0.5, 0.5), (1.0, 1.0, 1.0), (0.0, 0.0, 0.0), 'Logo_3Damping'),
        ('Logo_4', (2.0, 0.0, -0.3), (1.5, 1.5, 1.5), (0.0, 0.0, 0.0), 'Logo_4Damping'),
    ]
    for name, position, scale, rotation, action in letters:
        AniModel(
            parent_world=scene,
            name=name,
            position=position,
            scale=scale,
            rotation=rotation,
            shadow=1,
            action=action,
            debug=DEBUG,
        )
    # set our root widget:
    soya.set_root_widget(camera)
    # start soya main loop:
    soya.MainLoop(scene).main_loop()

if __name__ == '__main__': main()
3fe49c58ae92d857007b62fd30a232e01b060504 | Python | tawender/Python_Programs | /charge per charge/HVcharge_q_verification.py | UTF-8 | 14,023 | 2.671875 | 3 | [] | no_license | import visa
import numpy
import threading
import time
import Queue
from matplotlib import pyplot as plot
import os.path
import sys
sys.path.append("T:\python programs\modules")
import Instruments
import pyNIDAQ
class data_plots(object):
    """Writes sequentially numbered PNG plots of measurement data into `path`.

    Python 2 code (print statements); matplotlib.pyplot is imported at module
    level as `plot`.
    """
    def __init__(self,path):
        # plot_number prefixes each saved file so plots sort chronologically.
        self.plot_number = 1
        self.path = path
    def add_plot(self,a,name,samp_per_sec,x_units,y_units,y_scale=-1):
        """Save a time-series plot of `a` as the next numbered PNG.

        a (numpy array) array of data to graph
        name (string) the name of the test (used in the title and file name)
        samp_per_sec (float) sample rate of the acquired data
        x_units (string) units along the x-axis
        y_units (string) units along the y-axis
        y_scale (tuple) min and max for the y scale, or -1 for autoscale
        """
        try:
            dt = 1.0 / float(samp_per_sec)
            time_graphed = len(a) * dt
            t = numpy.arange(0,time_graphed,dt,dtype="float")
            plot.subplot(1,1,1)
            plot.plot(t,a,'.',markersize=3)
            plot.xlim(0, time_graphed)
            if y_scale != -1: plot.ylim(y_scale)
            plot.xlabel(x_units)
            plot.ylabel(y_units)
            plot.title(name)
            plot.grid(True)
            # NOTE(review): "\%02d_" relies on "\%" not being an escape —
            # the backslash is kept literally in the path; confirm intended.
            fig_name = self.path + "\%02d_"%self.plot_number + name + ".png"
            self.plot_number += 1
            plot.savefig(fig_name,dpi=200)
            plot.clf()
        except Exception as e:
            print "Exception in add_plot: " + repr(e)
            raise e
    def plot_currents(self,measured_currents,sourced_currents,y_scale=-1):
        """Plot measured vs. sourced current and return the linear best fit.

        Returns (eq, eq_str): a numpy.poly1d mapping measured -> sourced
        current and its human-readable equation string.
        NOTE(review): the x data plotted is sourced_currents but xlabel says
        "measured" (and vice versa) — the labels look swapped; confirm.
        """
        try:
            plot.subplot(1,1,1)
            plot.plot(sourced_currents,measured_currents,'bo',
                      sourced_currents,measured_currents,'k--',label='data',markersize=3)
            if y_scale != -1: plot.ylim(y_scale)
            plot.xlabel("measured current (Amps)")
            plot.ylabel("sourced current (Amps)")
            plot.title("Measured Current vs. Sourced Current")
            plot.grid(True)
            #calculate the trendline
            z = numpy.polyfit(measured_currents,sourced_currents,1)
            eq = numpy.poly1d(z)
            eq_str = "y = %.5fx + (%.5f)"%(z[0],z[1])
            ## plot.plot(measured_currents,eq(measured_currents),'r--',label=)
            ## plot.text(1.0,4.0,eq_str)
            fig_name = self.path + "\%02d_"%self.plot_number + "Input_vs_Measured.png"
            self.plot_number += 1
            plot.savefig(fig_name,dpi=200)
            plot.clf()
            ## print "\n Best fit equation for measured current data: %s"%(eq_str)
            ## test_report.write("\n Best fit equation for measured current data: %s\n"%(eq_str))
            return (eq,eq_str)
        except Exception as e:
            print "Exception in plot_currents(): " + repr(e)
            raise e
def array_to_csv(csv_file,array,test_name,samp_per_sec,x_units,y_units,y_scale=-1,make_plot=True):
try:
csv_file.write("%s,%f,%s,%s,"%(test_name,samp_per_sec,x_units,y_units))
for i in range(len(array)):
csv_file.write("%f,"%float(array[i]))
csv_file.write("\n")
if make_plot: graphs.add_plot(array,test_name,samp_per_sec,x_units,y_units,y_scale)
except Exception as e:
print "Exception writing array data to csv file: " + repr(e)
raise e
def measure_Ibat(I_load):
    """Take one battery-current measurement from both DAQ channels.

    Returns (Ibat_sourced_avg, Ibat_fixture_avg) in Amps. Relies on the
    module globals set up in main(): Ibat_meas, tot_resistance and the
    offset-correction constants.
    """
    try:
        print "\n Isource of %.3f"%I_load
        # Channel 0 = fixture's current-measure output, channel 2 = total circuit.
        meas = Ibat_meas.Take_Voltage_Measurement()
        Vfixture_arr = meas[0]
        Vcircuit_arr = meas[1]
        print " average voltage ch0(fixture): %.6fV, adjusted: %.6fV"\
            %(numpy.average(Vfixture_arr),numpy.average(Vfixture_arr)-fixture_Imeas_Voffset)
        print " average voltage ch2(circuit): %.6fV, adjusted: %.6fV"\
            %(numpy.average(Vcircuit_arr),numpy.average(Vcircuit_arr)-I_sourced_offset)
        # Ohm's law on the known series resistance gives the sourced current.
        Ibat_sourced_arr = (Vcircuit_arr - I_sourced_offset) / tot_resistance
        # Fixture output voltage is taken as current directly — assumes a
        # 1 A/V fixture gain (with -1 polarity); TODO confirm.
        Ibat_fixture_arr = abs(Vfixture_arr - fixture_Imeas_Voffset)
        Ibat_sourced_avg = numpy.average(Ibat_sourced_arr)
        Ibat_fixture_avg = numpy.average(Ibat_fixture_arr)
        test_name = "Isourced of %.5f Amps"%(Ibat_sourced_avg)
        array_to_csv(test_data,Ibat_sourced_arr,test_name,Ibat_meas_sampleRate,"time(sec)","volts",(0.0,6.0))
        test_name = "Ibat from fixture of %.5f Amps"%(Ibat_fixture_avg)
        array_to_csv(test_data,Ibat_fixture_arr,test_name,Ibat_meas_sampleRate,"time(sec)","volts",(0.0,6.0))
        # Percent error of the fixture reading relative to the sourced current.
        pct_err = ( (Ibat_fixture_avg-Ibat_sourced_avg) / Ibat_sourced_avg ) * 100.0
        print " Measured source current: %0.5fA, measured fixture current: %.5fA, %6.3f%% error"\
            %(Ibat_sourced_avg,Ibat_fixture_avg,pct_err)
        test_report.write(" Measured source current: %0.5fA, measured fixture current: %.5fA, %6.3f%% error\n"\
            %(Ibat_sourced_avg,Ibat_fixture_avg,pct_err))
        return (Ibat_sourced_avg,Ibat_fixture_avg)
    except Exception as e:
        print "Exception in measure_Ibat(): " + repr(e)
        raise e
def measure_Vbat(V_level):
    """Compare one battery-voltage reading from the DAQ against the DMM.

    Returns the offset-corrected DAQ average in Volts, or the string 'error'
    when the DAQ reports a failure. Uses the module globals dmm, dmm_range,
    Vbat_meas and fixture_Vbat_Voffset set up in main().
    """
    try:
        dmm_reading = dmm.measure_voltage(dmm_range)
        print " Measurement with Vbat of %8.5fV(dmm) = "%(dmm_reading),
        readings = Vbat_meas.Take_Voltage_Measurement()
        if readings == 'error':
            # NOTE(review): message says "Ibat" inside the Vbat routine —
            # likely a copy/paste slip; left unchanged here.
            print "\nError measuring Ibat"
            test_report.write("Error measuring Ibat")
            return 'error'
        avg = numpy.average(readings - fixture_Vbat_Voffset)
        # Percent error of the DAQ average relative to the DMM reference.
        pct_err = (dmm_reading-avg) / dmm_reading * 100.0
        print "%7.5fV(daq offset adjusted), %6.3f%% error"%(avg,pct_err)
        test_report.write(" measured voltage with input of %8.5fV(dmm) = %7.5fV(daq), %6.3f%% error\n"\
            %(dmm_reading,avg,pct_err))
        test_name = "Vsource=%.2f__measurement avg=%.4f"%(V_level,avg)
        array_to_csv(test_data,readings,test_name,Vbat_meas_sampleRate,"time(sec)","volts",(3.0,10.0))
        return avg
    except Exception as e:
        print "Exception in measure_Vbat(): " + repr(e)
        raise e
def use_equation(eq,I_measurements,I_sourced_list):
try:
print "\nUsing best fit equation on measured data:"
test_report.write("\nUsing best fit equation on measured data:\n")
for j in range(len(I_measurements)):
print " measured value of %.4fA converted to %.4fA, %5.2f%% error"\
%(I_measurements[j],eq(I_measurements[j]),I_sourced_list[j])
test_report.write(" measured value of %.4fA converted to %.4fA, %5.2f%% error\n"\
%(I_measurements[j],eq(I_measurements[j]),I_sourced_list[j]))
except Exception as e:
print "Exception in use_equation(): " + repr(e)
def main():
    """Top-level battery measurement-fixture verification.

    Creates a timestamped results directory, configures the NI-DAQ channels
    and GPIB instruments, then sweeps a list of source currents and source
    voltages, logging every measurement to a text report, a CSV file and
    numbered PNG plots.

    NOTE(review): the loop/indentation structure below was reconstructed
    from a whitespace-mangled source; confirm against the original file.
    """
    try:
        print "\n***************************************************************************"
        print "Beginning Program ..."
        #****************************************************************
        #*********directory setup****************************************
        now_text = time.strftime("%Y_%m_%d - %H_%M_%S")
        dir_name = "Test Results - %s"%(now_text)
        cwd = os.getcwd()
        global test_dir
        test_dir = cwd + "/" + dir_name
        os.mkdir(test_dir)
        plot_dir = (cwd + "/" + dir_name + "/test_plots")
        os.mkdir(plot_dir)
        global graphs
        graphs = data_plots(plot_dir)
        global test_report
        test_report = open(test_dir + "/Test_Report_" + now_text + ".txt",'w')
        test_report.write("Test report for Battery Current/Voltage Measurement Fixture: ")
        test_report.write(": %s\n\n"%(now_text))
        global test_data
        test_data = open(test_dir + "/Test_Data_" + now_text + ".csv",'w')
        test_data.write("Test Name,samples per second,x-axis units,y-axis units,data\n")
        #*********************************************************************
        #*********************************************************************
        #***********DAQ card setup********************************************
        BNC_6259 = 'Dev1'
        global Ibat_meas_sampleRate
        Ibat_meas_sampleRate = 100000.0
        Ibat_meas_time_sec = 0.5
        num_Ibat_meas_samples = int(Ibat_meas_sampleRate * Ibat_meas_time_sec)
        Ibat_meas_minV = -10.0
        Ibat_meas_maxV = 10.0
        global Ibat_meas
        # Differential measurement on ai0 (fixture output) and ai2 (circuit).
        Ibat_meas = pyNIDAQ.AI_Voltage_Channels()
        Ibat_meas.Config_Finite_Voltage_Measurement("Dev1/ai0,Dev1/ai2",Ibat_meas_minV,Ibat_meas_maxV,
                                                    Ibat_meas_sampleRate,num_Ibat_meas_samples,
                                                    meas_type='DIFF')
        global Ibat_source
        Ibat_source = pyNIDAQ.AI_Voltage_Channels()
        Ibat_source.Config_Finite_Voltage_Measurement("Dev1/ai2",Ibat_meas_minV,Ibat_meas_maxV,
                                                      Ibat_meas_sampleRate,num_Ibat_meas_samples,
                                                      meas_type='DIFF')
        global Vbat_meas_sampleRate
        Vbat_meas_sampleRate = 100000.0
        Vbat_meas_time_sec = 0.5
        num_Vbat_meas_samples = int(Vbat_meas_sampleRate * Vbat_meas_time_sec)
        Vbat_meas_minV = -10.0
        Vbat_meas_maxV = 10.0
        global Vbat_meas
        # NOTE(review): single-channel class here vs. AI_Voltage_Channels
        # above — confirm this is intentional in pyNIDAQ.
        Vbat_meas = pyNIDAQ.AI_Voltage_Channel()
        Vbat_meas.Config_Finite_Voltage_Measurement(BNC_6259,"ai1",Vbat_meas_minV,Vbat_meas_maxV,
                                                    Vbat_meas_sampleRate,num_Vbat_meas_samples,
                                                    meas_type='DIFF')
        #*********************************************************************
        #*********************************************************************
        #**********instrument setup******************************************
        global ps
        ps=Instruments.PowerSupply_E3632A("GPIB::7","power_supply")
        ps.output_on()
        global sm_2400
        sm_2400 = Instruments.sourcemeter_2400("GPIB::4",'Voltage Source')
        sm_2400.reset()
        sm_2400.set_Vsource()
        sm_2400.set_source_level(9.5)
        sm_2400.set_compliance_level(.001)
        sm_2400.output_on()
        global dmm
        dmm = Instruments.DMM_34401A("GPIB::22",'Agilent 34401A DMM')
        global dmm_range
        dmm_range = 10
        #*********************************************************************
        #*********************************************************************
        #***********variables********************************************
        global tot_resistance
        tot_resistance = 1.017
        I_list = [0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0]
        V_list = [4.0,4.5,5.0,5.5,6.0,6.5,7.0,7.5,8.0,8.5,9.0,9.5,10.0]
        I_sourced_list = []
        I_fixture_list = []
        #stores the offset of the DAQ channel measuring the total circuit voltage
        global I_sourced_offset
        I_sourced_offset = -0.0001
        #stores the offset of the DAQ channel measuring the fixture output 'current measure'
        global fixture_Imeas_Voffset
        fixture_Imeas_Voffset = -0.0002
        #stores the offset of the DAQ channel measuring the fxture output 'Vbat measure'
        global fixture_Vbat_Voffset
        fixture_Vbat_Voffset = 0.0001
        #*********************************************************************
        #*********************************************************************
        #***********main test loop********************************************
        print "\nUsing total system resistance of %.4f ohms for calculation of actual current..."%(tot_resistance)
        print "Applying voltage offset correction of %.6fV on channel measuring total circuit..."%(I_sourced_offset)
        print "Applying voltage offset correction of %.6fV on channel measuring Imeas fixture output..."%(fixture_Imeas_Voffset)
        print "Applying voltage offset correction of %.6fV on channel measuring Vbat fixture output..."%(fixture_Vbat_Voffset)
        print "\nTesting current detection circuit(-1 gain expected):"
        test_report.write( "\nUsing total system resistance of %.4f ohms for calculation of actual current...\n"%(tot_resistance))
        test_report.write( "Applying voltage offset correction of %.6fV on channel measuring total circuit...\n"%(I_sourced_offset))
        test_report.write( "Applying voltage offset correction of %.6fV on channel measuring fixture Imeas output...\n"%(fixture_Imeas_Voffset))
        test_report.write( "Applying voltage offset correction of %.6fV on channel measuring fixture Vbat output...\n"%(fixture_Vbat_Voffset))
        test_report.write( "\nTesting current detection circuit(-1 gain expected):\n")
        # Current sweep: source each test current, let it settle, measure,
        # then cool down before the next point.
        for I in I_list:
            ps.set_voltage_limit(I)
            ps.output_on()
            time.sleep(.3)
            (Isource,Ifixture) = measure_Ibat(I)
            I_sourced_list.append(Isource)
            I_fixture_list.append(Ifixture)
            ps.output_off()
            time.sleep(5)
        ps.set_voltage_limit(0.0)
        ps.output_off()
        graphs.plot_currents(I_fixture_list,I_sourced_list,y_scale=-1)
        print "\nTesting Vbat monitoring circuit:"
        test_report.write("\nTesting Vbat monitoring circuit:\n")
        # Voltage sweep: program the sourcemeter and compare fixture vs. DMM.
        for V in V_list:
            sm_2400.set_source_level(V)
            time.sleep(0.2)
            measure_Vbat(V)
        sm_2400.set_source_level(0.0)
        sm_2400.output_off()
        #*********************************************************************
    except Exception as e:
        print "Exception in main(): " + repr(e)
        raise e
    finally:
        print "\n\n"
if __name__ == '__main__':
    main()
| true |
636e8bb6aa5ca59f049f76a26a2fc5589c0bea23 | Python | Jackrwal/Draughts-Game-AI | /Draughts/Move.py | UTF-8 | 1,236 | 3.5625 | 4 | [] | no_license | class Move:
__doc__ = "An object to contain the information relative to a move in a Draughts game"
__player = ""
__piece = object
__target = ""
__score = -1
# used for tracking sequences of moves
__next = object
__prev = object
# constructs a Move object
# player - The Player object representing the player that makes the move
# piece - A piece object representing the piece being moved
# target - consists of the coordinate of the space to move too
def __init__(self, player, piece, target):
self.__player = player
self.__piece = piece
self.__target = target
# adds a new move to the sequence of moves, after this object
def addToSequence(self, move):
__next = move
move.setPrev(self)
    def getPlayer(self):
        # Player object that makes this move.
        return self.__player
    def getPiece(self):
        # Piece being moved.
        return self.__piece
    def getTarget(self):
        # Coordinate of the destination space.
        return self.__target
    def getScore(self):
        # Heuristic score of this move (-1 until setScore is called).
        return self.__score
    def setScore(self, score):
        self.__score = score
    def getNext(self):
        # Next move in the sequence (class-level `object` sentinel when unset).
        return self.__next
    def getPrev(self):
        # Previous move in the sequence.
        return self.__prev
    def setPrev(self, move):
        self.__prev = move
| true |
9ea4afce7f09df7adbd9ced5a7ec61fa51801f31 | Python | michaeldashiell/Job-Interview-Tests-Python- | /fizzBUZZ.py | UTF-8 | 341 | 3.546875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun May 3 10:41:59 2020
@author: Michael
"""
###
# Classic FizzBuzz over 1..100: multiples of 15 print 'fizzbuzz',
# of 3 'fizz', of 5 'buzz', anything else the number itself.
for count in range(1, 101):
    if count % 15 == 0:
        print('fizzbuzz')
    elif count % 3 == 0:
        print('fizz')
    elif count % 5 == 0:
        print('buzz')
    else:
        print(count)
| true |
be55f5ac4cad7a1ea0b2fcb3ecd0cbee71ed4f22 | Python | nulipinbobuyanqi/APITestFramework | /Utils/operation_yml.py | UTF-8 | 1,990 | 2.984375 | 3 | [] | no_license | # coding:utf-8
# @Author: wang_cong
# @File: operation_yml.py
# @Project: AutoCreateCaseTool
# @Time: 2021/5/21 15:51
import os
def get_yaml_data(yaml_path, yaml_file_name):
    """
    Read a YAML file and return its content as a dict.
    :param yaml_path: directory holding the file, without a trailing "/"
    :param yaml_file_name: file name without the ".yml" suffix
    :return: dict parsed from the file
    """
    import yaml
    yaml_file = "{}/{}.yml".format(yaml_path, yaml_file_name)
    if not os.path.exists(yaml_file):
        raise Exception("{}.yml文件,不存在!".format(yaml_file_name))
    with open(yaml_file, "r", encoding="utf-8") as f:
        return yaml.load(f.read(), Loader=yaml.FullLoader)
def save_ruamel_data(yaml_path, yaml_file_name, yaml_data):
    """
    Write data to a YAML file preserving key order (recommended writer).
    :param yaml_path: directory holding the file, without a trailing "/"
    :param yaml_file_name: file name without the ".yml" suffix
    :param yaml_data: content to serialize
    :return:
    """
    from ruamel import yaml
    target = ("{}/{}.yml".format(yaml_path, yaml_file_name)).replace("\\", "/")
    with open(target, "w", encoding="utf-8") as f:
        yaml.dump(yaml_data, f, Dumper=yaml.RoundTripDumper, allow_unicode=True)
def save_yaml_data(yaml_path, yaml_file_name, yaml_data):
    """
    Write data to a YAML file; key order is NOT preserved by this writer.
    :param yaml_path: directory holding the file, without a trailing "/"
    :param yaml_file_name: file name without the ".yml" suffix
    :param yaml_data: content to serialize
    :return:
    """
    import yaml
    target = ("{}/{}.yml".format(yaml_path, yaml_file_name)).replace("\\", "/")
    with open(target, "w", encoding="utf-8") as f:
        yaml.dump(yaml_data, f, allow_unicode=True)
| true |
ff320966c434452a38a2bfbf4a776d8b609520a0 | Python | acedit/Python_kurs | /week2'3/sum_divisors.py | UTF-8 | 118 | 3.5 | 4 | [] | no_license | n=input("Enter n:")
n = int(n)
# Sum every proper divisor of n (each d in 1..n-1 that divides n evenly).
total = 0
for d in range(1, n):
    if n % d == 0:
        total += d
print(total)
| true |
1e164de79039e7fde41b089bb9b83ce61ccc0129 | Python | wangzaogen/python_learn | /static/cmd.py | UTF-8 | 593 | 3.078125 | 3 | [] | no_license | import os
import sched
import time
from datetime import datetime
# Module-level scheduler shared by tip()/main(); wall-clock time, blocking sleep.
schedule = sched.scheduler(time.time, time.sleep)
def tip(inc):
    """Show the reminder popup, then re-schedule itself to fire in `inc` seconds."""
    # Windows-only: `msg *` pops a message box for every logged-in session.
    os.system("msg * 是时候喝口水动一动了")
    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    # Re-arm the timer so the reminder repeats indefinitely.
    schedule.enter(inc, 0, tip, (inc,))
def main(inc=60):
    """Start the repeating reminder with an interval of `inc` seconds (default 60)."""
    # sched.enter(delay, priority, action, argument): delay before the call,
    # priority orders events due at the same time, action is the callable,
    # and argument is the tuple of positional args passed to it.
    schedule.enter(0, 0, tip, (inc,))
    schedule.run()
# Fire the reminder every 1800 seconds (30 minutes).
main(1800)
| true |
1039558c67005f61f83ef3dbddc58ca341c772c5 | Python | alexbodin/EENX15_Machinelearning | /coordiantes.py | UTF-8 | 2,556 | 3.125 | 3 | [] | no_license |
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
import cv2
import os
# returns the image data from a file-path
def load_img_from_file(path):
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
# input: an image to locate boxes in
# output: list of coordinates in the format [boxType, x1, y1, x2, y2]
def locate_boxes(img, amount_boxes=3, xywh_format=False):
# TODO: use computervision model to locate boxes...
# following code is temporary
res = []
(hImg, wImg) = img.shape[:2]
for box in range(0, 3):
boxType = int(random.randint(0, 3))
w = random.randint(48, 256)
h = random.randint(64, 384)
x = random.randint(int(w/2), int(wImg - w/2 - 1))
y = random.randint(int(h/2), int(hImg - h/2 - 1))
if xywh_format:
res.append([boxType, x, y, w, h])
else:
res.append([boxType,
int(x-w/2), int(y-h/2),
int(x+w/2), int(y+h/2)])
print(res)
return res
# draws the boundingboxes and displays them
# accepts coordinates in the format [boxType, x, y, w, h]
# if you want to save to file, add a path
def display_boxes(img, boxes, path=""):
# creates a new figure (ax are like subplots in matlab)
fig, ax = plt.subplots()
# extract dimensions
(hImg, wImg) = img.shape[:2]
# display the image in the figure without the axis
ax.imshow(img)
ax.axis('off')
# draws every box
for box in boxes:
w = box[3]
h = box[4]
x = int(box[1]-w/2)
y = int(box[2]-h/2)
rect = patches.Rectangle((x, y), w, h, linewidth=1,
edgecolor='r', facecolor='none')
ax.add_patch(rect)
ax.annotate((str(box[0]) + ", x: " + str(x) + ", y: " + str(y)),
(x, y-16),
color='w',
weight='bold',
bbox=dict(alpha=.5, fc="k"),
fontsize=6,
ha='left',
va='bottom')
if path != "":
plt.savefig(path[:-4] + "_annotated" + path[-4:], bbox_inches='tight')
plt.show()
# Demo driver: load one image, generate placeholder boxes, and display them.
#base_dir = 'yolo_dataset/images/train'
#imgFile = 'IMG_3654_JPG.rf.f4311000e64ae0d17e346a006a9a3e23.jpg'
base_dir = "./"
imgFile = "IMG_3711.JPG"
path = os.path.join(base_dir, imgFile)
img = load_img_from_file(path)
boxes = locate_boxes(img, amount_boxes=3, xywh_format=True)
# pass `path` as third argument to also save the annotated figure
display_boxes(img, boxes) # , path)
| true |
326461dc09f243bc2d7b2b1c01556489f046e2b9 | Python | byAbaddon/Basics-Course-Python-March-2020 | /Exam - 28 and 29 March 2020/02. Cat Walking/02. Cat Walking.py | UTF-8 | 378 | 3.546875 | 4 | [] | no_license | minutes = int(input())
# `minutes` (minutes per walk) is read from input just above this block.
day_walk = int(input())    # number of walks per day
calories = int(input())    # calories the cat eats per day
all_walk = minutes * day_walk          # total walking minutes per day
burnet_calories = all_walk * 5         # 5 calories burned per walking minute
# walking is "enough" when it burns at least half the daily calorie intake
if calories / 2 <= burnet_calories:
    print(f'Yes, the walk for your cat is enough. Burned calories per day: {burnet_calories}.')
else:
    print(f'No, the walk for your cat is not enough. Burned calories per day: {burnet_calories}.')
| true |
99b5ca86df1b78848acb868103a4ab0ec66e743b | Python | ilgazyuksel/data_engineering_capstone | /scripts/global_temperatures.py | UTF-8 | 1,873 | 2.765625 | 3 | [] | no_license | """
Global temperatures etl script.
"""
from pyspark.sql import DataFrame
from utils.helper import add_decade_column
from utils.io import (
create_spark_session,
get_config_path_from_cli,
provide_config,
read_with_meta,
write_with_meta
)
def rename(df: DataFrame) -> DataFrame:
    """
    Rename raw CSV column names to snake_case equivalents
    :param df: global temperatures dataframe with original headers
    :return: global temperatures dataframe with snake_case columns
    """
    column_map = {
        "dt": "date",
        "LandAverageTemperature": "land_avg_temperature",
        "LandAverageTemperatureUncertainty": "land_avg_temperature_uncertainty",
        "LandMaxTemperature": "land_max_temperature",
        "LandMaxTemperatureUncertainty": "land_max_temperature_uncertainty",
        "LandMinTemperature": "land_min_temperature",
        "LandMinTemperatureUncertainty": "land_min_temperature_uncertainty",
        "LandAndOceanAverageTemperature": "land_ocean_avg_temperature",
        "LandAndOceanAverageTemperatureUncertainty": "land_ocean_avg_temperature_uncertainty",
    }
    # apply each rename in turn; equivalent to the chained withColumnRenamed calls
    for old_name, new_name in column_map.items():
        df = df.withColumnRenamed(old_name, new_name)
    return df
def main():
    """
    Run pipeline:
    - Create spark session
    - Get config
    - Read with meta
    - Rename dataframe
    - Add decade column
    - Write with meta
    :return: None
    """
    spark = create_spark_session()
    # config path comes from the command line; the relevant section is
    # scripts.global_temperatures in the provided config file
    config_path = get_config_path_from_cli()
    config = provide_config(config_path).get('scripts').get('global_temperatures')
    df = read_with_meta(spark, df_meta=config['input_meta'], header=True)
    df = rename(df=df)
    # decade is derived from the (renamed) 'date' column
    df = add_decade_column(df=df, date_col='date')
    write_with_meta(df, df_meta=config['output_meta'])
if __name__ == "__main__":
    main()
| true |
ed49452befaeb881e5f8a55d6b4a33c353c96446 | Python | artheadsweden/python_advanced_nov_17 | /day2/async_idea4.py | UTF-8 | 2,653 | 3.21875 | 3 | [] | no_license | from collections import deque
from math import sqrt
import time
class Task:
    """Pairs a generator-based routine with a unique, monotonically
    increasing id drawn from a class-level counter."""
    next_id = 0  # shared counter: next id to hand out

    def __init__(self, routine):
        """Store the routine and claim the next available id."""
        self.routine = routine
        self.id = Task.next_id
        Task.next_id += 1
class Scheduler:
    """Round-robin cooperative scheduler for generator-based tasks."""
    def __init__(self):
        # FIFO of tasks still runnable; completed/failed tasks move to the dicts
        self.runnable_tasks = deque()
        self.completed_task_results = {}   # task.id -> StopIteration.value
        self.failed_task_errors = {}       # task.id -> raised exception
    def add(self, routine):
        """Wrap `routine` in a Task, queue it, and return its id."""
        task = Task(routine)
        self.runnable_tasks.append(task)
        return task.id
    def run_to_completion(self):
        """Drive every queued task one step at a time until all finish.

        Each step advances a task with next(); a plain yield re-queues it,
        StopIteration records its return value, any other exception records
        the error and drops the task.
        """
        while len(self.runnable_tasks) != 0:
            task = self.runnable_tasks.popleft()
            print(f"Running task {task.id} ", end='')
            try:
                yielded = next(task.routine)
            except StopIteration as stopped:
                # generator returned: its return value rides on StopIteration
                print(f"completed with result: {stopped.value}")
                self.completed_task_results[task.id] = stopped.value
            except Exception as e:
                # NOTE(review): broad catch is deliberate here — any task
                # failure is recorded rather than crashing the scheduler
                print(f"failed with exception {e}")
                self.failed_task_errors[task.id] = e
            else:
                # tasks may only yield None (bare `yield`) in this protocol
                assert yielded is None
                print("now yielded")
                self.runnable_tasks.append(task)
def fib():
    """Infinite generator of Fibonacci numbers: 0, 1, 1, 2, 3, 5, ..."""
    prev, curr = 0, 1
    yield prev
    while True:
        yield curr
        prev, curr = curr, prev + curr
def async_search(iterable, predicate):
    """Cooperatively scan `iterable`, ceding control after each miss.

    Returns (via StopIteration.value) the first item for which `predicate`
    is true; raises ValueError when the iterable is exhausted without a hit.
    """
    for candidate in iterable:
        if predicate(candidate):
            return candidate
        yield
    raise ValueError("Not Found")
def async_print_matches(iterable, predicate):
    """Cooperatively print every item of `iterable` matching `predicate`.

    `predicate` is expected to be an *async-style* predicate: a generator
    function whose final result arrives via StopIteration (driven here with
    `yield from`), not a plain boolean function.
    """
    for item in iterable:
        #When is prime is async
        # delegate to the predicate's generator; its return value lands in
        # `matches` once it finishes
        matches = yield from predicate(item)
        #if predicate(item):
        if matches:
            print(f"Found: {item}")
        #yield - no longer needed (the predicate yields on our behalf)
def async_sleep(interval_seconds):
    """Non-blocking sleep: keep yielding control until the interval elapses.

    Always yields at least once, even for an interval of 0.
    """
    deadline = time.time() + interval_seconds
    while True:
        yield
        if time.time() >= deadline:
            return
def async_repetitive_message(message, interval_seconds):
    """Forever print `message`, cooperatively sleeping `interval_seconds`
    between repetitions (never returns; runs until the scheduler stops
    driving it)."""
    while True:
        print(message)
        yield from async_sleep(interval_seconds)
def async_is_prime(x):
    """Cooperative primality test.

    Trial-divides up to sqrt(x), ceding control (via async_sleep(0)) after
    each surviving probe; the boolean result is delivered through
    StopIteration when driven with `yield from`.
    """
    if x < 2:
        return False
    limit = int(sqrt(x) + 1)
    for divisor in range(2, limit):
        if x % divisor == 0:
            return False
        yield from async_sleep(0)  # let other tasks run between probes
    return True
def main():
    """Queue a prime-printer and a repeating message, then run the scheduler."""
    scheduler = Scheduler()
    # NOTE(review): async_print_matches does `yield from predicate(item)`,
    # which requires a generator-returning predicate; this plain lambda
    # returns a bool, so this line looks incompatible with the current
    # async_print_matches — the commented variant below matches it. Confirm.
    scheduler.add(async_print_matches(fib(), lambda x: not any(x//i == x/i for i in range(x-1, 1, -1)))) # BLOCKING!!
    #scheduler.add(async_print_matches(fib(), async_is_prime))
    scheduler.add(async_repetitive_message("Mind the gap", 2))
    scheduler.run_to_completion()
if __name__ == '__main__':
    main()
5adca2e50a59259b2f57600269a62470891940af | Python | madisonchamberlain/connect_n | /connectn.py | UTF-8 | 6,931 | 3.609375 | 4 | [] | no_license | #Make empty board
def make_board(num_rows: int, num_cols: int, blank_char: str) -> list:
    """Build a num_rows-by-num_cols grid with every cell set to blank_char.

    Each row is an independent list, so mutating one cell never affects
    another row.
    """
    return [[blank_char for _ in range(num_cols)] for _ in range(num_rows)]
def display_game_state(board: list) -> None:
    """Print the board with column indices on top and row indices on the
    left; rows are printed highest index first so row 0 sits at the bottom.

    (A second, near-identical definition of this function appears later in
    this file and shadows this one at runtime.)
    """
    # column header line
    print(end=' ')
    for col_num in range(len(board[0])):
        print(col_num, end=' ' * 3)
    print()
    # board rows, top (highest index) to bottom (row 0)
    for row_num, row in reversed(list(enumerate(board))):
        print(row_num, end=' ')
        row_image = (' '.join(row))
        print(row_image)
    #return row_image
def display_game_state_two(board: list) -> str:
    """Render the whole board as a string: one line per row, highest row
    index first, cells separated by single spaces.

    Bug fix: the original returned inside the loop's first iteration, so only
    the top-most displayed row ever came back; all rows are now joined with
    newlines.
    """
    rendered_rows = []
    for row_num, row in reversed(list(enumerate(board))):
        rendered_rows.append(' '.join(row))
    return '\n'.join(rendered_rows)
def fill(board: list) -> int:
    # NOTE(review): this only rebinds a local (`row_number`) on each pass,
    # never mutates `board`, and falls off the end returning None despite the
    # `-> int` annotation — appears to be an unfinished stub; confirm intent.
    for row_num, row in enumerate(board):
        row_number = row_num
def row_win(iterable,n, blank_char: str) -> bool:
    """Attempt to count consecutive matching pieces in `iterable`.

    NOTE(review): this implementation relies on list.index(), which returns
    the *first* occurrence and misbehaves whenever duplicate values exist
    (the normal case on a game board); it also returns on the first
    non-blank run it finds. A later definition of `row_win` in this file
    shadows this one at runtime.
    """
    matches = 0
    for element in iterable:
        for char in element:
            if char != blank_char and char != ' ':
                # index() finds the first occurrence of `char`/`element`,
                # not necessarily the position being examined
                if char == iterable[iterable.index(char)]:
                    index = iterable.index(element) + 1
                    # may raise IndexError at the end of the iterable
                    while char == iterable[index]:
                        matches +=1
                        index += 1
                    if matches == n:
                        return True
                    else:
                        return False
    return
def column_win(num_rows: int, num_cols: int, blank_char: str, n: int,col_check: int) -> bool:
    """Check column `col_check` (1-based) for n in a row.

    NOTE(review): this builds a *fresh blank board* via make_board instead of
    receiving the live game board, so every cell it inspects is blank_char
    and it can never detect a real win — looks like dead/leftover code;
    confirm before use.
    """
    col_check -= 1
    board = make_board(num_rows, num_cols, blank_char)
    col_string = []
    row = board[0]
    # collect the chosen column's cells top to bottom
    for row in board:
        for index in row:
            char = row[col_check]
            col_string.append(char)
    if row_win(col_string, n, blank_char):
        return True
def right_diag_win(num_rows: int, num_cols: int, blank_char: str, n: int) -> bool:
    """Check the main diagonal of a square board for n in a row.

    NOTE(review): like column_win above, this constructs a *fresh blank
    board* rather than examining the live game state, so it can never report
    a real win — appears to be dead/leftover code; confirm before use.
    """
    board = make_board(num_rows, num_cols, blank_char)
    diag_list = []
    index = 0
    # diagonal only defined for square boards
    if num_rows != num_cols:
        return False
    else:
        # walk (0,0), (1,1), ... collecting the diagonal cells
        for pos, row in enumerate(board):
            diag_char = board[pos][index]
            diag_list.append(diag_char)
            index += 1
        if row_win(diag_list,n, blank_char):
            return True
        else:
            return False
def get_player_attributes():
    """Prompt for board dimensions and the win length; return (rows, cols, n).

    Each prompt re-asks until a positive integer is entered (see
    is_valid_board_input). The original wrapped these prompts in a
    `while True` loop that returned on its first pass, so the dead loop has
    been removed.
    """
    row = int(is_valid_board_input('Enter the number of rows: '))
    cols = int(is_valid_board_input('Enter the number of columns: '))
    n = int(is_valid_board_input('Enter the number of pieces in a row to win: '))
    return (row, cols, n)
def is_valid_board_input(prompt):
    """Keep prompting with `prompt` until a positive integer string is
    entered; return that string.

    Bug fix: the original guard compared the input *string* to the int 0
    (`inp == 0`, always False), which let "0" slip through; the numeric
    value is now checked so zero is rejected. A dead no-op tail
    (`inp = inp`) was removed.
    """
    inp = input(prompt).strip()
    while not inp.isdigit() or int(inp) == 0:
        inp = input(prompt).strip()
    return inp
def make_board(num_rows: int, num_cols: int, blank_char: str) -> list:
    """Create an empty num_rows x num_cols board filled with blank_char.

    (Duplicate definition: the same helper also appears earlier in this
    file; this later one wins at runtime.)
    """
    board = []
    for _ in range(num_rows):
        board.append([blank_char] * num_cols)
    return board
def display_game_state(board: list) -> None:
    """Print the board: column indices across the top, then rows printed
    highest index first (row 0 renders at the bottom) with the row index on
    the left. This duplicate definition shadows the earlier one."""
    # column header line
    print(end=' ')
    for col_num in range(len(board[0])):
        print(col_num, end=' ' * 3)
    print()
    # rows, top (highest index) down to row 0
    for row_num, row in reversed(list(enumerate(board))):
        print(row_num, end=' ')
        row_image = (' '.join(row))
        print(row_image)
def is_valid_move(col_choice, amount_of_cols_specified, amount_of_rows_specified, board):
    """Return True when `col_choice` names a playable column.

    A move is valid when `col_choice` is a digit string, its value is within
    [0, amount_of_cols_specified), and the top cell of that column is still
    the blank marker '*' (i.e. the column is not full).

    Improvement over the original: the implicit None returns and the
    `not ... != '*'` double negative are replaced with explicit booleans
    (callers only test truthiness, so behavior is compatible).
    """
    if not col_choice.isdigit():
        return False
    col = int(col_choice)
    if col < 0 or col >= amount_of_cols_specified:
        return False
    # top row occupied means the column is full
    return board[int(amount_of_rows_specified) - 1][col] == '*'
def get_player_move(cols,rows, board, blank_char):
    """Prompt until the player picks a valid column, then return
    [row, column] of the lowest blank cell in that column.

    is_valid_move guarantees the column has at least one blank cell, so the
    search loop always finds a row to return (the implicit None fall-through
    should be unreachable).
    """
    player_move = input('Enter the column you want to play in: ').strip()
    while not is_valid_move(player_move, cols, rows, board):
        player_move = input('Enter the column you want to play in: ').strip()
    move = int(player_move)
    # find the lowest (smallest index) blank cell — pieces "drop" to it
    for i in range(rows):
        if board[i][move] == blank_char:
            #update_game_state()
            row_to_return = i
            return [row_to_return,move]
def tie_game_check(board):
    """Return True (after displaying the board and announcing the tie) when
    no blank '*' cells remain; otherwise return False.

    Improvement: the original counted blanks via nested
    `board.index(row)][row.index(col)` lookups — equivalent to counting '*'
    cells, but fragile-looking; a direct per-row count is used instead.
    """
    blanks_remaining = sum(row.count("*") for row in board)
    if blanks_remaining == 0:
        display_game_state(board)
        print("Tie Game")
        return True
    return False
def row_win(row, col, n, board):
sum_left = 0
sum_right = 0
player = board[row][col]
for currCol in range(col+1, len(board)):
if board[row][currCol] == player:
sum_right += 1
else:
break
for currCol in range(col-1, 0, -1):
if board[row][currCol] == player:
sum_right += 1
else:
break
if sum_left + sum_right + 1 >= n:
if player == "X":
display_game_state(board)
print('Player 1 won!')
else:
display_game_state(board)
print('Player 2 won!')
return True
return False
def col_win(row, col, n, board):
sum_left = 0
sum_right = 0
player = board[row][col]
for currRow in range(row + 1, len(board)):
if board[currRow][col] == player:
sum_right += 1
else:
break
for currRow in range(row - 1, 0, -1):
if board[currRow][col] == player:
sum_right += 1
else:
break
if sum_left + sum_right + 1 >= n:
if player == "X":
display_game_state(board)
print('Player 1 won!')
else:
display_game_state(board)
print('Player 2 won!')
return True
return False
def change_turn(turn: int):
    """Toggle the active player's index: 0 -> 1, 1 -> 0.

    Uses arithmetic instead of the original if/elif chain, which silently
    returned None for any value other than 0 or 1 (callers only ever pass
    0 or 1).
    """
    return 1 - turn
def update_game_state(piece: str, rows, cols, board) -> list:
    """Place `piece` into board[rows][cols] (mutating the board in place)
    and return the same board for convenience."""
    target_row = board[rows]
    target_row[cols] = piece
    return board
def play_game():
    """Run one interactive game of Connect-N until a tie or a win.

    Returns False when the game ends (both tie and win paths).
    """
    player_attributes = get_player_attributes()
    rows = player_attributes[0]
    cols = player_attributes[1]
    n = player_attributes[2]
    board = make_board(rows, cols, "*")
    blank_char = '*'
    player_turn = 0          # 0 -> 'X' (player 1), 1 -> 'O' (player 2)
    while True:
        display_game_state(board)
        # [row, column] of the cell where the piece lands
        everything_to_return = get_player_move(cols, rows, board,blank_char)
        row_to_return = everything_to_return[0]
        column_to_return = everything_to_return[1]
        pieces = 'XO'
        update_game_state(pieces[player_turn], row_to_return, column_to_return, board)
        # NOTE(review): the tie check runs before the win checks, so a
        # winning move that also fills the board is reported as a tie —
        # confirm whether that ordering is intended
        if tie_game_check(board):
            return False
        if row_win(row_to_return, column_to_return, n, board):
            return False
        if col_win(row_to_return, column_to_return, n, board):
            return False
        player_turn = change_turn(player_turn)
def main():
    """Entry point: run a single game of Connect-N."""
    play_game()
# starts the game immediately on import/run (no __main__ guard)
main()
| true |
5d7303f0b84591eaeab8cdfe4ad85fb7b482303d | Python | solstice333/Arymatic | /lib/settings.py | UTF-8 | 14,079 | 3.234375 | 3 | [] | no_license | from lib.custom_exceptions import *
from collections.abc import Mapping
import json
import re
class Settings(Mapping):
   """Read-only mapping over validated user settings.

   Settings are loaded from a dict or a JSON file, optionally merged with
   defaults, and validated against a schema of legal values that supports
   primitives, wildcards ('*[:<type>]'), regexes ('<pat>:re[:<flags>]'),
   nested lists, and nested dicts.
   """
   # matches regex-valued schema entries: '<pattern>:re[:<flags>]'
   _REPAT = r'(?P<pat>.*?):re(:(?P<flag>[AILMSX]+))?$'

   @staticmethod
   def _get_type_to_one_of():
      """return a dict of string types to one of method"""
      return {
         'primitive': Settings._is_in_prim,
         'list': Settings._is_sublist_in_one_of_lists,
         'dict': Settings._is_dict_in_one_of_dicts
      }

   @staticmethod
   def _is_primitive(val):
      """return True if |val| is a JSON primitive, False otherwise"""
      prims = [int, float, str, bool]
      for prim in prims:
         if isinstance(val, prim):
            return True
      return False

   @staticmethod
   def _is_list(val):
      """return True if |val| is an instance of list, False otherwise"""
      return isinstance(val, list)

   @staticmethod
   def _is_dict(val):
      """return True if |val| is an instance of dict, False otherwise"""
      return isinstance(val, dict)

   @staticmethod
   def _is_wildcard_match(s, wildcard):
      """return True if |wildcard| string matches |s| string. A valid wildcard
      string is in the format of '*[:<type>]`. For instance, '*', '*:str' are
      both valid. Any leading or trailing whitespace in |wildcard| is
      automatically removed. If |wildcard| is invalid, then an
      InvalidWildcardError is raised
      """
      wildcard = wildcard.strip()
      glob_pat = re.compile(r'\*(:(?P<type>\w+))?$')
      m = glob_pat.match(wildcard)
      if m:
         if m.group('type'):
            # NOTE(review): globals()['__builtins__'] is a dict only when this
            # module runs as __main__; in an imported module it is a module
            # object and .items() would raise AttributeError — confirm.
            type_to_meth = globals()['__builtins__']
            type_to_meth = {k:v for k,v in type_to_meth.items()
                            if k in ['str','int','float','bool']}
            try:
               return isinstance(s, type_to_meth[m.group('type')])
            except KeyError:
               raise InvalidWildcardError("{} is an invalid type in {}".format(
                  m.group('type'), wildcard))
         return True
      raise InvalidWildcardError(wildcard)

   @staticmethod
   def _is_regex_match(s, pat):
      """return True if regex pattern string |pat| matches string |s|. A valid
      wildcard string is in the format of '<regex pat>:re[:<flag>[<flag>...]]'.
      For instance, r'\d+:re' or r'h[i]:re:I' are valid. Flags can be stacked
      and valid flags are the same as the single character flags that the
      Python re module uses, i.e. AILMSX. For instance, r'h.i:re:IS' would be
      valid. Trailing whitespace is stripped. If a regex pattern is invalid,
      and InvalidRegexError is raised.
      """
      pat = pat.rstrip()
      m = re.search(Settings._REPAT, pat)
      if m:
         flags_combined = 0
         if m.group('flag'):
            # fold each single-character flag into one re flag bitmask
            char_to_flag = {
               'A':re.A, 'I':re.I, 'L':re.L, 'M':re.M, 'S':re.S, 'X':re.X}
            for flag in list(m.group('flag')):
               flags_combined |= char_to_flag[flag]
         return bool(re.search(m.group('pat'), s, flags_combined))
      raise InvalidRegexError(pat)

   @staticmethod
   def _is_in_prim(v, valid_v):
      """return True if |v| is in |valid_v|. |v| should be a primitive of
      either int, float, str, or bool. |valid_v| should be a list of any
      possible legal primitive, wildcard, or regex values. |valid_v| can also
      be a single primitive value, which will implicitly be converted to a list
      containing one element. Return False otherwise.
      """
      if not isinstance(valid_v, list):
         valid_v = [valid_v]
      for pat in valid_v:
         # string patterns may be wildcards or regexes; anything else is a
         # literal compared with ==
         if isinstance(pat, str):
            if '*' in pat:
               if Settings._is_wildcard_match(v, pat):
                  return True
            elif re.search(Settings._REPAT, pat):
               if Settings._is_regex_match(str(v), pat):
                  return True
         if v == pat:
            return True
      return False

   @staticmethod
   def _is_sublist_in_one_of_lists(sublist, lists):
      """return True if every element in list |sublist| is in one of the lists
      contained in |lists|, False otherwise. Legal elements in |sublist| or the
      lists in |lists| are any primitive (int, float, str, bool), list, or
      dict. If an illegal element exists in |sublist|, an InvalidSettingError
      is raised
      """
      type_to_one_of = Settings._get_type_to_one_of()
      for vl in lists:
         # try to match every element of |sublist| against this one valid
         # list; on the first mismatch move to the next valid list
         next_vl = False
         for e in sublist:
            if Settings._is_primitive(e):
               t = 'primitive'
            elif Settings._is_list(e):
               vl = [l for l in vl if isinstance(l, list)]
               t = 'list'
            elif Settings._is_dict(e):
               vl = [d for d in vl if isinstance(d, dict)]
               t = 'dict'
            else:
               raise InvalidSettingError()
            if not type_to_one_of[t](e, vl):
               next_vl = True
               break
         if next_vl:
            continue
         return True
      return False

   @staticmethod
   def _is_dict_in_one_of_dicts(d, dicts):
      """return True if dict |d| is in one of the dicts in |dicts|, False
      otherwise. |dicts| is obviously just a list of dictionaries. Legal
      elements in the dictionaries are the typical primitives (int, float,
      bool, str), lists, and dicts.
      """
      for vd in dicts:
         if Settings._is_in_dict(d, vd):
            return True
      return False

   @staticmethod
   def _is_in_list(l, valid_l):
      """return True if all elements in list |l| is in one of the lists
      contained in |valid_l|, False otherwise. Legal elements in the lists are
      the typical primitives (int, float, bool, str), lists, and dicts.
      """
      for elem in l:
         if Settings._is_primitive(elem):
            if not Settings._is_in_prim(elem, valid_l):
               return False
         elif Settings._is_list(elem):
            valid_lists = [l for l in valid_l if isinstance(l, list)]
            if not Settings._is_sublist_in_one_of_lists(elem, valid_lists):
               return False
         elif Settings._is_dict(elem):
            valid_dicts = [d for d in valid_l if isinstance(d, dict)]
            if not Settings._is_dict_in_one_of_dicts(elem, valid_dicts):
               return False
         else:
            raise InvalidSettingError()
      return True

   @staticmethod
   def _has_all_keys_from(d, valid_d):
      """return True if dict |d| has all keys in dict |valid_d|. False
      otherwise.
      """
      for k, v in valid_d.items():
         if k not in d:
            return False
      return True

   @staticmethod
   def _is_in_dict(d, valid_d):
      """return True if all dict |d| keys are in dict |valid_d|, values in |d|
      are legal values with respect to the valid values defined in |valid_d|,
      and all |valid_d| keys are in |d|. Values in |d| are
      determined legal based on Settings._is_in_prim(), Settings._is_list(), or
      recursively Settings._is_in_dict(). False otherwise.
      """
      for k, v in d.items():
         if k not in valid_d:
            return False
         else:
            # dispatch on the user value's type; the schema entry may be a
            # dict (single shape) or a list of alternative shapes
            if Settings._is_primitive(v):
               if not Settings._is_in_prim(v, valid_d[k]):
                  return False
            elif Settings._is_list(v):
               if not Settings._is_in_list(v, valid_d[k]):
                  return False
            elif Settings._is_dict(v):
               if isinstance(valid_d[k], dict):
                  if not Settings._is_in_dict(v, valid_d[k]):
                     return False
               elif isinstance(valid_d[k], list):
                  if not Settings._is_dict_in_one_of_dicts(v, valid_d[k]):
                     return False
               else:
                  raise InvalidSettingError()
            else:
               raise InvalidSettingError()
      # also require every schema key to be present in the user dict
      return Settings._has_all_keys_from(d, valid_d)

   @staticmethod
   def _primitive_validity_check(v, valid_v):
      """raise InvalidSettingError if primitive (int, float, bool, str) value
      |v| is not in list |valid_v|
      """
      if not Settings._is_in_prim(v, valid_v):
         raise InvalidSettingError()

   @staticmethod
   def _list_validity_check(l, valid_l):
      """raise InvalidSettingError if list |l| is not in list |valid_l| where
      \"in\" semantics are aligned with Settings._is_in_list(), so see the doc
      for that
      """
      if not Settings._is_in_list(l, valid_l):
         raise InvalidSettingError()

   @staticmethod
   def _dict_validity_check(d, valid_d):
      """raise InvalidSettingError if dict |d| is not in dict |valid_d| where
      \"in\" semantics are aligned with Settings._is_in_dict(), so see the doc
      for that
      """
      if not Settings._is_in_dict(d, valid_d):
         raise InvalidSettingError()

   @staticmethod
   def _validity_check(settings, valid):
      """error check |settings| and |valid|. Both are dict types. |settings|
      represents the user settings where each pair is a setting name associated
      to a chosen setting value. |valid| represents all valid user settings
      where each pair is a setting name associated to legal valid
      setting values.
      """
      Settings._dict_validity_check(settings, valid)

   @staticmethod
   def _inject_defaults(settings, defaults):
      """inject any defaults specified in |defaults| into settings. Default
      values will only be applied if a key exists in |defaults| and doesn't
      exist in |settings|, or if a key in |settings| has an associating value
      of None. If |defaults| is None, |settings| is returned as is.
      """
      new_settings = {}
      if defaults is None:
         return settings
      elif settings is None or len(settings) == 0:
         new_settings = defaults
      else:
         # recurse into nested dicts; None values are treated as "use default"
         for k, v in settings.items():
            if isinstance(v, dict) or v is None:
               new_settings[k] = Settings._inject_defaults(v, defaults[k])
            else:
               new_settings[k] = settings[k]
         # carry over defaults for keys the user omitted entirely
         for k, v in defaults.items():
            if k not in settings:
               new_settings[k] = defaults[k]
      return new_settings

   def __init__(self, settings, valid, defaults=None):
      """create a Settings object. |settings| can be a dict or path to json
      file. If a dict, then values in |settings| must be a primitive
      (int, float, bool, str), list, or dict. |valid| must be a dict.
      |settings| represents the user settings where each pair is a setting
      name associated to a chosen setting value. |valid| represents all valid
      user settings where each pair is a setting name associated to possible
      legal setting values. Here's some examples,

      # value associated to 'foo' must be either 'b' or 'a'
      Settings({'foo':'b'}, {'foo':['b','a']}

      # value associated to 'foo' can be a list of either 'a','b','c', and/or 'd'
      Settings({'foo':['a','b']}, {'foo':[['c', 'b', 'd', 'a']]}

      # value associated to 'foo' can be a list of either 'a','b','c', and/or 'd'
      Settings({'foo':['a','b']}, {'foo':[['c', 'b', 'd', 'a']]}

      # value associated to 'foo' can be a list of lists where each nested
      # list can be one or more combinations of
      # ['a'], ['b'], ['c'], ['d'], ['c', 'd'], ['b', 'a']
      # where order doesn't matter. In other words, each user sublist must
      # contain 0 or more elements from any individual valid sublist.
      # A sublist cannot contain a mix of items from two or more valid
      # sublists.
      Settings({'foo':[['a','b']]}, {'foo':[['c', 'd'], ['b', 'a']]}

      # Associating to the example above, this would raise an InvalidSettingError
      Settings({'foo':[['b','d']]}, {'foo':[['c', 'd'], ['b', 'a']]}

      # value associated to 'foo' must have a valid nested dict where 'bar'
      # is the only key accepting values of 'b' or 'a'
      Settings({'foo':{'bar':'a'}}, {'foo':{'bar':['b','a']}})

      # value associated to 'foo' must be one of the valid nested dicts
      Settings({'foo':{'bar':'a'}}, {'foo':[{'baz':['c','d']},{'bar':['b','a']}]})
      Settings({'foo':{'bar':'a','mu':'e'}}, {'foo':[{'baz':['c','d']},{'bar':['b','a'],'mu':['e','f']}]})
      Settings({'foo':{'baz':'d'}}, {'foo':[{'baz':['c','d']},{'bar':['b','a'],'mu':['e','f']}]})

      Finally, the |defaults| dictionary is optional, and specifies any
      default values for any key in the user settings that's nonexistent
      or has an associating value of None. The entries in |defaults| are
      injected into |settings| before the validity check is done. If the
      validity check fails, an InvalidSettingError is raised.
      """
      # EAFP: treat |settings| as a file path first, fall back to a dict copy
      try:
         with open(settings, 'r') as settings_file:
            self._settings = json.load(settings_file)
      except TypeError:
         self._settings = dict(settings)
      self._settings = Settings._inject_defaults(self._settings, defaults)
      Settings._validity_check(self._settings, valid)

   def __getitem__(self, name):
      """return the value associated to setting name |name|. Raise KeyError
      if not in Settings"""
      return self._settings[name]

   def __iter__(self):
      """return an iterator over the names of the Settings"""
      return iter(self._settings)

   def __len__(self):
      """return the number of settings"""
      return len(self._settings)

   # The remaining Mapping methods are inherited from collections.abc.Mapping;
   # the bare strings below only document them.
   """__contains__(self, item)
   return True if |item| exists, False otherwise"""

   """keys(self)
   return a new view of the setting names"""

   """items(self)
   return a new view of the setting (name, value) pairs"""

   """get(self, key, default=None)
   return the value for |key| if |key| is a valid setting, else |default|.
   |default| defaults to None so this method never raises a KeyError"""

   """__eq__(self, other)
   return True if self is equal to |other|, False otherwise"""

   """__ne__(self, other)
   return True if self is not equal to |other|, False otherwise"""
| true |
61075e62e16bfb41fb47f04b9662a7cacee4b901 | Python | devindhaliwal/EEG-Plots | /EEG Plot - User Choice Color Electrode Type/eeg_user_choice_plot.py | UTF-8 | 3,133 | 3.515625 | 4 | [] | no_license | import pandas as pd
import plotly.express as px
# function to display user choice menu and get correct option
def menu():
    """Display the highlight-category menu and return a validated choice (1-5).

    Improvements: the prompt literal was duplicated verbatim in the original
    (now a single constant), and the unidiomatic `choice.isdigit() == False`
    comparison is replaced with `not choice.isdigit()`.
    """
    prompt = ("Which category of electrodes would you like highlighted?\n1. None\n2. Peripheral Electrodes"
              "\n3. 10-20 Electrodes\n4. 10-10 Electrodes\n5. Quit\nEnter number choice: ")
    choice = input(prompt)
    # re-prompt on anything that is not a digit string in [1, 5]
    while not choice.isdigit() or int(choice) > 5 or int(choice) < 1:
        choice = input(prompt)
    return int(choice)
# getting and cleaning data
def get_clean_data():
    """Load electrode coordinates plus 10-10/10-20 labels, merge them on
    'Electrode', fill missing values, and return one combined DataFrame."""
    eeg_data = pd.read_csv("../129Channels_v2.csv")
    eeg_data.rename(columns={"Unnamed: 0": "Electrode", "Peripheral Channel": "Peripheral Electrode"}, inplace=True)
    # missing peripheral flag -> 0 (not peripheral)
    eeg_data["Peripheral Electrode"].fillna(0, inplace=True)
    eeg_10_20_data = pd.read_excel("../10-10 and 10-20 electrodes.xlsx", engine='openpyxl')
    # merging dfs: left join keeps every electrode from the CSV
    all_eeg_data = eeg_data.merge(eeg_10_20_data, on='Electrode', how='left')
    all_eeg_data["10-10/10-20 Name"].fillna("N/A", inplace=True)
    all_eeg_data["10-20 Electrode"].fillna(0, inplace=True)
    all_eeg_data["10-10 Electrode"].fillna(0, inplace=True)
    # column for electrode color labels (every row starts as plain "Electrode")
    all_eeg_data["Electrode Type"] = ["Electrode"] * len(eeg_data)
    return all_eeg_data
# plotting electrodes with user choice group highlighted
def plot_eeg(eeg_data, plot_type):
    """Show a 3-D scatter of the electrodes, optionally highlighting a group.

    plot_type is either None (no highlight) or a [column_name, color] pair.
    NOTE: the highlight path mutates eeg_data['Electrode Type'] in place —
    the caller is expected to reset that column afterwards.
    """
    if plot_type is None:
        # no electrode group selected
        fig = px.scatter_3d(eeg_data, x='X', y='Y', z='Z', title='EEG Electrode Plot - No Category',
                            color='Electrode Type', text='Electrode', color_discrete_map={"Electrode": "grey"})
    else:
        # getting plot type variables
        highlighted_group_type = plot_type[0]
        highlighted_group_color = plot_type[1]
        # labeling electrode group to be highlighted: rows where the group
        # column equals 1 get relabeled to the group name
        eeg_data['Electrode Type'].where(cond=eeg_data[highlighted_group_type] != 1, other=highlighted_group_type,
                                         inplace=True)
        # plotting electrodes with highlighted group
        fig = px.scatter_3d(eeg_data, x='X', y='Y', z='Z', title='EEG Electrode Plot - '+highlighted_group_type+'s',
                            color='Electrode Type', text='Electrode',
                            color_discrete_map={"Electrode": "grey", highlighted_group_type: highlighted_group_color})
    fig.show()
def main():
    """Interactive loop: keep plotting user-chosen highlight categories
    until the user selects 5 (Quit)."""
    eeg_data = get_clean_data()
    choice = menu()
    # menu option -> [group column, highlight color]; 1 means no highlight
    choice_map = {1: None, 2: ["Peripheral Electrode", "red"], 3: ["10-10 Electrode", "green"], 4: ["10-20 Electrode", "blue"]}
    while 1:
        if choice == 5:
            return
        else:
            # plotting electrodes
            plot_eeg(eeg_data, choice_map[choice])
            # resetting column for electrode color labels
            # (plot_eeg mutates this column in place when highlighting)
            eeg_data["Electrode Type"] = ["Electrode"] * len(eeg_data)
            # allowing user another choice
            choice = menu()
main()