blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3c18289032093747dca17826852aa04c11fccbbd | Python | xiaoouLi/Artificial-Intelligence-Projects | /DecisionTree/decisionTree.py | UTF-8 | 9,348 | 3.46875 | 3 | [] | no_license | import sys, math, re
import cPickle as pickle
import readARFF
import copy
import random
# import readARFF2
### takes as input a list of class labels. Returns a float
### indicating the entropy in this data.
###Entropy is, of course, about proportions of positive
###versus negative examples
def entropy(data) :
    """Shannon entropy (in bits) of a list of class labels.

    0.0 for a pure list, 1.0 for an even two-class split.
    """
    acc = 0
    for label in set(data):
        p = float(data.count(label)) / len(data)
        acc += p * (math.log(p) / math.log(2))
    return -acc
### remainder(data) actually returns the information GAIN of splitting on an
### attribute: total entropy minus the value-weighted entropy of each subset.
### Input rows look like:
### [(value1, class1), (value2, class2), ..., (valuen, classn)]
def remainder(data) :
    values = [pair[0] for pair in data]
    labels = [pair[1] for pair in data]
    gain = entropy(labels)
    for value in set(values):
        subset_labels = [label for (v, label) in data if v == value]
        weight = float(values.count(value)) / len(data)
        gain -= entropy(subset_labels) * weight
    return gain
### selectAttribute: pick the index of the attribute whose split yields the
### highest information gain (computed by remainder above).
### data rows look like [a1, a2, ..., an, class]; attributes maps each usable
### column index to its attribute description (only the keys are used here).
def selectAttribute(data, attributes) :
    best_gain = -1
    best_index = -1
    for index in attributes.keys():
        pairs = [(row[index], row[-1]) for row in data]
        gain = remainder(pairs)
        if gain > best_gain:
            best_gain = gain
            best_index = index
    return best_index
### a TreeNode is an object that has either:
### 1. An attribute to be tested and a set of children; one for each possible
### value of the attribute.
### 2. A value (if it is a leaf in a tree)
class TreeNode :
    """A decision-tree node.

    Either an internal test node (attribute set, children keyed by attribute
    value) or a leaf (value set, children empty).
    """
    def __init__(self, attribute, value) :
        self.attribute = attribute  # attribute tested at this node (None for a leaf)
        self.value = value          # predicted class label (None for internal nodes)
        self.children = {}          # attribute value -> child TreeNode
    def __repr__(self) :
        # Show the tested attribute for internal nodes, the label for leaves.
        if self.attribute :
            return self.attribute
        else :
            return self.value
    ### a node with no children is a leaf
    def isLeaf(self) :
        return self.children == {}
    ### return the value for the given data
    ### the input will be:
    ### data - an object to classify - [v1, v2, ..., vn]
    ### attributes - the attribute dictionary
    def classify(self, data, attributes) :
        """Walk the tree following data's attribute values; return a label.

        NOTE(review): `attributes` is used as a *list* here (via .index),
        unlike the dict used by makeTree/selectAttribute — confirm callers
        pass the attribute-name list.  For an attribute value never seen in
        training this returns self.value, which is None on internal nodes.
        """
        if self.attribute:
            v = data[attributes.index(self.attribute)]
            if v not in self.children.keys():
                return self.value
            result = self.children[v]
            if result.attribute:
                return result.classify(data,attributes)
            else:
                return result.value
        else:
            return self.value
    def printTree(self):
        """Debug dump of this subtree to stdout (Python 2 print statements)."""
        print "+++"
        print self.attribute," ",self.value
        print "has children: ===="
        for c in self.children:
            print c,":",self.children[c]
        print "\n"
        for c in self.children:
            if self.children[c].attribute:
                self.children[c].printTree()
### a tree is simply a data structure composed of nodes (of type TreeNode).
### The root of the tree
### is itself a node, so we don't need a separate 'Tree' class. We
### just need a function that takes in a dataset and our attribute dictionary,
### builds a tree, and returns the root node.
### makeTree is a recursive function. Our base case is that our
### dataset has entropy 0 - no further tests have to be made. There
### are two other degenerate base cases: when there is no more data to
### use, and when we have no data for a particular value. In this case
### we use either default value or majority value.
### The recursive step is to select the attribute that most increases
### the gain and split on that.
### assume: input looks like this:
### dataset: [[v1, v2, ..., vn, c1], [v1,v2, ..., c2] ... ]
### attributes: [a1,a2,...,an] }
def makeTree(dataset, attributes, defaultValue) :
    """Recursively build a decision tree and return its root TreeNode.

    dataset      - rows of the form [v1, v2, ..., vn, class]
    attributes   - dict: column index -> {name: [possible values]}
                   (accessed with .keys()[0]/.values()[0], i.e. Python 2 dicts)
    defaultValue - class label to predict when no data is available

    Fixed: for an attribute value with no matching rows, the original stored
    a leaf and then unconditionally overwrote it with a recursive call on the
    empty subset; the dead branch also passed a class label where
    computeZeroR expects a dataset.  An empty subset now directly becomes a
    leaf predicting the parent's majority class.
    """
    if len(dataset) == 0:
        return TreeNode(None,defaultValue)
    #calculate entropy for whole dataset
    entropyD = entropy([item[-1] for item in dataset])
    if entropyD == 0:
        # every row shares one class: make a pure leaf
        return TreeNode(None,dataset[0][-1])
    if len(attributes) == 0:
        # no attributes left to split on: predict the majority class
        return TreeNode(None, readARFF.computeZeroR(attributes, dataset))
    copyAttr = copy.copy(attributes)
    dV = readARFF.computeZeroR(attributes,dataset)  # majority class of this subset
    attrSpread = selectAttribute(dataset,attributes) # index of the best attribute
    vlist = attributes[attrSpread].values()[0]
    del copyAttr[attrSpread]
    node = TreeNode(attributes[attrSpread].keys()[0],None)
    for v in vlist:
        ## rows of dataset that take value v for the chosen attribute
        subDataset = [item for item in dataset if item[attrSpread] == v]
        if len(subDataset) == 0:
            # no examples with this value: fall back to the parent's majority class
            node.children[v] = TreeNode(None, dV)
        else:
            node.children[v] = makeTree(subDataset,copyAttr,dV)
    return node
def computePrecision(TP,FP,TN,FN):
    """Precision TP/(TP+FP) rounded to 3 places; "----" when undefined."""
    predicted_positive = TP + FP
    if predicted_positive == 0:
        return "----"
    return round(float(TP) / predicted_positive, 3)
def computeRecall(TP,FP,TN,FN):
    """Recall TP/(TP+FN) rounded to 3 places; "----" when undefined."""
    actual_positive = TP + FN
    if actual_positive == 0:
        return "----"
    return round(float(TP) / actual_positive, 3)
def computeAccuracy(TP,FP,TN,FN):
    """Accuracy (TP+TN)/total, rounded to 3 places."""
    total = TP + TN + FP + FN
    return round(float(TP + TN) / total, 3)
def evaluate(root, data,alist, classification):
    """Per-class one-vs-rest precision/recall/accuracy of the tree on data.

    Prints a chart and returns {class: (precision, recall, accuracy)}.

    Fixed: the original counted samples where *neither* the prediction nor
    the true label was c as false negatives; they are true negatives for
    class c in a one-vs-rest confusion matrix.
    """
    classification = classification.values()[0]
    evalResult = {}
    for c in classification:
        TPCount = 0
        TNCount = 0
        FPCount = 0
        FNCount = 0
        for d in data:
            cl = root.classify(d[:-1], alist)
            if cl == c:
                if d[-1] == c:
                    TPCount += 1
                else:
                    FPCount += 1
            else:
                if d[-1] == c:
                    FNCount += 1
                else:
                    TNCount += 1
        p = computePrecision(TPCount,FPCount,TNCount,FNCount)
        r = computeRecall(TPCount,FPCount,TNCount,FNCount)
        a = computeAccuracy(TPCount,FPCount,TNCount,FNCount)
        evalResult[c] = (p,r,a)
    drawChart(evalResult)
    return evalResult
def drawChart(Result):
    """Pretty-print {class: (precision, recall, accuracy)} to stdout."""
    for c in Result.keys():
        print "Class: ",c
        print "Precision: ",Result[c][0]
        print "Recall: ",Result[c][1]
        print "Accuracy: ",Result[c][2]
        print "\n"
    print "------------------------------"
def evalZeroR(trainDataset,testDataset,classification,attrs):
    """Per-class precision/recall/accuracy of the ZeroR (majority-class) baseline.

    ZeroR always predicts the training set's majority class.  Prints a chart
    and returns {class: (precision, recall, accuracy)}.

    Fixed: in the non-majority branch, test samples belonging to a *third*
    class were counted as false negatives; they are true negatives for c.
    """
    classification = classification.values()[0]
    evalResult = {}
    zeroR = readARFF.computeZeroR(attrs,trainDataset)
    for c in classification:
        TPCount = 0
        TNCount = 0
        FPCount = 0
        FNCount = 0
        if zeroR == c:
            # Every sample is predicted as c.
            for i in testDataset:
                if i[-1] == zeroR:
                    TPCount += 1
                else:
                    FPCount += 1
        else:
            # No sample is predicted as c: actual c -> FN, anything else -> TN.
            for i in testDataset:
                if i[-1] == c:
                    FNCount += 1
                else:
                    TNCount += 1
        p = computePrecision(TPCount,FPCount,TNCount,FNCount)
        r = computeRecall(TPCount,FPCount,TNCount,FNCount)
        a = computeAccuracy(TPCount,FPCount,TNCount,FNCount)
        evalResult[c] = (p,r,a)
    drawChart(evalResult)
    return evalResult
if __name__ == '__main__' :
if len(sys.argv) < 2 :
print "Usage: decisionTree.py #datasetName"
sys.exit(-1)
fname = sys.argv[-1]
(attrs, data, classification) = readARFF.readArff(open(fname))
resultTest = {}
resultTrain = {}
resultZeroR = {}
for time in range(5):
print "Round ",time+1,":"
index = range(len(data))
trainSample = random.sample(index,int(len(data)*0.8))
testSample = [i for i in index if i not in trainSample]
trainDataset = [data[i] for i in trainSample]
testDataset = [data[i] for i in testSample]
print "\nUsing ZeroR:"
rz = evalZeroR(trainDataset,testDataset,classification,attrs)
for k in rz:
if k in resultZeroR:
resultZeroR[k] += rz[k]
else:
resultZeroR[k] = rz[k]
alist = [i.keys()[0] for i in attrs.values()]
defaultValue = readARFF.computeZeroR(attrs,trainDataset)
root = makeTree(trainDataset,attrs,defaultValue)
print "\nTest Set: "
r1 = evaluate(root,testDataset,alist, classification)
for k in r1:
if k in resultTest:
resultTest[k] += r1[k]
else:
resultTest[k] = r1[k]
print "\nTraining Set:"
r2 = evaluate(root,trainDataset,alist, classification)
for k in r1:
if k in resultTrain:
resultTrain[k] += r2[k]
else:
resultTrain[k] = r2[k]
print "------------------------------"
print "\nSummary: "
print "Using ZeroR:"
drawChart(resultZeroR)
print "Test Set:"
drawChart(resultTest)
print "Training Set:"
drawChart(resultTrain)
print "------------------------------"
| true |
044cf904e05ee611cf10f9763b8a89fa0ee44598 | Python | Raj-kar/Python | /Pattern exercise/pattern 13.py | UTF-8 | 260 | 4 | 4 | [] | no_license | # Write a Python program to construct the following pattern, using a nested loop number.
# Expected Output:
# 1
# 22
# 333
# 4444
# 55555
# 666666
# 7777777
# 88888888
# 999999999
# Print a number triangle: row i repeats the digit i, i times (1, 22, 333, ...).
row = int(input("Enter a range: "))
for height in range(1, row + 1):
    print(str(height) * height)
b5b054ea431407ed18be42ddc45e882920161e3d | Python | magrco/zmirror | /zmirror/lru_dict.py | UTF-8 | 1,167 | 2.875 | 3 | [
"MIT"
] | permissive | # coding=utf-8
from collections import OrderedDict
class LRUDictManual(OrderedDict):  # pragma: no cover
    """A hand-rolled LRU dict: least-recently-used entries are evicted first.

    Fixed: setting a key that already exists while the dict is full used to
    evict another (unrelated) entry before the update; updating in place must
    never shrink the cache.
    """
    def __init__(self, size=32):
        super().__init__()
        self.maxsize = size
    def __getitem__(self, key):
        value = super().__getitem__(key)
        try:
            self.move_to_end(key)  # mark as most recently used
        except Exception:
            pass
        return value
    # noinspection PyMethodOverriding
    def __setitem__(self, key, value):
        if key in self:
            # Re-inserting an existing key: refresh its position, no eviction.
            del self[key]
        elif len(self) >= self.maxsize:
            # Cache full: evict the least recently used entry.
            self.popitem(last=False)
        super().__setitem__(key, value)
    def keys(self):
        """Keys ordered most- to least-recently used."""
        return list(reversed(list(super().keys())))
    def values(self):
        """Values ordered most- to least-recently used."""
        return list(reversed(list(super().values())))
    def items(self):
        """(key, value) pairs ordered most- to least-recently used."""
        return list(reversed(list(super().items())))
    def get_size(self):
        return len(self)
    def set_size(self, size):
        # NOTE(review): shrinking does not evict surplus entries immediately.
        self.maxsize = size
try:
    # If the C-accelerated lru-dict package is installed, use it;
    # otherwise fall back to the manual implementation above.
    from lru import LRU
except:
    LRUDict = LRUDictManual
else:
    LRUDict = LRU
| true |
5754c7cdbad404ac7f31a29047b4914788d59a9f | Python | DJreyaB/Colt-Steele-Python-Aglorithms-DataStructures | /LinkedList/Doubly.py | UTF-8 | 252 | 3.015625 | 3 | [] | no_license | class Node:
    def __init__(self, val) -> None:
        """Store the node's payload; both neighbour links start unset."""
        self.val = val      # payload carried by this node
        self.next = None    # next node in the list (set by list operations)
        self.prev = None    # previous node in the list
class DoublyLinkedList:
    """A doubly linked list, created empty."""

    def __init__(self) -> None:
        # No nodes yet: both ends unset and zero length.
        self.head = self.tail = None
        self.length = 0
3ce01bd0d15d420779d4c570c7af445c5e695f7c | Python | NorthcoteHS/10MCOD-Vincent-CROWE | /user/hACKING THE MATRIX.py | UTF-8 | 47 | 3.421875 | 3 | [] | no_license | x = 1
# NOTE(review): x doubles forever and stays positive (Python ints are
# unbounded), so this loop never terminates — it prints successive powers
# of two indefinitely.  That appears to be the point of the demo.
while x > 0:
    x = x * 2
    print (x)
| true |
13fb1e63dc8329af4b57212a13d311443af7ae49 | Python | MrZhangzhg/nsd_2018 | /nsd1808/devops/day05/deploy_web.py | UTF-8 | 2,248 | 2.828125 | 3 | [] | no_license | import wget
import os
import requests
import hashlib
import tarfile
def has_new_version(live_url, live_fname):
    """Return True when a new version is available.

    "New" means either no local version file exists, or the version text
    served at live_url differs from the local copy in live_fname.
    """
    if not os.path.isfile(live_fname):
        # No local version file: treat everything on the server as new.
        return True
    with open(live_fname) as fobj:
        local_version = fobj.read()
    response = requests.get(live_url)
    # Differing version strings mean the server has something newer.
    return response.text != local_version
def md5sum(fname):
    """Return the hex MD5 digest of a file, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, 'rb') as fobj:
        # iter() with a sentinel stops once read() returns b'' at EOF.
        for chunk in iter(lambda: fobj.read(4096), b''):
            digest.update(chunk)
    return digest.hexdigest()
def deploy(app_fname, deploy_dir):
    """Unpack the app tarball into deploy_dir and point the web root at it.

    app_fname is expected to be named like mysite_<version>.tar.gz; the
    extracted directory becomes the target of the /var/www/html/nsd1808
    symlink.

    Fixed: os.path.exists() returns False for a dangling symlink, in which
    case os.symlink() would raise FileExistsError; os.path.lexists() detects
    the link itself.  The tarfile is also closed via a context manager even
    if extraction fails.
    """
    os.chdir(deploy_dir)
    with tarfile.open(app_fname, 'r:gz') as tar:
        tar.extractall()
    app_path = os.path.basename(app_fname)  # mysite_1.0.tar.gz
    app_path = app_path.replace('.tar.gz', '')  # mysite_1.0
    app_path = os.path.join(deploy_dir, app_path)
    dest_path = '/var/www/html/nsd1808'
    if os.path.lexists(dest_path):
        os.unlink(dest_path)  # remove any existing (possibly dangling) link first
    os.symlink(app_path, dest_path)
if __name__ == '__main__':
    live_url = 'http://192.168.4.3/deploy/live_version'
    live_fname = '/var/www/deploy/live_version'
    # Bail out early when the server's version matches the local one.
    if not has_new_version(live_url, live_fname):
        print('没有新版本')
        exit()
    r = requests.get(live_url)
    download_dir = '/var/www/download'
    deploy_dir = '/var/www/deploy'
    # Download the package named after the live version, plus its .md5 file.
    app_url = 'http://192.168.4.3/deploy/packages/mysite_%s.tar.gz' % (r.text.strip())
    wget.download(app_url, download_dir)
    app_md5_url = app_url + '.md5'
    wget.download(app_md5_url, download_dir)
    if os.path.exists(live_fname):
        os.remove(live_fname)  # drop the stale local version file, then fetch the newest
    wget.download(live_url, deploy_dir)
    app_fname = os.path.join(download_dir, app_url.split('/')[-1])
    # Verify the download against the published MD5 before deploying.
    local_md5 = md5sum(app_fname)
    r = requests.get(app_md5_url)
    if local_md5 != r.text.strip():
        print('文件校验失败')
        exit(1)
    deploy(app_fname, deploy_dir)
| true |
1b5f6c942169b9a3d818474a375e378424b98a8e | Python | larsweiler/TiLDA | /progressbar.py | UTF-8 | 746 | 2.921875 | 3 | [] | no_license | ### Author: Lars Weiler
### Description: progress bar
### Category: fun
### License: THE NERD-WARE LICENSE (Revision 2)
### Appname: progressbar
import pyb
import ugfx
# Draw a white outline box centred on the TiLDA badge screen, then fill it
# left-to-right in randomly timed steps to fake a progress bar.
ugfx.init()
h = ugfx.height()
w = ugfx.width()
ugfx.clear(ugfx.BLACK)
lw = 240 # progress bar width
lh = 40 # progress bar height
m = 5 # margin
s = 1 # step
ugfx.box((w//2)-(lw//2), (h//2)-(lh//2), lw, lh, ugfx.WHITE)
for i in range(m, lw-2*m, s):
    # use the 30bit random number, bitshift by 24 bits, which will give 0-65
    # and use this number for the delay in milliseconds
    pyb.delay(pyb.rng()>>24)
    ugfx.area((w//2)-(lw//2-m), (h//2)-(lh//2-m), i, (lh-2*m), ugfx.WHITE)
# wait 500ms and blank the screen
pyb.delay(500)
ugfx.clear(ugfx.BLACK)
| true |
23cb664ba10e7d37176b4c4f904c27b07f9b7904 | Python | gbroques/cozplay-demos | /horseshoe/horse_shoe_slot.py | UTF-8 | 513 | 2.953125 | 3 | [] | no_license | '''
Horse Shoe game slot class to store current state information
@class HorseShoeSlot
@author - Team Cozplay
'''
class HorseShoeSlot:
    """Holds the current state of a single Horse Shoe game slot."""

    def __init__(self, state=0, active=0):
        self._state = state
        self._active = active

    def _get_state(self):
        return self._state

    def _set_state(self, value):
        self._state = value

    # Read/write access to the slot's state value.
    state = property(_get_state, _set_state, doc="Current state of the slot.")

    def _get_active(self):
        return self._active

    def _set_active(self, value):
        self._active = value

    # Read/write access to the slot's active flag.
    active = property(_get_active, _set_active, doc="Whether the slot is active.")
| true |
9b9300d6fe3944c13536f4e07a6bf2951d9a04d7 | Python | lilitotaryan/eventnet-back-end | /event_crud/errors.py | UTF-8 | 2,894 | 2.515625 | 3 | [] | no_license | from main_app.errors import MainAppException
class EventCrudException(MainAppException):
    """Base exception for the event-CRUD API; subclasses carry an API code."""
    default_code = "event_crud_error"
class EventDataNotValid(EventCrudException):
    """Code 16: the submitted event data failed validation."""
    def __init__(self):
        super().__init__(code=16,
                         message='Event Data is not valid.',
                         default_code='event_data_not_valid')
class UserHasNoEvent(EventCrudException):
    """Code 17: the user has no registered event."""
    def __init__(self):
        super().__init__(code=17,
                         message='User has no registered event.',
                         default_code='user_has_no_event')
class NoPublicIdSpecified(EventCrudException):
    """Code 18: the request did not include a public id."""
    def __init__(self):
        super().__init__(code=18,
                         message='No public id is specified.',
                         default_code='no_public_id_specified')
class EventDateNotValid(EventCrudException):
    """Code 20: the event date is too close in time to be accepted."""
    def __init__(self):
        super().__init__(code=20,
                         message='Provided event date is not valid. Please, increase the interval till the event.',
                         default_code='event_date_not_valid')
class EventAlreadyExists(EventCrudException):
    """Code 21: an event with the same title already exists."""
    def __init__(self):
        super().__init__(code=21,
                         message='Event with specified title already exists.',
                         default_code='event_already_exists')
class EventHasNoCategory(EventCrudException):
    """Code 22: the event has no selected categories."""
    def __init__(self):
        super().__init__(code=22,
                         message='Event has no selected categories.',
                         default_code='event_has_no_categories')
class EventHasAddress(EventCrudException):
    """Code 23: the event already has a registered address."""
    def __init__(self):
        super().__init__(code=23,
                         message='Event already have registered the address.',
                         default_code='event_has_address')
class EventHasNoAddress(EventCrudException):
    """Code 24: the event has no address yet."""
    def __init__(self):
        super().__init__(code=24,
                         message='Event has no added address.',
                         default_code='event_has_no_address')
class EventShouldHaveAtLeastOneCategory(EventCrudException):
    """Code 25: an event must keep at least one category."""
    def __init__(self):
        super().__init__(code=25,
                         message='Event should have at least one category.',
                         default_code='event_at_least_one_category')
class CannotDeleteEventHavingTickets(EventCrudException):
    """Code 26: events that already have tickets cannot be deleted."""
    def __init__(self):
        super().__init__(code=26,
                         message='Cannot delete event that has tickets.',
                         default_code='cannot_delete_event_having_tickets')
class UserCanBuyAtMostFreeTickets(EventCrudException):
    """Code 27: the user exceeded the free-ticket limit (message parameterised)."""
    def __init__(self, ticket_amount):
        super().__init__(code=27,
                         message='User can buy at most {} free tickets.'.format(ticket_amount),
                         default_code='user_can_buy_at_most_free_tickets')
d888d843f5257ca7f265ce4654961fae643ea3b9 | Python | aliemelo/ssh-exercise | /copy_file.py | UTF-8 | 2,263 | 2.71875 | 3 | [] | no_license | import sys
import argparse
import logging
import os
import util.loggerinitializer as utl
from util import ssh_manip
# Initialize the module-level logger; utl.initialize_logger presumably adds
# handlers that write into the current working directory — TODO confirm.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
utl.initialize_logger(os.getcwd(), logger)
def main():
    """Parse CLI args and copy a file to ('put') or from ('get') a remote host.

    The -c/--credentials config file supplies the SSH connection details
    (see <<config.ini.template>>).
    """
    parser = argparse.ArgumentParser(description="A tool to copy files to and from a remote server")
    parser.add_argument('-c', '--credentials', action='store', help='A config file. See <<config.ini.template>>')
    subparsers = parser.add_subparsers(title='subcommands', description='valid commands', help='Use copy_file.py {'
                                                                                              'subcommand} -h for '
                                                                                              'help with each '
                                                                                              'subcommand')
    # subcommand put
    parser_put = subparsers.add_parser('put', help='Send files to remote server')
    parser_put.add_argument('--src', action='store', dest='src_put',help='The path of the file to be copied')
    parser_put.add_argument('--dest', action='store', dest='dest_put',help='The path to where the file should be '
                                                                           'copied to in the remote server')
    # subcommand get
    # NOTE(review): this help text says "Send" but the subcommand fetches files.
    parser_get = subparsers.add_parser('get', help='Send files to remote server')
    parser_get.add_argument('--src', action='store', dest='src_get', help='The path of the file to be copied')
    parser_get.add_argument('--dest', action='store', dest='dest_get', help='The path to where the file should be '
                                                                            'copied to in the local machine')
    args = parser.parse_args()
    credentials =ssh_manip.get_credentials(args.credentials, logger)
    sftp = ssh_manip.connect_to_client_copy(credentials, logger)
    # argparse only sets a subcommand's dest attributes when that subcommand
    # was chosen, so hasattr() tells us which action was requested.
    if hasattr(args, 'src_put'):
        ssh_manip.copy_file_to_server(sftp, args.src_put, args.dest_put, logger)
    if hasattr(args, 'dest_get'):
        ssh_manip.get_file_from_server(sftp, args.src_get, args.dest_get, logger)
if __name__ == '__main__':
    main()
32f86be1093e34746d85dae47e5d2e7729806011 | Python | Dieter97/Hashcode2021 | /streets/car.py | UTF-8 | 823 | 3.34375 | 3 | [] | no_license | from __future__ import annotations
from typing import List
from streets.street import Street
class Car:
    """A car driving a fixed route of streets, one street at a time."""

    def __init__(self, n_streets):
        self.n_streets: int = n_streets            # total streets on the route
        self.streets: List[Street] = []            # the route itself
        self.time_from_end_of_street: int = 0      # time left on the current street
        self.current_street_index: int = 0         # how many streets are done

    def is_at_end_of_street(self) -> bool:
        """True once the car has reached the end of its current street."""
        return not self.time_from_end_of_street

    def get_closer_to_end_of_street(self, step) -> None:
        """Advance by *step* time units, clamping at the end of the street."""
        remaining = self.time_from_end_of_street
        if remaining > 0:
            self.time_from_end_of_street = max(0, remaining - step)

    def move_into_new_street(self, street: Street):
        """Enter *street*: reset the travel timer and advance the route index."""
        self.time_from_end_of_street = street.time
        self.current_street_index += 1

    def has_arrived(self):
        """True once every street on the route has been traversed."""
        return self.current_street_index == self.n_streets
| true |
b8bd58d2a6af62149f8987542fba618f7c674aab | Python | himl/boson | /SVM/EvaluatingEstimator.py | UTF-8 | 2,475 | 3.375 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
DEFAULT_FOLDS_NUMBER = 5
def cross_validation(estimator, data, target, folds_number=DEFAULT_FOLDS_NUMBER):
    """Score *estimator* with K-fold cross-validation.

    The samples are split into *folds_number* folds; each fold in turn is
    held out as the test set while the remaining folds train the estimator
    (see http://scikit-learn.org/stable/modules/cross_validation.html#k-fold).

    Returns a tuple (mean test score, 2 * standard deviation, elapsed seconds).
    """
    from sklearn.cross_validation import KFold
    import numpy as np
    import time

    folds = KFold(len(target), n_folds=folds_number)
    # One accuracy score per fold: index = fold number, value = test score.
    scores = np.zeros(folds_number)
    started = time.time()
    for fold, (train, test) in enumerate(folds):
        x_train, x_test = data[train], data[test]
        y_train, y_test = target[train], target[test]
        fitted = estimator.fit(x_train, y_train)
        scores[fold] = fitted.score(x_test, y_test)
    finished = time.time()
    return scores.mean(), scores.std() * 2, (finished - started)
def cross_validation_for_grid(estimator, data, target, folds_number=DEFAULT_FOLDS_NUMBER):
    """K-fold cross-validation for a GridSearchCV() estimator.

    Same protocol as cross_validation(), but additionally prints the best
    estimator chosen by the grid search on each fold.

    Returns a tuple (mean test score, 2 * standard deviation, elapsed seconds).
    """
    from sklearn.cross_validation import KFold
    import numpy as np
    import time

    folds = KFold(len(target), n_folds=folds_number)
    # One accuracy score per fold: index = fold number, value = test score.
    scores = np.zeros(folds_number)
    started = time.time()
    for fold, (train, test) in enumerate(folds):
        x_train, x_test = data[train], data[test]
        y_train, y_test = target[train], target[test]
        fitted = estimator.fit(x_train, y_train)
        print(fitted.best_estimator_)
        scores[fold] = fitted.score(x_test, y_test)
    finished = time.time()
    return scores.mean(), scores.std() * 2, (finished - started)
| true |
e3973c0573bd3bab5f512238b1a9cbb760d16aa5 | Python | tgbmangel/PersonalUPUPUP | /Meiju/mjtt.py | UTF-8 | 1,337 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Project : upupup
# @Time : 2018/5/7 17:08
# @Author :
# @File : mjtt.py
# @Software: PyCharm Community Edition
import requests
import re
import os
class Meiju():
    """Scrapes ed2k download links from one meijutt.com episode page."""

    def __init__(self, home_url):
        self.home_url = home_url
        # Pattern matching the ed2k URL inside each download anchor tag.
        self.ed2k_url = re.compile('href="(ed2k.*?)"')
        # Pages on this site are GBK-encoded.
        self.coding_format = 'gbk'

    def get_download_url(self):
        """Fetch the page and return every ed2k link found in its HTML."""
        response = requests.get(self.home_url)
        response.encoding = self.coding_format
        page_html = response.text
        return re.findall(self.ed2k_url, page_html)
if __name__=="__main__":
home_url = "http://www.meijutt.com/content/meiju23109.html"
A=Meiju(home_url)
downloadurls=A.get_download_url()
downloaded_list=os.listdir("F:\MeiJu")
print("-"*50)
print('已下载清单:')
print(downloaded_list)
print("-" * 50)
os.chdir(r"D:\Program Files (x86)\Thunder Network\MiniThunder\Bin")
for x in downloadurls:
print("-" * 20)
print('获取到:{}'.format(x))
if x.split("|")[2] in downloaded_list:
print("检测到已下载,跳过!")
else:
print("启动下载器!")
os.system("start ThunderMini.exe -StartType:DesktopIcon \"%s\"" %x)
print("-" * 50)
| true |
7b5e921a422b4c571b84a7df6f7bf780e8dec11e | Python | karenL-M/R1_Patrones | /R1 PATRONES/composite.py | UTF-8 | 984 | 3.15625 | 3 | [] | no_license | from abc import ABC, abstractmethod
class Reproducible(ABC):
    """Component of the Composite pattern: anything that can be played.

    Fixed: originally defined *after* Pelicula referenced it, which raised
    NameError at import time; it also never inherited ABC, so the
    @abstractmethod marker had no effect.
    """
    @abstractmethod
    def reproduccion(self):
        """Play this item."""
        pass

class Pelicula(Reproducible):
    """Leaf base class: a single movie."""
    def reproduccion(self):
        pass

class AlbumPelicula(Reproducible):
    """Composite: an album that plays every movie added to it.

    Fixed: the original __init__ had no body (the other methods were nested
    inside it) and mixed up self._cantidad / self._cant.
    """
    def __init__(self):
        self._cantidad = set()

    def reproduccion(self):
        for cant in self._cantidad:
            cant.reproduccion()

    def add(self, reproducible):
        self._cantidad.add(reproducible)

    def remove(self, reproducible):
        self._cantidad.discard(reproducible)

class Terror(Pelicula):
    def reproduccion(self):
        print('Reproducción solo sin entrar a tu Album')

class Comedia(Pelicula):
    def reproduccion(self):
        print('Reproducción solo sin entrar a tu Album')

class Infantil(Pelicula):
    def reproduccion(self):
        print('Reproducción solo sin entrar a tu Album')

def main():
    """Demo: play an album containing one movie.

    Fixed: the original referenced the undefined names Movie and
    AlbumPeliculas.
    """
    movie = Terror()
    album = AlbumPelicula()
    album.add(movie)
    album.reproduccion()

if __name__ == "__main__":
    main()
41595110062ce8fc281882964ff1e9c48c84f999 | Python | tobby-lie/Multi-Instrument-RNN-Generation | /LSTM-ABC_Notation/Music_Generator_Train.py | UTF-8 | 8,114 | 3.234375 | 3 | [] | no_license | import os
import json
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import LSTM, Dropout, TimeDistributed, Dense, Activation, Embedding
import time
import numpy
import sys
# Print full numpy arrays (no truncation) — useful when inspecting batches.
numpy.set_printoptions(threshold=sys.maxsize)
# NOTE(review): hard-coded absolute paths; the script only works on the
# original author's machine unless these are changed.
data_directory = "/Users/tobbylie/Documents/CSCI_5931/Final_Project_DL"
data_file = "jigs.txt"
charIndex_json = "char_to_index.json"
model_weights_directory = "/Users/tobbylie/Documents/CSCI_5931/Final_Project_DL"
# batch size and seq length can be anything we define them to be
BATCH_SIZE = 16
SEQ_LENGTH = 64
#-------------------------------------------------------------------------------------------
# Method: read_batches
#-------------------------------------------------------------------------------------------
def read_batches(all_chars, unique_chars):
    """Yield (X, Y) training batches from the encoded character stream.

    Parameters:
        all_chars    - numpy int array of character indices for the corpus
        unique_chars - size of the character vocabulary

    Yields:
        X - (BATCH_SIZE, SEQ_LENGTH) array of input character indices; row b
            is a 64-char window sliding over the b-th of 16 equal corpus slices
        Y - (BATCH_SIZE, SEQ_LENGTH, unique_chars) one-hot targets where
            Y[b, i] encodes the character that follows X[b, i]

    Fixed: removed a leftover debugging print of every X cell, which wrote
    BATCH_SIZE * SEQ_LENGTH lines per batch and dominated the runtime.
    """
    length = all_chars.shape[0]
    # characters available per batch row, e.g. 155222 // 16 = 9701
    batch_chars = int(length / BATCH_SIZE)
    # slide a SEQ_LENGTH window over each row's slice in steps of 64
    for start in range(0, batch_chars - SEQ_LENGTH, 64):
        X = np.zeros((BATCH_SIZE, SEQ_LENGTH))
        Y = np.zeros((BATCH_SIZE, SEQ_LENGTH, unique_chars))
        for batch_index in range(0, 16):
            for i in range(0, 64):
                X[batch_index, i] = all_chars[batch_index * batch_chars + start + i]
                # the label is the NEXT character in the sequence, one-hot encoded
                Y[batch_index, i, all_chars[batch_index * batch_chars + start + i + 1]] = 1
        yield X, Y
def built_model(batch_size, seq_length, unique_chars):
    """Build the character-level generator network.

    Architecture: embedding (vocab -> 512) feeding three stacked stateful
    LSTMs of 256 units (each followed by 20% dropout), then a per-timestep
    dense softmax over the character vocabulary.
    """
    model = Sequential()
    model.add(Embedding(input_dim = unique_chars, output_dim = 512, batch_input_shape = (batch_size,
    seq_length)))
    model.add(LSTM(256, return_sequences = True, stateful = True))
    model.add(Dropout(0.2))
    model.add(LSTM(256, return_sequences = True, stateful = True))
    model.add(Dropout(0.2))
    model.add(LSTM(256, return_sequences = True, stateful = True))
    model.add(Dropout(0.2))
    model.add(TimeDistributed(Dense(unique_chars)))
    model.add(Activation("softmax"))
    return model
def training_model(data, epochs = 80):
    """Train the character-level RNN on *data* (a string of ABC-notation tunes).

    Side effects: writes char_to_index.json, saves model weights every 10
    epochs into model_weights_directory, and records per-epoch loss/accuracy
    to log.csv.  NOTE(review): the log.csv path is hard-coded below.
    """
    # mapping character to index via a dictionary
    # char as key and index as value
    # set(data) produces an unordered collection of characters from data with no duplicates
    # that is then turned into a list and then sorted to be looped through
    char_to_index = {ch: i for (i, ch) in enumerate(sorted(list(set(data))))}
    # print out total number of unique characters in data
    print("Number of unique characters in our whole tunes database = {}".format(len(char_to_index))) #87
    # define a path to our charIndex_json file and put the contents of char_to_index into it
    with open(os.path.join(data_directory, charIndex_json), mode = "w") as f:
        json.dump(char_to_index, f)
    # create dict flipping keys and values from char_to_index so the keys
    # become values and values become keys
    index_to_char = {i: ch for (ch, i) in char_to_index.items()}
    # unique characeter is the number of elements in char_to_index
    unique_chars = len(char_to_index)
    # use method built_model to return an RNN model of specified batch size,
    # sequence length and with input dimension = unique_chars
    model = built_model(BATCH_SIZE, SEQ_LENGTH, unique_chars)
    # print summary of made model
    model.summary()
    # train the model
    model.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
    # go character by character in data and get index of that character as array element
    # assign this numpy array to all_characters
    all_characters = np.asarray([char_to_index[c] for c in data], dtype = np.int32)
    # total number of characters should be 155222
    print("Total number of characters = "+str(all_characters.shape[0]))
    # create three empty lists for epoch number, loss, and accuracy
    epoch_number, loss, accuracy = [], [], []
    # for each epoch print which epoch we are in
    # and add epoch number to list of epoch numbers
    for epoch in range(epochs):
        print("Epoch {}/{}".format(epoch+1, epochs))
        # initialize final epoch loss and final epoch accuracy to 0 to accumulate
        # for each epoch
        final_epoch_loss, final_epoch_accuracy = 0, 0
        epoch_number.append(epoch+1)
        # for i => index and tuple (x, y) => training batch
        for i, (x, y) in enumerate(read_batches(all_characters, unique_chars)):
            # update final epoch loss and final epoch accuracy from model trained from batch (x, y)
            #check documentation of train_on_batch here: https://keras.io/models/sequential/
            final_epoch_loss, final_epoch_accuracy = model.train_on_batch(x, y)
            # print out batch, loos, accuracy retreived from train on batch
            # here, we are reading the batches one-by-one and training our model on each batch one-by-one.
            print("Batch: {}, Loss: {}, Accuracy: {}".format(i+1, final_epoch_loss, final_epoch_accuracy))
        # add final epoch loss and final epoch accuracy to our loss and accuracy lists respectively
        # (these are the values from the LAST batch of the epoch, not an average)
        loss.append(final_epoch_loss)
        accuracy.append(final_epoch_accuracy)
        #saving weights after every 10 epochs
        if (epoch + 1) % 10 == 0:
            # if directory does not exist then make it
            if not os.path.exists(model_weights_directory):
                os.makedirs(model_weights_directory)
            # save weights to .h5 file
            model.save_weights(os.path.join(model_weights_directory, "Weights_{}.h5".format(epoch+1)))
            # specify which multple of 10 epoch
            print('Saved Weights at epoch {} to file Weights_{}.h5'.format(epoch+1, epoch+1))
    #creating dataframe and record all the losses and accuracies at each epoch
    log_frame = pd.DataFrame(columns = ["Epoch", "Loss", "Accuracy"])
    log_frame["Epoch"] = epoch_number
    log_frame["Loss"] = loss
    log_frame["Accuracy"] = accuracy
    log_frame.to_csv("/Users/tobbylie/Documents/CSCI_5931/Final_Project_DL/log.csv", index = False)
# NOTE(review): this runs at import time — the tunes file is read (and must
# exist) even when the module is merely imported.  'file' also shadows the
# Python 2 builtin of the same name.
file = open(os.path.join(data_directory, data_file), mode = 'r')
data = file.read()
file.close()
if __name__ == "__main__":
    # Train, then load the per-epoch loss/accuracy log that training wrote.
    training_model(data)
    log = pd.read_csv(os.path.join(data_directory, "log.csv"))
| true |
c5c8ce6c6e20f00b510821b1e4fc68b3837de40e | Python | teamgeek-io/dummyzarid | /dummyzarid/__init__.py | UTF-8 | 1,797 | 3.234375 | 3 | [
"MIT"
] | permissive | import re
from random import randrange, choice
from enum import Enum
class Gender(Enum):
    """Gender digit of the ID number (7th digit in generate_dummy_number)."""
    FEMALE = "4"
    MALE = "5"
class Citizenship(Enum):
    """Citizenship digit of the ID number (11th digit in generate_dummy_number)."""
    CITIZEN = "0"
    RESIDENT = "1"
def calculate_check_digit(digits):
    """Return the Luhn check digit (0-9) for *digits*.

    Non-digit characters are ignored.  Working from the rightmost digit,
    every digit in an even position is doubled (subtracting 9 when the
    double exceeds 9); the check digit is (sum * 9) mod 10.
    """
    numeric = [int(ch) for ch in re.sub(r"\D", "", digits)]
    total = 0
    for position, digit in enumerate(reversed(numeric)):
        if position % 2 == 0:
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total * 9 % 10
def generate_dummy_number(
    year="93",
    month="10",
    day="19",
    gender=Gender.MALE,
    citizenship=Citizenship.CITIZEN,
    sequence="896",
):
    """Assemble a dummy South African ID number.

    Concatenates YYMMDD + gender digit + sequence + citizenship digit + the
    constant '8', then appends the Luhn check digit of that 12-digit string.
    """
    partial = year + month + day + gender.value + sequence + citizenship.value + "8"
    return partial + str(calculate_check_digit(partial))
def generate_random_dummy_number(
    year=None,
    month=None,
    day=None,
    gender=None,
    citizenship=None,
    sequence=None,
):
    """Generate a dummy SA ID number, randomising any field not supplied.

    BUG FIX: ``random.randrange`` excludes its stop value, so the original
    could never produce year "99", month "12" (December), day "31" or
    sequence "999". Bounds are now one past the intended maximum.
    """
    if year is None:
        year = str(randrange(20, 100))
    if month is None:
        month_num = randrange(1, 13)
        month = f'{month_num:02d}'
    if day is None:
        # Day is not validated against the month; dummy IDs may contain
        # impossible dates (e.g. 31 February), as in the original.
        day_num = randrange(1, 32)
        day = f'{day_num:02d}'
    if gender is None:
        gender = choice([Gender.MALE, Gender.FEMALE])
    if citizenship is None:
        citizenship = choice([Citizenship.CITIZEN, Citizenship.RESIDENT])
    if sequence is None:
        sequence_num = randrange(0, 1000)
        sequence = f'{sequence_num:03d}'
    return generate_dummy_number(
        year=year,
        month=month,
        day=day,
        gender=gender,
        citizenship=citizenship,
        sequence=sequence,
    )
| true |
b22df407793c026e4eddfd2f117d254f0aef7057 | Python | camjohn47/tripadvisor-nlp | /nlp_pipeline.py | UTF-8 | 8,251 | 2.75 | 3 | [] | no_license | from sklearn.tree import DecisionTreeClassifier as dtc
from sklearn.ensemble import RandomForestClassifier as rfc
from sklearn.model_selection import train_test_split as split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation as LDA
from collections import defaultdict
from textblob import TextBlob
import numpy as np
from itertools import product
from sklearn.metrics import accuracy_score as accuracy
class NLPPipeline():
    """End-to-end text-classification pipeline.

    Combines TF-IDF features with engineered features (entropy, LDA topic
    overlap, TextBlob sentiment statistics) and a tree-based classifier,
    and supports grid search over every pipeline step. TF-IDF and feature
    matrices are cached and reused between scored permutations when their
    parameter dictionaries are unchanged.
    """
    def __init__(self, text, Y, train_size=.85):
        # Map short model names to sklearn estimator classes.
        self.model_builders = {'dtc': dtc, 'rfc': rfc}
        steps = ['tfidf', 'feature_engineering', 'lda', 'model']
        self.pipeline_dic = {step: None for step in steps}
        # Stratified split keeps class proportions equal in train and test.
        self.text_train, self.text_test, self.Y_train, self.Y_test = split(text, Y, train_size=train_size, stratify=Y)
        # Predicates used by score() to decide whether the cached tfidf /
        # feature matrices can be reused for a new configuration.
        self.keep_tfidf = lambda tfidf_dic: (tfidf_dic == self.pipeline_dic['tfidf'])
        self.keep_features = lambda features_dic: (features_dic == self.pipeline_dic['features'])
        # Per-probability entropy term. NOTE(review): np.log is the natural
        # log, so entropies are measured in nats, not bits.
        self.prob_info = lambda prob: -prob * np.log(prob)
        # NOTE(review): this overwrites the dict built a few lines above;
        # also the keys actually used later are 'tfidf'/'features'/'model',
        # not 'feature_engineering'/'lda'.
        self.pipeline_dic = {step: "Default" for step in steps}
        self.train_size = train_size
    def update_tfidf(self, tfidf_dic):
        # Fit TF-IDF on the training split and cache dense train/test
        # matrices plus the tokenizer and vocabulary for feature building.
        self.pipeline_dic['tfidf'] = tfidf_dic
        self.tfidf = TfidfVectorizer(**tfidf_dic)
        self.tfidf_train = self.tfidf.fit_transform(self.text_train)
        self.tfidf_train = self.tfidf_train.toarray()
        self.tokenizer = self.tfidf.build_tokenizer()
        self.tfidf_test = self.tfidf.transform(self.text_test)
        self.tfidf_test = self.tfidf_test.toarray()
        self.feature_names = self.tfidf.get_feature_names()
    def update_lda(self, lda_dic):
        # Fit LDA on the TF-IDF train matrix and record, for each topic,
        # the set of its `num_top_words` highest-weight words.
        def calc_topics_words(num_top_words):
            topics_words = []
            for ix, topic in enumerate(self.lda.components_):
                # argsort ascending; this slice takes the top indices.
                top_word_inds = topic.argsort()[:-num_top_words - 1:-1]
                topic_words = set([self.feature_names[i] for i in top_word_inds])
                topics_words.append(topic_words)
            return topics_words
        num_top_words = lda_dic['num_top_words'] if 'num_top_words' in lda_dic else 10
        # 'num_top_words' is pipeline-only; strip it before building LDA.
        lda_model_dic = {k: v for k, v in lda_dic.items() if k!= 'num_top_words'}
        self.lda = LDA(**lda_model_dic)
        self.lda.fit_transform(self.tfidf_train)
        self.topics_words = calc_topics_words(num_top_words)
    def calc_entropy(self, text):
        ''' Many other equivalent ways to calculate entropy. This seems to be the fastest. 5 x faster than scipy's entropy method.'''
        word_counts = defaultdict(int)
        text_size = float(len(text))
        for word in text:
            word_counts[word] += 1
        word_counts = np.array(list(word_counts.values()))
        word_probs = word_counts / text_size
        # Shannon entropy (in nats, see prob_info) of the word distribution.
        entropy = -1 * sum(map(self.prob_info, word_probs))
        return entropy
    def calc_lda_features(self, tokenized_text):
        # One feature per topic: fraction of the text's unique words that
        # appear among that topic's top words.
        # NOTE(review): num_topics is computed but unused.
        num_topics = len(self.topics_words)
        unique_words = set(tokenized_text)
        num_unique_words = float(len(unique_words))
        lda_features = [len(unique_words.intersection(topic_words))
                        / num_unique_words for topic_words in self.topics_words]
        return lda_features
    def calc_sentiment_features(self, text):
        # Sentence-level polarity/subjectivity statistics from TextBlob.
        # Sentences with |polarity| <= 0.1 are treated as neutral.
        min_polarity, max_polarity = -.1, .1
        blob = TextBlob(text)
        polarities = [sentence.sentiment.polarity for sentence in blob.sentences]
        polarities = [round(polarity, 2) for polarity in polarities]
        polarity_entropy = self.calc_entropy(polarities)
        polarity_var = np.var(polarities)
        num_pos_sents = len([polarity for polarity in polarities if polarity > max_polarity])
        num_neg_sents = len([polarity for polarity in polarities if polarity < min_polarity])
        # NOTE(review): raises ZeroDivisionError when the text has no
        # sentences (num_sents == 0).
        num_sents = float(len(polarities))
        pos_sent_freq, neg_sent_freq = num_pos_sents / num_sents, num_neg_sents/num_sents
        num_neutral_sents = num_sents - num_pos_sents - num_neg_sents
        max_pol, min_pol= np.max(polarities) if polarities else 0, min(polarities) if polarities else 0
        subjectivities = [sentence.sentiment.subjectivity for sentence in blob.sentences]
        subjectivities = [round(x, 2) for x in subjectivities]
        subj_var = np.var(subjectivities)
        max_subj, min_subj = np.max(subjectivities) if polarities else 0, min(subjectivities) if polarities else 0
        sentiment_features = [polarity_entropy, polarity_var, num_pos_sents, num_neg_sents, num_neutral_sents, pos_sent_freq, neg_sent_freq,
                              num_sents, max_pol, min_pol, subj_var, max_subj, min_subj]
        return sentiment_features
    def update_features(self, features_dic):
        """
        From a dictionary containing parameter labels and values used for building features (currently just LDA),
        updates feature matrices by re-calculating features for each text.
        Arguments
        features_dic (dictionary): A dictionary with string parameter names as keys and ints/floats as values.
        Example:
        features_dic = {'n_components': 10, 'n_words': 10}
        """
        # Per-document feature vector: entropy + LDA overlap + sentiment.
        def calc_features(text):
            words = self.tokenizer(text)
            entropy = self.calc_entropy(words)
            lda_features = self.calc_lda_features(words)
            sentiment_features = self.calc_sentiment_features(text)
            features = [entropy, *lda_features, *sentiment_features]
            return features
        self.pipeline_dic['features'] = features_dic
        self.update_lda(features_dic)
        # Final design matrices = dense TF-IDF columns + engineered columns.
        self.X_train = np.hstack((self.tfidf_train, np.array([np.array(calc_features(text)) for text in self.text_train])))
        self.X_test = np.hstack((self.tfidf_test, np.array([np.array(calc_features(text)) for text in self.text_test])))
    def grid_search(self, step_grids):
        """
        From a nested dictionary containing grids for each pipeline step, fit and score each possible pipeline
        permutation (nested permutation of the step permutations).
        Arguments
        step_grids: A nested dictionary containing the step grid for each step.
        Example: step_grids = {'tfidf' = {'min_df': [0.1]},
        'features' = {'n_components': [10], num_top_words: [10]},
        'model' = {'type': ['rfc']} }
        Returns
        pipeline_scores: A sorted list of 2-tuples containing the pipeline dictionary and score of each pipeline permutation.
        """
        def get_step_perms(grid):
            """
            From grid (dict) mapping each parameter name to a list of values for that parameter,
            returns the list of all permutations (dicts) that can be made by choosing a different value
            for each parameter from its values list.
            Arguments
            grid ({string: list}): A dictionary mapping parameter names to a list of parameter values.
            Example: grid = {'min_df': [0.1], 'max_df': [0.8, 0.9]}
            Returns
            step_perms ([dict]): A list of all dictionary permutations for the step that can be made
            by choosing different parameter values from each parameter's domain.
            Example: For the above grid example, we'd have
            step_perms = [{'min_df': 0.1, 'max_df: 0.8'}, {'min_df: 0.1', max_df: 0.9}]
            """
            param_names = list(grid.keys())
            # Cartesian product over the per-parameter value lists.
            param_val_perms = list(product(*list(grid.values())))
            num_params = len(param_names)
            step_perms = [{param_names[j]: param_val_perm[j] for j in range(num_params)} for param_val_perm in param_val_perms]
            return step_perms
        steps = list(step_grids.keys())
        num_steps = len(steps)
        grids = list(step_grids.values())
        step_perms = list(map(get_step_perms, grids))
        # Cross every step's permutations to enumerate full pipelines.
        pipeline_perms = list(product(*step_perms))
        pipeline_perms = [{steps[i]: pipeline_perm[i] for i in range(num_steps)} for pipeline_perm in pipeline_perms]
        pipeline_scores = [[pipeline_perm, round(self.score(pipeline_perm), 3)] for pipeline_perm in pipeline_perms]
        # Best score first.
        pipeline_scores.sort(key=lambda x: x[1], reverse=True)
        return pipeline_scores
    def score(self, pipeline_dic):
        # Fit the configuration described by pipeline_dic and return test
        # accuracy, reusing cached tfidf/feature matrices when unchanged.
        # NOTE(review): tfidf_vectorizer below is built but never used.
        tfidf_vectorizer = TfidfVectorizer(**pipeline_dic['tfidf'])
        keep_tfidf = self.keep_tfidf(pipeline_dic['tfidf'])
        if not keep_tfidf:
            self.update_tfidf(pipeline_dic['tfidf'])
        # Features depend on tfidf, so a tfidf refresh forces a feature refresh.
        keep_features = keep_tfidf and self.keep_features(pipeline_dic['features'])
        if not keep_features:
            self.update_features(pipeline_dic['features'])
        self.model_builder = self.model_builders[pipeline_dic['model']['type']]
        # 'type' selects the estimator; the rest are its keyword arguments.
        model_dic = {key: value for key, value in pipeline_dic['model'].items() if key != 'type'}
        self.model = self.model_builder(**model_dic)
        self.model.fit(self.X_train, self.Y_train)
        Y_pred = self.model.predict(self.X_test)
        score = accuracy(Y_pred, self.Y_test)
        print(f"Params = {pipeline_dic}, score = {round(score, 3)}. \n")
        return score
| true |
34c0bff9f5735d715ed5ec4c18e8b2c60e4dc369 | Python | paulinaJaworska/lightweight-erp | /hr/hr.py | UTF-8 | 3,656 | 3.546875 | 4 | [] | no_license | """ Human resources module
Data table structure:
* id (string): Unique and random generated identifier
at least 2 special characters (except: ';'), 2 number, 2 lower and 2 upper case letters)
* name (string)
* birth_year (number)
"""
# everything you'll need is imported:
# User interface module
import ui
# data manager module
import data_manager
# common module
import common
# Attribute labels prompted from the user for add/update operations.
# BUG FIX: this was misspelled "lables" while every consumer in this module
# (start_module, update) references "labels", causing a NameError at runtime.
# The old misspelled name is kept as an alias for backward compatibility.
labels = ["name", "birth_year"]
lables = labels
FILE_NAME_1 = "hr/persons.csv"
table = data_manager.get_table_from_file(FILE_NAME_1)
def start_module():
    """
    Starts this module and displays its menu.
    * User can access default special features from here.
    * User can go back to main menu from here.
    Returns:
        None
    """
    title = "HR"
    list_options = ["show table", "add", "remove", "update", "get_the oldest person", "get the person closest to the average"]
    exit_message = "Back to main menu"
    ui.print_menu(title, list_options, exit_message)
    message = "There is no such option"
    choice_input = ui.get_inputs(["Choose a special feature:"], "")
    choice = choice_input[0]
    if choice == '1':
        show_table(table)
    elif choice == '2':
        # Requires the module-level `labels` list (note: it is defined under
        # the misspelled name `lables` at the top of this module).
        data_manager.write_table_to_file(FILE_NAME_1, common.add_item(labels, table))
    elif choice == '3':
        id_ = ui.get_inputs(["Please enter an id: "], "")
        data_manager.write_table_to_file(FILE_NAME_1, common.delete_item(id_, table))
    elif choice == '4':
        id_ = ui.get_inputs(["Please enter an id: "], "")
        common.update(id_, table, labels)
    elif choice == '5':
        # BUG FIX: the original evaluated the bare name `get_oldest_person`
        # (a no-op expression) instead of calling it; call it and show the
        # result. Assumes ui.print_result(result, label) as in the standard
        # lightweight-erp ui module -- TODO confirm its signature.
        ui.print_result(get_oldest_person(table), "Oldest person(s):")
    elif choice == '6':
        # BUG FIX: same bare-name no-op as option '5'.
        ui.print_result(get_persons_closest_to_average(table), "Closest to the average age:")
    elif choice == '0':
        common.menu_back()
    else:
        ui.print_error_message(message)
def show_table(table):
    """
    Display a table
    Args:
        table (list): list of lists to be displayed.
    Returns:
        None
    """
    # Delegates rendering to the ui module; the header row is fixed because
    # records in this module always hold exactly (id, name, birth year).
    ui.print_table(table, ["id", "name", "birth year"])
def add(table):
    """
    Asks user for input and adds it into the table.
    Args:
        table (list): table to add new record to
    Returns:
        list: Table with a new record
    """
    # NOTE(review): not implemented -- returns the table unchanged; the menu
    # handler currently uses common.add_item instead of this stub.
    return table
#def remove(table, id_):
"""
Remove a record with a given id from the table.
Args:
table (list): table to remove a record from
id_ (str): id of a record to be removed
Returns:
list: Table without specified record.
"""
# your code
def update(table, id_):
    """
    Updates specified record in the table. Ask users for new data.
    Args:
        table (list): list in which record should be updated
        id_ (str): id of a record to update
    Returns:
        list: table with updated record
    """
    IDINDEX = 0
    # BUG FIX (idiom): the original named this accumulator `list`, shadowing
    # the builtin. Behaviour is otherwise unchanged: start from the id values
    # and extend with the user's replacement field values.
    new_record = [] + id_
    for row in table:
        if id_[IDINDEX] in row:
            # NOTE(review): if several rows match, new_record keeps growing
            # across matches (preserved from the original; likely unintended).
            new_record = new_record + ui.get_inputs(labels, "Please provide updated information: ")
            for i in range(len(row)):
                row[i] = new_record[i]
    return table
# special functions:
# ------------------
def get_oldest_person(table):
    """
    Question: Who is the oldest person?
    Args:
        table (list): data table to work on
    Returns:
        list: A list of strings (name or names if there are two more with the same value)
    """
    # Implemented the stub: the oldest person(s) have the minimum birth year.
    # Rows are (id, name, birth_year); birth_year is stored as a string.
    if not table:
        return []
    oldest_year = min(int(row[2]) for row in table)
    return [row[1] for row in table if int(row[2]) == oldest_year]
def get_persons_closest_to_average(table):
    """
    Question: Who is the closest to the average age?
    Args:
        table (list): data table to work on
    Returns:
        list: list of strings (name or names if there are two more with the same value)
    """
    # Implemented the stub: the person closest to the average age is the one
    # whose birth year is closest to the average birth year, so no current-
    # year lookup is needed. Rows are (id, name, birth_year as string).
    if not table:
        return []
    years = [int(row[2]) for row in table]
    average = sum(years) / len(years)
    smallest_gap = min(abs(year - average) for year in years)
    return [row[1] for row in table if abs(int(row[2]) - average) == smallest_gap]
| true |
0a11fb2df0d39c6ed8cd887b54b8315f4b583db7 | Python | gameboy1024/ProjectEuler | /src/problem_17.py | UTF-8 | 1,706 | 3.890625 | 4 | [] | no_license | '''
If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.
Answer: 21124 Completed on Mon, 13 Oct 2014, 19:39
https://projecteuler.net/problem=17
@author Botu Sun
'''
# Letters used to spell each "atomic" number word (no spaces or hyphens),
# e.g. 3 -> "three" (5), 40 -> "forty" (5), 100 -> "hundred" (7).
letter_count = {
    0: 0, 1: 3, 2: 3, 3: 5, 4: 4, 5: 4, 6: 3, 7: 5, 8: 5, 9: 4,
    10: 3, 11: 6, 12: 6, 13: 8, 14: 8, 15: 7, 16: 7, 17: 9, 18: 8, 19: 8,
    20: 6, 30: 6, 40: 5, 50: 5, 60: 5, 70: 7, 80: 6, 90: 6,
    100: 7, 1000: 8}


def GetLetterCount(i):
    """Return the number of letters used to write i (1..1000) in British English.

    BUG FIX / portability: uses floor division (//) so the hundreds digit is
    an int key under both Python 2 and Python 3; the original relied on
    Python 2's integer '/', which yields a float (and a KeyError) on Python 3.
    """
    if i == 1000:
        # "one thousand"
        return letter_count[1] + letter_count[1000]
    elif i >= 100:
        # "<digit> hundred" plus "and <remainder>"; the +3 counts "and".
        return letter_count[i // 100] + letter_count[100] + ((GetLetterCount(i % 100) + 3) if i % 100 != 0 else 0)
    elif i >= 20:
        # tens word + units word (units contribute 0 letters for 0).
        return letter_count[i - i % 10] + letter_count[i % 10]
    else:
        return letter_count[i]
# Accumulate the letter counts for 1..1000 (Project Euler 17; answer 21124).
# NOTE(review): Python 2 script (xrange, print statement); `sum` also shadows
# the builtin of the same name.
sum = 0
for i in xrange(1, 1001):
    sum += GetLetterCount(i)
print sum
b79e014eda08426c93a295bf90e8b48d7eaa6a14 | Python | sethjuarez/Digitz | /LearnDigitz/pytorch_train.py | UTF-8 | 2,052 | 2.65625 | 3 | [
"MIT"
] | permissive | import os
import sys
import torch
import argparse
import numpy as np
import torch.nn as nn
import torch.optim as optim
from datetime import datetime
from misc.digits import Digits
import torch.nn.functional as F
from misc.helpers import print_info, print_args, check_dir, info, save_model
def main(args):
    """Train a single linear layer on the digits dataset with SGD + MSE loss.

    Args:
        args: parsed argparse namespace with .data, .batch, .epochs and .lr.
    Returns:
        int: 0 on completion (process exit code convention).
    """
    # digit data
    digits = Digits(args.data, args.batch)
    test_x, test_y = digits.test
    _, size_x = test_x.shape
    _, size_y = test_y.shape
    # BUG FIX: is_available is a method; the bare attribute is always truthy,
    # so the original selected 'cuda' even on CPU-only machines.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # NOTE(review): `device` is computed but neither the model nor the
    # batches are moved to it; training runs wherever the tensors live.
    model = nn.Sequential(nn.Linear(size_x, size_y))
    loss = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    # optimization loop
    for epoch in range(args.epochs):
        info("Epoch {}".format(epoch+1))
        for i, (train_x, train_y) in enumerate(digits):
            optimizer.zero_grad()
            y = model(train_x)
            cost = loss(y, train_y)
            c = cost.item()
            cost.backward()
            optimizer.step()
            print("\r Batch {}/{} - Cost {}".format(i+1, digits.total,c), end="")
    return 0
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='CNN Training for Image Recognition.')
    parser.add_argument('-d', '--data', help='directory to training and test data', default='data')
    parser.add_argument('-e', '--epochs', help='number of epochs', default=10, type=int)
    parser.add_argument('-b', '--batch', help='batch size', default=100, type=int)
    parser.add_argument('-l', '--lr', help='learning rate', default=0.001, type=float)
    parser.add_argument('-o', '--output', help='output directory', default='output')
    args = parser.parse_args()
    # Normalise paths; check_dir creates the directory when it is missing.
    args.data = check_dir(os.path.abspath(args.data))
    args.output = os.path.abspath(args.output)
    # Timestamp keeps log/model folders unique per run (month.day_hour.minute).
    unique = datetime.now().strftime('%m.%d_%H.%M')
    args.log = check_dir(os.path.join(args.output, 'logs', 'log_{}'.format(unique)))
    args.model = check_dir(os.path.join(args.output, 'models', 'model_{}'.format(unique)))
    main(args)
d96bd59b277ee18f811e14fefad4f89b50804743 | Python | Hank-Liao-Yu-Chih/document | /OpenCV讀者資源/讀者資源/程式實例/ch12/ch12_9.py | UTF-8 | 313 | 2.953125 | 3 | [] | no_license | # ch12_9.py
import cv2
import numpy as np
# Morphological opening (erosion followed by dilation) removes small bright
# specks/noise while preserving the overall shape of larger objects.
src = cv2.imread("btree.jpg")
kernel = np.ones((3,3),np.uint8) # build a 3 x 3 structuring element (kernel)
dst = cv2.morphologyEx(src,cv2.MORPH_OPEN,kernel) # apply the opening operation
cv2.imshow("src",src)
cv2.imshow("after Opening 3 x 3",dst)
cv2.waitKey(0) # block until any key is pressed
cv2.destroyAllWindows()
ef756b533a6e12dcf7d202cbacb818241daf539e | Python | javaTheHutts/Java-the-Hutts | /src/unittest/python/test_blur_manager.py | UTF-8 | 4,057 | 2.9375 | 3 | [
"BSD-3-Clause"
] | permissive | """
----------------------------------------------------------------------
Authors: Stephan Nell
----------------------------------------------------------------------
Unit tests for the Blur Manager
----------------------------------------------------------------------
"""
import pytest
import cv2
import os
from hutts_verification.image_preprocessing.blur_manager import BlurManager
# Shared fixture: a colour template image loaded once for all tests.
# NOTE(review): cv2.imread returns None (not an error) if the file is
# missing, which would make the apply() tests fail confusingly.
TEMPLATE_DIR = "{base_path}/../../main/python/hutts_verification/image_preprocessing/templates/".format(
    base_path=os.path.abspath(os.path.dirname(__file__)))
test_image_colour = cv2.imread(TEMPLATE_DIR + "temp_flag.jpg")
def test_thresholding_constructor_incorrect_type():
    """
    Test that the constructor rejects an invalid blur type (i.e. not a str).
    """
    with pytest.raises(TypeError):
        BlurManager(1, (1, 1))
def test_thresholding_constructor_correct_type():
    """
    Test that the constructor accepts a valid (string) blur type.
    """
    manager = BlurManager("gaussian", (1, 1))
    # NOTE(review): `is` relies on small-string interning; `==` would be safer.
    assert manager.blur_type is "gaussian"
    assert manager.kernel_size == (1, 1)
def test_apply():
    """
    Test apply function with Gaussian
    """
    # Gaussian kernels are passed as a list containing one (w, h) tuple.
    blur_manger = BlurManager("gaussian", [(7, 7)])
    blur_manger.apply(test_image_colour)
    assert blur_manger.blur_type is "gaussian"
    assert blur_manger.kernel_size == [(7, 7)]
def test_apply_2():
    """
    Test apply function with Median
    """
    # Median kernels are a list containing a single odd int aperture size.
    blur_manger = BlurManager("median", [3])
    blur_manger.apply(test_image_colour)
    assert blur_manger.blur_type is "median"
    assert blur_manger.kernel_size == [3]
def test_apply_3():
    """
    Test apply function with Normal
    """
    blur_manger = BlurManager("normal", [(3, 3)])
    blur_manger.apply(test_image_colour)
    assert blur_manger.blur_type is "normal"
    assert blur_manger.kernel_size == [(3, 3)]
def test_apply_4():
    """
    Test apply function with an unknown blur-type name.
    """
    blur_manger = BlurManager("Mango", [(3, 3)])
    with pytest.raises(NameError):
        blur_manger.apply(test_image_colour)
def test_apply_5():
    """
    Test apply function with Invalid Blur kernel for Normal blur (Not a valid list)
    """
    blur_manger = BlurManager("normal", (3, 3))
    with pytest.raises(TypeError):
        blur_manger.apply(test_image_colour)
def test_apply_6():
    """
    Test apply function with Invalid Blur kernel for Normal blur (Not valid length)
    """
    blur_manger = BlurManager("normal", [(3, 3, 6)])
    with pytest.raises(ValueError):
        blur_manger.apply(test_image_colour)
def test_apply_7():
    """
    Test apply function with Invalid Blur kernel for Normal blur (int instead of tuple)
    """
    blur_manger = BlurManager("normal", [3])
    with pytest.raises(TypeError):
        blur_manger.apply(test_image_colour)
def test_apply_8():
    """
    Test apply function with Invalid Blur kernel for Gaussian blur (Not a valid list)
    """
    blur_manger = BlurManager("gaussian", (3, 3))
    with pytest.raises(TypeError):
        blur_manger.apply(test_image_colour)
def test_apply_9():
    """
    Test apply function with Invalid Blur kernel for Gaussian blur (Not valid length)
    """
    blur_manger = BlurManager("gaussian", [(3, 3, 6)])
    with pytest.raises(ValueError):
        blur_manger.apply(test_image_colour)
def test_apply_10():
    """
    Test apply function with Invalid Blur kernel for Gaussian blur (int instead of tuple)
    """
    blur_manger = BlurManager("gaussian", [3])
    with pytest.raises(TypeError):
        blur_manger.apply(test_image_colour)
def test_apply_11():
    """
    Test apply function with Invalid Blur kernel for Median blur (Not a valid int)
    """
    blur_manger = BlurManager("median", ('A', 3))
    with pytest.raises(TypeError):
        blur_manger.apply(test_image_colour)
def test_apply_12():
    """
    Test apply function with Invalid Blur kernel for Median blur (Not of length 1)
    """
    blur_manger = BlurManager("median", (3, 3))
    with pytest.raises(ValueError):
        blur_manger.apply(test_image_colour)
| true |
0d2a0733deaf7b3bff8948a8712c40964e2e4262 | Python | SamHashemiCA/image-registration-cnn | /utils/dataset.py | UTF-8 | 597 | 2.5625 | 3 | [] | no_license | from torch.utils.data import Dataset
import os
class CTScanDataset(Dataset):
    '''
    __getitem__ returns the 3D numpy arrays pair (source,target)
    for the given index after applying the specified transforms.
    Waiting for data access approval from
    NCTN/NCORP Data Archive to implement the function.
    '''
    def __init__(self, data_dir, transform=None):
        # data_dir: directory holding the paired scan files;
        # transform: optional callable applied per item in __getitem__.
        super(CTScanDataset, self).__init__()
        self.dir = data_dir
        self.transform = transform
    def __len__(self):
        # Files are assumed to be stored in (source, target) pairs,
        # hence the halving -- TODO confirm once data access is granted.
        return len(os.listdir(self.dir))//2
    def __getitem__(self, index):
        # Not implemented yet -- see the class docstring.
        pass
| true |
d7b01c27e12d35434e8c0063f42294aac8abe2e1 | Python | Amit006/Python-competitive | /practice/dog.py | UTF-8 | 218 | 2.671875 | 3 | [] | no_license | from pet import pet;
class dog(pet):
    """A pet of kind "dog" that records whether it chases cats."""
    def __init__(self,name,chases_cats):
        # Delegate name/kind initialisation to the pet base class.
        pet.__init__(self,name,"dog")
        self.chases_cats=chases_cats
    def chasesCats(self):
        # Accessor for the flag captured at construction time.
        return self.chases_cats
| true |
cfa5665ee8b91a455a1e279a6fb8423124c8030d | Python | rhosse/Team-Lyrical | /data_lemmatization.py | UTF-8 | 2,071 | 3.140625 | 3 | [] | no_license | '''
data_lemmatization.py
Data lemmatization generator
keeps only noun, adj, verb, adverb
1. read in Data.csv
2. tokenize using gensim
3. run function to lemmatize using SpaCy
4. send lemmatization to output for input to topic modeling code (e.g., Script_TM_30.py)
'''
import numpy as np
import pandas as pd
import re, nltk, spacy, gensim
# Sklearn
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from pprint import pprint
# Plotting tools
import pyLDAvis
import pyLDAvis.sklearn
import matplotlib.pyplot as plt
#%matplotlib inline
# Import Dataset
df = pd.read_csv('/home/ubuntu/Data.csv', delimiter = ',')
#df = df.sample(n=500000)
print(df.shape)
# BUG FIX: DataFrame.dropna() returns a new frame; the original discarded
# its result, so rows with NaN were never actually removed.
df = df.dropna()
#tokenize
def sent_to_words(sentences):
for sentence in sentences:
yield(gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations
data_words = list(sent_to_words(data))
print(data_words[:2])
#Lemmatization
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
    """https://spacy.io/api/annotation"""
    # Re-joins each token list so spaCy can parse it, then keeps the lemmas
    # of the allowed POS tags only ('-PRON-' placeholders become '').
    # Relies on the module-level `nlp` pipeline loaded further below.
    # NOTE: the mutable default list is safe here (never mutated), but a
    # tuple would be more defensive.
    texts_out = []
    for sent in texts:
        doc = nlp(" ".join(sent))
        texts_out.append(" ".join([token.lemma_ if token.lemma_ not in ['-PRON-'] else '' for token in doc if token.pos_ in allowed_postags]))
    return texts_out
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# Run in terminal: python3 -m spacy download en
#!python spacy download en_core_web_sm
#in cmd line type python -m spacy.en.download all
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# Do lemmatization keeping only Noun, Adj, Verb, Adverb
data_lemmatized = lemmatization(data_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
# Persist the lemmatized corpus so the topic-modelling stage can load it.
import pickle
with open('data_lemmatized_output2','wb') as fp:
    pickle.dump(data_lemmatized, fp)
print(data_lemmatized[:2])
| true |
6361cfec83834818df703d6824955eff69ebcb8c | Python | frankbreetz/RealEstateScraping | /Scrape.py | UTF-8 | 2,547 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | import requests
from bs4 import BeautifulSoup
import pandas as pd

# Search results to enrich; one parcel per row.
# BUG FIX (idiom): renamed from `input`, which shadowed the builtin.
search_results = pd.read_csv('search_results.csv')

# One output column per search-result field plus each value cell scraped
# from the auditor "summary" tables (tables hold alternating label/value
# cells; only the values are appended, in page order).
df = pd.DataFrame(columns=[
    'Parcel Number', 'Name', 'Address', 'Sale Date', 'Sale Price',
    "Year Built", "Total Rooms", "# Bedrooms", "# Full Bathrooms",
    "# Half Bathrooms", "Last Sale Date", "Last Sale Amount",
    "Conveyance Number", "Deed Type", "Deed Number", "# of Parcels Sold",
    "Acreage", "Board of Revision", "Rental Registration", "Homestead",
    "Owner Occupancy Credit", "Foreclosure", "Special Assessments",
    "Market Land Value", "CAUV Value", "Market Improvement Value",
    "Market Total Value", "TIF Value", "Abated Value", "Exempt Value",
    "Taxes Paid", "Tax as % of Total Value"])

entry = 0
for index, row in search_results.iterrows():
    # Seed the output record with the five search-result fields.
    row_list = [row['Parcel Number'], row['Name'], row['Address'],
                row['Sale Date'], row['Sale Price']]
    # Parcel pages are keyed by the parcel number with dashes removed.
    URL = f'https://wedge1.hcauditor.org/view/re/{row["Parcel Number"].replace("-","")}/2019/summary'
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    results = soup.find_all('table', class_="datagrid ui-widget summary")
    # BUG FIX (idiom): the inner loop previously rebound `row`, shadowing the
    # DataFrame row above; renamed to `tr`. Also removed the dead `x = 1`
    # store at the end of each iteration (the counter is reset here anyway).
    cell_index = 0
    for result in results:
        for tr in result.find_all('tr'):
            for cell in tr.find_all('td'):
                cell_index += 1
                # Cells alternate label/value; keep every second (the value).
                if cell_index % 2 == 0:
                    row_list.append(cell.text)
    df.loc[entry] = row_list
    entry += 1
    print(entry)
df.to_csv('result.csv')
| true |
3196906c610ece84990ed1ab7772ddea2570c8f7 | Python | Manisha3112/Python-programs | /overloading.py | UTF-8 | 674 | 3.890625 | 4 | [] | no_license | class Welcome:
def wish(self, user_name=None):
if user_name is not None:
print('Hi ' + user_name)
else:
print('Hi')
def product(self,a=None,b=None):
if a!=None and b!=None:
print("Product= ",(a*b))
elif a!=None:
num=int(input("please enter b value = "))
print("Product=",(a*num))
else:
num1=int(input("please enter a value = "))
num2=int(input("please enter b value = "))
print("Product=",(num1*num2))
# Demo calls. NOTE: the zero- and one-argument product() variants below
# prompt for the missing operand(s) on stdin.
obj =Welcome()
obj.wish()
obj.wish('Manisha')
obj.product(10,20)
obj.product()
obj.product(10)
| true |
76e33f74a5a163aefc9f6a933423c6eb35c83e2c | Python | krishna07210/com-python-core-repo | /src/main/py/04-Loops/For-Loop.py | UTF-8 | 298 | 3.109375 | 3 | [] | no_license | #!/usr/bin/python3
def main():
    """Demonstrate for-loops over file lines, a list and a string.

    Reads 'lines.txt' from the current directory and echoes it, then prints
    the digits 1-6 on one line and the characters of 'string' one per line.
    """
    # BUG FIX: the original leaked the handle returned by open();
    # `with` guarantees it is closed even if printing raises.
    with open('lines.txt') as fh:
        for line in fh.readlines():
            print(line, end='')
    print('\n')
    for item in [1, 2, 3, 4, 5, 6]:
        print(item, end='')
    print('\n')
    for ch in 'string':
        print(ch)
if __name__ == "__main__": main()
| true |
862b4236619e88d0a6f1087198bb334e97233473 | Python | webdagger/rfrp | /BE/Face/face/face.py | UTF-8 | 1,997 | 2.59375 | 3 | [] | no_license | import os
import pickle
import sys
import tempfile
from exceptions import ImageManipulationError, FaceRecognitionExeption
import face_encodings
import face_locations
import face_recognition
import numpy as np
from image_manipulation import oriented_thumbnail
from PIL import Image
# Load previously computed face encodings (name -> 128-d encoding).
try:
    with open("dataset_faces.dat", "rb") as f:
        all_face_encodings = pickle.load(f)
except FileNotFoundError:
    all_face_encodings = {}
# Parallel lists: names[i] corresponds to encodings[i].
names = list(all_face_encodings.keys())
encodings = np.array(list(all_face_encodings.values()))
# Recursively collect every .jpg under unknown/.
all_unknown_images = []
directory = "unknown/"
for root, dirs, files in os.walk(directory):
    for file in files:
        if file.lower().endswith(".jpg".lower()):
            all_unknown_images.append(os.path.join(root, file))
# BUG FIX: initialise so the final print cannot raise NameError when no
# image is processed successfully.
names_with_result = []
for image in all_unknown_images:
    try:
        im = oriented_thumbnail(Image.open(image))
        # The recognition helpers take a file path, so the thumbnail is
        # written to a temp file for the duration of the `with` block.
        with tempfile.NamedTemporaryFile(mode="wb", suffix='.jpg', delete=True, prefix=os.path.basename(__file__)) as tf:
            im.save(tf, im.format)
            unknown_face_locations = face_locations.face_locations(tf.name, number_of_times_to_upsample=2, model="hog")
            print(tf.name)
            unknown_face = face_encodings.FaceEncodings(
                tf.name, known_face_locations=unknown_face_locations
            ).get_encodings()
            result = face_recognition.compare_faces(encodings, unknown_face)
            print(result)
            names_with_result = list(zip(names, result))
    except ImageManipulationError as e:
        print(e)
        print(image)
        print('odood')
    except FaceRecognitionExeption as e:
        print(e)
        print(image)
        print('ljdjdjdj')
    except Exception as e:
        print(image)
        print(e)
        #sys.exit(0)
    # BUG FIX: removed `finally: tf.close()` -- the `with` block already
    # closes (and deletes) the temp file, and `tf` is undefined when
    # Image.open fails, turning any early error into a masking NameError.
# Result for the *last* successfully processed image only.
print(names_with_result)
906a337d1bd5a427696ceadba3e27e4d3076f101 | Python | vanessmeyer/quizproject | /quiz/views.py | UTF-8 | 4,342 | 3.15625 | 3 | [] | no_license | from django.shortcuts import render
#This import pulls in Quiz models so we can connect views to database data
from quiz.models import Quiz
from django.shortcuts import redirect
# Create your views here. These are view functions.
def startpage(request):
    """Render the landing page listing every available quiz."""
    context = {
        "quizzes": Quiz.objects.all(),
    }
    return render(request, "startpage.html", context)
# Why the quiz_number -1? Because quizzes is a list and therefore quiz #1 is the first one in the list (i.e. place 0)
def quiz(request, quiz_number):
    """Render the overview page for one quiz, looked up by its number."""
    context = {
        "quiz": Quiz.objects.get(quiz_number=quiz_number),
        "quiz_number": quiz_number,
    }
    return render(request, "quiz.html", context)
#The question view definition needs to first identify which quiz these questiosn belong to so it can use the quiz_number that with the urls.py to question view. We use quiz.objects.get to get out the quiz from the database.
#Then we save the quiz as a variable that we call quiz.
#Then we need to get out all the question from the list to a list by witting quiz.question.all().This will get out all the question that belong to that specific quiz.
# Then we use question_number, the number that comes with the question view, to get out the right "position" from the list of questions (i.e. each question has its own url).
def question(request, quiz_number, question_number):
    """Render a single question of a quiz (question_number is 1-based)."""
    quiz = Quiz.objects.get(quiz_number=quiz_number)
    questions = quiz.questions.all()
    # URLs use 1-based question numbers; the queryset is 0-indexed.
    question = questions[question_number - 1]
    context = {
        "question_number": question_number,
        "question": question.question,
        "answer1": question.answer1,
        "answer2": question.answer2,
        "answer3": question.answer3,
        "quiz": quiz,
        "quiz_number": quiz_number,
    }
    return render(request, "question.html", context)
# Vi ska räkna ut hur många rätt användaren hade genom att hämta ut alla frågor och deras rätta svar, och jämföra dem med de som finns sparade i sessionen. Allt detta gör vi genom att ändra i quizprojejct/quiz/views.py i completed-vyn:
def completed(request, quiz_number):
    """Score the quiz by comparing session-stored answers with the correct ones."""
    quiz = Quiz.objects.get(quiz_number=quiz_number)
    questions = list(quiz.questions.all())
    # Answers live in the session under the quiz number as a *string* key
    # (session data is JSON-serialised, which stringifies dict keys).
    saved_answers = request.session.get(str(quiz_number), {})
    num_correct_answers = 0
    for question_number, answer in saved_answers.items():
        # Keys may come back as strings after session round-trips, and
        # question numbers are 1-based -- hence int() and -1.
        correct_answer = questions[int(question_number) -1].correct
        if correct_answer == answer:
            num_correct_answers = num_correct_answers + 1
    num_questions = quiz.questions.count()
    context = {
        "correct": num_correct_answers,
        "total": num_questions,
    }
    return render(request, "completed.html", context)
# Here we define a new view (i.e. logic) for when a user answers to the quiz. This logic defines what we will do with those answers.
# The request.POST gets out the choice the user made to the question.
# Then we get out the dictionary with all the user's answers so far and save the users latest answer to that list (i.e. saved_answers).
# We then save thier latest answer and connect to the question they answered (i.e. saved_answers connects to question_number)
# Then we save the whole list with the questions the user has answered.
# Finally, we redirect the user to the next question by taking the question we are on (i.e. question_page) and adding 1 (i.e +1) to the question_number
def answer(request, quiz_number, question_number):
    """Persist the submitted answer in the session, then redirect to the
    next question, or to the completed page after the last one."""
    answer = request.POST["answer"]
    saved_answers = request.session.get(str(quiz_number), {})
    saved_answers[question_number] = int(answer)
    # BUG FIX: store under the same *string* key used for the lookup above;
    # the original wrote request.session[quiz_number], so when the URL
    # converter yields an int the answers landed under a different session
    # key than the str(quiz_number) used by completed() and the get() above.
    request.session[str(quiz_number)] = saved_answers
    quiz = Quiz.objects.get(quiz_number=quiz_number)
    num_questions = quiz.questions.count()
    if num_questions <= question_number:
        return redirect("completed_page", quiz_number)
    else:
        return redirect("question_page", quiz_number, question_number + 1)
1f6d5c4ddf4bee22da572d1d29605502b8fcff46 | Python | MarcioPorto/rlib | /rlib/algorithms/maddpg/agent.py | UTF-8 | 10,969 | 2.6875 | 3 | [
"MIT"
] | permissive | import copy
import os
import random
from collections import namedtuple, deque
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from rlib.algorithms.base import Agent
from rlib.algorithms.maddpg.model import Actor, Critic
from rlib.shared.noise import OUNoise
from rlib.shared.replay_buffer import ReplayBuffer
from rlib.shared.utils import hard_update, soft_update
class MADDPGAgent(Agent):
"""MADDPG implementation."""
REQUIRED_HYPERPARAMETERS = {
"buffer_size": int(1e6),
"batch_size": 64,
"gamma": 0.99,
"tau": 1e-3,
"learning_rate_actor": 1e-4,
"learning_rate_critic": 1e-3,
"weight_decay": 1e-2,
"learn_every": 4,
"hard_update_every": 5
}
def __init__(self,
state_size,
action_size,
num_agents,
agents=None,
new_hyperparameters=None,
seed=0,
device="cpu",
model_output_dir=None,
enable_logger=False,
logger_path=None,
logger_comment=None,
opt_soft_update=False):
"""Initialize a MADDPGAgent wrapper.
Args:
state_size (int): dimension of each state
action_size (int): dimension of each action
num_agents (int): the number of agents in the environment
"""
raise NotImplementedError()
super(DDPG, self).__init__(
new_hyperparameters=new_hyperparameters,
enable_logger=enable_logger,
logger_path=logger_path,
logger_comment=logger_comment
)
self.state_size = state_size
self.action_size = action_size
self.num_agents = num_agents
self.seed = random.seed(seed)
self.device = device
self.time_step = 0
if agents:
self.agents = agents
else:
self.agents = [DDPGAgent(state_size, action_size, agent_id=i+1, handler=self) for i in range(num_agents)]
# Replay memory
self.memory = ReplayBuffer(self.BUFFER_SIZE, self.BATCH_SIZE, self.device, seed)
# User options
self.opt_soft_update = opt_soft_update
self.model_output_dir = model_output_dir
def reset(self):
"""Resets OU Noise for each agent."""
for agent in self.agents:
agent.reset()
def act(self, observations, add_noise=False, logger=None):
"""Picks an action for each agent given their individual observations
and the current policy."""
actions = []
for agent, observation in zip(self.agents, observations):
action = agent.act(observation, add_noise=add_noise)
actions.append(action)
return np.array(actions)
def step(self, observations, actions, rewards, next_observations, dones, logger=None):
"""Save experience in replay memory, and use random sample from buffer to learn."""
observations = observations.reshape(1, -1)
actions = actions.reshape(1, -1)
next_observations = next_observations.reshape(1, -1)
self.memory.add(observations, actions, rewards, next_observations, dones)
# Learn every `learn_every` time steps
self.time_step += 1
if self.time_step % self.LEARN_EVERY == 0:
if len(self.memory) > self.BATCH_SIZE:
for a_i, agent in enumerate(self.agents):
experiences = self.memory.sample()
self.learn(experiences, a_i, logger=logger)
def learn(self, experiences, agent_number, logger=None):
"""Helper to pick actions from each agent for the `experiences` tuple that
will be used to update the weights to agent with ID = `agent_number`.
Each observation in the `experiences` tuple contains observations from each
agent, so before using the tuple of update the weights of an agent, we need
all agents to contribute in generating `next_actions` and `actions_pred`.
This happens because the critic will take as its input the combined
observations and actions from all agents."""
next_actions = []
actions_pred = []
states, _, _, next_states, _ = experiences
next_states = next_states.reshape(-1, self.num_agents, self.state_size)
states = states.reshape(-1, self.num_agents, self.state_size)
for a_i, agent in enumerate(self.agents):
agent_id_tensor = self._get_agent_number(a_i)
state = states.index_select(1, agent_id_tensor).squeeze(1)
next_state = next_states.index_select(1, agent_id_tensor).squeeze(1)
next_actions.append(agent.actor_target(next_state))
actions_pred.append(agent.actor_local(state))
next_actions = torch.cat(next_actions, dim=1).to(device)
actions_pred = torch.cat(actions_pred, dim=1).to(device)
agent = self.agents[agent_number]
agent.learn(experiences, next_actions, actions_pred, logger=logger)
def _get_agent_number(self, i):
"""Helper to get an agent's number as a Torch tensor."""
return torch.tensor([i]).to(device)
class DDPGAgent(Agent):
"""Interacts with and learns from the environment."""
def __init__(self,
state_size,
action_size,
agent_id,
handler,
actor_local=None,
actor_target=None,
actor_optimizer=None,
critic_local=None,
critic_target=None,
critic_optimizer=None,
seed=0,
device="cpu"):
"""Initialize a DDPGAgent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
agent_id (int): identifier for this agent
"""
self.state_size = state_size
self.action_size = action_size
self.agent_id = agent_id
self.seed = random.seed(seed)
self.device = device
# Actor Network (w/ Target Network)
self.actor_local = actor_local if actor_local else Actor(state_size, action_size, seed).to(device)
self.actor_target = actor_target if actor_target else Actor(state_size, action_size, seed).to(device)
self.actor_optimizer = actor_optimizer if actor_optimizer else optim.Adam(self.actor_local.parameters(), lr=self.handler.LEARNING_RATE_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = critic_local if critic_local else Critic(state_size, action_size, seed).to(device)
self.critic_target = critic_target if critic_target else Critic(state_size, action_size, seed).to(device)
self.critic_optimizer = critic_optimizer if critic_optimizer else optim.Adam(self.critic_local.parameters(), lr=self.handler.LEARNING_RATE_CRITIC, weight_decay=self.handler.WEIGHT_DECAY)
self.noise = OUNoise(action_size)
self.noise_amplification = self.handler.NOISE_AMPLIFICATION
self.noise_amplification_decay = self.handler.NOISE_AMPLIFICATION_DECAY
# Ensure local and target networks have the same initial weight
hard_update(self.actor_local, self.actor_target)
hard_update(self.critic_local, self.critic_target)
def __str__(self):
"""Helper to print network architecture for this agent's actors and critics."""
print("Agent #{}".format(self.agent_id))
print("Actor (Local):")
print(self.actor_local)
print("Actor (Target):")
print(self.actor_target)
print("Critic (Local):")
print(self.critic_local)
print("Critic (Target):")
print(self.critic_target)
if self.agent_id != NUM_AGENTS:
print("_______________________________________________________________")
def act(self, state, add_noise=False, logger=None):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
self._decay_noise_amplification()
return np.clip(action, -1, 1)
def learn(self, experiences, next_actions, actions_pred, logger=None):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(next_state) -> action
critic_target(next_state, next_action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
next_actions (list): next actions computed from each agent
actions_pred (list): prediction for actions for current states from each agent
"""
states, actions, rewards, next_states, dones = experiences
agent_id_tensor = torch.tensor([self.agent_id - 1]).to(device)
### Update critic
self.critic_optimizer.zero_grad()
Q_targets_next = self.critic_target(next_states, next_actions)
Q_targets = rewards.index_select(1, agent_id_tensor) + (self.handler.GAMMA * Q_targets_next * (1 - dones.index_select(1, agent_id_tensor)))
Q_expected = self.critic_local(states, actions)
# Minimize the loss
critic_loss = F.mse_loss(Q_expected, Q_targets)
critic_loss.backward()
self.critic_optimizer.step()
### Update actor
self.actor_optimizer.zero_grad()
# Minimize the loss
actor_loss = -self.critic_local(states, actions_pred).mean()
actor_loss.backward()
self.actor_optimizer.step()
### Update target networks
if self.opt_soft_update:
soft_update(self.actor_local, self.actor_target, self.handler.TAU)
soft_update(self.critic_local, self.critic_target, self.handler.TAU)
elif self.time_step % self.handler.HARD_UPDATE_EVERY == 0:
hard_update(self.actor_local, self.actor_target)
hard_update(self.critic_local, self.critic_target)
if logger:
actor_loss = actor_loss.cpu().detach().item()
critic_loss = critic_loss.cpu().detach().item()
logger.add_scalars(
'loss', {
"actor loss": actor_loss,
"critic loss": critic_loss,
}, self.time_step
)
def _decay_noise_amplification(self):
"""Helper for decaying exploration noise amplification."""
self.noise_amplification *= self.noise_amplification_decay
| true |
686d24820e3134cac7a74bf9859e80e5521405c6 | Python | Le-Bot/cerebro | /cerebro/neuron/manager.py | UTF-8 | 1,233 | 2.671875 | 3 | [
"MIT"
] | permissive | import abc
import constants as const
class AbstractManager(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_all(self):
raise NotImplementedError()
@abc.abstractmethod
def add(self, obj):
raise NotImplementedError()
@abc.abstractmethod
def is_valid(self, neuron):
raise NotImplementedError()
@abc.abstractmethod
def get(self, keyword):
raise NotImplementedError()
@abc.abstractmethod
def is_exists(self, keyword):
raise NotImplementedError()
@abc.abstractmethod
def execute(self, keyword, args=None):
raise NotImplementedError()
class NeuronsManager(AbstractManager):
def __init__(self):
self.neurons = {}
def get_all(self):
return self.neurons
def add(self, obj):
self.neurons.update(obj)
def is_valid(self, neuron):
return hasattr(neuron, const.STR_KEYWORDS)
def get(self, keyword):
return self.neurons.get(keyword)
def is_exists(self, keyword):
return self.neurons.has_key(keyword)
def execute(self, keyword, args=None):
return self.get(keyword)(args) if self.is_exists(keyword) else const.STR_DEFAULT_RESPONSE
| true |
7c33994fd688d2659d19ca672c12940695f79826 | Python | LinSiCong/smallTools | /dealImage/PngToJpg.py | UTF-8 | 741 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018/9/20
# @Time : 13:14
# @Author : LinSicong
# @File : PngToJpg.py
"""
将同目录下所有png文件保存为jpg文件
"""
import os
import cv2
if __name__ == '__main__':
work_dir = os.getcwd()
convert_dir = os.path.join(work_dir, "convertFile")
if not os.path.exists(convert_dir):
os.makedirs(convert_dir)
file_list = os.listdir(work_dir)
cot = 0
for name in file_list:
if os.path.splitext(name)[-1] == ".png":
img = cv2.imread(os.path.join(work_dir, name))
cv2.imwrite(os.path.join(convert_dir, name.replace(".png",".jpg")), img)
cot = cot + 1
print("Convert " + str(cot) + " images.")
| true |
b95dc321385fa68846c6b58bad3bb5bf6e668c6f | Python | gensyu/Mirai-con | /debug_comm.py | UTF-8 | 6,741 | 2.734375 | 3 | [] | no_license | import serial
import time
import construct as cs
from enum import Enum
RES_SIZE = 26 #byte
class ResponceError(Exception):
pass
class MOTORDIR(Enum):
CW = 0x01
CCW = 0x00
class DRIVINGMODE(Enum):
Disable: 0
LINETRACE: 1
TOF: 2
sendfmt = cs.BitStruct(
"header" / cs.Bytewise(cs.Const(b"\xAA")),
"rw" / cs.BitsInteger(1),
"addr" / cs.BitsInteger(7),
"wdata" / cs.BitsInteger(16),
)
recvfmt = cs.BitStruct(
"header" / cs.Bytewise(cs.Const(b'\xAA')),
"pad" / cs.Padding(1),
"addr" / cs.BitsInteger(7),
"rdata" / cs.BitsInteger(16),
"dammy" / cs.Bytewise(cs.Bytes(1)),
"tof_f" / cs.BitsInteger(16),
"tof_r" / cs.BitsInteger(16),
"tof_l" / cs.BitsInteger(16),
"tof_b" / cs.BitsInteger(16),
"mag_x" / cs.BitsInteger(16, signed=True),
"mag_y" / cs.BitsInteger(16, signed=True),
"mag_z" / cs.BitsInteger(16, signed=True),
"acc_x" / cs.BitsInteger(16, signed=True),
"acc_y" / cs.BitsInteger(16, signed=True),
"acc_z" / cs.BitsInteger(16, signed=True),
# "reserved" / cs.Bytewise(cs.Const(b'\x00\x00')),
"sum" / cs.BitsInteger(8)
)
def cal_checksum(data):
c_sum = 0
for byte_data in data:
c_sum = (c_sum + byte_data) % 256
return c_sum.to_bytes(1, 'big')
class CarDevice:
def __init__(self, port="/dev/ttyS0", baudrate=500000) -> None:
self._serial = serial.Serial(port=port, baudrate=baudrate)
self.software_reset()
def _send_frame(self, rw:int, addr:int, wdata:int,):
"""Send UART Frame - MOSI(Raspi→Arduino)
Args:
rw (int): 0x1: read, 0x0: write
addr (int): Register Address[7bit]. ex 0x00
wdata (int): Write Data[2byte]. 0x0000
Returns:
[int]: Number of bytes written.
"""
sendframe = sendfmt.build(dict(rw=rw, addr=addr, wdata=wdata))
sendframe = sendframe + cal_checksum(sendframe)
return self._serial.write(sendframe)
def _recv_frame(self):
"""Recive UART Frame - MISO(Arduino→Raspi)
Returns:
[dict like]: Responce Data
"""
cnt=0
recvframe = b""
while True:
recvframe = recvframe + self._serial.read(RES_SIZE)
if len(recvframe) >= 26:
break
cnt = cnt + 1
if cnt >= 10:
raise ResponceError("can't get UART Responce")
# print(recvframe)
return recvfmt.parse(recvframe)
def _query_frame(self, rw:int, addr:int, wdata:int, wait_time=20e-3):
"""Send UART Frame, after that recive UART frame
Args:
rw (int): 0x1: read, 0x0: write
addr (int): Register Address[7bit]. ex 0x00
wdata (int): Write Data[2byte]. 0x0000
wait_time ([float], optional): times sec. Defaults to 20e-3.
Returns:
[dict like]: Responce Data
"""
self._send_frame(rw, addr, wdata)
time.sleep(wait_time)
return self._recv_frame()
def get_sensordata(self) -> dict:
"""センサデータ取得
Returns:
[dict like]: Responce Data
"""
return self._query_frame(rw=0x1, addr=0x00, wdata=0x0000)
def software_reset(self):
"""ソフトウェアリセット実行 0x00
"""
self._send_frame(rw=0x0, addr=0x00, wdata=0x5A5A)
time.sleep(2)
self._serial.read_all()
def vsc3_enable(self):
"""リモコン操縦有効
"""
self._send_frame(rw=0x0, addr=0x01, wdata=0x01)
def vsc3_disable(self):
"""リモコン操縦無効
"""
self._send_frame(rw=0x0, addr=0x01, wdata=0x00)
def left_motor(self, direcion: int, duty: float):
"""左モータ制御 0x10
Parameters
----------
direcion : int
CW: 1, CCW: 0
duty : float
PWM duty
"""
duty_int = int(duty * (2**8-1))
if direcion == MOTORDIR.CW:
wdata = 0x0100 + duty_int
elif direcion == MOTORDIR.CCW:
wdata = 0x0000 + duty_int
else:
raise
self._send_frame(rw=0x0, addr=0x10, wdata=wdata)
def right_motor(self, direcion: int, duty: float):
"""右モータ制御 0x11
Parameters
----------
direcion : int
CW: 1, CCW: 0
duty : float
PWM duty
"""
duty_int = int(duty * (2**8-1))
if direcion == MOTORDIR.CW:
wdata = 0x0100 + duty_int
elif direcion == MOTORDIR.CCW:
wdata = 0x0000 + duty_int
else:
raise
self._send_frame(rw=0x0, addr=0x11, wdata=wdata)
def change_adm_mode(self, mode: int):
"""Auto Line Trace モード変更 0x20
Parameters
----------
mode : int
Disable: 0
Use Line sensor: 1
Use TOF sensor: 2
"""
wdata = [0x00, 0x10, 0x20][mode]
self._send_frame(rw=0x0, addr=0x20, wdata=wdata)
def linetrace_max_speed(self, duty: float):
"""LineTrace モード変更 0x21
Parameters
----------
duty : float
"""
duty_int = int(duty * (2^8-1))
self._send_frame(rw=0x0, addr=0x21, wdata=duty_int)
def change_auto_brake(self, mode: int, distance: int=50):
"""自動ブレーキモード変更 0x22
Parameters
----------
mode : int
Disable: 0, enable (by Front TOF): 1
distance : int, optional
threshold distance [mm], by default 50 mm
"""
if mode == 0:
wdata = 0x0000
else:
wdata = distance
self._send_frame(rw=0x0, addr=0x21, wdata=wdata)
def change_linetrace_threshold(self, pattern: int):
"""ADM Linetrace しきい値設定 0x23
Parameters
----------
pattern : int
"""
pass
def change_straight_tof_threshold(
self, left_distance: float, right_distance: float
):
"""ADM TOFセンサ しきい値設定 0x24, 0x25
Parameters
----------
left_distance [mm]: int
right_distance [mm]: int
"""
self._send_frame(rw=0x0, addr=0x24, wdata=left_distance)
self._send_frame(rw=0x0, addr=0x25, wdata=right_distance)
def change_cam_angle(self, angle: int=0):
"""カメラアングル変更 0x30
Parameters
----------
angle -90 to 90 [deg] : int
"""
wdata = int(angle * 127/90) + 128
self._send_frame(rw=0x0, addr=0x30, wdata=wdata)
| true |
7e0d169aa60813c22e164af1ee2f429d081a2b3b | Python | izgebayyurt/asteroids | /ship.py | UTF-8 | 7,488 | 3.28125 | 3 | [] | no_license | # Template by Bruce A Maxwell
# Fall 2018
# CS 152 Project 11
#
# Make an Asteroids-like ship move around
#
# slightly modified by Eric Aaron, Fall 2018
#
# import useful packages
import math
import time
import graphics as gr
import physics_objects as pho
# make a ship object, treat it as a ball
# but it needs to be able to rotate
# should probably have a parent rotator class that does most of this for you
class Ship(pho.Thing):
def __init__(self, win, x0=0, y0=0, mass=1, radius=3):
pho.Thing.__init__(self, win, "ball")
# could use pho.Thing.__init__(self, win, "ball", mass=mass, radius=radius) instead, if Thing.__init__ has those defaults
self.setMass(mass)
self.setRadius(radius)
self.setPosition([x0, y0])
# anchor point is by default the center of the ship/circle so we don't need it
self.angle = 0.
self.dangle = 0.
# visualization properties
# This is a two-part visualization
# the ship is a triangle
self.bodypts = [ (radius, 0),
(- radius*0.5, 1.732*radius*0.5),
(- radius*0.5, - 1.732*radius*0.5) ]
# the exhaust is another triangle
self.flamepts = [ (- radius*0.5, 0.5*radius),
(- radius*0.5, - 0.5*radius),
(- radius*1.732, 0) ]
self.corners = [[-1000,1000],[-1000,999],[-1000,998]]
self.scale = 10.
self.vis = []
self.drawn = False
# these are for handling the flicker of the exhaust
self.flickertime = 6
self.flicker = False
self.countdown = 0
# colors for flickers
self.flickerColors = ["yellow","orange"]
#########
# these functions are identical to the rotating block
# a smart coder would make a parent rotator class
# draw the object into the window
def draw(self):
for item in self.vis:
item.undraw()
self.render()
for item in self.vis:
item.draw(self.win)
self.drawn = True
# undraw the object from the window
def undraw(self):
for item in self.vis:
item.undraw()
self.drawn = False
# get and set the angle of the object
# these are unique to rotators
def getAngle(self):
return self.angle
# setAngle has to update the visualization
def setAngle(self, a):
self.angle = a
if self.drawn:
self.draw()
# get and set rotational velocity
def setRotVelocity(self, rv):
self.dangle = rv # degrees per second
def getRotVelocity(self):
return self.dangle
def getBodyPoints(self):
return self.corners[:]
def setBodyPoints(self,s):
self.corners = s
def getPosition(self):
return self.pos[:]
# incrementally rotate by da (in degrees)
# has to update the visualization
def rotate(self, da):
self.angle += da
if self.drawn:
self.draw()
# special ship methods
def setFlickerOn(self, countdown = 20):
self.flicker = True
self.countdown = countdown
def setFlickerOff(self):
self.countdown = 0
self.flicker = False
# simplified render function since the ship always rotates around its center
def render(self):
# get the cos and sin of the current orientation
theta = math.pi * self.angle / 180.
cth = math.cos(theta)
sth = math.sin(theta)
# rotate each point around the object's center
pts = []
cornerpts = []
for vertex in self.bodypts + self.flamepts:
# move the object's center to 0, 0, which it is already in model coordinates
xt = vertex[0]
yt = vertex[1]
# rotate the vertex by theta around the Z axis
xtt = cth*xt - sth*yt
ytt = sth*xt + cth*yt
# move the object's center back to its original location
xf = xtt + self.pos[0]
yf = ytt + self.pos[1]
# create a point with the screen space coordinates
pts.append( gr.Point(self.scale * xf, self.win.getHeight() - self.scale * yf) )
cornerpts.append([xf,yf])
# make the two objects
self.corners = cornerpts[:3]
self.vis = [ gr.Polygon( pts[:3] ), gr.Polygon( pts[3:] ) ]
self.vis[0].setFill("silver")
self.vis[0].setOutline("dark red")
self.vis[1].setOutline(self.flickerColors[0])
def setFlickerColor(self,colors): # sets the flicker colors to colors
self.flickerColors = colors
def lifeModifier(self): # undraws the flames for life graphics
try:
self.vis[1].undraw()
except IndexError:
return
# update the various state variables
# add a unique flicker touch
def update(self, dt):
# update the angle based on rotational velocity
da = self.dangle * dt
if da != 0.0: # don't bother updating if we don't have to
self.rotate( da )
else: # but re-draw the ship anyway
if self.drawn:
self.draw()
# flicker the flames
# this should be a field of the object
if self.flicker and self.countdown > 0:
if self.countdown % self.flickertime < self.flickertime/2:
self.vis[1].setFill(self.flickerColors[0])
else:
self.vis[1].setFill(self.flickerColors[1])
self.countdown -= 1
else:
self.vis[1].undraw()
# call the parent update for the rest of it
pho.Thing.update(self, dt)
def main():
# make a window
win = gr.GraphWin('Ship', 500, 500, False)
# make ship, draw it, wait for a mouse click
ship = Ship(win, 25, 25)
ship.draw()
gamma = 10 # incrementation of rotational velocity
delta = 1 # incrementation of acceleration
winWidth = 50 # width of the window (scale is 10 so its 500/10)
winHeight = 50 # height of the window (scale is 10 so its 500/10)
dt = 0.01
frame = 0
key = ""
while key != "q":
key = win.checkKey()
ship.update(dt)
if key == "Left":
ship.setRotVelocity(ship.getRotVelocity()+gamma)
ship.setFlickerOn()
elif key == "Right":
ship.setRotVelocity(ship.getRotVelocity()-gamma)
ship.setFlickerOn()
elif key == "space":
a = ship.getAngle()
theta = a * math.pi / 180
v = ship.getVelocity()
v_new_x = v[0] + math.cos(theta) * delta
v_new_y = v[1] + math.sin(theta) * delta
ship.setVelocity([v_new_x,v_new_y])
ship.setFlickerOn()
moveit = False
p = ship.getPosition()
if p[0] < 0:
p[0] += win.getWidth()/10
moveit = True
elif p[0] > win.getWidth()/10:
p[0] -= win.getWidth()/10
moveit = True
if p[1] < 0:
p[1] += win.getHeight()/10
moveit = True
elif p[1] > win.getHeight()/10:
p[1] -= win.getHeight()/10
moveit = True
if moveit:
ship.setPosition(p)
moveit = False
frame += 1
if frame % 10 == 0:
win.update()
time.sleep(dt*0.5)
# all done
win.close()
if __name__ == "__main__":
main()
| true |
629f37285b5ea58b2ded9c4c670a11813975afec | Python | junzhang19/CS7641 | /HW3/DimensionReduction.py | UTF-8 | 6,504 | 2.609375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import scipy.sparse as sps
import matplotlib.pyplot as plt
from collections import defaultdict
from itertools import product
from matplotlib.ticker import MaxNLocator
from scipy.linalg import pinv
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
from sklearn.random_projection import SparseRandomProjection
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics.pairwise import pairwise_distances
rand_state=20
class PCA_DR():
def __init__(self, data, target, dim_param, title, plot=False):
self.data = data
self.target = target
self.dim_param = dim_param
self.gen_plot = plot
self.title = title
def run(self, param):
pca = PCA(n_components=param, random_state=rand_state)
newdata = pca.fit_transform(self.data)
return newdata
def tester(self):
for k in self.dim_param:
pca = PCA(n_components=k)
pca.fit_transform(self.data)
eigenvalue = pca.explained_variance_
explainedRatio = pca.explained_variance_ratio_
if self.gen_plot:
self.plot(eigenvalue, explainedRatio)
def plot(self, eigenvalue, explainedRatio):
#Eigenvalue
ax = plt.figure().gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.plot(self.dim_param, eigenvalue, 'o-')
plt.xlabel('n_components')
plt.ylabel('Eigenvalue')
plt.title(self.title + '-PCA-Eigenvalue')
plt.savefig('../plots/'+ self.title + '-PCA-Eigenvalue')
#ratios explained
ax = plt.figure().gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.plot(self.dim_param, explainedRatio, 'o-')
plt.xlabel('n_components')
plt.ylabel('Explained Varience Ratio')
plt.title(self.title + '-PCA-Explained_Ratio')
plt.savefig('../plots/'+ self.title + '-PCA-Explained_Ratio')
class ICA_DR():
def __init__(self, data, target, dim_param, title, plot=False):
self.data = data
self.target = target
self.dim_param = dim_param
self.gen_plot = plot
self.title = title
def run(self, param):
ica = FastICA(n_components=param, random_state=rand_state)
newdata = ica.fit_transform(self.data)
return newdata
def tester(self):
kurtosis = {}
for k in self.dim_param:
ica = FastICA(n_components=k)
tmp = ica.fit_transform(self.data)
tmp = pd.DataFrame(tmp)
tmp = tmp.kurt(axis=0)
kurtosis[k] = tmp.abs().mean()
kurtosis = pd.Series(kurtosis)
if self.gen_plot:
self.plot(kurtosis)
def plot(self, kurtosis):
#Kurtosis
ax = plt.figure().gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.plot(self.dim_param, kurtosis, 'o-')
plt.xlabel('n_components')
plt.ylabel('Kurtosis')
plt.title(self.title + '-ICA-Kurtosis')
plt.savefig('../plots/'+ self.title + '-ICA-Kurtosis')
class RP_DR(): ##tried different random states
def __init__(self, data, target, dim_param, title, plot=False):
self.data = data
self.target = target
self.dim_param = dim_param
self.gen_plot = plot
self.title = title
def run(self, param):
rp = SparseRandomProjection(n_components=param, random_state=rand_state)
newdata = rp.fit_transform(self.data)
return newdata
#refer to Chad's code
def pairwise_dist_corr(self, x1, x2):
assert x1.shape[0] == x2.shape[0]
d1 = pairwise_distances(x1)
d2 = pairwise_distances(x2)
return np.corrcoef(d1.ravel(), d2.ravel())[0, 1]
def reconstruction_error(self, projection, x):
w = projection.components_
if sps.issparse(w):
w = w.todense()
p = pinv(w)
reconstructed = ((p@w)@(x.T)).T #Unproject projected data
x = np.matrix(x)
errors = np.square(x - reconstructed)
return np.nanmean(errors)
def tester(self):
corr = defaultdict(dict)
err = defaultdict(dict)
for i, k in product(range(5), self.dim_param):
rp = SparseRandomProjection(random_state=i, n_components=k)
corr[k][i] = self.pairwise_dist_corr(rp.fit_transform(self.data), self.data)
err[k][i] = self.reconstruction_error(rp, self.data)
rp.components_
corr = pd.DataFrame(corr).T
err = pd.DataFrame(err).T
#print(corr[corr.columns[1]])
#print(err)
if self.gen_plot:
self.plot(err, corr)
def plot(self, err, corr):
#Reconstruction err
ax = plt.figure().gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.plot(self.dim_param, err, 'o-')
plt.xlabel('n_components')
plt.ylabel('Reconstruction Error')
plt.title(self.title + '-RP-Reconstruction_Err')
plt.savefig('../plots/'+ self.title + '-RP-Reconstruction_Err')
#Pairwise Dist Correlation
ax = plt.figure().gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.plot(self.dim_param, corr, 'o-')
plt.xlabel('n_components')
plt.ylabel('Pairwise Dist Corr')
plt.title(self.title + '-RP-Pairwise_Dist_Corr')
plt.savefig('../plots/'+ self.title + '-RP-Pairwise_Dist_Corr')
class RF_DR():
def __init__(self, data, target, dim_param, title, plot=False):
self.data = data
self.target = target
self.dim_param = dim_param
self.gen_plot = plot
self.title = title
def run(self, threshold):
rfc = RandomForestClassifier(n_estimators=100, class_weight='balanced', random_state=rand_state, n_jobs=1)
imp = rfc.fit(self.data, self.target).feature_importances_
imp = pd.DataFrame(imp,columns=['Feature Importance'], index=self.data.columns)
imp.sort_values(by=['Feature Importance'],inplace=True,ascending=False)
imp['CumSum'] = imp['Feature Importance'].cumsum()
imp = imp[imp['CumSum']<=threshold]
top_cols = imp.index.tolist()
newdata = self.data[top_cols]
return newdata
def tester(self):
rfc = RandomForestClassifier(n_estimators=100, class_weight='balanced', random_state=rand_state, n_jobs=1)
rfc = rfc.fit(self.data, self.target)
importances = rfc.feature_importances_
if self.gen_plot:
self.plot(importances)
def plot(self, importances):
#ratios explained
ax = plt.figure().gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.plot(range(1, self.data.shape[1]+1), importances, 'o-')
plt.xlabel('n_components')
plt.ylabel('Feature Importance')
plt.title(self.title + '-RF-Feature_Importance')
plt.savefig('../plots/'+ self.title + '-RF-Feature_Importance') | true |
af7925bea472facfc1ddf44780cb1a0f7c22122a | Python | movermeyer/nicedjango | /tests/a3/models.py | UTF-8 | 896 | 2.546875 | 3 | [
"MIT"
] | permissive | """
Multiple inheritance sample 2 from docs.
Notes:
* not more than one review per book looks like a wrong example.
* have to define review differently for 1.7, despite docs say this is for 1.7:
CommandError: System check identified some issues:
ERRORS:
a3.BookReview: (models.E005) The field 'piece_ptr' from parent model 'a3.book' clashes with the
field 'piece_ptr' from parent model 'a3.article'.
"""
from django import VERSION
from django.db import models
class Piece(models.Model):
class Meta:
app_label = 'a3'
class Article(Piece):
headline = models.CharField(max_length=10)
class Book(Piece):
title = models.CharField(max_length=10)
if VERSION[:2] == (1, 7):
class BookReview(Book):
article_ptr = models.OneToOneField(Article)
book_ptr = models.OneToOneField(Book)
else:
class BookReview(Book, Article):
pass
| true |
21ff94594cb23c9fc1987415f0cb8e83efed119a | Python | jdvpl/Python | /Universidad Nacional/monitorias/Ejercicios/diccionarios/diccionario.py | UTF-8 | 89 | 2.546875 | 3 | [] | no_license | ports={22:"ssh",23:"telner",80:"http"}
for k,v in ports.items():
print(f"{k} => {v}") | true |
2e8283f9091b5196020bcdcafd0b55536e129799 | Python | JunhoKim94/HEVEN_Path_Planning | /Database/Platform.py | UTF-8 | 7,262 | 2.53125 | 3 | [] | no_license | import time
import sys
import os
sys.path.append(os.path.dirname(__file__))
from Flag import Flag
import serial
class Platform:
def __init__(self, port, baud, flag: Flag):
self.__recv_data = SerialPacket()
self.__send_data = SerialPacket()
self.flag = flag
self.__platform_initializing_success = False
try:
self.__serial = serial.Serial(port, baud)
self.__platform_initializing_success = True
print("[Platform Intializing \tOk ]")
except serial.serialutil.SerialException as e:
print("[Platform Intializing \tFail] \tCheck your COMPORT: ", e)
def main(self):
if self.__platform_initializing_success:
time.sleep(1)
print("Start Platform \t- Success\n")
self.__run()
else:
print("Start Platform \t- Fail: \tPlatform doesn't initialize succeessfully. Therefore, Platform will not run.")
print("\t\t\t\t-->\tTerminate Platform")
def __run(self):
while not self.flag.system_stop:
if self.flag.platform_stop:
time.sleep(0.1)
else:
self.__send()
self.__read()
'''
https://github.com/HongBeenKim/pams-skku/blob/master/thinkingo/car_platform.py에 있는 코드인데 이해를 아직 못해서 주석으로 남겨둠
이후에, 이해 후 수정이 필요함.
if not self.data.debug_flag and self.data.read_packet.aorm == SerialPacket.AORM_MANUAL:
self.data.reset_to_default()
'''
time.sleep(0.1)
print("Terminating Platform")
self.__serial.close()
def __read(self):
try:
message = self.__serial.read(18)
self.__recv_data.read_bytes(message)
except Exception as e:
print("car_platform RECEIVE ERROR: ", e)
def __send(self):
self.__send_data.alive = self.__recv_data.alive
try:
self.__serial.write(self.__send_data.write_bytes())
except Exception as e:
print("car_platform SEND ERROR: ", e)
@property
def recv_data(self):
return self.__recv_data
@property
def send_data(self):
return self.__send_data
"""
통신 코드를 위한 시리얼 패킷 API
https://github.com/Jueun-Park/HEVEN_AutonomousCar_2018/blob/master/src/serial_packet.py
김진웅 (2018-05)
패킷 세부 형식(byte array)은 플랫폼 안내 책자 참조
"""
import sys
import os
sys.path.append(os.path.dirname(__file__))
import numpy as np
import struct
class SerialPacket(object):
    """One frame of the car-platform serial protocol.

    Numeric fields are coerced to fixed-width numpy integer types on
    assignment (see __setattr__) so they match the on-wire byte layout.
    Field meanings and widths follow the platform manual referenced in the
    module docstring.
    """

    # Frame delimiters: 'S','T','X' prefix and CR/LF suffix.
    START_BYTES = [0x53, 0x54, 0x58]
    END_BYTES = [0x0D, 0x0A]
    # Auto-or-manual driving mode (defaults to autonomous).
    AORM_MANUAL = 0x00
    AORM_AUTO = 0x01
    AORM_DEFAULT = AORM_AUTO
    # Emergency stop.
    ESTOP_OFF = 0x00
    ESTOP_ON = 0x01
    ESTOP_DEFAULT = ESTOP_OFF
    # Gear selection.
    GEAR_FORWARD = 0x00
    GEAR_NEUTRAL = 0x01
    GEAR_BACKWARD = 0x02
    GEAR_DEFAULT = GEAR_FORWARD
    SPEED_MIN = 0
    # Steering command range in platform units (negative = left).
    STEER_MAXLEFT = -2000
    STEER_STRAIGHT = 0
    STEER_MAXRIGHT = 2000
    # Brake actuation values (1 = no brake).
    BRAKE_NOBRAKE = 1
    BRAKE_FULLBRAKE = 33
    BRAKE_DEFAULT = BRAKE_NOBRAKE
    BRAKE_MAXBRAKE = 200

    def __init__(self, data=None, start_bytes=START_BYTES,
                 aorm=AORM_DEFAULT, estop=ESTOP_DEFAULT, gear=GEAR_DEFAULT,
                 speed=0, steer=0, brake=BRAKE_DEFAULT,
                 enc=0, alive=0,
                 end_bytes=END_BYTES):
        # If raw frame bytes are supplied, parse them and ignore the
        # remaining keyword arguments entirely.
        if data is not None: self.read_bytes(data); return
        self.start_bytes = start_bytes
        self.aorm = aorm
        self.estop = estop
        self.gear = gear
        self.speed = speed
        self.steer = steer
        self.brake = brake
        self.enc = enc
        self.alive = alive
        self.end_bytes = end_bytes

    def __setattr__(self, attr, v):
        # Coerce every known field to its fixed-width wire type; unknown
        # attributes fall through to normal assignment.
        if attr == 'start_bytes': super().__setattr__(attr, np.array(v, np.uint8)); return
        if attr == 'aorm': super().__setattr__(attr, np.uint8(v)); return
        if attr == 'estop': super().__setattr__(attr, np.uint8(v)); return
        if attr == 'gear': super().__setattr__(attr, np.uint8(v)); return
        if attr == 'speed': super().__setattr__(attr, np.uint16(v)); return
        if attr == 'steer': super().__setattr__(attr, np.int16(v)); return
        if attr == 'brake': super().__setattr__(attr, np.uint8(v)); return
        if attr == 'enc': super().__setattr__(attr, np.int32(v)); return
        if attr == 'alive': super().__setattr__(attr, np.uint8(v)); return
        if attr == 'end_bytes': super().__setattr__(attr, np.array(v, np.uint8)); return
        super().__setattr__(attr, v)

    def default(self):
        """Reset every field to its protocol default."""
        self.start_bytes = SerialPacket.START_BYTES
        self.aorm = SerialPacket.AORM_DEFAULT
        self.estop = SerialPacket.ESTOP_DEFAULT
        self.gear = SerialPacket.GEAR_DEFAULT
        self.speed = SerialPacket.SPEED_MIN
        self.steer = SerialPacket.STEER_STRAIGHT
        self.brake = SerialPacket.BRAKE_DEFAULT
        self.enc = 0
        self.alive = 0
        self.end_bytes = SerialPacket.END_BYTES

    def get_attr(self, mode=None):
        """Return field tuples: default (gear, speed, steer, brake);
        'a' adds mode/estop/enc/alive; 'ra' additionally includes the
        frame delimiters. Any other mode returns the string 'wrong mode'."""
        if mode is None:
            return self.gear, self.speed, self.steer, self.brake
        if mode == 'a':
            return self.aorm, self.estop, self.gear, self.speed, self.steer, self.brake, self.enc, self.alive
        if mode == 'ra':
            return self.start_bytes, self.aorm, self.estop, self.gear, self.speed, self.steer, self.brake, self.enc, self.alive, self.end_bytes
        return 'wrong mode'

    def read_bytes(self, b):
        """Parse an 18-byte platform->PC frame into this packet.

        On any unpack failure the packet is reset to defaults instead of
        raising. NOTE(review): the read format ('<', includes enc) and the
        write format ('!', omits enc) differ in byte order and field set —
        presumably dictated by the platform protocol; confirm against the
        platform manual.
        """
        if len(b) == 0:
            return
        try:
            u = struct.unpack('<3sBBBHhBiB2s', b)
        except Exception as e:
            print('[SerialPacket| READ ERROR:', b, e)
            print('-Set to default value]')
            self.default()
            return
        self.start_bytes = bytearray(u[0])
        self.aorm = u[1]
        self.estop = u[2]
        self.gear = u[3]
        self.speed = u[4]
        self.steer = u[5]
        self.brake = u[6]
        self.enc = u[7]
        self.alive = u[8]
        self.end_bytes = bytearray(u[9])

    def write_bytes(self):
        """Serialize this packet as a PC->platform frame (no enc field).

        On a pack failure the packet is reset to defaults and packing is
        retried once with the (now valid) default values.
        """
        try:
            b = struct.pack('!3sBBBHhBB2s', bytes(self.start_bytes), self.aorm, self.estop, self.gear, self.speed,
                            self.steer, self.brake, self.alive, bytes(self.end_bytes))
        except:
            print('[SerialPacket| WRITE ERROR]')
            print('-Set to default value]')
            self.default()
            b = struct.pack('!3sBBBHhBB2s', bytes(self.start_bytes), self.aorm, self.estop, self.gear, self.speed,
                            self.steer, self.brake, self.alive, bytes(self.end_bytes))
        return b

    def verify(self):
        """Return True iff the frame delimiters match the protocol constants
        (numpy element-wise comparison, hence .any())."""
        if (self.start_bytes != SerialPacket.START_BYTES).any(): return False
        if (self.end_bytes != SerialPacket.END_BYTES).any(): return False
        return True
if __name__ == '__main__':
    # Ad-hoc smoke test: parse a sample 18-byte frame, re-parse it, reset
    # to defaults, then round-trip through write_bytes().
    a = SerialPacket(bytearray.fromhex("53545800 00000000 00000100 00000000 0D0A"))
    a.read_bytes(bytearray.fromhex("53545800 00000000 00000100 00000000 0D0A"))
    a.default()
    print(a.start_bytes, a.end_bytes)
    print(str(a.write_bytes()))
9acb3381ed62b1ee3a91ebdf9a2a60df33e6c020 | Python | cyrilwelschen/reservationen_package | /reservationen_package/push_to_dropbox.py | UTF-8 | 718 | 2.96875 | 3 | [
"MIT"
] | permissive | import dropbox
import os
from dropbox.files import WriteMode
class TransferData:
    """Thin wrapper around a Dropbox API v2 client for file uploads."""

    def __init__(self, access_token):
        # OAuth2 token used to authenticate every API call.
        self.access_token = access_token

    def upload_file(self, file_from, file_to):
        """Upload local file *file_from* to Dropbox path *file_to*,
        overwriting any existing file (Dropbox API v2)."""
        client = dropbox.Dropbox(self.access_token)
        with open(file_from, 'rb') as handle:
            client.files_upload(handle.read(), file_to, mode=WriteMode('overwrite'))
def upload(local_file_path, access_token, dropbox_folder=""):
    """Upload *local_file_path* into *dropbox_folder* via the Dropbox API v2,
    keeping the local file's basename as the remote name."""
    destination = dropbox_folder + '/' + os.path.basename(local_file_path)
    TransferData(access_token).upload_file(local_file_path, destination)
| true |
9b4edb7c1d7cdb170de7b66731ff123906625eea | Python | brunopace/metaevo | /simples/artificial.py | UTF-8 | 10,169 | 2.65625 | 3 | [] | no_license | import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import math
import random as rndm
import time
ta = time.time()
N = 5000
numyears = 20000
eps = 0.05 #acima de 0.26 nao eh estavel
alpha = 0.06
T1 = 4
T2 = 3
L = 8
l = 3
deg = 1
method = 'fraction'
landscape = nx.Graph()
rho = {}
for i in range(L + deg):
rho.update({i:[]})
for i in range(L-1):
landscape.add_node(i)
landscape.add_node(L-1)
for i in range(deg):
landscape.add_node(L + i)
for i in range(L-1):
landscape.add_edge(i, i + 1)
landscape.add_edge(0,0)
for i in range(L - l - 1):
for j in range(deg):
landscape.add_edge(i + l, L + j)
nx.draw(landscape, node_color=['#0000ff','#bb00ee','#cc0044','#ee6600','#ff8800','#ffcc00','#ccee00','#00cc44','#666666'])
plt.show()
population = [N if k == 0 else 0 for k in range(L + deg)]
def printtofile():
    """Append each genotype's occupancy time series to its own data file.

    One file per genotype index i, named from the module-level globals
    (method, eps, N); each line is "<generation> <count>". Uses the
    globals rho, L, deg, method, eps and N.
    """
    for i in range(L + deg):
        # 'with' guarantees the handle is closed even if a write fails
        # (the original opened/closed manually and leaked on error).
        with open('artificial/'+ method + '/'+ 'eps'+str(eps).split('.')[0]+str(eps).split('.')[1]+'N'+str(N)+'_'+str(i)+'.dat', 'a') as fobj:
            for t in range(len(rho[i])):
                fobj.write(str(t+1) + ' ' + str(rho[i][t]) + '\n')
def kill():
    """Random death sweep over the global population: each organism dies
    with probability alpha (mutates the global `population` list)."""
    nichttot = sum(population)
    # Snapshot of the per-genotype counts, used to map an organism index
    # back to its genotype via cumulative sums.
    p = [population[j] for j in range(L + deg)]
    for o in range(nichttot):
        if rndm.random() < alpha:
            i = 0
            fila = p[0]
            while o > fila:
                i += 1
                fila += p[i]
            # NOTE(review): if the selected genotype is already empty the
            # death is shifted to the next non-empty genotype, which can
            # bias which genotype loses the organism — preserved as-is.
            while population[i] == 0:
                i += 1
            population[i] -= 1
def divideboth(method):
    """Reproduction step where ALL genotypes (0..L-1 on the chain) divide.

    Each organism produces one offspring, which mutates to a random
    neighbouring genotype with probability eps. The combined population is
    then capped back to N according to `method`:
      'killall'  - kill the excess from the parents before adding offspring;
      'overlap'  - add offspring first, then kill the excess from everyone;
      'fraction' - admit each offspring with probability (free slots)/N,
                   then apply the random death sweep kill().
    Mutates the global `population` list.
    """
    global population
    numorgs = 0
    nextpop = [0 for i in range(L + deg)]
    for g in range(L):
        for n in range(population[g]):
            r = rndm.random()
            if r < eps:
                # Mutant offspring: jump to a random landscape neighbour.
                nextpop[rndm.choice(landscape.neighbors(g))] += 1
            else:
                nextpop[g] += 1
    for k in population:
        numorgs += k
    for k in nextpop:
        numorgs += k
    # Number of organisms over capacity that must be removed.
    matar = numorgs - N if numorgs > N else 0
    if method == 'killall':
        for k in range(matar):
            # Pick a random living parent via cumulative counts and kill it.
            y = -1
            people = 0
            guy = rndm.randint(1, N - k)
            while people < guy:
                y += 1
                people += population[y]
            population[y] -= 1
        for i in range(L + deg):
            population[i] += nextpop[i]
    if method == 'overlap':
        for i in range(L + deg):
            population[i] += nextpop[i]
        for k in range(matar):
            y = -1
            people = 0
            guy = rndm.randint(1, N + matar - k)
            while people < guy:
                y += 1
                people += population[y]
            population[y] -= 1
    if method == 'fraction':
        # Offspring fill only the currently free fraction of the capacity.
        empty = N - sum(population)
        frac = float(empty)/N
        nextgen = [0 for j in range(L + deg)]
        for i in range(L + deg):
            for c in range(nextpop[i]):
                if rndm.random() < frac:
                    nextgen[i] += 1
        for i in range(L + deg):
            population[i] += nextgen[i]
        kill()
    # (Stale commented-out duplicate of kill() removed.)
def divide1(method):
    """Reproduction step where only the first L-1 chain genotypes divide
    (the last chain genotype, L-1, does not reproduce this turn).

    Same offspring/mutation and capacity-capping logic as divideboth();
    see that function for the meaning of `method`. Mutates the global
    `population` list.
    """
    global population
    numorgs = 0
    nextpop = [0 for i in range(L + deg)]
    for g in range(L-1): #for all of the first L-1 genotypes
        for n in range(population[g]): #for every organism with genotype g
            r = rndm.random()
            if r < eps:
                nextpop[rndm.choice(landscape.neighbors(g))] += 1
            else:
                nextpop[g] += 1
    for k in population:
        numorgs += k
    for k in nextpop:
        numorgs += k
    matar = numorgs - N if numorgs > N else 0
    if method == 'killall':
        for k in range(matar):
            y = -1
            people = 0
            guy = rndm.randint(1, N - k)
            while people < guy:
                y += 1
                people += population[y]
            population[y] -= 1
        for i in range(L + deg):
            population[i] += nextpop[i]
    if method == 'overlap':
        for i in range(L + deg):
            population[i] += nextpop[i]
        for k in range(matar):
            y = -1
            people = 0
            guy = rndm.randint(1, N + matar - k)
            while people < guy:
                y += 1
                people += population[y]
            population[y] -= 1
    if method == 'fraction':
        empty = N - sum(population)
        frac = float(empty)/N
        nextgen = [0 for j in range(L + deg)]
        for i in range(L + deg):
            for c in range(nextpop[i]):
                if rndm.random() < frac:
                    nextgen[i] += 1
        for i in range(L + deg):
            population[i] += nextgen[i]
        kill()
    # (Stale commented-out duplicate of kill() removed.)
def divide2(method):
    """Reproduction step where only the last chain genotype (L-1) divides.

    Same offspring/mutation and capacity-capping logic as divideboth();
    see that function for the meaning of `method`. Mutates the global
    `population` list.
    """
    global population
    numorgs = 0
    nextpop = [0 for i in range(L + deg)]
    for n in range(population[L-1]):
        r = rndm.random()
        if r < eps:
            nextpop[rndm.choice(landscape.neighbors(L-1))] += 1
        else:
            nextpop[L-1] += 1
    for k in population:
        numorgs += k
    for k in nextpop:
        numorgs += k
    matar = numorgs - N if numorgs > N else 0
    if method == 'killall':
        for k in range(matar):
            y = -1
            people = 0
            guy = rndm.randint(1, N - k)
            while people < guy:
                y += 1
                people += population[y]
            population[y] -= 1
        for i in range(L + deg):
            population[i] += nextpop[i]
    if method == 'overlap':
        for i in range(L + deg):
            population[i] += nextpop[i]
        for k in range(matar):
            y = -1
            people = 0
            guy = rndm.randint(1, N + matar - k)
            while people < guy:
                y += 1
                people += population[y]
            population[y] -= 1
    if method == 'fraction':
        empty = N - sum(population)
        frac = float(empty)/N
        nextgen = [0 for j in range(L + deg)]
        for i in range(L + deg):
            for c in range(nextpop[i]):
                if rndm.random() < frac:
                    nextgen[i] += 1
        for i in range(L + deg):
            population[i] += nextgen[i]
        kill()
    # (Stale commented-out duplicate of kill() removed.)
def showplots():
    """Plot the occupancy time series of every genotype, one fixed color
    per genotype index (same palette as the landscape drawing)."""
    plt.plot(range(len(rho[0])), rho[0], '#0000ff')
    plt.plot(range(len(rho[1])), rho[1], '#bb00ee')
    plt.plot(range(len(rho[2])), rho[2], '#cc0044')
    plt.plot(range(len(rho[3])), rho[3], '#ee6600')
    plt.plot(range(len(rho[4])), rho[4], '#ff8800')
    plt.plot(range(len(rho[5])), rho[5], '#ffcc00')
    plt.plot(range(len(rho[6])), rho[6], '#ccee00')
    plt.plot(range(len(rho[7])), rho[7], '#00cc44')
    plt.plot(range(len(rho[8])), rho[8], '#000000')
    # (Stale commented-out alternative plotting loop removed.)
    plt.show()
def rhoupdate(population):
    # Record the current occupancy of every genotype into the global rho
    # history (the parameter shadows the global `population` name).
    for i in range(L + deg):
        rho[i].append(population[i])
def continuefor(numy, lastend):
    """Continue the simulation for `numy` more generations starting at
    generation `lastend` (helper for interactive use; mirrors the main
    loop below). Division periods T1/T2 decide which genotypes reproduce."""
    for t in [lastend + r for r in range(numy)]:
        rhoupdate(population)
        if (t+1)%1000 == 0:
            print 'ano = ' + str(t+1)
        # Both periods hit: everyone divides.
        if (t+1)%T1 == 0 and (t+1)%T2 == 0:
            divideboth(method)
            continue
        if (t+1)%T1 == 0:
            divide1(method)
            continue
        if (t+1)%T2 == 0:
            divide2(method)
            continue
        # Off-division years still apply random death under 'fraction'.
        if method == 'fraction':
            kill()
# Main simulation loop: record occupancies each generation, then run the
# reproduction step matching the current generation's division periods.
for t in range(numyears):
    rhoupdate(population)
    if (t+1)%1000 == 0:
        print 'ano = ' + str(t+1)
    # Both periods hit: every genotype reproduces.
    if (t+1)%T1 == 0 and (t+1)%T2 == 0:
        divideboth(method)
        continue
    if (t+1)%T1 == 0:
        divide1(method)
        continue
    if (t+1)%T2 == 0:
        divide2(method)
        continue
    # Off-division years still apply random death under 'fraction'.
    if method == 'fraction':
        kill()
tb = time.time()
# 'tempo de execucao' = execution time (Portuguese; runtime string kept as-is).
print 'tempo de execucao: ' + str(tb-ta)
showplots()
| true |
1e03e30383d19fde111ea88c3b65f75401ed88ef | Python | goldader/lbypl | /json_iter.py | UTF-8 | 2,433 | 2.71875 | 3 | [] | no_license | """module to unpack Truelayer json responses into arrays or individual items"""
def depth(x):
    """Return the nesting depth of dicts/lists in *x*.

    Scalars, empty dicts and empty lists count as depth 0; each non-empty
    dict or list level adds 1. Only exact dict/list types recurse
    (subclasses are treated as scalars, as in the original).
    """
    if type(x) is dict and x:
        return 1 + max(depth(value) for value in x.values())
    if type(x) is list and x:
        return 1 + max(depth(item) for item in x)
    return 0
def dict_generator2(indict, pre=None):
pre = pre[:] if pre else []
if isinstance(indict, dict):
for key, value in indict.items():
if isinstance(value, dict):
for d in dict_generator2(value, [key] + pre):
yield d
elif isinstance(value, list) or isinstance(value, tuple):
for v in value:
for d in dict_generator2(v, [key] + pre):
yield d
else:
yield pre + [key, value]
else:
yield pre + [indict]
def json_output(json_input):
    """Flatten a (possibly nested) JSON structure into a single-level dict.

    One-level-deep keys are collapsed to "parent.child"; consecutive
    repeated keys are disambiguated with numeric suffixes ("key_1", ...).
    All result keys are lower-cased.

    NOTE(review): paths nested more than two levels deep are only joined
    once (deeper path components end up stored as the value) — this quirk
    is preserved from the original implementation.

    Fixes vs. the original: removed a dead duplicate generator creation,
    replaced while/next() with a for loop, and removed `return` inside
    `finally`, which silently swallowed every exception.
    """
    dataset = {}
    previous = None
    count = 0
    for value in dict_generator2(json_input):
        if len(value) == 3:
            # Collapse [parent, child, leaf] into ["parent.child", leaf].
            value[0] += ".%s" % value[1]
            value.pop(1)
        if value[0].lower() == previous:
            # Repeated key: append an incrementing suffix.
            count += 1
            value[0] += "_%s" % count
        else:
            count = 0
            previous = value[0].lower()
        dataset[value[0].lower()] = value[1]
    return dataset
"""
import sqlite3
from auth import Auth, access_token
import requests
import tbl_maint
conn = sqlite3.connect('/Users/jgoldader/lbypl.db')
c = conn.cursor()
tbl_maint.Tbl_maint('tl_user_card_accounts')
Auth('bill@fred.com')
user=Auth.uid
c.execute("select distinct provider_id from tl_accounts where user_id=?",[user])
token = access_token(c.fetchone()[0])
info_url="https://api.truelayer.com/data/v1/cards"
token_phrase="Bearer %s" % token
headers = {'Authorization': token_phrase}
z=requests.get(info_url, headers=headers)
all_results=z.json()
results=all_results['results']
print("Results len %s - %s" % (len(results),results))
for i in range(0,len(results)):
json_output_results=json_output(results[i])
print("Json Output len %s - %s" % (len(json_output_results),json_output_results))
tbl_maint.Tbl_maint.create_tbl(json_output_results)
break
""" | true |
de5119b010591ca59fdb6c7eae7ecdf04de441c8 | Python | bhyun/daily-algorithm | /2021/BOJ18290_NM과 K(1).py | UTF-8 | 1,172 | 2.859375 | 3 | [] | no_license | import sys
input = sys.stdin.readline
def dfs(x, y, cnt, summary):
global answer
if cnt == k:
if summary > answer:
answer = summary
return
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
for i in range(x, n):
for j in range(y if i == x else 0, m):
# (i, j)에서 인접한 위치 탐색
if i == x and j == y:
continue
flag = True
for r in range(4):
nx = i + dx[r]
ny = j + dy[r]
if 0 <= nx < n and 0 <= ny < m:
if visited[nx][ny]:
flag = False
break
if flag:
visited[i][j] = True
dfs(i, j, cnt + 1, summary + board[i][j])
visited[i][j] = False
# Read the grid dimensions, pick count k, and the board from stdin,
# then try every cell as the first pick.
n, m, k = map(int, input().split())
board = []
for _ in range(n):
    board.append(list(map(int, input().split())))
visited = [[False] * m for _ in range(n)]  # marks cells already chosen
answer = -sys.maxsize  # best sum found so far (board values may be negative)
for i in range(n):
    for j in range(m):
        visited[i][j] = True
        dfs(i, j, 1, board[i][j])
        visited[i][j] = False

print(answer)
209c5a5a55e3d94ea267f93497054988b519ce2c | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2591/60705/255349.py | UTF-8 | 215 | 3.375 | 3 | [] | no_license | n = int(input())
a = [51, 105, 917]
b = [102, 109, 893, 103]
for i in range(0, n):
line = int(input())
if line in a:
print("Yes")
elif line in b:
print("No")
else:
print(line) | true |
a68ab8568991239b49fae17b229875886c0378ed | Python | daniel-reich/ubiquitous-fiesta | /39utPCHvtWqt5vaz9_10.py | UTF-8 | 161 | 3.109375 | 3 | [] | no_license |
def direction(lst):
nlst = []
for item in lst:
nlst.append(item.replace("e", "w").replace("E", "W").replace("a", "e").replace("A", "E"))
return nlst
| true |
029ebfaf5f827711536c3f63aa4de8bd82ef6a0f | Python | g-d-l/project_euler | /done/068.py | UTF-8 | 1,151 | 2.921875 | 3 | [] | no_license | import itertools
from sets import Set
def main():
ngon = 5
values = range(1, 11)
triples = [[0, 0, 0] for _ in xrange(ngon)]
result = ''
for assignment in itertools.permutations(range(1, 10), ngon):
for i in xrange(ngon - 1):
triples[i][1], triples[i][2] = assignment[i], assignment[i + 1]
triples[ngon - 1][1], triples[ngon - 1][2] = assignment[ngon - 1], assignment[0]
pair_sums = [sum(x) for x in triples]
seen = Set()
has_dups = False
for ps in pair_sums:
if ps in seen:
has_dups = True
break
else:
seen.add(ps)
if has_dups:
continue
if max(pair_sums) > min(pair_sums) + 9:
continue
externals = [x for x in values if x not in assignment]
line_sum = sum(pair_sums) / ngon + sum(externals) / len(externals)
for i in xrange(ngon):
triples[i][0] = line_sum - pair_sums[i]
start_index = pair_sums.index(max(pair_sums))
ordered = [str(x) for sublist in triples[start_index:] + triples[:start_index] for x in sublist]
str_form = ''.join(ordered)
if str_form > result:
result = str_form
for triple in triples:
triple[0] = 0
print result
if __name__ == '__main__':
main()
| true |
65025e476f6cbbaf0a043dd74deacd4fa7011f8a | Python | danieldis/CS_3580_Data_Science_Algorithms | /CS_3580_Assignments/A2/assign2.py | UTF-8 | 6,853 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env python3
print("\nDaniel Salmond")
import csv
states = frozenset([
'Alabama','Alaska','Arizona','Arkansas','California','Colorado','Connecticut','Delaware','Florida','Georgia','Hawaii','Idaho','Illinois','Indiana',
'Iowa','Kansas','Kentucky','Louisiana','Maine','Maryland','Massachusetts','Michigan','Minnesota','Mississippi','Missouri','Montana','Nebraska',
'Nevada','New Hampshire','New Jersey','New Mexico','New York','North Carolina','North Dakota','Ohio','Oklahoma','Oregon','Pennsylvania',
'Rhode Island','South Carolina','Tennessee','Texas','Utah','Vermont','Virginia','Washington','Wisconsin','West Virginia','Wyoming'
])
with open("acs2015_county_data.csv", encoding='latin-1') as f:
reader = csv.reader(f)
header_counties = next(reader)
counties = list(reader)
class CountyIndices():
pass
i = CountyIndices()
i.state = header_counties.index("State")
i.total = header_counties.index("TotalPop")
i.unemployed = header_counties.index("Unemployment")
i.hispanic = header_counties.index("Hispanic")
i.white = header_counties.index("White")
i.black = header_counties.index("Black")
i.native = header_counties.index("Native")
i.asian = header_counties.index("Asian")
i.pacific = header_counties.index("Pacific")
stats = {}
for row in counties:
if row[i.state] not in states:
continue
try:
stats[row[i.state]]
except KeyError:
stats[row[i.state]] = {"total": 0,"unemployed":0,"hispanic":0,"white":0,
"black":0,"native":0,"asian":0,"pacific":0}
totalPop = int(row[i.total])
stats[row[i.state]]["total"] += totalPop
stats[row[i.state]]["unemployed"] += totalPop * (float(row[i.unemployed])/100)
stats[row[i.state]]["hispanic"] += totalPop * (float(row[i.hispanic])/100)
stats[row[i.state]]["white"] += totalPop * (float(row[i.white])/100)
stats[row[i.state]]["black"] += totalPop * (float(row[i.black])/100)
stats[row[i.state]]["native"] += totalPop * (float(row[i.native])/100)
stats[row[i.state]]["asian"] += totalPop * (float(row[i.asian])/100)
stats[row[i.state]]["pacific"] += totalPop * (float(row[i.pacific])/100)
races = []
unemployment = []
for k, v in stats.items():
races.append((k, v["hispanic"]/v["total"], v["white"]/v["total"], v["black"]/v["total"], v["native"]/v["total"],
v["asian"]/v["total"], v["pacific"]/v["total"]))
unemployment.append((k, v["unemployed"]/v["total"]))
print("\nPart 1:")
print("Hispanic:", max(races, key=lambda r: r[1])[0])
print("White:", max(races, key=lambda r: r[2])[0])
print("Black:", max(races, key=lambda r: r[3])[0])
print("Native:", max(races, key=lambda r: r[4])[0])
print("Asian:", max(races, key=lambda r: r[5])[0])
print("Pacific:", max(races, key=lambda r: r[6])[0])
print("\nPart 2:")
print("Highest Unemployment:", max(unemployment, key=lambda p: p[1])[0])
print("Lowest Unemployment:", min(unemployment, key=lambda p: p[1])[0])
del counties, header_counties, stats, races, unemployment
print("\nPart 3:")
#Average income greater or equal to $50,000
#Average poverty greater than 50%
with open("acs2015_census_tract_data.csv",encoding='latin-1') as f:
next(f)
for line in f:
line = line.strip()
column = line.split(',')
if (str(column[13]) == "" or str(column[17]) == ""):
continue
#try:
if(float(column[13]) >= float(50000) and float(column[17]) > float(50)):
print("Census Tract ID:" + str(column[0]) + ", State: " + str(column[1]) + ", County: " + str(column[2]) + ", Races: ", end=' ')
if(float(column[6]) > 1):
print("Hispanic", end=' ')
if(float(column[7]) > 1):
print("White", end=' ')
if(float(column[8]) > 1):
print("Black", end=' ')
if(float(column[9]) > 1):
print("Native", end=' ')
if(float(column[10]) > 1):
print("Asian", end=' ')
if(float(column[11]) > 1):
print("Pacific", end=' ')
print()
#except ValueError:
# continue
print("\nPart 4:")
#percentage of woman greater than 57%
#total population is at least 10,000
with open("acs2015_census_tract_data.csv",encoding='latin-1') as f:
next(f)
for line in f:
line = line.strip()
column = line.split(',')
try:
if(float(column[3]) > 9999 and (float(column[5])/float(column[3])) > 0.57):
print("Census Tract ID:" + str(column[0]) + ", State:" + str(column[1]) + ", County:" + str(column[2]) + ", Races:", end=' ')
if(float(column[6]) > 1):
print("Hispanic", end=' ')
if(float(column[7]) > 1):
print("White", end=' ')
if(float(column[8]) > 1):
print("Black", end=' ')
if(float(column[9]) > 1):
print("Native", end=' ')
if(float(column[10]) > 1):
print("Asian", end=' ')
if(float(column[11]) > 1):
print("Pacific", end=' ')
print()
except ValueError:
continue
print("\nPart 5:")
#of the 6 race categories at least four of them each have 15%.
#For example, White is 25%, Black is 16%, Hispanic is 18%, Pacific is 20%
with open("acs2015_census_tract_data.csv",encoding='latin-1') as f:
next(f)
for line in f:
line = line.strip()
column = line.split(',')
try:
dCounter = 0
races = []
if(float(column[6]) > 1):
races.append("Hispanic")
if(float(column[6]) >= 15):
dCounter += 1
if(float(column[7]) > 1):
races.append("White")
if(float(column[7]) >= 15):
dCounter += 1
if(float(column[8]) > 1):
races.append("Black")
if(float(column[8]) >= 15):
dCounter += 1
if(float(column[9]) > 1):
races.append("Native")
if(float(column[9]) >= 15):
dCounter += 1
if(float(column[10]) > 1):
races.append("Asian")
if(float(column[10]) >= 15):
dCounter += 1
if(float(column[11]) > 1):
races.append("Pacific")
if(float(column[11]) >= 15):
dCounter += 1
if(dCounter < 4):
continue
else:
print("Census Tract ID:", str(column[0]), "State:", str(column[1]), "County:", str(column[2]), "Races:", end=" ")
print(" ".join(races))
except ValueError:
continue | true |
7040127ebb729980fb94d2658d79158119ddf9a7 | Python | syurskyi/Algorithms_and_Data_Structure | /_algorithms_challenges/exercism/exercism-python-master/binary-search-tree/binary_search_tree.py | UTF-8 | 1,178 | 3.578125 | 4 | [] | no_license | class TreeNode(object):
def __init__(self, data, left, right):
self.data = data
self.left = left
self.right = right
def __str__(self):
fmt = 'TreeNode(data={}, left={}, right={})'
return fmt.format(self.data, self.left, self.right)
class BinarySearchTree(object):
    """Unbalanced binary search tree built from a list of values.

    Values equal to an existing node go to the LEFT subtree (insertion
    uses `root.data < data` to choose the right branch).
    """

    def __init__(self, tree_data):
        # First element becomes the root; the rest are inserted one by one
        # by walking down to the appropriate empty child slot.
        self._root = TreeNode(tree_data[0], None, None)
        for data in tree_data[1:]:
            branch = self._root
            while branch is not None:
                root = branch
                attr = 'right' if root.data < data else 'left'
                branch = getattr(root, attr)
            setattr(root, attr, TreeNode(data, None, None))

    def data(self):
        """Return the root TreeNode (i.e. the whole tree)."""
        return self._root

    def sorted_data(self):
        """Return all values in ascending order (iterative in-order walk).

        NOTE(review): `node.left not in result` relies on TreeNode's
        default identity equality, and the membership test makes this
        O(n^2); preserved as-is.
        """
        result = []
        queue = [self._root]  # used as a stack: top is queue[-1]
        while queue:
            node = queue[-1]
            if node.left is not None and node.left not in result:
                # Descend left until the left subtree has been emitted.
                queue.append(node.left)
            else:
                result.append(queue.pop())
                if node.right is not None:
                    queue.append(node.right)
        return [n.data for n in result]
| true |
f7f6f7f87499b17db8c3fec1d87637c7470998ce | Python | dryan9/Database-Management | /week13__2.py | UTF-8 | 1,042 | 2.515625 | 3 | [] | no_license | import csv
import pymysql
import configparser
config = configparser.ConfigParser()
config.read_file(open('credentials.py'))
dbhost = config['csc']['dbhost']
dbuser = config['csc']['dbuser']
dbpw = config['csc']['dbpw']
dbschema = 'dryan16'
dbconn = pymysql.connect(host=dbhost,
user=dbuser,
passwd=dbpw,
db=dbschema,
use_unicode=True,
charset='utf8mb4',
autocommit=True)
cursor = dbconn.cursor()
filename = 'peopleDOB.csv'
myRows = []
try:
with open(filename, 'r') as myCSV:
data = csv.reader(myCSV)
next(myCSV)
for row in data:
myRows.append(row)
myCSV.close()
except FileNotFoundError:
print('no file!')
query = 'UPDATE peopleData set dob = %s\
where primary_key = %s'
for item in myRows:
dob = item[1]
id = item[0]
cursor.execute(query,(dob,id))
print("--------") | true |
a2737b57994be57cbe9530f174a5eb1e98942fca | Python | z0x010/medusa | /medusacode/rabbitmq_pika_demo/02_work_queues/worker.py | UTF-8 | 3,143 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
# coding:utf-8
import pika
import datetime
import time
HOST = '192.168.100.100'
PORT = 5672
QUEUE_NAME = 'task_queue'
print '----------------------------------------------------------------------------------------------------'
connection = pika.BlockingConnection(
parameters=pika.ConnectionParameters(
host=HOST,
port=PORT,
)
)
print connection # <pika.adapters.blocking_connection.BlockingConnection object at 0x109564b50>
channel = connection.channel(
channel_number=None
)
print channel # <pika.adapters.blocking_connection.BlockingChannel object at 0x1098355d0>
qd = channel.queue_declare(
queue=QUEUE_NAME,
durable=True, # make the queue durable (survive reboots of the broker)
)
print qd # <METHOD(['channel_number=1', 'frame_type=1', "method=<Queue.DeclareOk(['consumer_count=0', 'message_count=0', 'queue=queue_test'])>"])>
print '----------------------------------------------------------------------------------------------------'
"""
Use the basic.qos method with the prefetch_count=1 setting.
This tells RabbitMQ not to give more than one message to a worker at a time.
Or, in other words, don't dispatch a new message to a worker until it has processed and acknowledged the previous one.
Instead, it will dispatch it to the next worker that is not still busy.
"""
bqos = channel.basic_qos(
prefetch_count=1,
all_channels=False,
)
print bqos # None
print '----------------------------------------------------------------------------------------------------'
"""
In order to make sure a message is never lost, RabbitMQ supports message acknowledgments.
An ack(nowledgement) is sent back from the consumer to tell RabbitMQ
that a particular message had been received, processed and that RabbitMQ is free to delete it.
If a consumer dies (its channel is closed, connection is closed, or TCP connection is lost) without sending an ack,
RabbitMQ will understand that a message wasn't processed fully and will re-queue it.
If there are other consumers online at the same time, it will then quickly redeliver it to another consumer.
That way you can be sure that no message is lost, even if the workers occasionally die.
There aren't any message timeouts; RabbitMQ will redeliver the message when the consumer dies.
It's fine even if processing a message takes a very, very long time.
"""
def callback(ch, method, properties, body):
    """Consume one task: the message body is an integer number of seconds
    of simulated work (1s sleep per unit). The ack is sent only after the
    work finishes, so an unacked message is re-queued if this worker dies."""
    print('[x] Received: %s' % body)
    for n in range(int(body)):
        time.sleep(1)  # simulate one unit of work
        print '[*] work: %s %s' % (n+1, '.' * (n+1))
    print '[x] Done'
    ch.basic_ack(delivery_tag=method.delivery_tag)  # An ack(nowledgement) is sent back from the consumer to tell the broker(RabbitMQ)
# Register the consumer; no_ack=False means the broker waits for an
# explicit basic_ack from callback() before discarding a message.
bc = channel.basic_consume(
    consumer_callback=callback,
    queue=QUEUE_NAME,
    no_ack=False,  # Tell the broker to expect an acknowledgement
)
print type(bc)  # <type 'str'>
print bc  # ctag1.da4a9beaf58b426aabdbefe390157bbf
print('[*] Waiting for messages. To exit press CTRL+C')
# Blocks forever, dispatching deliveries to callback().
channel.start_consuming()
print '----------------------------------------------------------------------------------------------------'
| true |
14ecd62924921c1741964b5148c0d552de668b56 | Python | gharv222/labs | /lab9.py | UTF-8 | 466 | 3.5 | 4 | [] | no_license | """
George Harvey]
COMP 525
Lab 9
"""
def count_words(file_in):
    """
    Counts how many times each word in a text file appears.

    file_in: path to a text file
    returns: a dictionary whose keys are the whitespace-separated words
    in the file and whose values are their occurrence counts
    """
    word_dict = {}
    # 'with' guarantees the file is closed even on error (the original
    # opened the file and never closed it).
    with open(file_in, 'r') as fin:
        for line in fin:
            for word in line.split():
                # dict.get avoids the explicit membership test.
                word_dict[word] = word_dict.get(word, 0) + 1
    return word_dict
0244af02405fd80db091f65da15f4cf3a259f2ab | Python | luguoxiang/level_pgserver | /test/query_test.py | UTF-8 | 2,378 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | import sqlite3
import psycopg2
from random import randint
conn = sqlite3.connect("reference.db")
myconn = psycopg2.connect(database="test", user="", password="", host="127.0.0.1", port="5433")
cur = conn.cursor()
mycur = myconn.cursor()
cur.execute('DROP TABLE IF EXISTS querytest')
cur.execute('CREATE TABLE querytest (c1 INTEGER, c2 INTEGER, c3 INTEGER, c4 INTEGER, c5 INTEGER, PRIMARY KEY(c1,c2,c3))');
mycur.execute("delete from querytest")
for i in range(1000):
insert_sql = 'INSERT INTO querytest VALUES({},{},{},{},{})'.format(
randint(0, 1000),
randint(0, 1000),
randint(0, 1000),
randint(0, 1000),
randint(0, 1000))
cur.execute(insert_sql);
mycur.execute(insert_sql);
conn.commit()
def check(a, b):
    """Return True iff rows *a* and *b* match element-wise.

    Float elements of *a* are compared with an absolute tolerance of
    1e-4; all other elements must compare equal. Rows of different
    lengths never match.
    """
    if len(a) != len(b):
        return False
    for left, right in zip(a, b):
        if type(left) == float:
            if abs(left - right) > 0.0001:
                return False
        elif left != right:
            return False
    return True
def testQuery(query_sql):
    """Run *query_sql* on both engines (module globals cur/mycur) and
    assert the result sets match row-by-row, printing the first
    mismatching pair before failing."""
    cur.execute(query_sql)
    mycur.execute(query_sql)
    all_rows = cur.fetchall()
    my_all_rows = mycur.fetchall()
    assert(len(all_rows) == len(my_all_rows))
    for i in range(len(all_rows)):
        if not check(all_rows[i], my_all_rows[i]):
            print(all_rows[i])
            print(my_all_rows[i])
            assert 0
testQuery("select * from querytest order by c1,c2,c3,c4,c5")
for i in range(100):
op = ["sum", "avg", "min", "max"]
groupby = set()
for i in range(randint(1, 5)):
column = "c{}".format(randint(1, 5))
groupby.add(column)
projection = set()
for i in range(randint(1, 5)):
column = "c{}".format(randint(1, 5))
func = op[randint(0, len(op) - 1)];
if column in groupby:
projection.add(column)
else:
projection.add("{}({})".format(func, column))
groupby = ",".join(groupby);
projection = ",".join(projection);
query_sql = 'SELECT {} FROM querytest group by {} order by {}'.format(projection, groupby, groupby)
print(query_sql)
testQuery(query_sql)
for i in range(100):
predicate = ""
op = ["=", ">", "<", ">=", "<=", "!="]
op2 = ["and", "or"]
for i in range(randint(1, 5)):
if predicate != "":
predicate += " and " if randint(0, 1) == 0 else " or "
predicate += "c{} {} {}".format(randint(1, 5), op[randint(0,len(op) - 1)], randint(0, 1000))
query_sql = 'SELECT * FROM querytest where {} order by c1,c2,c3,c4,c5'.format(predicate)
print(query_sql)
testQuery(query_sql)
conn.close()
| true |
871a24321296216b1344e461ee2aa9f71ee9f3ad | Python | MunskyGroup/rSNAPsim | /build/lib/rsnapsim/intensity_modifier.py | UTF-8 | 4,802 | 2.859375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 09:24:35 2020
@author: willi
"""
import pandas as pd
import matplotlib.pyplot as plt
import time
import numpy as np
def modify_intensity(intensity_vector, SNR, noise_type='AGWN'):
    '''
    Add noise to an intensity array at a given signal-to-noise ratio.

    The noise amplitude is derived from the signal in decibels:
    noise_par = 10**((10*log10(mean(signal)) - SNR) / 10).

    Parameters
    ----------
    intensity_vector : np.ndarray
        Intensity traces, assumed (trajectories x time) -- TODO confirm.
    SNR : float
        Target signal-to-noise ratio.
    noise_type : str, optional
        'AGWN' (additive white Gaussian noise, the default), 'poisson',
        or 'pink' (per-trajectory Voss-McCartney noise via voss()).

    Returns
    -------
    np.ndarray
        A new array: the input plus noise.

    Raises
    ------
    ValueError
        If noise_type is not one of the supported kinds.
    '''
    kind = noise_type.lower()
    if kind == 'agwn':
        av_intensity = np.mean(intensity_vector)
        sig_intensity = 10 * np.log10(av_intensity)
        noise_sig_db = sig_intensity - SNR
        noise_par = 10 ** (noise_sig_db / 10)
        return intensity_vector + noise_par * np.random.randn(*intensity_vector.shape)
    if kind == 'poisson':
        # NOTE(review): parameterizing the Poisson rate as var(signal)/SNR
        # is preserved from the original -- confirm the intended model.
        var_poisson = np.var(intensity_vector) / SNR
        # BUG FIX: the original called the nonexistent np.random.poission().
        return intensity_vector + np.random.poisson(var_poisson, size=intensity_vector.shape)
    if kind == 'pink':
        ivec = np.copy(intensity_vector)
        noise_par = 10 ** ((10 * np.log10(np.mean(ivec)) - SNR) / 10)
        for i in range(ivec.shape[0]):
            # voss() returns zero-mean, unit-variance pink noise.
            ivec[i, :] = ivec[i, :] + voss(ivec.shape[1]) * noise_par
        return ivec
    # BUG FIX: the original fell through and raised UnboundLocalError on
    # an unrecognized noise_type; fail with an explicit error instead.
    raise ValueError("unsupported noise_type: %r" % noise_type)
def slice_intensity(intensity_vector, total_frames, framerate):
    '''
    Reslice intensity traces into windows of ``total_frames * framerate`` samples.

    Trailing time points that do not fill a complete window are discarded,
    then the traces are reshaped so that every row is one window.

    Parameters
    ----------
    intensity_vector : np.ndarray
        2D array (trajectories x time points).
    total_frames : int
        Number of frames per window.
    framerate : int
        Samples per frame.

    Returns
    -------
    np.ndarray
        2D array of shape (n_windows, total_frames * framerate).
    '''
    window = int(framerate * total_frames)
    remove = intensity_vector.shape[1] % window
    # BUG FIX: the original unconditionally sliced [:, :-remove]; when the
    # trace length divides evenly (remove == 0) that slice is [:, :0] and
    # silently discarded *all* of the data.
    if remove:
        intensity_vector = intensity_vector[:, :-remove]
    n_windows = intensity_vector.shape[0] * intensity_vector.shape[1] // window
    return intensity_vector.reshape(n_windows, window)
def voss(nrows,ncols=16):
    """Generates standardized pink noise using the Voss-McCartney algorithm.
    https://www.dsprelated.com/showarticle/908.php
    nrows: number of values to generate
    ncols: number of random sources to add
    returns: NumPy array of standardized (zero mean, unit std) pink noise
    """
    # One column per random source; NaN cells inherit the previous value
    # via the forward-fill below, which is what gives the 1/f spectrum.
    array = np.empty((nrows, ncols))
    array.fill(np.nan)
    array[0, :] = np.random.random(ncols)
    array[:, 0] = np.random.random(nrows)
    # the total number of changes is nrows
    n = nrows
    # geometric distribution: source k is refreshed half as often as k-1
    cols = np.random.geometric(0.5, n)
    cols[cols >= ncols] = 0
    rows = np.random.randint(nrows, size=n)
    array[rows, cols] = np.random.random(n)
    df = pd.DataFrame(array)
    df.fillna(method='ffill', axis=0, inplace=True)
    # sum the sources, then standardize to zero mean / unit variance
    total = df.sum(axis=1)
    signal = total.values - np.mean(total.values)
    signal = signal/ np.std(signal)
    return signal
def make_training_dataset(dataset, framerate, total_frames, SNR, noise_type='agwn'):
    """Slice *dataset* into fixed-size windows, then corrupt them with noise.

    Thin convenience wrapper around slice_intensity() + modify_intensity().
    """
    windows = slice_intensity(dataset, total_frames, framerate)
    return modify_intensity(windows, SNR, noise_type=noise_type)
def make_testing_dataset(dataset, framerate, total_frames, SNR, noise_type='agwn'):
    """Subsample *dataset* at *framerate*, keep *total_frames* points, add noise."""
    subsampled = dataset[:, ::framerate][:, :total_frames]
    return modify_intensity(subsampled, SNR, noise_type=noise_type)
###############################
# How to set up the datasets
###############################
# Loop over framerate and SNR and save accuracy.
# Simulation/acquisition parameters swept in the experiments.
framerate = 1
total_frames = 2000
SNR = .2
# Load the three pre-simulated noise-free intensity data sets.
# NOTE(review): hard-coded absolute Windows paths -- these files must exist
# locally for the script to run.
A = np.loadtxt('C:/Users/willi/Documents/GitHub/rSNAPsim/large_no_noise/trainingA_1s.txt' ,delimiter = ',')
A_training = make_training_dataset(A, framerate,total_frames,SNR)
B = np.loadtxt('C:/Users/willi/Documents/GitHub/rSNAPsim/large_no_noise/trainingB_1s.txt' ,delimiter = ',')
B_training = make_training_dataset(B, framerate,total_frames,SNR)
C = np.loadtxt('C:/Users/willi/Documents/GitHub/rSNAPsim/large_no_noise/trainingC_1s.txt' ,delimiter = ',')
C_training = make_training_dataset(C,framerate,total_frames,SNR)
# testing = np.loadtxt('./mixture_framerate1s.txt' ,delimiter = ',')
# testing = make_testing_dataset(testing, framerate,total_frames,SNR)
#plt.scatter(A_training[:1,0:-10],A_training[:1,10:] ,alpha=.3 ); plt.scatter(B_training[:1,0:-10],B_training[:1,10:] ,alpha=.3 );plt.scatter(C_training[:1,0:-10],C_training[:1,10:] ,alpha=.3 )
| true |
951e322def932a4d7951f3fcd72ece9606cd8c49 | Python | michelleweii/Leetcode | /06_链表/142-环形链表 II.py | UTF-8 | 3,336 | 3.921875 | 4 | [] | no_license | """
middle 2021-12-23 链表
题目:判断环链表的入口位置——快慢指针
(推导+动图)https://leetcode-cn.com/problems/linked-list-cycle-ii/solution/linked-list-cycle-ii-kuai-man-zhi-zhen-shuang-zhi-/
"""
# a:起点到环入口的节点数(不包括入口)
# b:环节点数
# 根据: f=2s (快指针每次2步,路程刚好2倍)
# f=s+nb (相遇时,刚好多走了n圈), =>推出:s = nb。
# 从head结点走到入环点需要走:a+nb, 而slow已经走了nb,那么slow再走a步就是入环点了。
# (如果让指针从链表头部一直向前走并统计步数k,那么所有 走到链表入口节点时的步数 是:k=a+nb(先走a步到入口节点,之后每绕1圈环( b步)都会再次到入口节点)。)
# 如何知道slow刚好走了a步? 从head开始,和fast指针一起走,相遇时刚好就是a步。
class ListNode(object):
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # successor node; None marks the end of the list
class Solution(object):
    # Method 2 (preferred): Floyd's tortoise-and-hare cycle detection.
    #
    # Let a = distance from head to the cycle entry and b = cycle length.
    # At the first meeting, fast walked f = 2s and f = s + n*b (it lapped
    # slow n times), so s = n*b.  Any pointer reaches the entry after
    # a + n*b steps; slow already walked n*b, so restarting fast from head
    # and advancing both one step at a time makes them meet at the entry.
    def detectCycle(self, head):
        """Return the node where the cycle begins, or None if acyclic."""
        if not head or not head.next:
            return None
        slow, fast = head, head
        # Phase 1: look for a meeting point inside the cycle (if any).
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                break
        if slow != fast:
            return None  # fast ran off the end of the list: no cycle
        # Phase 2: restart fast from head; the next meeting is the entry.
        fast = head
        while fast != slow:
            fast = fast.next
            slow = slow.next
        return fast

    # Method 1 (original longhand version, kept for reference): same two
    # phases.  Cleaned up: the dead `pos` counter and commented-out
    # alternate returns were removed; behavior is unchanged.
    def detectCycle_old(self, head):
        p1 = head  # slow pointer
        p2 = head  # fast pointer
        encounter = head
        found = False
        while p2 and p2.next:
            p1 = p1.next
            p2 = p2.next.next
            if p1 == p2:
                encounter = p1  # meeting point inside the cycle
                found = True
                break
        if not found:
            return None  # no cycle in the list
        # The distance from head to the entry equals the distance from the
        # meeting point to the entry, so walk both until they coincide.
        start = head
        while start and encounter:
            if start == encounter:
                return start  # cycle entry
            start = start.next
            encounter = encounter.next
        return None
if __name__ == '__main__':
    # Build the classic test list 3 -> 2 -> 0 -> -4, with -4 looping
    # back to the node holding 2 (the cycle entry).
    a = ListNode(3)
    b = ListNode(2)
    c = ListNode(0)
    d = ListNode(-4)
    a.next = b
    b.next = c
    c.next = d
    d.next = b
    # prints the repr of the entry node object (the node with val == 2)
    print(Solution().detectCycle(a))
34cac449904f9f523788f2f93c780bfd4a7c28d9 | Python | AdolphGirL/Tensorflow-CNN-Model | /Vgg16-CiFar10-Training.py | UTF-8 | 2,282 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from model.Vgg import VGG16
import datetime
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras import datasets
from tensorflow.keras.utils import to_categorical
# Log prefix used by every print in this script.
logger_name = '[Vgg16-CiFar10-Training.py]: '
# CIFAR-10: 50k train / 10k test RGB images of shape (32, 32, 3).
(x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
print(logger_name + 'x_train shape: {}'.format(x_train.shape))
print(logger_name + 'y_train shape: {}'.format(y_train.shape))
print(logger_name + 'x_test shape: {}'.format(x_test.shape))
print(logger_name + 'y_test shape: {}'.format(y_test.shape))
# Preview the raw images (disabled):
# fig = plt.figure(figsize=(15, 7))
# fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
#
# for i in range(50):
#     # leave xticks/yticks empty
#     ax = fig.add_subplot(5, 10, i+1, xticks=[], yticks=[])
#     ax.imshow(x_test[i, :, :, :], cmap=plt.cm.gray_r, interpolation='nearest')
#     ax.text(0, 7, y_train[i], color='red')
#
# plt.show()
# one-hot encoding
num_classes = 10
y_train = to_categorical(y_train, num_classes=num_classes)
y_test = to_categorical(y_test, num_classes=num_classes)
model = VGG16(input_shape=(32, 32, 3), nb_classes=num_classes)
model.summary()
# TensorBoard run directory, timestamped so successive runs do not collide.
log_dir = "logs/vgg16/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# attach the TensorBoard callback used during training
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
# epochs kept low because of limited hardware
# verbose=2: one line per epoch
model.fit(x_train, y=y_train, epochs=5,
          validation_data=(x_test, y_test),
          callbacks=[tensorboard_callback], verbose=2)
# NOTE(review): predict_classes was removed in TF 2.6+; confirm the
# installed TensorFlow version still provides it.
prediction_values = model.predict_classes(x_test)
# Visualize the predictions: blue text = correct, red text = wrong.
fig = plt.figure(figsize=(15, 7))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(50):
    ax = fig.add_subplot(5, 10, i+1, xticks=[], yticks=[])
    ax.imshow(x_test[i, :, :, :], cmap=plt.cm.gray_r, interpolation='nearest')
    # y_test[i] is one-hot encoded, so take the argmax to recover the label
    if prediction_values[i] == np.argmax(y_test[i]):
        ax.text(0, 7, prediction_values[i], color='blue')
    else:
        ax.text(0, 7, prediction_values[i], color='red')
plt.show()
| true |
9b69ea894f8892673924bf4f71d9581e52c30174 | Python | kotabrog/K_DeZero | /kdezero/functions/basic_calc_functions.py | UTF-8 | 2,655 | 2.78125 | 3 | [
"MIT"
] | permissive | import kdezero
from kdezero import Function
from kdezero import as_array
class Add(Function):
    """Elementwise addition with broadcasting-aware backward."""
    def forward(self, x0, x1):
        # remember the raw input shapes so backward can undo broadcasting
        self.x0_shape, self.x1_shape = x0.shape, x1.shape
        y = x0 + x1
        return y

    def backward(self, gy):
        gx0, gx1 = gy, gy
        if self.x0_shape != self.x1_shape:
            # inputs were broadcast in forward; sum gradients back down
            gx0 = kdezero.functions.sum_to(gx0, self.x0_shape)
            gx1 = kdezero.functions.sum_to(gx1, self.x1_shape)
        return gx0, gx1
def add(x0, x1):
    """Return x0 + x1; x1 may be a plain scalar/array (converted to match x0)."""
    x1 = as_array(x1, kdezero.cuda.get_array_module(x0.data))
    return Add()(x0, x1)
class Mul(Function):
    """Elementwise multiplication with broadcasting-aware backward."""
    def forward(self, x0, x1):
        return x0 * x1

    def backward(self, gy):
        x0, x1 = self.inputs
        # product rule: d(x0*x1)/dx0 = x1, d(x0*x1)/dx1 = x0
        gx0 = gy * x1
        gx1 = gy * x0
        if x0.shape != x1.shape:
            gx0 = kdezero.functions.sum_to(gx0, x0.shape)
            gx1 = kdezero.functions.sum_to(gx1, x1.shape)
        return gx0, gx1
def mul(x0, x1):
    """Return x0 * x1; x1 may be a plain scalar/array (converted to match x0)."""
    x1 = as_array(x1, kdezero.cuda.get_array_module(x0.data))
    return Mul()(x0, x1)
class Neg(Function):
    """Elementwise negation."""
    def forward(self, x):
        return -x

    def backward(self, gy):
        # d(-x)/dx = -1
        return -gy
def neg(x):
    """Return -x as a kdezero operation."""
    return Neg()(x)
class Sub(Function):
    """Elementwise subtraction with broadcasting-aware backward."""
    def forward(self, x0, x1):
        # remember the raw input shapes so backward can undo broadcasting
        self.x0_shape, self.x1_shape = x0.shape, x1.shape
        return x0 - x1

    def backward(self, gy):
        gx0 = gy
        gx1 = -gy  # d(x0 - x1)/dx1 = -1
        if self.x0_shape != self.x1_shape:
            gx0 = kdezero.functions.sum_to(gx0, self.x0_shape)
            gx1 = kdezero.functions.sum_to(gx1, self.x1_shape)
        return gx0, gx1
def sub(x0, x1):
    """Return x0 - x1; x1 may be a plain scalar/array (converted to match x0)."""
    x1 = as_array(x1, kdezero.cuda.get_array_module(x0.data))
    return Sub()(x0, x1)
def rsub(x0, x1):
    """Return x1 - x0 (reflected subtraction, for __rsub__)."""
    x1 = as_array(x1, kdezero.cuda.get_array_module(x0.data))
    return Sub()(x1, x0)
class Div(Function):
    """Elementwise division with broadcasting-aware backward."""
    def forward(self, x0, x1):
        return x0 / x1

    def backward(self, gy):
        x0, x1 = self.inputs
        # quotient rule: d(x0/x1)/dx0 = 1/x1, d(x0/x1)/dx1 = -x0/x1**2
        gx0 = gy / x1
        gx1 = gy * (-x0 / x1 ** 2)
        if x0.shape != x1.shape:
            gx0 = kdezero.functions.sum_to(gx0, x0.shape)
            gx1 = kdezero.functions.sum_to(gx1, x1.shape)
        return gx0, gx1
def div(x0, x1):
    """Return x0 / x1; x1 may be a plain scalar/array (converted to match x0)."""
    x1 = as_array(x1, kdezero.cuda.get_array_module(x0.data))
    return Div()(x0, x1)
def rdiv(x0, x1):
    """Return x1 / x0 (reflected division, for __rtruediv__)."""
    x1 = as_array(x1, kdezero.cuda.get_array_module(x0.data))
    return Div()(x1, x0)
class Pow(Function):
    """Raise the input to a fixed constant power c."""
    def __init__(self, c):
        self.c = c  # exponent (a plain Python number, not a Variable)

    def forward(self, x):
        return x ** self.c

    def backward(self, gy):
        x = self.inputs[0]
        c = self.c
        # d(x**c)/dx = c * x**(c-1)
        gx = c * x ** (c - 1) * gy
        return gx
def pow(x, c):
    """Return x ** c for a constant exponent c (shadows the builtin pow)."""
    return Pow(c)(x)
| true |
c621a90e6240fb4810a17f29dfe6d137bf0f0ff1 | Python | kses1010/algorithm | /baekjoon/bronze/Number.py | UTF-8 | 192 | 3.375 | 3 | [] | no_license | # 10093
n1, n2 = map(int, input().split())
a = min(n1, n2)
b = max(n1, n2)
if a == b or a + 1 == b:
print(0)
else:
print(b - a - 1)
for i in range(a + 1, b):
print(i, end=' ')
| true |
5ecef39c78febba46d551ccb35df884a5c4b877a | Python | SingukMun/Python_Practice | /`21.07.28 입력한 변수 합계 프로그램.py | UTF-8 | 214 | 3.046875 | 3 | [] | no_license | aa=[]
for i in range(0, 4):
aa.append(0)
hap = 0
for i in range(0, 4) :
aa[i] = int(input( str(i + 1) + "번째 숫자 : "))
hap = aa[0] + aa[1] + aa[2] + aa[3]
print(" 합계 --> %d " % hap)
| true |
95de6c88595be1b3c186288c5e62766dd0dcce3c | Python | spweps/Day-1-Python | /dojos_and_ninjas/flask_app/models/dojo.py | UTF-8 | 1,442 | 2.640625 | 3 | [] | no_license | from flask_app.config.mysqlconnection import connectToMySQL
from .ninja import Ninja
class Dojo:
def __init__(self, data):
self.id = data['id']
self.name = data['name']
self.created_at = data['created_at']
self.updated_at = data['updated_at']
self.ninjas = []
@classmethod
def get_all(cls):
query = "SELECT * FROM dojos;"
results = connectToMySQL('dojo_ninjas').query_db(query)
dojos = []
for d in results:
dojos.append( cls(d) )
return dojos
@classmethod
def save(cls, data):
query= "INSERT INTO dojos (name) VALUES (%(name)s);"
result = connectToMySQL('dojo_ninjas').query_db(query,data)
return result
@classmethod
def get_one_with_ninjas(cls, data ):
query = "SELECT * FROM dojos LEFT JOIN ninjas on dojos.id = ninjas.dojo_id WHERE dojos.id = %(id)s;"
results = connectToMySQL('dojo_ninjas').query_db(query,data)
print(results)
dojo = cls(results[0])
for row in results:
n = {
'id': row['ninjas.id'],
'first_name': row['first_name'],
'last_name': row['last_name'],
'age': row['age'],
'created_at': row['ninjas.created_at'],
'updated_at': row['ninjas.updated_at']
}
dojo.ninjas.append( Ninja(n) )
return dojo | true |
00f4fce34bf050608a420877558998751258d860 | Python | mqinbin/python_leetcode | /1175.质数排列.py | UTF-8 | 832 | 2.796875 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=1175 lang=python3
#
# [1175] 质数排列
#
# @lc code=start
class Solution:
    def numPrimeArrangements(self, n: int) -> int:
        """Count permutations of 1..n where every prime sits at a prime index.

        Primes can only be permuted among the prime-valued positions and
        non-primes among the remaining ones, so the answer is
        p! * (n - p)!  (mod 1e9 + 7), where p = number of primes <= n.
        """
        MOD = 10 ** 9 + 7

        def count_primes(limit):
            # Sieve of Eratosthenes over [0, limit].
            flags = [True] * (limit + 1)
            flags[0:3] = [False, False, True]
            for candidate in range(2, limit + 1):
                if not flags[candidate]:
                    continue
                for multiple in range(candidate * 2, limit + 1, candidate):
                    flags[multiple] = False
            return sum(1 for marked in flags if marked)

        def factorial(k):
            product = 1
            for term in range(2, k + 1):
                product *= term
            return product

        prime_count = count_primes(n)
        return factorial(prime_count) * factorial(n - prime_count) % MOD
# @lc code=end
| true |
b3268193a4d73c47c25cff31cd581948080d2251 | Python | Maegereg/CryptoChallenges | /20.py | UTF-8 | 4,065 | 2.703125 | 3 | [] | no_license | import aes
import convert
repeatXor = __import__('6')
import xor
def generateCiphertexts():
plaintextFile = open("20.txt")
ciphertexts = []
key = aes.generateRandomKey()
for line in plaintextFile:
ciphertexts.append(aes.aesCTREncrypt(convert.b64ToByteString(line), key, 0))
plaintextFile.close()
return ciphertexts
def getMinLength(ciphertexts):
return min(map(len, ciphertexts))
def breakRepeatingCTR(ciphertexts):
minLength = getMinLength(ciphertexts)
repeatedXorCiphertext = "".join(map(lambda x: x[:minLength], ciphertexts))
key = repeatXor.getXorKey(repeatedXorCiphertext, minLength)
return key
if __name__ == "__main__":
ciphertexts = generateCiphertexts()
key = breakRepeatingCTR(ciphertexts)
for ciphertext in ciphertexts:
print xor.xorByteStrings(ciphertext, key)
''' Plaintext (partial):
I'm rated "R"...this is a warning, ya better void / P
Cuz I came back to attack others in spite- / Strike l
But don't be afraid in the dark, in a park / Not a sc
Ya tremble like a alcoholic, muscles tighten up / Wha
Suddenly you feel like your in a horror flick / You g
Music's the clue, when I come your warned / Apocalyps
Haven't you ever heard of a MC-murderer? / This is th
Death wish, so come on, step to this / Hysterical ide
Friday the thirteenth, walking down Elm Street / You
This is off limits, so your visions are blurry / All
Terror in the styles, never error-files / Indeed I'm
For those that oppose to be level or next to this / I
Worse than a nightmare, you don't have to sleep a win
Flashbacks interfere, ya start to hear: / The R-A-K-I
Then the beat is hysterical / That makes Eric go get
Soon the lyrical format is superior / Faces of death
MC's decaying, cuz they never stayed / The scene of a
The fiend of a rhyme on the mic that you know / It's
Melodies-unmakable, pattern-unescapable / A horn if w
I bless the child, the earth, the gods and bomb the r
Hazardous to your health so be friendly / A matter of
Shake 'till your clear, make it disappear, make the n
If not, my soul'll release! / The scene is recreated,
Cuz your about to see a disastrous sight / A performa
Lyrics of fury! A fearified freestyle! / The "R" is i
Make sure the system's loud when I mention / Phrases
You want to hear some sounds that not only pounds but
Then nonchalantly tell you what it mean to me / Stric
And I don't care if the whole crowd's a witness! / I'
Program into the speed of the rhyme, prepare to start
Musical madness MC ever made, see it's / Now an emerg
Open your mind, you will find every word'll be / Furi
Battle's tempting...whatever suits ya! / For words th
You think you're ruffer, then suffer the consequences
I wake ya with hundreds of thousands of volts / Mic-t
Novocain ease the pain it might save him / If not, Er
Yo Rakim, what's up? / Yo, I'm doing the knowledge, E
Well, check this out, since Norby Walters is our agen
Kara Lewis is our agent, word up / Zakia and 4th and
Okay, so who we rollin' with then? We rollin' with Ru
Check this out, since we talking over / This def beat
I wanna hear some of them def rhymes, you know what I
Thinkin' of a master plan / 'Cuz ain't nuthin' but sw
So I dig into my pocket, all my money is spent / So I
So I start my mission, leave my residence / Thinkin'
I need money, I used to be a stick-up kid / So I thin
I used to roll up, this is a hold up, ain't nuthin' f
But now I learned to earn 'cuz I'm righteous / I feel
Search for a nine to five, if I strive / Then maybe I
So I walk up the street whistlin' this / Feelin' out
A pen and a paper, a stereo, a tape of / Me and Eric
Fish, which is my favorite dish / But without no mone
'Cuz I don't like to dream about gettin' paid / So I
So now to test to see if I got pull / Hit the studio,
Rakim, check this out, yo / You go to your girl house
'Cause my girl is definitely mad / 'Cause it took us
Yo, I hear what you're saying / So let's just pump th
And count our money / Yo, well check this out, yo Eli
Turn down the bass down / And let the beat just keep
And we outta here / Yo, what happened to peace? / Pea
''' | true |
0db0ded497fac7e2bb584d63526480759c552c21 | Python | danielthiel/thielbots | /kit/codejail.py | UTF-8 | 1,754 | 2.78125 | 3 | [] | no_license | import imp
from RestrictedPython.Guards import safe_builtins
import random
class SecurityError(Exception):
    """Raised when sandboxed player code attempts a forbidden operation.

    BUG FIX: the original was a plain class that did not derive from
    Exception, so ``raise SecurityError(...)`` is illegal on Python 3
    (and only worked as a deprecated old-style raise on Python 2).
    Deriving from Exception also lets callers catch it normally.
    """
    def __init__(self, player_id, message):
        Exception.__init__(self, message)
        self.player_id = player_id  # id of the offending player
        self.message = message      # human-readable reason
class PlayerCodeJail:
    """Execute untrusted player code inside a RestrictedPython sandbox.

    (Python 2 code: uses ``exec ... in`` and ``level=-1`` imports.)
    """
    allowed_imports = []  # module names player code may import
    allowed_magic = []    # dunder names (without underscores) players may use

    def __init__(self, player_id, code):
        self.player_id = player_id
        # Temporarily rename the whitelisted __dunder__ names to a random
        # suffix so the blanket '__' ban below does not reject them, then
        # rename them back after the check.
        suffix = str(random.randint(19319385, 1398513985))
        for s in PlayerCodeJail.allowed_magic:
            code = code.replace('__%s__' % s, s + suffix)
        if '__' in code:
            self.halt('not allowed to use \'__\' in your code')
        for s in PlayerCodeJail.allowed_magic:
            code = code.replace(s + suffix, '__%s__' % s)
        # Run the player code in a fresh module with restricted builtins
        # and hooked __import__/getattr.
        self.mod = imp.new_module('usercode%d' % id(self))
        self.mod.__dict__['__builtins__'] = safe_builtins
        self.mod.__dict__['__builtins__']['__import__'] = PlayerCodeJail.create_import_hook(self)
        self.mod.__dict__['__builtins__']['getattr'] = PlayerCodeJail.create_getattr_hook(self)
        exec code in self.mod.__dict__

    def halt(self, msg):
        """Abort the player's execution with a SecurityError."""
        raise SecurityError(self.player_id, msg)

    @staticmethod
    def create_import_hook(self):
        # Closure capturing the jail, installed as the sandbox's __import__.
        def import_hook(name, globals=None, locals=None, fromlist=None, level=-1):
            if name not in PlayerCodeJail.allowed_imports:
                self.halt('not allowed to import %s' % name)
            return __import__(name, globals, locals, fromlist, level)
        return import_hook

    @staticmethod
    def create_getattr_hook(self):
        # Closure capturing the jail, installed as the sandbox's getattr;
        # blocks dunder access at runtime (e.g. via getattr(obj, name)).
        def getattr_hook(obj, key):
            if '__' in key:
                self.halt('not allowed to access a key containing \'__\'')
            return getattr(obj, key)
        return getattr_hook
| true |
b58447cb4a46adc3586c87cf852738d5eba3c24c | Python | ArtrixTech/AppleStockMonitor | /data_parser.py | UTF-8 | 4,282 | 2.703125 | 3 | [] | no_license | from os import getenv
import requests
import json
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
# Format {0} as partNbrs
BASE_URI = "https://www.apple.com/hk-zh/shop/fulfillment-messages?pl=true&mt=compact{0}&searchNearby=true&store={1}"
IP_LOOKUP_URI = "http://ip-api.com/json/"
NEAR_BY_STORE_URI = "https://www.apple.com/rsp-web/store-search?locale={0}&lat={1}&long={2}"
class Configuration:
    """Thin wrapper around a JSON configuration file."""

    def __init__(self, filename):
        if not filename:
            # no file given: nothing sensible to do for this CLI tool
            print("No configuration was provided.")
            exit(0)
        with open(filename) as json_raw:
            self.__json_loaded = json.loads(json_raw.read())

    def get_item(self, name):
        """Return the configured value for *name*; raise KeyError if absent."""
        try:
            return self.__json_loaded[name]
        except KeyError:
            raise KeyError("No such config item:", name)

    def set_item(self, name, val):
        """Add or override a configuration entry in memory (not persisted)."""
        self.__json_loaded[name] = val
def cut_string(input_str, head, tail):
    """Return the substring of *input_str* between the first *head* and the
    next *tail* after it (neither delimiter included).

    Raises
    ------
    TypeError
        If any argument is not a string.
    AttributeError
        If *head* is missing, or *tail* does not occur after it.
    """
    if not (isinstance(input_str, str) and isinstance(head, str) and isinstance(tail, str)):
        raise TypeError("Inputs are not string!")
    head_at = input_str.find(head)
    # BUG FIX: the original computed start = find(head) + len(head) and only
    # THEN compared start to -1, so a missing head was never detected.
    if head_at == -1:
        raise AttributeError("Head not found in target.")
    start = head_at + len(head)
    end = input_str.find(tail, start)
    if end == -1:
        raise AttributeError("Tail not found in target.")
    # slicing replaces the original character-by-character copy loop
    return input_str[start:end]
def get_locale(store_region):
    """Scrape the locale (e.g. 'zh_HK') from an Apple regional store page.

    Fetches https://www.apple.com/<store_region>, extracts the lang="..."
    attribute of the <html ...> tag, and converts '-' to '_'.
    """
    uri = "https://www.apple.com/" + store_region
    content = requests.get(uri).text
    return cut_string(cut_string(content, "<html", ">"), 'lang="', '"').replace('-', '_')
def get_nearest_store_nbr(config):
    """Return the Apple store number closest to this machine's IP geolocation.

    Looks up latitude/longitude via ip-api.com, then queries Apple's
    store-search endpoint and takes the first (nearest) result.
    """
    loc = json.loads(requests.get(IP_LOOKUP_URI).text)
    # print(loc)
    near_by_stores = json.loads(requests.get(NEAR_BY_STORE_URI.format(
        config.get_item("locale"), loc['lat'], loc['lon'])).text)
    # print(NEAR_BY_STORE_URI.format(config.get_item("locale"), loc['lat'], loc['lon']))
    nbs_number = near_by_stores['results'][0]['storeNumber']
    # print(nbs_number)
    return nbs_number
def gen_part_nbr_format(partNbrs):
    """Build the '&parts.N=PART' query-string fragment for the fulfillment API.

    e.g. ["MQ8N3", "MQ933"] -> "&parts.0=MQ8N3&parts.1=MQ933"
    """
    # enumerate replaces the original hand-maintained index counter (the
    # original's loop variable was never even used); join avoids the
    # quadratic string concatenation.
    return "".join('&parts.{0}={1}'.format(index, part)
                   for index, part in enumerate(partNbrs))
def fetch():
    """Query Apple's fulfillment API for pickup availability of the
    configured part numbers at the nearest store.

    Returns (partNbrs, partNames, availability, displayText) where the
    last three are keyed by part number, and availability/displayText are
    further keyed by store name.
    """
    config = Configuration("config.json")
    config.set_item('locale', get_locale(config.get_item('store_region')))
    partNbrs = config.get_item('partNames')
    nbs_number = get_nearest_store_nbr(config)
    stock_query_uri = BASE_URI.format(
        gen_part_nbr_format(partNbrs), nbs_number)
    stock_result = json.loads(requests.get(stock_query_uri).text)
    # print(stock_query_uri)
    stores = stock_result['body']['content']['pickupMessage']['stores']
    availability = {}
    partNames = {}
    displayText = {}
    for partNbr in partNbrs:
        availability[partNbr] = {}
        # product title taken from the first store (same for every store)
        partNames[partNbr] = stores[0]['partsAvailability'][partNbr]['storePickupProductTitle']
        displayText[partNbr] = {}
        for store in stores:
            # True when the API reports the part as pickup-available there
            availability[partNbr][store['storeName']] = (
                store['partsAvailability'][partNbr]['pickupDisplay'] == 'available')
            displayText[partNbr][store['storeName']
                                 ] = store['partsAvailability'][partNbr]['pickupSearchQuote']
    return partNbrs, partNames, availability, displayText
if __name__ == '__main__':
    # Print one line per part (emoji marks sold out / in stock), then the
    # per-store pickup quote for every store that has it.
    partNbrs, partNames, result,displayText = fetch()
    for partNbr in partNbrs:
        no_stock = True
        for storeName in result[partNbr]:
            if result[partNbr][storeName]:
                no_stock = False
        print(partNames[partNbr], '🈚️'if no_stock else '✅')
        if not no_stock:
            for storeName in result[partNbr]:
                if result[partNbr][storeName]:
                    print(' ', storeName,
                          displayText[partNbr][storeName])
        # print(' ', store['storeName'], store['partsAvailability'][partNbr]['pickupSearchQuote'])
        #
        #print(' > ', store['pickupEncodedUpperDateString'])
e3cc439f68a9d7d14d90f764bfc189f43c6a454b | Python | daniel-ntr/Python | /CursoEmVideo/desafio073.py | UTF-8 | 776 | 4.125 | 4 | [] | no_license | # RETORNA NUMERO POR EXTENSO
# Spell out a number from 0 to 20 in Portuguese; loop until the user quits.
extenso = ('Zero', 'Um', 'Dois', 'Três', 'Quatro', 'Cinco', 'Seis', 'Sete',
           'Oito', 'Nove', 'Dez', 'Onze', 'Doze', 'Treze', 'Quatorze',
           'Quinze', 'Dezesseis', 'Dezessete', 'Dezoito', 'Dezenove', 'Vinte')
num = int(input('Digite um número entre 0 e 20: ').strip())
while True:
    # re-prompt until the number is within the tuple's 0..20 range
    while not -1 < num < 21:
        num = int(input('Tente novamente. Digite um número entre 0 e 20: '))
    print(f'Você digitou o número {extenso[num]}')
    continuar = ' '
    # NOTE(review): the substring test means an empty answer ('' in 'SN' is
    # True) also exits this prompt loop -- confirm if that is intended.
    while continuar not in 'SN':
        continuar = input('Deseja continuar? [S/N]').strip().upper()
        if continuar == 'S':
            # keep going: read the next number and leave the prompt loop
            num = int(input('Digite um número entre 0 e 20: ').strip())
            break
    if continuar == 'N':
        break
| true |
eedacc0f35bffa1274dfd07ad69efc171f2cd635 | Python | ericosur/ericosur-snippet | /python3/os_path_join.py | UTF-8 | 429 | 3.265625 | 3 | [] | no_license | #!/usr/bin/python
# coding: utf-8
#
'''
demo how to use os.path.join()
It will take care the dir seperator from different OS.
'''
import os.path
def main():
    ''' Demonstrate os.path.join joining a directory and generated filenames. '''
    # os.path.join inserts the platform's own directory separator
    print('demo os.path.join()')
    out_dir = 'output'
    for idx in range(6):
        frame_name = f'frame_{idx:05d}.png'
        print(os.path.join(out_dir, frame_name))
if __name__ == '__main__':
main()
| true |
0a024b7d404787f2b34658ad60e586228bd7946e | Python | XxdpavelxX/HackerRank | /30daysOfCode/Loops5.py | UTF-8 | 168 | 3.40625 | 3 | [] | no_license | #https://www.hackerrank.com/challenges/30-loops/problem
import sys
# Python 2: raw_input reads one line from stdin
n = int(raw_input().strip())
i = 1
# print the multiplication table of n, from n x 1 up to n x 10
while i <= 10:
    print "%s x %s = %s"%(n, i, i*n)
    i += 1
| true |
0d20402977a800d257d2908ef1ce7e6b8e62e2a4 | Python | HDPark95/algorithm | /greedy/number_card_game.py | UTF-8 | 775 | 3.078125 | 3 | [] | no_license | """
여 개의 숫자 카드 중에서 가장 높은 숫자가 쓰인 카드 한 장을 뽑는 겡미이다.
단, 게임의 룰을 지키며 카드를 뽑아야 하고 룰은 다음과 같다.
1. 숫자가 쓰인 카드들이 N x M 형태로 놓여있다. 이때 N은 행의 개수를 의미하여, M은 열의 개수를 의미한다.
2. 먼저 뽑고자 하는 카드가 포함되어 있는 행을 선택한다.
3. 그다음 선택된 행에 포함된 카드들 중 가장 숫자가 낮은 카드를 뽀아야 한다.
4. 따라서 처음에 카드를 골라낼 행을 선택할 때, 이후에 해당 행에서 가장 숫자가 낮은 카드를 뽑을 것을 고려하여 최종적으로 가장 높은 숫자의 카드를
뽑을 수 있도록 전략을 세워야한다.
"""
| true |
f92062e2e71a58f7c20ee860a3f9c19aca5c8037 | Python | Ruijan/flask_companies | /src/cache/local_history_cache.py | UTF-8 | 22,160 | 2.609375 | 3 | [] | no_license | import math
import os
import time
from datetime import datetime, timedelta
import yfinance as yf
import fmpsdk
from urllib.request import urlopen
import json
import pandas as pd
from src.currency import Currency
Y_M_D = "%Y-%m-%d"
FINANCE_KEY_ = os.environ["FINANCE_KEY"]
def get_range(end_date, period, start_date):
    """Resolve an (end_date, start_date) pair from optional explicit dates.

    *period* ("5d", "2w", "3m", "1y" or "max") is only consulted when
    start_date is None; a missing end_date defaults to today.  Months and
    years are approximated as 4 and 52 weeks respectively.
    """
    if end_date is None:
        end_date = datetime.today()
    if start_date is not None:
        return end_date, start_date
    if period == "max":
        return end_date, datetime(1900, 1, 1)
    # split the period string into its numeric part and its unit letters
    digits = "".join(ch for ch in period if ch.isdigit())
    unit = "".join(ch for ch in period if not ch.isdigit())
    amount = int(digits)
    if unit == "d":
        start_date = end_date - timedelta(days=amount)
    elif unit == "w":
        start_date = end_date - timedelta(weeks=amount)
    elif unit == "m":
        start_date = end_date - timedelta(weeks=4 * amount)
    elif unit == "y":
        start_date = end_date - timedelta(weeks=52 * amount)
    return end_date, start_date
def rearrange_data_in_dictionnary(data, tickers, end_date, list_tickers, start_date):
    """Re-index raw FMP 'historical' payloads into per-ticker date dicts.

    Returns (all_dividends, all_prices): for every requested ticker, a dict
    keyed by each calendar day in [start_date, end_date); prices default to
    NaN and are filled from the payload's adjClose values when the ticker
    appears in *list_tickers* (an index into *data*); dividends are all 0.
    """
    day_count = (end_date - start_date).days
    day_keys = [(start_date + timedelta(days=offset)).strftime("%Y-%m-%d")
                for offset in range(day_count)]
    all_prices = {}
    all_dividends = {}
    for ticker in tickers:
        price_by_day = dict.fromkeys(day_keys, math.nan)
        if ticker in list_tickers:
            for payment in data[list_tickers[ticker]]["historical"]:
                price_by_day[payment["date"]] = payment["adjClose"]
        all_prices[ticker] = price_by_day
        all_dividends[ticker] = dict.fromkeys(day_keys, 0)
    return all_dividends, all_prices
class LocalHistoryCache(dict):
__instance = None
__source = "grep"
    @staticmethod
    def get_instance():
        """Return the singleton cache, creating it on first use."""
        if LocalHistoryCache.__instance is not None:
            return LocalHistoryCache.__instance
        else:
            # the constructor registers itself as the singleton
            return LocalHistoryCache()
    def __init__(self):
        """Register this instance as the singleton; a second call raises."""
        super().__init__()
        if LocalHistoryCache.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            LocalHistoryCache.__instance = self
    def get_currency_ticker(self, currency1, currency2):
        """Return the FX-pair ticker in the notation of the active data source
        (FMP/"grep" vs. Yahoo)."""
        if self.__source == "grep":
            return get_currency_ticker_grep(currency1, currency2)
        return get_currency_ticker_yahoo(currency1, currency2)
def get_forex(self, currency1, currency2, start_date=None, end_date=None, period="max"):
if self.__source == "grep":
ticker = get_currency_ticker_grep(currency1, currency2)
else:
ticker = get_currency_ticker_yahoo(currency1, currency2)
return self.get(ticker, start_date, end_date, period)
def get_market_index_ticker(self, index_name):
if self.__source == "grep":
return index_name.replace("^", "%5E")
return index_name
    def get(self, key, start_date=None, end_date=None, period="max"):
        """Return *key*'s price history restricted to the requested range,
        fetching/refreshing the cache entry first.

        NOTE(review): this shadows dict.get with incompatible semantics
        (it downloads data and returns a DataFrame, never a default) --
        confirm no caller relies on the dict behavior.
        """
        end_date, start_date = get_range(end_date, period, start_date)
        self[key] = self.fetch_history(key, start_date, end_date, period)
        # work on a copy so slicing does not mutate the cached frame
        hist = self[key]["history"].copy()
        hist.index = pd.to_datetime(hist.index)
        mask = (hist.index >= start_date) & (hist.index <= end_date)
        return hist.loc[mask]
    def get_last_day(self, key):
        """Return the most recent cached row (e.g. latest close) for *key*,
        downloading a one-day quote on a cache miss.

        NOTE(review): in the "grep" branch fmpsdk.quote_short returns a
        plain list, yet the code below indexes it with .loc like a
        DataFrame -- verify this path actually works.
        """
        if key not in self:
            if self.__source != "grep":
                history = yf.Ticker(key).history(period="1d")
            else:
                history = fmpsdk.quote_short(apikey=os.environ["FINANCE_KEY"], symbol=key)
            self[key] = {"history": history,
                         "last_update": datetime.now(),
                         "start_date": datetime.today() - timedelta(days=1),
                         "end_date": datetime.today()}
        # drop duplicate index entries, then take the row at the max date
        self[key]["history"] = self[key]["history"].loc[~self[key]["history"].index.duplicated(keep='first')]
        last_day_close = self[key]["history"].loc[self[key]["history"].index.max(), :]
        return last_day_close
    def fetch_history(self, key, start_date=None, end_date=None, period="max"):
        """Build (or extend) the cache entry for *key* over the given range.

        Cache miss: download the full range from FMP ("grep") or Yahoo.
        Cache hit: if the requested start precedes the cached start, download
        only the missing earlier slice and prepend it.  Returns a dict with
        keys "history" (DataFrame with Close/Dividends), "last_update",
        "start_date" and "end_date".
        """
        end_date, start_date = get_range(end_date, period, start_date)
        temp_data = None
        if key not in self.keys():
            start = time.time()
            if self.__source == "grep":
                # FMP returns dividends and daily closes as separate payloads
                dividends = fmpsdk.historical_stock_dividend(apikey=os.environ["FINANCE_KEY"], symbol=key)
                prices = fmpsdk.historical_price_full(apikey=os.environ["FINANCE_KEY"], symbol=key,
                                                      series_type="line",
                                                      from_date=start_date.strftime(Y_M_D),
                                                      to_date=end_date.strftime(Y_M_D))
                # pre-seed one NaN per calendar day so non-trading days are
                # later filled by ffill/bfill
                price_history = {(start_date + timedelta(days=x)).strftime(Y_M_D): math.nan for x in
                                 range((end_date - start_date).days)}
                dividends_history = {(start_date + timedelta(days=x)).strftime(Y_M_D): math.nan for x in
                                     range((end_date - start_date).days)}
                for payment in prices["historical"]:
                    price_history[payment["date"]] = payment["close"]
                if "historical" in dividends:
                    for payment in dividends["historical"]:
                        dividends_history[payment["date"]] = payment["adjDividend"]
                df = pd.DataFrame(price_history.values(), index=list(price_history.keys()), columns=["Close"])
                df2 = pd.DataFrame(dividends_history.values(), index=list(dividends_history.keys()), columns=["Dividends"])
                result = pd.concat([df, df2], axis=1, sort=False)
                result = result.fillna(method='ffill')
                history = result.fillna(method='bfill')
                history.index.name = "Date"
            else:
                history = yf.Ticker(key).history(start=start_date, end=end_date)
            temp_data = {"history": history,
                         "last_update": datetime.now(),
                         "start_date": start_date,
                         "end_date": end_date}
            print("GET COMPANY HISTORY %s seconds ---" % (time.time() - start))
        elif key in self:
            temp_data = self[key].copy()
            diff_time = self[key]["start_date"] - start_date
            if diff_time.days > 0:
                # requested window starts earlier than the cached one:
                # fetch only the missing head, up to the cached start - 1 day
                if self.__source == "grep":
                    dividends = fmpsdk.historical_stock_dividend(apikey=os.environ["FINANCE_KEY"], symbol=key)
                    prices = fmpsdk.historical_price_full(apikey=os.environ["FINANCE_KEY"], symbol=key,
                                                          series_type="line",
                                                          from_date=start_date.strftime(Y_M_D),
                                                          to_date=(self[key]["start_date"] - timedelta(
                                                              days=1)).strftime(Y_M_D))
                    price_history = {(start_date + timedelta(days=x)).strftime(Y_M_D): math.nan for x in
                                     range((end_date - start_date).days)}
                    dividends_history = {(start_date + timedelta(days=x)).strftime(Y_M_D): math.nan for x in
                                         range((end_date - start_date).days)}
                    for payment in prices["historical"]:
                        price_history[payment["date"]] = payment["close"]
                    if "historical" in dividends:
                        for payment in dividends["historical"]:
                            dividends_history[payment["date"]] = payment["adjDividend"]
                    df = pd.DataFrame(price_history.values(), index=list(price_history.keys()), columns=["Close"])
                    df2 = pd.DataFrame(dividends_history.values(), index=list(dividends_history.keys()), columns=["Dividends"])
                    added_history = pd.concat([df, df2], axis=1, sort=False)
                    added_history.index.name = "Date"
                    added_history.index = pd.to_datetime(added_history.index)
                    added_history.sort_index(inplace=True)
                else:
                    added_history = yf.Ticker(key).history(start=start_date,
                                                           end=self[key]["start_date"] - timedelta(days=1))
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # confirm the pinned pandas version still provides it.
                temp_data["history"].index = pd.to_datetime(temp_data["history"].index)
                temp_data["history"] = temp_data["history"].append(added_history).sort_values(by=["Date"],
                                                                                              ascending=True)
                temp_data["start_date"] = start_date
        temp_data["history"]["Close"] = temp_data["history"]["Close"].fillna(method='ffill').fillna(method='bfill')
        # overlapping days between cached and freshly-fetched slices are
        # de-duplicated here, keeping the first occurrence
        temp_data["history"] = temp_data["history"].loc[~temp_data["history"].index.duplicated(keep='first')]
        return temp_data
    def fetch_multiple_histories(self, keys, currency_tickers, index_tickers, start_date=None, end_date=None,
                                 period="max"):
        """Fetch histories for many stock/currency/index tickers at once.

        Tickers already present in the cache are returned from it; the rest
        are downloaded in one batch (FMP via the grep helper, otherwise a
        single multi-ticker yfinance download).  Returns a dict mapping
        ticker -> cache-entry dict (same shape as fetch_history's result).
        """
        end_date, start_date = get_range(end_date, period, start_date)
        ticker_to_download = [key for key in keys if key not in self]
        currency_to_download = [key for key in currency_tickers if key not in self]
        index_to_download = [key for key in index_tickers if key not in self]
        keys_in_cache = [key for key in keys + currency_tickers + index_tickers if key in self]
        results = {}
        if len(ticker_to_download + currency_to_download + index_to_download) > 0:
            if self.__source == "grep":
                data = self.fetch_history_multiple_ticker_grep(end_date, ticker_to_download, currency_to_download,
                                                               index_to_download, start_date)
            else:
                tickers = " ".join(ticker_to_download + currency_to_download + index_to_download)
                data = yf.download(tickers, start=start_date, end=end_date, group_by='ticker', auto_adjust=True,
                                   actions=True)
                data.index.name = "Date"
            for key in data.columns:
                # with a single ticker yfinance returns a flat frame rather
                # than one grouped by ticker
                if len(ticker_to_download + currency_to_download + index_to_download) > 1:
                    temp_data = data[key[0]]
                else:
                    temp_data = data
                temp_data = temp_data.fillna(method='ffill')
                temp_data = temp_data.fillna(method='bfill')
                temp_data = temp_data.loc[~temp_data.index.duplicated(keep='first')]
                results[key[0]] = {"history": temp_data,
                                   "last_update": datetime.now(),
                                   "start_date": start_date,
                                   "end_date": end_date}
        for key in keys_in_cache:
            results[key] = self[key]
        return results
    def fetch_history_multiple_ticker_grep(self, end_date, stock_keys, currency_tickers, index_tickers, start_date):
        """Build a (Ticker, Parameters) MultiIndex frame of Close/Dividends via the FMP API.

        For ranges longer than one day the full historical series is fetched;
        otherwise only today's quotes are retrieved.  Returns a single wide
        DataFrame (one Close and one Dividends column per ticker), or None when
        nothing was fetched.
        """
        today = datetime.today()
        today = today.replace(hour=0, minute=0, second=0, microsecond=0)
        # FMP needs "^" URL-escaped in index symbols (e.g. ^GSPC -> %5EGSPC).
        formatted_index_tickers = [stock_key.replace("^", "%5E") for stock_key in index_tickers]
        # Retrieve Index market
        all_indices_div, all_indices_price = self.retrieve_index(end_date, formatted_index_tickers, index_tickers,
                                                                 start_date)
        # Retrieve currencies
        all_currencies_div, all_currencies_price = self.retrieve_currencies(currency_tickers, end_date, start_date)
        # Retrieve dividends
        all_dividends = self.extract_dividends(end_date, start_date, stock_keys)
        all_dividends.update(all_currencies_div)
        all_dividends.update(all_indices_div)
        data = None
        # Retrieve price
        if (end_date - start_date).days > 1:
            prices = retrieve_data_five_by_five(stock_keys, fmpsdk.historical_price_full, series_type="line",
                                                from_date=start_date.strftime(Y_M_D),
                                                to_date=end_date.strftime(Y_M_D))
            # Map symbol -> position in the response, skipping empty payloads.
            list_prices = {prices[index]["symbol"]: index for index in range(len(prices)) if prices[index]}
            all_prices = {}
            for stock_name in stock_keys:
                # Pre-fill every calendar day with NaN, then overwrite with quotes.
                price_history = {(start_date + timedelta(days=x)).strftime(Y_M_D): math.nan for x in
                                 range((end_date - start_date).days)}
                if stock_name in list_prices:
                    for payment in prices[list_prices[stock_name]]["historical"]:
                        price_history[payment["date"]] = payment["close"]
                all_prices[stock_name] = price_history
            all_prices.update(all_currencies_price)
            all_prices.update(all_indices_price)
            for company in all_prices.keys():
                header = pd.MultiIndex.from_product([[company], ["Close"]], names=['Ticker', 'Parameters'])
                header2 = pd.MultiIndex.from_product([[company], ["Dividends"]],
                                                     names=['Ticker', 'Parameters'])
                df = pd.DataFrame(all_prices[company].values(), index=all_prices[company].keys(), columns=header)
                df2 = pd.DataFrame(all_dividends[company].values(), index=list(all_dividends[company].keys()),
                                   columns=header2)
                result = pd.concat([df, df2], axis=1, sort=False)
                result = result.fillna(method='ffill')
                result = result.fillna(method='bfill')
                data = result if data is None else pd.concat([data, result], axis=1, sort=False)
        else:
            # One-day window: use lightweight quote endpoints instead.
            all_prices = fmpsdk.quote_short(apikey=FINANCE_KEY_, symbol=','.join(stock_keys))
            all_prices += fmpsdk.quote_short(apikey=FINANCE_KEY_, symbol=','.join(formatted_index_tickers))
            data2 = fmpsdk.forex(apikey=FINANCE_KEY_)
            for fx in data2:
                currencies = fx["ticker"].split("/")
                try:
                    # Try both orientations of the pair; invert the ask price for the reverse one.
                    first_currency_combi = self.get_currency_ticker(Currency(currencies[0]), Currency(currencies[1]))
                    second_currency_combi = self.get_currency_ticker(Currency(currencies[1]), Currency(currencies[0]))
                    if first_currency_combi in currency_tickers:
                        all_prices += [{"symbol": first_currency_combi, "price": float(fx["ask"])}]
                    elif second_currency_combi in currency_tickers:
                        all_prices += [{"symbol": second_currency_combi, "price": 1 / float(fx["ask"])}]
                except AttributeError:
                    # presumably raised by Currency() for unknown codes — TODO confirm
                    pass
            for company in all_prices:
                symbol = company["symbol"]
                header = pd.MultiIndex.from_product([[symbol], ["Close"]], names=['Ticker', 'Parameters'])
                header2 = pd.MultiIndex.from_product([[symbol], ["Dividends"]], names=['Ticker', 'Parameters'])
                df = pd.DataFrame([company["price"]], index=[today.strftime(Y_M_D)], columns=header)
                df2 = pd.DataFrame(all_dividends[symbol].values(), index=list(all_dividends[symbol].keys()),
                                   columns=header2)
                result = pd.concat([df, df2], axis=1, sort=False)
                result = result.fillna(method='ffill')
                result = result.fillna(method='bfill')
                data = result if data is None else pd.concat([data, result], axis=1, sort=False)
        return data
    def retrieve_currencies(self, currency_tickers, end_date, start_date):
        """Fetch FX histories for *currency_tickers* and split them into dividend/price dicts.

        FMP returns FX symbols with a slash (e.g. "EUR/USD"); the slash is
        stripped so keys match the internal ticker format.  The actual
        reshaping is delegated to rearrange_data_in_dictionnary (defined
        elsewhere in this module).
        """
        currencies = retrieve_data_five_by_five(currency_tickers, fmpsdk.historical_price_full,
                                                from_date=start_date.strftime(Y_M_D), to_date=end_date.strftime(Y_M_D))
        list_currencies = {currencies[index]["symbol"].replace("/", ""): index for index in range(len(currencies)) if
                           currencies[index]}
        all_currencies_div, all_currencies_price = rearrange_data_in_dictionnary(currencies, currency_tickers,
                                                                                 end_date, list_currencies,
                                                                                 start_date)
        return all_currencies_div, all_currencies_price
    def retrieve_index(self, end_date, formatted_index_tickers, index_tickers, start_date):
        """Fetch market-index histories and split them into dividend/price dicts.

        *formatted_index_tickers* are the URL-escaped symbols sent to FMP;
        *index_tickers* are the original symbols used as output keys.
        """
        indices = retrieve_data_five_by_five(formatted_index_tickers, fmpsdk.historical_price_full,
                                             from_date=start_date.strftime(Y_M_D), to_date=end_date.strftime(Y_M_D))
        # A single-symbol request returns a bare dict instead of a list.
        if not isinstance(indices, list):
            indices = [indices]
        list_indices = {indices[index]["symbol"]: index for index in range(len(indices)) if indices[index]}
        all_indices_div, all_indices_price = rearrange_data_in_dictionnary(indices, index_tickers, end_date,
                                                                           list_indices, start_date)
        return all_indices_div, all_indices_price
    def extract_dividends(self, end_date, start_date, stock_keys):
        """Return {ticker: {date string: dividend amount}} for every day in the range.

        Days without a payment are pre-filled with 0; only payments that carry
        a recordDate are taken into account.
        """
        dividends = retrieve_data_five_by_five(stock_keys, fmpsdk.historical_stock_dividend)
        list_dividends = {dividends[index]["symbol"]: index for index in range(len(dividends)) if dividends[index]}
        all_dividends = {}
        for stock_name in stock_keys:
            # Zero-filled calendar covering [start_date, end_date).
            dividends_history = {(start_date + timedelta(days=x)).strftime(Y_M_D): 0 for x in
                                 range((end_date - start_date).days)}
            if stock_name in list_dividends:
                for payment in dividends[list_dividends[stock_name]]["historical"]:
                    if payment["recordDate"]:
                        dividends_history[payment["date"]] = payment["adjDividend"]
            all_dividends[stock_name] = dividends_history
        return all_dividends
def update_history(self, key, new_value):
if key in self:
self[key].update(new_value)
else:
self[key] = new_value
    def today_update_from_transactions(self, transactions, company_cache, currency):
        """Refresh today's quotes for every ticker (and FX pair) referenced by *transactions*."""
        tickers = []
        currency_tickers = []
        index_tickers = []
        for txn in transactions:
            if txn["ticker"] not in tickers:
                tickers.append(txn["ticker"])
            # FX pair converting the company's trading currency into *currency*.
            currency_ticker = self.get_currency_ticker(Currency(company_cache[txn["ticker"]]["currency"]), currency)
            # len > 3 presumably filters out the empty ticker returned for
            # same-currency pairs — TODO confirm against get_currency_ticker.
            if currency_ticker not in currency_tickers and len(currency_ticker) > 3:
                currency_tickers.append(currency_ticker)
        index_tickers.append("^GSPC")  # S&P 500 index symbol
        results = self.fetch_multiple_histories(tickers, currency_tickers, index_tickers, period="1d")
        for ticker in results.keys():
            self.update_history(ticker, results[ticker])
    def update_from_transactions(self, transactions, company_cache, currency):
        """Backfill histories from the date of each transaction up to now.

        For every ticker (and its FX conversion pair) the earliest transaction
        date is tracked; histories are then fetched from the overall earliest
        date so the benchmark index covers the whole portfolio lifetime.
        """
        stock_symbols = {}
        index_symbols = {}
        currencies_symbols = {}
        now = datetime.now()
        min_date = now
        for txn in transactions:
            date = datetime.strptime(txn["date"], Y_M_D)
            currency_ticker = self.get_currency_ticker(Currency(company_cache[txn["ticker"]]["currency"]), currency)
            if currency_ticker:
                if currency_ticker not in currencies_symbols:
                    currencies_symbols[currency_ticker] = {"start": date, "end": now}
                # Keep the earliest transaction date per FX pair.
                if date < currencies_symbols[currency_ticker]["start"]:
                    currencies_symbols[currency_ticker]["start"] = date
            if txn["ticker"] not in stock_symbols:
                stock_symbols[txn["ticker"]] = {"start": date, "end": now}
            if date < stock_symbols[txn["ticker"]]["start"]:
                stock_symbols[txn["ticker"]]["start"] = date
            if date < min_date:
                min_date = date
        if len(stock_symbols) > 0:
            # Benchmark the portfolio against the S&P 500 over the same span.
            index_symbols["^GSPC"] = {"start": min_date, "end": now}
        results = self.fetch_multiple_histories(list(stock_symbols.keys()), list(currencies_symbols.keys()),
                                                list(index_symbols.keys()), min_date, now)
        for ticker in list(stock_symbols.keys()) + list(currencies_symbols.keys()) + list(index_symbols.keys()):
            self.update_history(ticker, results[ticker])
def get_currency_ticker_yahoo(currency, target_currency):
    """Return the Yahoo Finance FX ticker converting *currency* into *target_currency*.

    Examples: EUR->MAD gives "EURMAD=X"; USD->MAD gives "MAD=X" (Yahoo
    abbreviates USD-based pairs); identical currencies give "" (no
    conversion needed).
    """
    ticker = ""
    if currency.short != target_currency.short:
        ticker = currency.short + target_currency.short + '=X'
        # BUG FIX: the original compared the Currency object itself to the
        # string "USD" (`currency == "USD"`); every other comparison in this
        # file uses the `.short` code, so the abbreviation branch could never
        # trigger for Currency instances.
        if currency.short == "USD":
            ticker = target_currency.short + '=X'
    return ticker
def get_currency_ticker_grep(currency, target_currency):
    """Return the FMP FX symbol converting *currency* into *target_currency* ("" if identical)."""
    same_currency = currency.short == target_currency.short
    return "" if same_currency else currency.short + target_currency.short
def get_json_parsed_data(url):
    """Fetch *url* and decode the response body as UTF-8 JSON.

    FIX: the original never closed the HTTP response, leaking the
    connection; the response object is a context manager, so `with`
    guarantees it is closed even if decoding fails.
    """
    with urlopen(url) as response:
        data = response.read().decode("utf-8")
    return json.loads(data)
def retrieve_data_five_by_five(tickers, method, **args):
    """Call *method* on batches of at most five tickers and concatenate the results.

    *method* is an fmpsdk endpoint taking apikey/symbol keyword arguments;
    batch responses containing a 'historicalStockList' are flattened, any
    other payload is appended as a single element.
    """
    started = time.time()
    results = []
    for offset in range(0, len(tickers), 5):
        batch = tickers[offset:offset + 5]
        response = method(apikey=os.environ["FINANCE_KEY"], symbol=','.join(batch), **args)
        if 'historicalStockList' in response:
            results.extend(response['historicalStockList'])
        else:
            results.append(response)
    print("Retrieving all data --- %s seconds ---" % (time.time() - started))
    return results
| true |
ddf2812a93e012004c98ebf1d6c100d6ac194d49 | Python | ercris990/_curso_gg_py_3 | /_aula/aula016c_tuplas.py | UTF-8 | 247 | 4.3125 | 4 | [] | no_license | a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b
print(f'Elementos da tupla por ordem alfabetica: {sorted(c)}')
print(f'Quantas veses aparece o numero 5: {c.count(5)}')
print(f'Tupla: {c}')
print(c.index(5)) # mostra a posição do número na tupla
| true |
1a58833fcf9384020aee19fa75661ef38e7c00d8 | Python | sumit-kushwah/Python-stuff | /argparse/subcommand.py | UTF-8 | 2,376 | 2.53125 | 3 | [] | no_license | import argparse
# Command-line interface for the "todo" application, built from argparse
# sub-commands: add / list / find / update / delete / sync / mail / print.
parser = argparse.ArgumentParser(prog="todo", description='A smart command line todo application.')
subparsers = parser.add_subparsers(title="subcommands", description='Available subcommands', help='commands', dest="subcommand")

# "add": register one or more tasks, optionally under a project.
add_cmd = subparsers.add_parser('add', help="add one or more tasks")
add_cmd.add_argument('tasks', nargs='+', help='task descriptions')
add_cmd.add_argument('-p', '--project', help='project name')

# "list": show tasks; the scope flags are mutually exclusive.
list_cmd = subparsers.add_parser('list', help='list out tasks')
scope = list_cmd.add_mutually_exclusive_group()
scope.add_argument('-a', '--all', action='store_true', help='list out all tasks')
scope.add_argument('-d', '--due', action='store_true', help='list out overdue tasks')
scope.add_argument('-u', '--upcoming', action='store_true', help='list out upcoming tasks')
list_cmd.add_argument('--sort', action='store_true', help='sort tasks by name, date etc.')
list_cmd.add_argument('--verbose', '-v', action='count', help='verbosity label', default=0)

# "find": locate tasks by free text or #label.
find_cmd = subparsers.add_parser('find', help="find tasks by text or #label")
find_cmd.add_argument('text', help="text or #label")

# "update": change a single task (by id) or a project (by name) — not both.
update_cmd = subparsers.add_parser('update', help='update tasks description or project name')
update_target = update_cmd.add_mutually_exclusive_group()
update_target.add_argument('-id', '--taskid', help='id of task')
update_target.add_argument('-p', '--project', help='project name')

# "delete": remove a single task (by id) or a whole project (by name).
delete_cmd = subparsers.add_parser('delete', help='delete tasks or project')
delete_target = delete_cmd.add_mutually_exclusive_group()
delete_target.add_argument('-id', '--taskid', help='id for task')
delete_target.add_argument('-p', '--project', help='project name')

# "sync": push/pull tasks to Firebase (no extra options).
sync_cmd = subparsers.add_parser('sync', help='Sync tasks with firebase')

# "mail": e-mail the task list with an optional subject line.
mail_cmd = subparsers.add_parser('mail', help='Mail tasks')
mail_cmd.add_argument('--subject', help='Subject of mail message')

# "print": write the task list to a file.
print_cmd = subparsers.add_parser('print', help='Print tasks')
print_cmd.add_argument('-f', '--filename', help='File name')

# Parse sys.argv; args.subcommand identifies which sub-command was chosen.
args = parser.parse_args()
f4213133305c5fd5d3447e211e47ccc5db11e041 | Python | azamatkb/Function | /7may_1.py | UTF-8 | 659 | 4.09375 | 4 | [] | no_license | #7may_1 Создайте функцию которая берет лист делит его пополам и разворачивает...
def revers():
    """Split a fixed list in half, reverse each half, and print the
    reversed first half followed by the reversed second half."""
    data = ['name', 'age', '1', '19']
    middle = len(data) // 2
    tail_reversed = list(reversed(data[middle:]))
    head_reversed = list(reversed(data[:middle]))
    print(head_reversed + tail_reversed)

revers()

# An interactive variant of this exercise read the words from stdin
# (input().split()) and printed the two reversed halves the same way.
27fc5a08499fbea6bfaf8e3822a5eb2395e9c781 | Python | andrewfhou/advent-of-code-2018 | /day06/daySix.py | UTF-8 | 1,139 | 3.359375 | 3 | [] | no_license | from collections import defaultdict
# Advent of Code 2018, day 6 (part 1): size of the largest region of grid
# cells whose nearest input coordinate (Manhattan distance) is unique.
with open("input.txt") as file:
    inputs = file.read().splitlines()

xPoints = defaultdict(int)
yPoints = defaultdict(int)
maxX = 0
maxY = 0
count = 0
for a in inputs:
    # Each input line has the form "x, y".
    x = int(a[:a.find(',')])
    y = int(a[a.find(',') + 2:])
    xPoints[count] = x
    yPoints[count] = y
    count += 1
    if x > maxX:
        maxX = x
    if y > maxY:
        maxY = y

areas = defaultdict(int)
for x in range(maxX):
    for y in range(maxY):
        # Find the input point closest to (x, y); a tie is marked with -1
        # because a tied cell belongs to no point.
        shortest = maxX + maxY
        best = -1
        for i in range(count):
            dist = abs(x - xPoints[i]) + abs(y - yPoints[i])
            if dist < shortest:
                shortest = dist
                best = i
            elif dist == shortest:
                best = -1
        areas[best] += 1  # defaultdict(int) makes the 0/1 special case redundant

# BUG FIX: the original iterated `for a in areas`, comparing the dict KEYS
# (point indices) instead of the accumulated area sizes.  Compare the
# values, and skip the -1 bucket that collects tied cells.
largest = 0
for owner, area in areas.items():
    if owner != -1 and area > largest:
        largest = area
print("Largest:", largest)
294c9c5a647e1dbf20a047ef2adfc7738f67143a | Python | felixbosco/specklepy | /specklepy/reduction/filter.py | UTF-8 | 1,967 | 3.4375 | 3 | [
"MIT"
] | permissive | import numpy as np
def hot_pixel_mask(image, threshold=5):
    """Identify hot pixels via a 2nd-derivative method.

    A pixel is flagged when the magnitude of its second derivative, along
    either image axis, exceeds *threshold* times the standard deviation of
    that derivative over the whole frame.

    Arguments:
        image (np.ndarray):
            Image for which the mask shall be created.
        threshold (int or float, optional):
            Sigma-clipping threshold applied to the second derivatives.

    Returns:
        mask (np.ndarray):
            Boolean array with the shape of *image*; hot pixels are True.
    """
    # Second derivative along each axis (np.diff shortens that axis by 2),
    # padded back to the input shape with zeros at the borders.
    d2_cols = np.pad(np.diff(image, n=2), ((0, 0), (1, 1)))
    d2_rows = np.pad(np.diff(image, n=2, axis=-2), ((1, 1), (0, 0)))
    # Sigma-clip each derivative against its own global scatter.
    exceeds_cols = np.abs(d2_cols) > threshold * np.std(d2_cols)
    exceeds_rows = np.abs(d2_rows) > threshold * np.std(d2_rows)
    return exceeds_cols | exceeds_rows
def mask_hot_pixels(image, threshold=5):
    """Return *image* as a masked array with hot pixels masked out."""
    hot = hot_pixel_mask(image=image, threshold=threshold)
    return np.ma.masked_array(image, mask=hot)
def fill_hot_pixels(image, fill_value=0, threshold=5):
    """Return a copy of *image* with hot pixels replaced by *fill_value*."""
    masked = mask_hot_pixels(image=image, threshold=threshold)
    return masked.filled(fill_value)
def variable_pixel_mask(cube, var, threshold=5):
    """Flag pixels whose temporal scatter exceeds *threshold* times the expected noise.

    The scatter is the per-pixel standard deviation along the first (time)
    axis of *cube*; *var* is the expected per-pixel variance.
    """
    scatter = np.std(cube, axis=0)
    return np.divide(scatter, np.sqrt(var)) > threshold
def mask_variable_pixels(cube, var, threshold=5):
    """Integrate *cube* over time and mask pixels flagged as overly variable."""
    collapsed = np.sum(cube, axis=0)
    variable = variable_pixel_mask(cube=cube, var=var, threshold=threshold)
    return np.ma.masked_array(collapsed, mask=variable)
def fill_variable_pixels(cube, var, fill_value=0, threshold=5):
    """Integrate *cube* over time and replace overly variable pixels with *fill_value*."""
    masked = mask_variable_pixels(cube=cube, var=var, threshold=threshold)
    return masked.filled(fill_value)
def bad_pixel_mask(cube, var=None, threshold=5):
    """Combine the hot-pixel mask (on the integrated cube) with the variable-pixel
    mask; the latter is only evaluated when *var* is provided."""
    hot = hot_pixel_mask(np.sum(cube, axis=0), threshold=threshold)
    if var is None:
        return hot
    variable = variable_pixel_mask(cube=cube, var=var, threshold=threshold)
    return np.logical_or(hot, variable)
| true |
7b4f6b08c1df986d3e6fcbd99299b06e24985392 | Python | small-west/eASCs | /0_Clean_Python_Scripts/2_dicty_fourth_t.py | UTF-8 | 5,682 | 2.609375 | 3 | [] | no_license | ### IMPORTS ###
import os
import numpy as np
import csv
### CHOOSE SOURCE FILES ###
source_dicty = '2_Development/sociality_genes.csv'  # rows of "gene_id,flag" where flag 1=social, 0=single
source_fasta = '2_Development/Dictyostelium_ac.fa'  # FASTA whose headers start with "ID=gene:<id>;..."
### MAIN ###
def main():
    """Compare +4 base composition after stop codons between social and single genes.

    Reads gene ids and their sociality flag from source_dicty, pulls the
    matching sequences from source_fasta (skipping sequences whose second
    codon is itself a stop), then writes per-set fourth-base statistics to
    2_Development/fourth.csv.
    """
    csv_total = []
    # Get gene ids, split by sociality flag (column 2: '1'=social, '0'=single).
    raw_data = open(source_dicty).read()
    dicty_split = raw_data.split('\n')
    social_genes = []
    single_genes = []
    for i in dicty_split:
        items = i.split(',')
        identifier = items[1]
        gene_id = items[0]
        if identifier == '1':
            social_genes.append(gene_id)
        if identifier == '0':
            single_genes.append(gene_id)
    # Get sequences for both sets; the gene id is the first ';'-field of the
    # FASTA header with its "ID=gene:" prefix stripped.
    raw_fasta = open(source_fasta).read()
    fasta_split = raw_fasta.split('>')
    social_seqs = []
    single_seqs = []
    for k in fasta_split:
        if k != '':
            chunks = k.split('\n')
            line = chunks[0]
            seq = chunks[1]
            line_split = line.split(';')
            fasta_id = line_split[0].replace('ID=gene:', '')
            # Exclude sequences whose second codon (bases 4-6) is a stop codon.
            if fasta_id in social_genes:
                if seq[3:6] != 'TAA' and seq[3:6] != 'TGA' and seq[3:6] != 'TAG':
                    social_seqs.append(seq)
            elif fasta_id in single_genes:
                if seq[3:6] != 'TAA' and seq[3:6] != 'TGA' and seq[3:6] != 'TAG':
                    single_seqs.append(seq)
            else:
                continue
    # Calculate the overall +4T frequency for each set.
    T_social = get_fourth_t(social_seqs)
    T_single = get_fourth_t(single_seqs)
    # Group each set by its leading stop codon (TAA / TGA / TAG).
    social_taa, social_tga, social_tag = get_stop_groups(social_seqs)
    single_taa, single_tga, single_tag = get_stop_groups(single_seqs)
    # Fourth-base (A/T/G/C) frequencies within each stop group.
    soc_taa_a, soc_taa_t, soc_taa_g, soc_taa_c = get_fourth_freqs(social_taa)
    soc_tga_a, soc_tga_t, soc_tga_g, soc_tga_c = get_fourth_freqs(social_tga)
    soc_tag_a, soc_tag_t, soc_tag_g, soc_tag_c = get_fourth_freqs(social_tag)
    sin_taa_a, sin_taa_t, sin_taa_g, sin_taa_c = get_fourth_freqs(single_taa)
    sin_tga_a, sin_tga_t, sin_tga_g, sin_tga_c = get_fourth_freqs(single_tga)
    sin_tag_a, sin_tag_t, sin_tag_g, sin_tag_c = get_fourth_freqs(single_tag)
    social_line = ['social_genes', T_social, soc_taa_a, soc_taa_t, soc_taa_g, soc_taa_c, soc_tga_a, soc_tga_t, soc_tga_g, soc_tga_c, soc_tag_a, soc_tag_t, soc_tag_g, soc_tag_c]
    single_line = ['single_genes', T_single, sin_taa_a, sin_taa_t, sin_taa_g, sin_taa_c, sin_tga_a, sin_tga_t, sin_tga_g, sin_tga_c, sin_tag_a, sin_tag_t, sin_tag_g, sin_tag_c]
    csv_total.append(social_line)
    csv_total.append(single_line)
    # Write one CSV row per gene set.
    headers = ['Set', 'overall_t_freq', 'TAA_A', 'TAA_T', 'TAA_G', 'TAA_C', 'TGA_A', 'TGA_T', 'TGA_G', 'TGA_C', 'TAG_A', 'TAG_T', 'TAG_G', 'TAG_C']
    filename = "fourth.csv"
    subdir = "2_Development"
    filepath = os.path.join(subdir, filename)
    with open(filepath, 'w') as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerow(i for i in headers)
        for j in csv_total:
            writer.writerow(j)
### FUNCTIONS ###
def get_ASCs(sequences):
    """Count additional stop codons (ASCs) in codon positions 1-6 after the stop.

    Each sequence is read in frame; codon 0 is the stop codon itself and the
    six following codons are tested for TAA/TGA/TAG.

    Returns:
        overall_f: fraction of all examined codons (6 per sequence) that are stops
        f_list:    per-position stop frequency across sequences (positions 1-6)

    Raises ZeroDivisionError for an empty input, like the original.
    """
    # FIX: the original repeated the same if-test six times, once per codon
    # position; collapsed into a single loop over positions.
    STOP_CODONS = ('TAA', 'TGA', 'TAG')
    n = len(sequences)
    position_counts = [0] * 6
    for sequence in sequences:
        codons = [sequence[i:i + 3] for i in range(0, len(sequence), 3)]
        for pos in range(6):
            if codons[pos + 1] in STOP_CODONS:
                position_counts[pos] += 1
    overall_f = sum(position_counts) / (n * 6)
    f_list = [c / n for c in position_counts]
    return overall_f, f_list
def get_fourth_t(genes):
    """Return the fraction of sequences whose fourth base (index 3) is 'T'.

    Raises ZeroDivisionError for an empty input, matching the original
    contract.

    FIX: the original also partitioned the sequences into TAA/TGA/TAG lists
    that were never used or returned — dead code removed (use
    get_stop_groups() for that behaviour).
    """
    n = len(genes)
    t_count = sum(1 for gene in genes if gene[3] == 'T')
    return t_count / n
def get_stop_groups(genes):
    """Partition sequences by their leading stop codon.

    Returns three lists (TAA, TGA, TAG), preserving input order; sequences
    starting with anything else are dropped.
    """
    groups = {'TAA': [], 'TGA': [], 'TAG': []}
    for gene in genes:
        prefix = gene[0:3]
        if prefix in groups:
            groups[prefix].append(gene)
    return groups['TAA'], groups['TGA'], groups['TAG']
def get_fourth_freqs(genes):
    """Frequencies of A/T/G/C at the fourth base (index 3) across *genes*.

    Returns (a_freq, t_freq, g_freq, c_freq); all zeros for an empty input.
    """
    total = len(genes)
    if total == 0:
        return 0, 0, 0, 0
    fourth_bases = [gene[3] for gene in genes]
    return (fourth_bases.count('A') / total,
            fourth_bases.count('T') / total,
            fourth_bases.count('G') / total,
            fourth_bases.count('C') / total)
### RUN ###
if __name__ == '__main__':
main()
| true |
2f0bc59cd894cc42b0c28e64e78b632b8586ad64 | Python | elados93/trex-core | /scripts/external_libs/lockfile-0.10.2/test/compliancetest.py | UTF-8 | 8,523 | 2.515625 | 3 | [
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"MIT"
] | permissive | import os
import threading
import shutil
import lockfile
class ComplianceTest(object):
    """Black-box compliance suite for lockfile.LockFile implementations.

    A subclass sets ``class_to_test`` to a concrete lock class; setup()
    patches it in as lockfile.LockFile and teardown() restores the original
    and removes the lock file left on disk.  Helpers named _test_*_helper
    take a ``tbool`` flag selecting threaded vs. unthreaded lock semantics;
    commented-out (##) tests are variants disabled upstream and kept as-is.
    """
    def __init__(self):
        # Remember the real class so teardown() can undo the monkey-patch.
        self.saved_class = lockfile.LockFile
    def _testfile(self):
        """Return platform-appropriate file. Helper for tests."""
        import tempfile
        return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid())
    def setup(self):
        # Route every lockfile.LockFile(...) call to the class under test.
        lockfile.LockFile = self.class_to_test
    def teardown(self):
        try:
            tf = self._testfile()
            # The lock artefact may be a directory or a plain file depending
            # on the implementation under test.
            if os.path.isdir(tf):
                shutil.rmtree(tf)
            elif os.path.isfile(tf):
                os.unlink(tf)
            elif not os.path.exists(tf):
                pass
            else:
                raise SystemError("unrecognized file: %s" % tf)
        finally:
            lockfile.LockFile = self.saved_class
    def _test_acquire_helper(self, tbool):
        # As simple as it gets: acquire, verify ownership, release.
        lock = lockfile.LockFile(self._testfile(), threaded=tbool)
        lock.acquire()
        assert lock.i_am_locking()
        lock.release()
        assert not lock.is_locked()
    ## def test_acquire_basic_threaded(self):
    ##     self._test_acquire_helper(True)
    def test_acquire_basic_unthreaded(self):
        self._test_acquire_helper(False)
    def _test_acquire_no_timeout_helper(self, tbool):
        # Non-blocking acquire (timeout <= 0) against a lock held elsewhere
        # must raise AlreadyLocked.
        e1, e2 = threading.Event(), threading.Event()
        t = _in_thread(self._lock_wait_unlock, e1, e2)
        e1.wait()  # wait for thread t to acquire lock
        lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
        assert lock2.is_locked()
        if tbool:
            assert not lock2.i_am_locking()
        else:
            assert lock2.i_am_locking()
        try:
            lock2.acquire(timeout=-1)
        except lockfile.AlreadyLocked:
            pass
        else:
            lock2.release()
            raise AssertionError("did not raise AlreadyLocked in"
                                 " thread %s" %
                                 threading.current_thread().get_name())
        try:
            lock2.acquire(timeout=0)
        except lockfile.AlreadyLocked:
            pass
        else:
            lock2.release()
            raise AssertionError("did not raise AlreadyLocked in"
                                 " thread %s" %
                                 threading.current_thread().get_name())
        e2.set()  # tell thread t to release lock
        t.join()
    ## def test_acquire_no_timeout_threaded(self):
    ##     self._test_acquire_no_timeout_helper(True)
    ## def test_acquire_no_timeout_unthreaded(self):
    ##     self._test_acquire_no_timeout_helper(False)
    def _test_acquire_timeout_helper(self, tbool):
        # A positive timeout against a held lock must raise LockTimeout.
        e1, e2 = threading.Event(), threading.Event()
        t = _in_thread(self._lock_wait_unlock, e1, e2)
        e1.wait()  # wait for thread t to acquire lock
        lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
        assert lock2.is_locked()
        try:
            lock2.acquire(timeout=0.1)
        except lockfile.LockTimeout:
            pass
        else:
            lock2.release()
            raise AssertionError("did not raise LockTimeout in thread %s" %
                                 threading.current_thread().get_name())
        e2.set()
        t.join()
    def test_acquire_timeout_threaded(self):
        self._test_acquire_timeout_helper(True)
    def test_acquire_timeout_unthreaded(self):
        self._test_acquire_timeout_helper(False)
    def _test_context_timeout_helper(self, tbool):
        # Same as above, but the timeout is set at construction time.
        e1, e2 = threading.Event(), threading.Event()
        t = _in_thread(self._lock_wait_unlock, e1, e2)
        e1.wait()  # wait for thread t to acquire lock
        lock2 = lockfile.LockFile(self._testfile(), threaded=tbool,
                                  timeout=0.2)
        assert lock2.is_locked()
        try:
            lock2.acquire()
        except lockfile.LockTimeout:
            pass
        else:
            lock2.release()
            raise AssertionError("did not raise LockTimeout in thread %s" %
                                 threading.current_thread().get_name())
        e2.set()
        t.join()
    def test_context_timeout_unthreaded(self):
        self._test_context_timeout_helper(False)
    def _test_release_basic_helper(self, tbool):
        # Releasing an unlocked lock must raise NotLocked (not NotMyLock).
        lock = lockfile.LockFile(self._testfile(), threaded=tbool)
        lock.acquire()
        assert lock.is_locked()
        lock.release()
        assert not lock.is_locked()
        assert not lock.i_am_locking()
        try:
            lock.release()
        except lockfile.NotLocked:
            pass
        except lockfile.NotMyLock:
            raise AssertionError('unexpected exception: %s' %
                                 lockfile.NotMyLock)
        else:
            raise AssertionError('erroneously unlocked file')
    ## def test_release_basic_threaded(self):
    ##     self._test_release_basic_helper(True)
    def test_release_basic_unthreaded(self):
        self._test_release_basic_helper(False)
    ## def test_release_from_thread(self):
    ##     e1, e2 = threading.Event(), threading.Event()
    ##     t = _in_thread(self._lock_wait_unlock, e1, e2)
    ##     e1.wait()
    ##     lock2 = lockfile.LockFile(self._testfile(), threaded=False)
    ##     assert not lock2.i_am_locking()
    ##     try:
    ##         lock2.release()
    ##     except lockfile.NotMyLock:
    ##         pass
    ##     else:
    ##         raise AssertionError('erroneously unlocked a file locked'
    ##                              ' by another thread.')
    ##     e2.set()
    ##     t.join()
    def _test_is_locked_helper(self, tbool):
        lock = lockfile.LockFile(self._testfile(), threaded=tbool)
        lock.acquire(timeout=2)
        assert lock.is_locked()
        lock.release()
        assert not lock.is_locked(), "still locked after release!"
    ## def test_is_locked_threaded(self):
    ##     self._test_is_locked_helper(True)
    def test_is_locked_unthreaded(self):
        self._test_is_locked_helper(False)
    ## def test_i_am_locking_threaded(self):
    ##     self._test_i_am_locking_helper(True)
    def test_i_am_locking_unthreaded(self):
        self._test_i_am_locking_helper(False)
    def _test_i_am_locking_helper(self, tbool):
        # A second handle on the same path sees the lock but (when threaded)
        # does not own it.
        lock1 = lockfile.LockFile(self._testfile(), threaded=tbool)
        assert not lock1.is_locked()
        lock1.acquire()
        try:
            assert lock1.i_am_locking()
            lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
            assert lock2.is_locked()
            if tbool:
                assert not lock2.i_am_locking()
        finally:
            lock1.release()
    def _test_break_lock_helper(self, tbool):
        # break_lock() from another handle leaves the original owner unable
        # to release (NotLocked expected).
        lock = lockfile.LockFile(self._testfile(), threaded=tbool)
        lock.acquire()
        assert lock.is_locked()
        lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
        assert lock2.is_locked()
        lock2.break_lock()
        assert not lock2.is_locked()
        try:
            lock.release()
        except lockfile.NotLocked:
            pass
        else:
            raise AssertionError('break lock failed')
    ## def test_break_lock_threaded(self):
    ##     self._test_break_lock_helper(True)
    def test_break_lock_unthreaded(self):
        self._test_break_lock_helper(False)
    def _lock_wait_unlock(self, event1, event2):
        """Lock from another thread. Helper for tests."""
        l = lockfile.LockFile(self._testfile())
        l.acquire()
        try:
            event1.set()  # we're in,
            event2.wait()  # wait for boss's permission to leave
        finally:
            l.release()
    def test_enter(self):
        lock = lockfile.LockFile(self._testfile())
        lock.acquire()
        try:
            assert lock.is_locked(), "Not locked after acquire!"
        finally:
            lock.release()
        assert not lock.is_locked(), "still locked after release!"
    def test_decorator(self):
        # lockfile.locked wraps the call in acquire/release around the file.
        @lockfile.locked(self._testfile())
        def func(a, b):
            return a + b
        assert func(4, 3) == 7
def _in_thread(func, *args, **kwargs):
"""Execute func(*args, **kwargs) after dt seconds. Helper for tests."""
def _f():
func(*args, **kwargs)
t = threading.Thread(target=_f, name='/*/*')
t.setDaemon(True)
t.start()
return t
| true |
12ed448e300437a59dbc7b5d9b97a6f227e43409 | Python | karoberts/adventofcode2015 | /21-1.py | UTF-8 | 2,663 | 3.125 | 3 | [] | no_license |
def run_game(my_hp, my_damage, my_armor, boss_hp, boss_damage, boss_armor):
    """Simulate alternating attacks (player strikes first); True if the player wins.

    Every hit removes max(1, attacker damage - defender armor) hit points,
    so armor can never reduce a blow below 1.
    """
    player_hit = max(1, my_damage - boss_armor)
    boss_hit = max(1, boss_damage - my_armor)
    while True:
        boss_hp -= player_hit
        if boss_hp <= 0:
            return True   # boss defeated before it strikes back
        my_hp -= boss_hit
        if my_hp <= 0:
            return False  # player ran out of hit points
# Shop inventory from the puzzle: exactly one weapon is mandatory, armor is
# optional, and up to two distinct rings may be bought.
weapons = []
weapons.append({'t': 'Dagger', 'cost': 8, 'damage': 4})
weapons.append({'t': 'Shortsword', 'cost': 10, 'damage': 5})
weapons.append({'t': 'Warhammer', 'cost': 25, 'damage': 6})
weapons.append({'t': 'Longsword', 'cost': 40, 'damage': 7})
weapons.append({'t': 'Greataxe', 'cost': 74, 'damage': 8})

armors = []
armors.append({'t': 'Leather', 'cost': 13, 'armor': 1})
armors.append({'t': 'Chainmail', 'cost': 31, 'armor': 2})
armors.append({'t': 'Splintmail', 'cost': 53, 'armor': 3})
armors.append({'t': 'Bandedmail', 'cost': 75, 'armor': 4})
armors.append({'t': 'Platemail', 'cost': 102, 'armor': 5})

rings = []
rings.append({'t':'Damage +1', 'cost': 25 , 'damage': 1, 'armor': 0})
rings.append({'t':'Damage +2', 'cost': 50 , 'damage': 2, 'armor': 0})
rings.append({'t':'Damage +3', 'cost':100 , 'damage': 3, 'armor': 0})
rings.append({'t':'Defense +1', 'cost': 20, 'damage': 0, 'armor': 1})
rings.append({'t':'Defense +2', 'cost': 40, 'damage': 0, 'armor': 2})
rings.append({'t':'Defense +3', 'cost': 80, 'damage': 0, 'armor': 3})
# boss
# Hit Points: 104
# Damage: 8
# Armor: 1
# my hp = 100
#
# Brute-force every loadout (-1 means "skip this slot") and keep the
# cheapest one that still beats the boss.
mincost = 9999999
for w in range(0, len(weapons)):
    my_weapon = weapons[w]['damage']
    for a in range(-1, len(armors)):
        my_armor = 0 if a == -1 else armors[a]['armor']
        for r1 in range(-1, len(rings)):
            my_ring1 = (0, 0) if r1 == -1 else (rings[r1]['damage'], rings[r1]['armor'])
            for r2 in range(-1, len(rings)):
                # BUG FIX: the original allowed r1 == r2 >= 0, i.e. buying the
                # same ring twice; the rules only allow distinct rings
                # (having no ring in both slots is still fine).
                if r2 != -1 and r2 == r1:
                    continue
                my_ring2 = (0, 0) if r2 == -1 else (rings[r2]['damage'], rings[r2]['armor'])
                if run_game(100, my_weapon + my_ring1[0] + my_ring2[0], my_armor + my_ring1[1] + my_ring2[1], 104, 8, 1):
                    wcost = weapons[w]['cost']
                    acost = 0 if a == -1 else armors[a]['cost']
                    r1cost = 0 if r1 == -1 else rings[r1]['cost']
                    r2cost = 0 if r2 == -1 else rings[r2]['cost']
                    cost = wcost + acost + r1cost + r2cost
                    if cost < mincost:
                        # NOTE(review): when a == -1 this prints armors[-1]
                        # (Platemail) even though no armor was bought.
                        print('cost', cost, 'w', weapons[w], 'a', armors[a], 'r1', my_ring1, 'r2', my_ring2)
                        mincost = cost
print('mincost', mincost)
93d0257387359b9d3f43803d87e65adf99790f64 | Python | Mohamedballouch/covid19_morocco-package | /covid19_morocco/covid19.py | UTF-8 | 4,705 | 2.671875 | 3 | [] | no_license | import urllib.request
from bs4 import BeautifulSoup as bf
import time
state='Morocco'
def confirmed_people():
    """Scrape worldometers and return the total confirmed-case figure for `state`.

    NOTE(review): relies on the page's 5th <span> holding the number — this
    will break if the page layout changes.
    """
    time.sleep(5)  # throttle repeated calls
    url = 'https://www.worldometers.info/coronavirus/country/' + state + '/'
    # Present a browser-like User-Agent so the request is not rejected.
    headers = {'User-Agent': "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"}
    request = urllib.request.Request(url, headers=headers)
    page = urllib.request.urlopen(request).read()
    spans = bf(page, 'html.parser')("span")
    return spans[4].contents[0]
def deaths_people():
    """Scrape worldometers and return the death toll for `state`.

    NOTE(review): relies on the page's 6th <span> holding the number — this
    will break if the page layout changes.
    """
    time.sleep(5)  # throttle repeated calls
    url = 'https://www.worldometers.info/coronavirus/country/' + state + '/'
    # Present a browser-like User-Agent so the request is not rejected.
    headers = {'User-Agent': "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"}
    request = urllib.request.Request(url, headers=headers)
    page = urllib.request.urlopen(request).read()
    spans = bf(page, 'html.parser')("span")
    return spans[5].contents[0]
def recoverd_people():
    """Scrape worldometers and return the recovered-cases figure for `state`.

    (Name kept with its original spelling for backward compatibility.)
    NOTE(review): relies on the page's 7th <span> holding the number.
    """
    time.sleep(5)  # throttle repeated calls
    url = 'https://www.worldometers.info/coronavirus/country/' + state + '/'
    # Present a browser-like User-Agent so the request is not rejected.
    headers = {'User-Agent': "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"}
    request = urllib.request.Request(url, headers=headers)
    page = urllib.request.urlopen(request).read()
    spans = bf(page, 'html.parser')("span")
    return spans[6].contents[0]
def confirmed_people_regions(city):
    """Return the confirmed-case count for the Moroccan region *city*.

    *city* must match a key of the API response exactly (e.g. 'SoussMassa');
    a KeyError is raised otherwise.

    FIX: the original contained a long if/elif chain AFTER an unconditional
    `return`, making it unreachable dead code (it also referenced keys
    that did not match its own conditions); the chain has been removed.
    """
    json_url = "https://moroccostats.herokuapp.com/stats/coronavirus/countries/morocco/regions?fbclid=IwAR3KpT3JArtFc83s14sOJsT99pIx8UFqfHWZgfN4oxAr6OLj05apXvdWmqY"
    import requests
    import json
    # download the raw JSON
    raw = requests.get(json_url).text
    data = json.loads(raw)
    return data[city]
def plot_cases():
    """Download the Morocco COVID-19 time series and plot Cases/Deaths as lines."""
    json_url = "https://raw.githubusercontent.com/aboullaite/Covid19-MA/master/stats/MA-times_series.csv"
    import pandas as pd
    series = pd.read_csv(json_url, index_col=0)
    # Normalise the bilingual column headers, then drop the Recovered column.
    series = series.rename(columns={'Dates / التواريخ': 'Dates', 'Cases / الحالات': 'Cases', 'Recovered / تعافى': 'Recovered', 'Deaths / الوفيات': 'Deaths'})
    series = series.drop(columns=['Recovered'])
    from matplotlib import pyplot
    series.plot(marker='o', figsize=(15, 5))
    pyplot.xticks(rotation=50)
    pyplot.xlabel("Dates")
    pyplot.show()
def hist_cases():
    """Plot Morocco's COVID-19 cases/deaths time series as a bar chart."""
    csv_url = "https://raw.githubusercontent.com/aboullaite/Covid19-MA/master/stats/MA-times_series.csv"
    import pandas as pd
    series = pd.read_csv(csv_url, index_col=0)
    # Replace the bilingual headers with plain English column names.
    series = series.rename(columns={'Dates / التواريخ': 'Dates', 'Cases / الحالات': 'Cases', 'Recovered / تعافى': 'Recovered', 'Deaths / الوفيات': 'Deaths'})
    series = series.drop(columns=['Recovered'])
    from matplotlib import pyplot
    series.plot(kind='bar', figsize=(15, 5))
    pyplot.xticks(rotation=50)
    pyplot.xlabel("Dates")
    pyplot.show()
| true |
ea328c36e7dd543fdc39bf5126f50b4d0922e663 | Python | dhinojosa/tdd20160609 | /calcstats.py | UTF-8 | 586 | 3.546875 | 4 | [] | no_license | import unittest
class CalcStats:
    """Simple statistics over a list of comparable values (TDD kata).

    The original file was not valid Python (missing colons, ``for (x in y)``,
    bare expressions instead of returns, a stray ``end``); this is a working
    reconstruction. The max/min predicates were also swapped relative to
    the method names and have been corrected.
    """

    def __init__(self, list):
        # Parameter kept as ``list`` (shadows the builtin) for call
        # compatibility with the original signature.
        self.list = list

    def filter(self, pred):
        """Return the element selected by pairwise comparison.

        *pred(candidate, current)* returns True when *candidate* should
        replace the current answer. Returns None for an empty list.
        """
        if len(self.list) == 0:
            return None
        answer = self.list[0]
        for item in self.list[1:]:
            if pred(item, answer):
                answer = item
        return answer

    def max(self):
        """Largest element, or None if the list is empty."""
        return self.filter(lambda candidate, current: candidate > current)

    def min(self):
        """Smallest element, or None if the list is empty."""
        return self.filter(lambda candidate, current: candidate < current)
class CalcStatsTest(unittest.TestCase):
    """Unit tests for CalcStats (original lacked colons and called max as
    an attribute instead of a method)."""

    def test_max_with_list_of_3(self):
        cs = CalcStats([1, 2, 3])
        self.assertEqual(cs.max(), 3)
| true |
d67399ca1f0a8157319b221b6f5a7786cc9b0236 | Python | tirsott/lc-go | /problems/0123.best-time-to-buy-and-sell-stock-iii/best-time-to-buy-and-sell-stock-iii.py | UTF-8 | 924 | 3.28125 | 3 | [] | no_license | from typing import List
class Solution:
    """LeetCode 123: best time to buy and sell stock with at most two
    transactions."""

    def maxProfit(self, prices: List[int]) -> int:
        """Return the maximum profit achievable with at most two buy/sell
        transactions over *prices* (one price per day).

        dp[i][h][t] is the best cash after day i, where h is 1 while a
        share is held and t counts completed sales (0, 1 or 2).
        The stray debug ``print(dp)`` calls of the original are removed.
        """
        if len(prices) < 2:
            return 0
        dp = [[[None, None, None], [None, None, None]] for _ in range(len(prices))]
        # Day 0: either do nothing (cash 0) or buy the first share.
        dp[0][0] = [0, 0, 0]
        dp[0][1] = [-prices[0], -prices[0], -prices[0]]
        for i in range(1, len(prices)):
            dp[i][0][0] = 0
            # Not holding after t sales: keep yesterday's state or sell today.
            dp[i][0][1] = max(dp[i - 1][0][1], dp[i - 1][1][0] + prices[i])
            dp[i][0][2] = max(dp[i - 1][0][2], dp[i - 1][1][1] + prices[i])
            # Holding with t completed sales: keep holding or buy today.
            dp[i][1][0] = max(dp[i - 1][1][0], dp[i - 1][0][0] - prices[i])
            dp[i][1][1] = max(dp[i - 1][1][1], dp[i - 1][0][1] - prices[i])
            # Holding after two completed sales is never useful; pin to 0.
            dp[i][1][2] = 0
        return max(dp[-1][0])
# Ad-hoc smoke test: should print 13 for this series
# (buy at 1 / sell at 7, then buy at 2 / sell at 9).
print(Solution().maxProfit([1,2,4,2,5,7,2,4,9,0]))
# [3,3,5,0,0,3,1,4]
fc2e361a47ab232f9bc360b0b6aac3feacbec2f3 | Python | aravind225/hackerearth | /pattern6.py | UTF-8 | 137 | 3.703125 | 4 | [] | no_license | n=5
i = 1
j = n
# Climb 1..n-1 first, then count j back down n..1 -- net output
# "1 2 3 4 5 4 3 2 1 " (space-separated, no trailing newline).
while j:
    if i < n:
        print(i, end=" ")
        i = i + 1
    else:
        print(j, end=" ")
        j = j - 1
| true |
cba64e2d1f2a8a62f542a3164b9b262bccfff869 | Python | bean710/AirBnB_clone_v3 | /api/v1/views/places_reviews.py | UTF-8 | 2,714 | 2.609375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/python3
"""File for the reviews route"""
from api.v1.views import app_views
from flask import Flask, jsonify, abort, request
from models import storage
from models.review import Review
from models.place import Place
@app_views.route("/places/<place_id>/reviews", methods=["GET"],
                 strict_slashes=False)
def all_reviews(place_id):
    """Return the JSON list of every review attached to one place."""
    if storage.get("Place", place_id) is None:
        abort(404)
    matching = [review.to_dict()
                for review in storage.all("Review").values()
                if review.place_id == place_id]
    return jsonify(matching)
@app_views.route("/reviews/<review_id>", methods=["GET"],
                 strict_slashes=False)
def get_review(review_id):
    """Return one review as JSON, or 404 if the id is unknown."""
    review = storage.get("Review", review_id)
    if review is None:
        abort(404)
    return jsonify(review.to_dict())
@app_views.route("/reviews/<review_id>", methods=["DELETE"],
                 strict_slashes=False)
def del_review(review_id):
    """Delete one review; respond with an empty JSON object on success."""
    review = storage.get("Review", review_id)
    if review is None:
        abort(404)
    storage.delete(review)
    storage.save()
    return jsonify({}), 200
@app_views.route("/places/<place_id>/reviews", methods=["POST"],
                 strict_slashes=False)
def create_review(place_id):
    """Create a review for a place from a JSON body.

    Requires "user_id" and "text" keys; 404s if the place or user is
    unknown; returns the new review with status 201.
    """
    place = storage.get("Place", place_id)
    if place is None:
        abort(404)
    data = request.get_json()
    # Validation order matters: body shape first, then required keys.
    if data is None:
        return jsonify({"error": "Not a JSON"}), 400
    elif "user_id" not in data:
        return jsonify({"error": "Missing user_id"}), 400
    elif "text" not in data:
        return jsonify({"error": "Missing text"}), 400
    user_id = data["user_id"]
    data_user = storage.get("User", user_id)
    if data_user is None:
        abort(404)
    # Attach the review to the place from the URL, not from the body.
    data["place_id"] = place_id
    nreview = Review(**data)
    storage.new(nreview)
    storage.save()
    return jsonify(nreview.to_dict()), 201
@app_views.route("/reviews/<review_id>", methods=["PUT"],
                 strict_slashes=False)
def update_review(review_id):
    """Update an existing review from a JSON body, skipping protected keys."""
    review = storage.get("Review", review_id)
    if review is None:
        abort(404)
    else:
        # force=True parses the body even without an application/json header.
        data = request.get_json(force=True)
        if data is None:
            return jsonify({"error": "Not a JSON"}), 400
        # NOTE(review): "updated_at" is not in this ignore list, so clients
        # can overwrite it -- confirm whether that is intended.
        ignore = ["id", "created_at", "place_id", "user_id"]
        for k, v in data.items():
            if k not in ignore:
                setattr(review, k, v)
        storage.save()
        return jsonify(review.to_dict()), 200
| true |
5663ab5a62c2f4a70083bda247ae784d653f957a | Python | templeblock/dlex | /dlex/utils/utils.py | UTF-8 | 3,185 | 2.734375 | 3 | [] | no_license | """General utils"""
import os
import sys
import time
import zipfile
import tarfile
import shutil
from six.moves import urllib
import requests
from tqdm import tqdm
from .logging import set_log_dir, logger
urllib_start_time = 0
def reporthook(count, block_size, total_size):
    """urllib-style download progress callback printing percent/size/speed.

    The first call (count == 0) records the start time in the module-level
    ``urllib_start_time``; later calls report progress relative to it.
    """
    global urllib_start_time
    if count == 0:
        # Bug fix: the original assigned a *local* ``start_time`` here, so
        # the module-level timer was never actually started and durations
        # were measured from the epoch.
        urllib_start_time = time.time()
        return
    # Guard against a zero duration on very fast consecutive calls.
    duration = max(time.time() - urllib_start_time, 1e-9)
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = min(int(count * block_size * 100 / total_size), 100)
    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()
def maybe_download(work_directory, source_url, filename=None):
    """Download the data from source url, unless it's already here.

    Args:
        filename: string, name of the file in the directory.
        work_directory: string, path to working directory.
        source_url: url to download from if file doesn't exist.

    Returns:
        Path to resulting file.
    """
    if not os.path.exists(work_directory):
        os.makedirs(work_directory)
    # Default the local name to the last path component of the URL.
    filepath = os.path.join(work_directory, filename or source_url[source_url.rfind("/")+1:])
    if not os.path.exists(filepath):
        with open(filepath, 'wb') as f:
            logger.info("Downloading file at %s to %s", source_url, filepath)
            r = requests.get(source_url, stream=True, allow_redirects=True)
            total_length = r.headers.get('content-length')
            if total_length is None: # no content length header
                for data in r.iter_content(chunk_size=128):
                    f.write(data)
                    # NOTE(review): looks like leftover debug output -- confirm.
                    print(len(data))
            elif r.status_code == 200:
                total_length = int(total_length)
                logger.info("File size: %.1fMB", total_length / 1024 / 1024)
                # Stream the body in chunks with a progress bar.
                with tqdm(desc="Downloading", total=int(total_length), unit="B", unit_scale=True, unit_divisor=1024) as pbar:
                    for data in r.iter_content(chunk_size=4096):
                        f.write(data)
                        pbar.update(len(data))
            # NOTE(review): other status codes fall through silently and
            # leave an empty file on disk -- confirm this is intended.
    return filepath
def maybe_unzip(file_path, folder_path):
    """Extract *file_path* into *folder_path* unless that folder already
    exists.

    .zip archives go through zipfile; .lzma/.gz/.tgz through tarfile.
    Any other extension raises.
    """
    if os.path.exists(folder_path):
        return  # already extracted
    _, extension = os.path.splitext(file_path)
    if extension == '.zip':
        logger.info("Extract %s to %s", file_path, folder_path)
        with zipfile.ZipFile(file_path, 'r') as archive:
            archive.extractall(folder_path)
    elif extension in ['.lzma', '.gz', '.tgz']:
        logger.info("Extract %s to %s", file_path, folder_path)
        with tarfile.open(file_path) as archive:
            archive.extractall(path=folder_path)
    else:
        raise Exception("File type is not supported (%s)" % extension)
def init_dirs(params):
    """(Re)create the experiment directories described by *params*.

    The log directory is created if missing (and kept); the output
    directory is wiped and recreated from scratch. In train mode the
    logger is pointed at the log directory.
    """
    os.makedirs(params.log_dir, exist_ok=True)
    # Start each run with a clean output directory.
    shutil.rmtree(params.output_dir, ignore_errors=True)
    os.makedirs(params.output_dir)
    if params.mode == "train":
        set_log_dir(params)
| true |
ce32202f6a2419730f9ed5240d3ed542b372c517 | Python | hvn2001/LearnPython | /DataStructures/IntroTrees/PreOrderTraversal.py | UTF-8 | 433 | 3.546875 | 4 | [] | no_license | from DataStructures.IntroTrees.BinarySearchTree import BinarySearchTree
from DataStructures.IntroTrees.Print import display
def preOrderPrint(node):
    """Print a binary tree's values in pre-order (root, left, right)."""
    if node is None:
        return
    print(node.val)
    preOrderPrint(node.leftChild)
    preOrderPrint(node.rightChild)
# Build a sample BST, print its structure, then dump the keys in pre-order.
BST = BinarySearchTree(6)
BST.insert(4)
BST.insert(9)
BST.insert(5)
BST.insert(2)
BST.insert(8)
BST.insert(12)
display(BST.root)
preOrderPrint(BST.root)
| true |
ff6f58f4c23ca918b0ba239e90913d570e58a825 | Python | mbisbano1/369_Project_1 | /experiment3/UDPServer.py | UTF-8 | 809 | 2.890625 | 3 | [] | no_license | from socket import *
import sys
# NOTE(review): a module-level ``global`` statement is a no-op -- the
# assignments below already create the module-level name.
global server_port
#serverPort=12000
# Take the port from argv[1], falling back to 12000 with a usage message.
if len(sys.argv) <= 1:
    print('Usage: "python3 UDPServer.py server_port"')
    print('server_port = server socket port: #80GX')
    print('Using Default values for server_port')
    print('server_port = 12000')
    server_port = 12000
else:
    server_port = int(sys.argv[1])
print('server_port = ', server_port)
class UDPServer:
    """Echo-style UDP server: upper-cases each datagram and sends it back.

    NOTE(review): all the work happens in __init__, which loops forever, so
    merely constructing the class (see the last line) starts the server.
    """
    def __init__(self):
        try:
            serverSocket=socket(AF_INET, SOCK_DGRAM)
            # Bind to all interfaces on the port parsed from argv above.
            serverSocket.bind(('', server_port))
            print("The server is ready to receive.")
            while 1:
                message, clientAddress = serverSocket.recvfrom(2048)
                modifiedMessage=message.decode().upper()
                serverSocket.sendto(modifiedMessage.encode(), clientAddress)
        except KeyboardInterrupt:
            # Ctrl-C: report and exit with a non-zero status.
            print("Keyboard interrupt")
            exit(1)
UDPServer()
| true |
f3ed8928c63c260a81979a28fc43e0c835280e84 | Python | jimms/leetcode | /63.py | UTF-8 | 751 | 2.984375 | 3 | [] | no_license | class Solution(object):
def uniquePathsWithObstacles(self, g):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
m = len(g)
n = len(g[0])
a = []
for i in range(m):
a.append([0] * n)
for i in range(m - 1, -1, -1):
for j in range(n - 1, -1, -1):
if g[i][j] == 1:
g[i][j] = 0
else:
if i == m - 1 and j == n - 1:
a[i][j] = 1
else:
if i + 1 < m:
a[i][j] += a[i + 1][j]
if j + 1 < n:
a[i][j] += a[i][j + 1]
return a[0][0]
| true |
79b1d666d6fe69fcba41b1d958607d2e948cdd41 | Python | koalahang/covid19-mobility | /PODA_Model_Code/myFunctions.py | UTF-8 | 2,169 | 2.734375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Tue May 12 00:52:39 2020
@author: hexx
"""
import os
def def_add_datashift(data_frame, column_name, x):
    """Add one column per offset in *x*: a copy of *column_name* shifted up
    by that many rows, named '<column>_shifted_<offset>'. Mutates and
    returns *data_frame*."""
    for offset in x:
        shifted_name = '{}_shifted_{}'.format(column_name, offset)
        data_frame[shifted_name] = data_frame[column_name].shift(-offset)
    return data_frame
def createFolder(directory):
    """Create *directory* (including parents) if it does not already exist.

    Errors are printed rather than raised, matching the original
    best-effort behaviour.
    """
    try:
        # exist_ok avoids the check-then-create race of the original
        # ``if not os.path.exists(...)`` guard.
        os.makedirs(directory, exist_ok=True)
    except OSError:
        print('Error: Creating directory. ' + directory)
def Google_factor (data_used, factor):
    # Scale each trip-purpose category by the matching Google mobility trend
    # column (percent change), using the per-category sensitivities in
    # *factor*. Mutates *data_used* in place and returns it.
    data_used['work factor'] = 1 + data_used['workplaces']/100*factor[0]
    data_used['school factor'] = 1 + data_used['workplaces']/100*factor[1]
    data_used['medical factor'] = 1 + data_used['grocery_and_pharmacy']/100*factor[2]
    data_used['shopping factor'] = 1 + data_used['grocery_and_pharmacy']/100*factor[3]
    data_used['social factor'] = 1 + data_used['workplaces']/100*factor[4]
    data_used['park factor'] = 1 + data_used['parks']/100*factor[5]
    data_used['transport someone factor'] = 1+ data_used['workplaces']/100*factor[7]
    data_used['meals factor'] = 1 + data_used['workplaces']/100*factor[6]
    data_used['else factor'] = 1+ data_used['workplaces']/100*factor[7]
    # Weighted sum over the trip-share columns (percentages, hence /100).
    # factor[8] splits Social/Recreational between the social and park
    # factors; factor[9] is an additive offset.
    data_used['accumulated factor'] = (data_used['Work']*data_used['work factor'] + \
                 data_used['School/Daycare/Religious activity']*data_used['school factor'] + \
                 data_used['Medical/Dental services']*data_used['medical factor'] + \
                 data_used['Shopping/Errands']*data_used['shopping factor'] + \
                 data_used['Social/Recreational']*factor[8]*data_used['social factor'] + \
                 data_used['Social/Recreational']*(1-factor[8])*data_used['park factor'] + \
                 data_used['Meals']*data_used['meals factor'] +\
                 data_used['Transport someone']*data_used['transport someone factor'] + \
                 data_used['Something else']*data_used['else factor'])/100 + factor[9]
    return data_used
d2ecae5e7934e2d0e9b7791edef89cee80c4933f | Python | CodingPirates/taarnby-python | /uge3/dyr.py | UTF-8 | 3,222 | 3.84375 | 4 | [] | no_license | # Her laver vi en ny klasse. Bemærk at vi altid laver klassenavne med et stort bogstav
class Dyr:
# Her kommer nogle variable der tilhører klassen. Alle objekter af samme klasse deler disse
dyrtype = "dyr"
# Her kommer initialiseringsfunktionen. Den køres hver gang vi laver et nyt objekt af denne klasse
# Vores klasse skal kunne lave al slags dyr, og derfor er der mange argumenter
def __init__(self, navn, skind, lyd, foede, antal_ben, hvirveldyr):
# "self" betyder "mig selv". Når man siger self.antal_ben = 2, så sættes antal hjul til 2 for lige præcis
# dette her objekt. Andre objekter af samme klasse kan have en helt andet antal ben
self.navn = navn
self.antal_ben = antal_ben
self.skind = skind
self.hvirveldyr = hvirveldyr
self.lyd = lyd
self.foede = foede
def hvem_er_jeg(self):
print("Jeg er en "+self.dyrtype+" som hedder "+self.navn+". Jeg er dækket med "+self.skind+", har "+str(self.antal_ben)+" ben og jeg siger "+ str(self.lyd))
if self.hvirveldyr == True:
print("Jeg er et hvirveldyr")
else:
print("Jeg er ikke et hvirveldyr")
def spisetid(self):
print("Omnom, jeg spiser "+self.foede)
# Dyr is a base class, but we can derive more specialised classes from it --
# no reason to reinvent the wheel.
# We write a much simpler "init", because every goldfish has 0 legs and eats
# larvae. They only get a name, so construction is very simple.
class Guldfisk(Dyr):
    dyrtype = "guldfisk"
    def __init__(self,navn):
        Dyr.__init__(self, navn, "skæl", "ikke noget", "larver", 0, False)
# Now a slightly more advanced animal: every fox has 4 legs, fur and its
# own sound -- and they also get a name.
class Raev(Dyr):
    dyrtype = "ræv"
    def __init__(self,navn):
        Dyr.__init__(self, navn, "pels", "wa pa pa pa pa pa pow", "mus", 4, True)
    # If the base-class method does not quite fit, we can write a new one.
    # That is called "overriding" the method: for objects of type Raev this
    # version is the one that runs.
    def spisetid(self):
        print("Jeg kan rigtig godt lide at jage "+self.foede+" i skoven. Mums!")
# We also need cats. Besides a name they can have a breed; if no breed is
# given the cat is a "gadekryds" (mixed breed).
class Kat(Dyr):
    dyrtype = "kat"
    # Cats carry one extra piece of information: their breed.
    def __init__(self,navn,race="gadekryds"):
        Dyr.__init__(self, navn, "pels", "miauw", "kattemad", 4, True)
        self.race = race
    # Override "hvem_er_jeg" because we have extra information (the breed).
    def hvem_er_jeg(self):
        print("Jeg er en " + self.race + "-kat som hedder " + self.navn + ". Jeg er dækket med " + self.skind + ", har " + str(
            self.antal_ben) + " ben og jeg siger " + str(self.lyd))
    # Cats can purr, which makes no sense for the other animals, so we add a
    # method that does not exist on the base class.
    def spind(self):
        print("Rrrrrrrrrrrrr")
| true |
c0147fff8cf9a2b6282919ecdecb35932ae18eaa | Python | KYHlings/Poke-Mood2.0 | /pygame_upgraded/pygame_states.py | UTF-8 | 1,318 | 2.609375 | 3 | [] | no_license | import pygame as pg
from TextToPygame import start_game
#print("Lets use your new stats, press [Enter] to ge in to the World of Poketeers")
import pygame_upgraded.variables
from pygame_upgraded import global_stuff
from pygame_upgraded.screens import MenuStartScreen, StartScreen
from pygame_upgraded.variables import screen
pg.init()
def mainloop(screen):
    """Run the game's event/render loop until the window is closed.

    NOTE(review): the ``screen`` parameter shadows the module-level import
    of the same name -- confirm which surface is intended where.
    """
    # To be able to go back to startscreen and run popups if not run before
    pygame_upgraded.variables.start_screen = StartScreen()
    state = MenuStartScreen()
    clock = pg.time.Clock()
    while True:
        # Event handling
        ev = pg.event.poll()
        if ev.type == pg.MOUSEBUTTONDOWN:
            # A handler may return the next screen, or None to stay put.
            temp_state = state.handle_mouse_button(ev.button)
            if temp_state is not None:
                state = temp_state
        elif ev.type == pg.QUIT:
            break
        # A screen queued globally (e.g. by a popup) takes priority.
        if global_stuff.next_screen is not None:
            #print("changing frames to", type(common.next_screen))
            state = global_stuff.next_screen
            global_stuff.next_screen = None
        state = state.handle_timer()
        state.render(screen)
        pg.display.update()
        clock.tick(30)  # cap the loop at 30 FPS
if __name__ == '__main__':
    # Initialise shared state, set the window title, run the game loop,
    # then shut pygame down cleanly.
    global_stuff.common_init()
    pg.display.set_caption("PokeMood")
    mainloop(screen)
    pg.quit()
| true |
7570b4ce839370a2879002f6c5cc3f08afba0d82 | Python | phaustin/pythonlibs | /pyutils/pyutils/move_files.py | UTF-8 | 1,365 | 3.28125 | 3 | [
"BSD-3-Clause"
] | permissive | """
move files that don't start with . to a folder, leaving only directories
example: python -m pyutils.moveit thedir
"""
import argparse
import re, os
import tempfile
from pathlib import Path
import errno, sys
dotre = re.compile(r'^\..*')
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: tolerate an existing directory.

    Prints a notice in either case; OSErrors other than EEXIST
    (permissions, bad path, ...) are re-raised.
    """
    try:
        os.makedirs(path, exist_ok=False)
        # Bug fix: the original printed the unrelated global ``dirname``,
        # which is undefined unless the __main__ block has already run.
        print('made: ', path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            print('{} already exists'.format(path))
        else:
            #permission error, etc.
            raise
if __name__ == "__main__":
    # Build an argparse parser whose help text is the module docstring.
    linebreaks = argparse.RawTextHelpFormatter
    descrip = __doc__.lstrip()
    parser = argparse.ArgumentParser(formatter_class=linebreaks,
                                     description=descrip)
    parser.add_argument(
        'dir', nargs='?',
        type=str, help='optional directory name')
    args = parser.parse_args()
    if args.dir:
        dirname = args.dir
        mkdir_p(dirname)
    else:
        confirm = input("confirm creation of temporary directory {}: y/n ")
        if confirm == 'y':
            dirname = tempfile.mkdtemp(prefix='holdit_', dir='.')
    # NOTE(review): if no dir is given and the user answers anything other
    # than 'y', ``dirname`` is undefined and the loop below raises
    # NameError -- confirm intended behaviour.
    # Move every non-hidden, non-directory entry of the cwd into dirname.
    p = Path('.')
    for x in p.iterdir():
        if x.is_dir() or dotre.match(str(x)):
            continue
        newfile = Path(dirname) / x
        x.rename(newfile)
| true |
7ece71eb5727f05d16eee36de10039457bb613e4 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_96/1534.py | UTF-8 | 925 | 2.75 | 3 | [] | no_license | import sys
# Google Code Jam solution -- Python 2 (print statement, xrange). Reads the
# input file named on the command line; each case line is
# "N S p t1 ... tN": N totals, S surprising triples allowed, threshold p.
output = "Case #%s: %s"
f = open(sys.argv[1],'r')
T = int(f.readline())
for counter in xrange(T):
    line = [int(i) for i in f.readline().strip().split(' ')]
    N = line[0]
    S = line[1]
    p = line[2]
    t = line[3:]
    #print N
    #print S
    #print p
    #print t
    # calculus
    # Presumably: minimum triple total reaching best score p without a
    # surprising split, and with one -- TODO confirm against the problem
    # statement.
    normal = p*3-2 if p*3-2 >0 else 0
    surprise = normal - 2 if normal-2>0 else 0
    if p ==1:
        surprise = 1
    t.sort()
    #print t
    toadd = 0
    counter2 = -1
    sol = 0
    #import pdb; pdb.set_trace()
    # Spend the S "surprising" allowances on the lowest qualifying totals
    # first (the list is sorted ascending), then count everything that
    # qualifies normally.
    for i in t:
        counter2 +=1
        if S>0:
            if i<surprise:
                continue
            else:
                S-=1
                toadd+=1
                continue
        else:
            if i < normal:
                continue
            else:
                sol = len(t) - counter2
                break
    sol += toadd
    print output % (counter+1, sol)
| true |
826800f38bf455d7e35ab3d82bd9f8204d727e53 | Python | stephanieeechang/PythonGameProg | /PyCharmProj/turtleRunner.py | UTF-8 | 921 | 4.03125 | 4 | [
"LicenseRef-scancode-public-domain"
] | permissive | import turtle
def square(sqrlength, turtle):
    '''
    Recursively draw concentric squares, shrinking the side by 10 per level
    and offsetting the start point by (5, 5) between squares.

    :param sqrlength: side length of the current (outermost) square
    :param turtle: the turtle used for drawing
    :return: None; recursion stops once the side length drops below 5
    '''
    if sqrlength < 5:
        return
    else:
        for i in range(4):
            turtle.forward(sqrlength)
            turtle.left(90)
        # Lift the pen and shift 5 units along each axis before recursing.
        turtle.up()
        turtle.fd(5)
        turtle.left(90)
        turtle.fd(5)
        turtle.left(-90)
        turtle.down()
        square(sqrlength-10, turtle)
def tree(tur, times, len):
    # Draw a binary fractal tree: advance by *len*, recurse right then left
    # with one less level and a branch shorter by 10, then backtrack so the
    # turtle ends where it started.
    # NOTE(review): parameter ``len`` shadows the builtin of the same name.
    if(times != 1):
        tur.fd(len)
        tur.right(20)
        tree(tur, times-1, len-10)
        tur.left(40)
        tree(tur, times-1, len-10)
        tur.right(20)
        tur.backward(len)
myWin = turtle.Screen()
#creates a turtle
rp = turtle.Turtle()
#draw a square
#square(100, rp)
#draw a tree
# Move below centre without drawing, face upward, then draw the tree.
rp.up()
rp.back(80)
rp.down()
rp.left(90)
tree(rp, 8, 80)
#close window by clicking screen
myWin.exitonclick()
| true |
4b6a4dd4d30363869796df0e65587d4f969dcfa6 | Python | Daniyal56/Python-Projects | /Euclidean distance.py | UTF-8 | 949 | 4.53125 | 5 | [] | no_license | ## 10. Euclidean distance
### Compute the distance between the points (x1, y1) and (x2, y2).
#### Program Console Sample:
###### Enter Co-ordinate for x1: 2 / x2: 4 / y1: 4 / y2: 4
###### Distance between points (2, 4) and (4, 4) is 2


def euclidean_distance(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2)."""
    # Bug fix: the original wrote ``(dx**2 + dy**2) ** 1/2`` which, because
    # ** binds tighter than /, computed half the *squared* distance. The
    # sample (2,4)-(4,4) happened to give 2 either way, hiding the bug.
    return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5


if __name__ == '__main__':
    print('-----------------------------------------Distance between two lines----------------------------------------------')
    # getting input from user
    x1 = int(input('Enter Co-ordinate for x1 : '))
    print('%s cm' % x1)
    x2 = int(input('Enter Co-ordinate for x2 : '))
    print('%s cm' % x2)
    y1 = int(input('Enter Co-ordinate for y1 : '))
    print('%s cm' % y1)
    y2 = int(input('Enter Co-ordinate for y2 : '))
    print('%s cm' % y2)
    # Output
    result = euclidean_distance(x1, y1, x2, y2)
    print('distance between the points (x1, y1) and (x2, y2) is %s cm' % result)
63693af1ffe1097b0415afe723728f77c74b16e4 | Python | bhrigu123/interview_prep_python | /double_dimension.py | UTF-8 | 128 | 3.046875 | 3 | [] | no_license |
def get_dd_matrix(rows, columns):
    # Build a rows x columns matrix of zeros; each row is an independent list.
    matrix = []
    for _ in range(rows):
        matrix.append([0] * columns)
    return matrix
print (get_dd_matrix(1, 10))
| true |
a9c327052c0000f2252162d83635edd69cdb1a6d | Python | Akhila474/Python | /Python prgms/StudentDetails.py | UTF-8 | 834 | 4.59375 | 5 | [] | no_license | 1)Create a Python class called "Student" having "name","age" as attribute along with a list having the marks obtained for three subjects.
# 2) Create a constructor to initialize two objects of this class.
# 3) Create a member function called 'display' printing the details of a
#    specific object.
# 4) Ask the user to enter the values for an object through an 'accept'
#    member function.
# 5) Display these details.
class Student:
    """Student record with name, age and a list of marks entered as text.

    NOTE(review): calling display() before accept() raises AttributeError
    because self.marks is only created inside accept(); the marks are also
    kept as strings, not numbers -- confirm intended.
    """
    def __init__(self):
        # Placeholders; real values are filled in by accept().
        self.name=0
        self.age=0
    def display(self):
        # Print the stored name, age and marks list.
        print(self.name)
        print(self.age)
        print(self.marks)
    def accept(self):
        # Marks are read as one space-separated line and split into a list.
        self.marks = input("Enter marks")
        self.marks=list((self.marks.split(' ')))
        name=input("Enter name")
        age=input("Enter age")
        self.name=name
        self.age=age
# Create one student, read its details from stdin, then print them.
p1=Student()
p1.accept()
p1.display()
| true |
9e2f88c82370bb418b0889b30395186ed711e537 | Python | yuu19/scraping_learing | /selpra2.py | UTF-8 | 853 | 3.15625 | 3 | [] | no_license | import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Configure ChromeOptions for headless mode (no visible browser window).
chrome_option = webdriver.ChromeOptions()
chrome_option.add_argument('--headless')
chrome_option.add_argument('--disable-gpu')
driver = webdriver.Chrome(options=chrome_option)
url = 'http://www.webscrapingfordatascience.com/complexjavascript/'
driver.get(url)
# Wait (up to 10 s) until the JavaScript-rendered quote elements exist.
quote_elements = WebDriverWait(driver, 10).until(
    EC.presence_of_all_elements_located(
        (By.CSS_SELECTOR, ".quote:not(.decode)") # first item: locator strategy; second item: the actual selector value
    )
)
for quote in quote_elements:
    print(quote.text)
| true |
ef7c242b9d8e9c6d2994ea18853e5b56672f4e6c | Python | mokuno3430/emu | /lyla_plot/bin/alignment.py | UTF-8 | 10,187 | 2.734375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as clr
import sys
import csv
import common
import os.path
class Colormap:
    """Maps alignment identity percentages to plot colours.

    ``pallet`` is a matplotlib LinearSegmentedColormap spec (position,
    value-below, value-above triples per channel) defining the custom
    'original' map; ``cmaps``/``cmap_list`` pair indices with names.
    """
    pallet = {
        'red':((0.00, 0.10, 0.10),
               (0.25, 0.10, 0.10),
               (0.40, 0.30, 0.30),
               (0.60, 1.00, 1.00),
               (0.80, 0.90, 0.90),
               (1.00, 0.70, 0.70)),
        'green':((0.00, 0.10, 0.10),
                 (0.25, 0.60, 0.60),
                 (0.40, 0.80, 0.80),
                 (0.60, 0.75, 0.75),
                 (0.80, 0.30, 0.30),
                 (1.00, 0.15, 0.15)),
        'blue':((0.00, 0.40, 0.40),
                (0.25, 1.00, 1.00),
                (0.40, 0.25, 0.25),
                (0.60, 0.00, 0.00),
                (0.80, 0.05, 0.05),
                (1.00, 0.20, 0.20))
    }
    mycm = clr.LinearSegmentedColormap('original', pallet )
    cmaps = [ cm.binary, cm.bone_r, cm.inferno_r, cm.hot_r, cm.YlGnBu, mycm ]
    cmap_list = [ 'binary', 'bone_r', 'inferno_r', 'hot_r', 'YlGnBu', 'original' ]
    def __init__( self, min_identity, max_identity, cm, alpha=0.5 ):
        # cm is an index into Colormap.cmaps, not a colormap object.
        self.min_identity = min_identity
        self.max_identity = max_identity
        self.alpha = alpha
        self.cm = cm
    def convert_identity2color( self, identity ):
        # Normalise identity into [0, 1] over the configured range, then
        # look it up in the selected colormap.
        color_value=( identity - self.min_identity )/( self.max_identity - self.min_identity )
        align_color=Colormap.cmaps[ self.cm ]( color_value )
        return align_color
    def output_parameters( self ):
        # Dump the configuration for logging/debugging.
        print( '##Colormap paramenters:' )
        print( ' min_identity: %d' % ( self.min_identity))
        print( ' max_identity: %d' % ( self.max_identity ))
        print( ' alpha: %.2f' % ( self.alpha ))
        print( ' colormap: %d (%s)' % ( self.cm, Colormap.cmap_list[self.cm] ))
        print( '' )
class Colorbox:
    """Draws the identity colour legend (a horizontal gradient bar) below
    the main plot."""
    def __init__( self, size ):
        # Legend geometry is derived from the overall figure layout in *size*.
        self.height = size.bottom_margin * 0.2
        self.width = size.xlim_max * 0.2
        self.origin_x = size.xlim_max * 0.55
        self.origin_y = float( ( size.bottom_margin - self.height ) /2 )
    def plot( self, ax, heatmap ):
        # One filled cell per integer identity value in the heatmap's range,
        # labelled every 10 percentage points.
        cell_width = self.width / ( heatmap.max_identity - heatmap.min_identity )
        text_legend = common.Text( 'identity (%)', 8, self.origin_x - cell_width * 2, self.origin_y + self.height / 2, 'right', 'center' )
        text_legend.output( ax )
        cell_originx = self.origin_x
        for i in range( heatmap.min_identity, heatmap.max_identity + 1 ):
            align_color=heatmap.convert_identity2color( i )
            colorbox_x = [ cell_originx, cell_originx + cell_width, cell_originx + cell_width, cell_originx ]
            colorbox_y = [ self.origin_y, self.origin_y, self.origin_y + self.height, self.origin_y + self.height ]
            cell_originx += cell_width
            ax.fill( colorbox_x, colorbox_y, color=align_color, alpha=heatmap.alpha, linewidth=0 )
            if( i % 10 == 0 ):
                num_legend = common.Text( i, 6, cell_originx, self.origin_y - 0.06, 'center', 'top' )
                num_legend.output( ax )
    def output_parameters( self ):
        # Dump the legend geometry for logging/debugging.
        print( '##Colorbox paramenters:' )
        print( ' width: %.2f' % ( self.width ))
        print( ' height: %.2f' % ( self.height ))
        print( ' origin_x %.2f' % ( self.origin_x ))
        print( ' origin_y: %.2f' % ( self.origin_y ))
        print( '' )
def convert_position2coord( A, B, A_start, A_end, B_start, B_end, margin, A_h_height, B_h_height ):
    # Map an alignment between sequence tracks A and B onto plot coordinates:
    # returns the x and y vertex lists of the quadrilateral joining the spans.
    x = [ A.convert_position2xcoord( A_start ),
          A.convert_position2xcoord( A_end ),
          B.convert_position2xcoord( B_end ),
          B.convert_position2xcoord( B_start ) ]
    if A.origin_y < B.origin_y:
        # A is drawn below B: go from the top of A's track to just under B.
        top = A.convert_position2ycoord( margin + A.height + A_h_height )
        bottom = B.convert_position2ycoord( -margin )
    else:
        # NOTE(review): this branch combines A.height with B_h_height --
        # confirm A.height (and not B.height) is intended here.
        top = A.convert_position2ycoord( -margin )
        bottom = B.convert_position2ycoord( margin + A.height + B_h_height )
    y = [ top, top, bottom, bottom ]
    return x, y
def count_alignment_files( args ):
    # Count how many of the alignment files named on the command line
    # (across all four supported input formats) actually exist on disk.
    valid_files = 0
    for file_group in ( args.alignment, args.blastn, args.lastz, args.mummer ):
        if file_group is None:
            continue
        valid_files += sum( 1 for fn in file_group if os.path.isfile( fn ) )
    return valid_files
def set_min_identity( args ):
    # Decide the lower bound of the identity colour scale: use the value
    # the user supplied; otherwise scan every existing input file (all four
    # formats) and take the smallest bucketed identity found, or 0 if none.
    input_formats = [ args.alignment, args.blastn, args.lastz, args.mummer ]
    func_set_min_identity = [ cal_min_identity4original, cal_min_identity4blastn, cal_min_identity4lastz, cal_min_identity4mummer ]
    if args.min_identity != -1:
        return args.min_identity
    found = []
    for files, minimum_of in zip( input_formats, func_set_min_identity ):
        if files is None:
            continue
        for fn in files:
            if not os.path.isfile( fn ):
                print( 'WARNING: %s is not found\n' % fn )
                continue
            found.append( minimum_of( fn ) )
    return min( found ) if found else 0
def plot_alignment4original( seqs, ax, heatmap, size, fn ):
    # Draw each alignment record of the in-house tab-separated format as a
    # filled quadrilateral coloured by identity (column 7).
    with open( fn , 'r' ) as file:
        for line in file:
            buf = line.rstrip( '\n' ).split( '\t' )
            i = common.detect_index( buf[0], int( buf[1] ), int( buf[2] ), seqs )
            # NOTE(review): buf[4] < buf[5] compares the *strings*, not the
            # integer positions -- e.g. "100" < "99" lexicographically.
            if( buf[4] < buf[5] ):
                j = common.detect_index( buf[3], int( buf[4] ), int( buf[5] ), seqs )
            else:
                j = common.detect_index( buf[3], int( buf[5] ), int( buf[4] ), seqs )
            color = heatmap.convert_identity2color( float( buf[6] ))
            # detect_index returns -1 for sequences not on the plot.
            if( i == -1 or j == -1 ):
                continue
            x, y = convert_position2coord( seqs[i][buf[0]], seqs[j][buf[3]], int( buf[1] ), int( buf[2] ), int( buf[4] ), int( buf[5] ), size.margin_bw_scaffold_alignment, size.histograms[i], size.histograms[j] )
            ax.fill( x, y, color=color, alpha=heatmap.alpha, lw=0 )
def plot_alignment4blastn( seqs, ax, heatmap, size, fn ):
    # Draw alignments from blastn tabular output (outfmt 6): query/subject
    # positions in columns 7-10, percent identity in column 3.
    with open( fn , 'r' ) as file:
        for line in file:
            buf = line.rstrip( '\n' ).split( '\t' )
            i = common.detect_index( buf[0], int( buf[6] ), int( buf[7] ), seqs )
            # NOTE(review): buf[8] < buf[9] compares strings, not integers.
            if( buf[8] < buf[9] ):
                j = common.detect_index( buf[1], int( buf[8] ), int( buf[9] ), seqs )
            else:
                j = common.detect_index( buf[1], int( buf[9] ), int( buf[8] ), seqs )
            color = heatmap.convert_identity2color( float( buf[2] ))
            if( i == -1 or j == -1 ):
                continue
            x, y = convert_position2coord( seqs[i][buf[0]], seqs[j][buf[1]], int( buf[6] ), int( buf[7] ), int( buf[8] ), int( buf[9] ), size.margin_bw_scaffold_alignment, size.histograms[i], size.histograms[j] )
            ax.fill( x, y, color=color, alpha=heatmap.alpha, lw=0 )
def plot_alignment4lastz( seqs, ax, heatmap, size, fn ):
    # Draw alignments from lastz tabular output; '#' lines are headers.
    # Minus-strand coordinates are converted using the sequence length
    # in column 9; identity is column 13 with a trailing '%'.
    with open( fn , 'r' ) as file:
        for line in file:
            if( line[0:1] == "#" ):
                continue
            buf = line.rstrip( '\n' ).split( '\t' )
            i = common.detect_index( buf[1], int( buf[4] ), int( buf[5] ), seqs )
            if( buf[7] == '+' ):
                s_pos = int( buf[9] )
                e_pos = int( buf[10] )
                j = common.detect_index( buf[6], s_pos, e_pos, seqs )
            else:
                # Reverse strand: flip positions against the sequence length.
                s_pos = int( buf[8] ) - int( buf[9] )
                e_pos = int( buf[8] ) - int( buf[10] )
                j = common.detect_index( buf[6], e_pos, s_pos, seqs )
            # Strip the trailing '%' before parsing the identity.
            color = heatmap.convert_identity2color( float( buf[12][:-1] ))
            if( i == -1 or j == -1 ):
                continue
            x, y = convert_position2coord( seqs[i][buf[1]], seqs[j][buf[6]], int( buf[4] ), int( buf[5] ), s_pos, e_pos, size.margin_bw_scaffold_alignment, size.histograms[i], size.histograms[j] )
            ax.fill( x, y, color=color, alpha=heatmap.alpha, lw=0 )
def plot_alignment4mummer( seqs, ax, heatmap, size, fn ):
    # Draw alignments from MUMmer show-coords-style output: whitespace
    # separated, positions in columns 1-2 and 4-5, identity in column 10,
    # sequence names in columns 12-13.
    with open( fn , 'r' ) as file:
        for line in file:
            buf = line.rstrip( '\n' ).split( )
            i = common.detect_index( buf[11], int( buf[0] ), int( buf[1] ), seqs )
            j = common.detect_index( buf[12], int( buf[3] ), int( buf[4] ), seqs )
            color = heatmap.convert_identity2color( float( buf[9] ))
            if( i == -1 or j == -1 ):
                continue
            x, y = convert_position2coord( seqs[i][buf[11]], seqs[j][buf[12]], int( buf[0] ), int( buf[1] ), int( buf[3] ), int( buf[4] ), size.margin_bw_scaffold_alignment, size.histograms[i], size.histograms[j] )
            ax.fill( x, y, color=color, alpha=heatmap.alpha, lw=0 )
def cal_min_identity4original( fn ):
    """Return the smallest identity bucket (floored to a multiple of 10)
    seen in column 7 of an in-house tab-separated alignment file *fn*.

    Returns 0 for an empty file (the original called min() on an empty
    dict and raised ValueError); uses a set instead of a keys-only dict.
    """
    BIN = 10
    buckets = set()
    with open( fn, 'r' ) as file:
        for line in file:
            buf = line.rstrip( '\n' ).split( '\t' )
            buckets.add( int( float( buf[6] ) / BIN ) * BIN )
    return min( buckets ) if buckets else 0
def cal_min_identity4blastn( fn ):
    """Return the smallest identity bucket (floored to a multiple of 10)
    seen in column 3 of a blastn tabular (outfmt 6) file *fn*.

    Returns 0 for an empty file (the original called min() on an empty
    dict and raised ValueError); uses a set instead of a keys-only dict.
    """
    BIN = 10
    buckets = set()
    with open( fn, 'r' ) as file:
        for line in file:
            buf = line.rstrip( '\n' ).split( '\t' )
            buckets.add( int( float( buf[2] ) / BIN ) * BIN )
    return min( buckets ) if buckets else 0
def cal_min_identity4lastz( fn ):
    """Return the smallest identity bucket (floored to a multiple of 10)
    seen in a lastz tabular file *fn* (column 13, trailing '%' stripped);
    '#' header lines are skipped.

    Returns 0 for an empty file (the original called min() on an empty
    dict and raised ValueError); uses a set instead of a keys-only dict.
    """
    BIN = 10
    buckets = set()
    with open( fn, 'r' ) as file:
        for line in file:
            if line[0:1] == "#":
                continue
            buf = line.rstrip( '\n' ).split( '\t' )
            buckets.add( int( float( buf[12][:-1] ) / BIN ) * BIN )
    return min( buckets ) if buckets else 0
def cal_min_identity4mummer( fn ):
    """Return the smallest identity bucket (floored to a multiple of 10)
    seen in column 10 of a whitespace-separated MUMmer coords file *fn*.

    Returns 0 for an empty file (the original called min() on an empty
    dict and raised ValueError); uses a set instead of a keys-only dict.
    """
    BIN = 10
    buckets = set()
    with open( fn, 'r' ) as file:
        for line in file:
            buf = line.rstrip( '\n' ).split( )
            buckets.add( int( float( buf[9] ) / BIN ) * BIN )
    return min( buckets ) if buckets else 0
| true |
6c26eee3b256bd33aeb2c6ba99f4047d7fbbc4de | Python | piyushpatel2005/Python | /examples/testing/test_mymath.py | UTF-8 | 945 | 3.546875 | 4 | [] | no_license | import mymath
import unittest
class TestAdd(unittest.TestCase):
"""
Test the add function from mymath library
"""
def test_add_integers(self):
"""
Tests that the addition of two integers returns the correct total
"""
result = mymath.add(1, 2)
self.assertEqual(result, 3)
def test_add_floats(self):
result = mymath.add(10.5, 2)
self.assertEqual(result, 12.5)
@unittest.skip('Skip this test')
def test_add_strings(self):
result = mymath.add('abc', 'def')
self.assertEqual(result, 'abcdef')
# @unittest.skipUnless(sys.platform.startswith("win"), "requires windows")
def test_adding_on_windows(self):
result = mymath.add(1, 2)
self.assertEqual(result, 3)
def test_subtract_integers(self):
result = mymath.subtract(10, 8)
self.assertEqual(result, 2)
if __name__ == '__main__':
unittest.main()
| true |
caaab9031bb3d2a9a3ff51e79b9da15227b9c6c0 | Python | thomcom/berserker | /advmodel/AdvBuilders/WeaponBuilder.py | UTF-8 | 520 | 2.875 | 3 | [] | no_license | # Build objects of type Weapon
from advmodel.AdvBuilders import ItemBuilder
from advmodel.AdvDataObjects import Weapon
from advmodel.AdvDataObjects import DieRoll
from advview.Log import Log
class WeaponBuilder(ItemBuilder):
    """Builder that produces Weapon objects from JSON item data."""

    def Build(self):
        """Construct a Weapon from self.jsonData.

        Delegates the generic item fields to a plain ItemBuilder, then
        re-tags the built object as a Weapon and attaches its damage roll.
        Returns None when the weapon-specific fields cannot be applied.
        """
        base_builder = ItemBuilder()
        base_builder.SetJson(self.jsonData)
        item = base_builder.Build()
        try:
            # Re-brand the generic item as a Weapon and give it a damage roll
            # built from the JSON "damage" entry.
            item.__class__ = Weapon
            item.damage = DieRoll(self.jsonData["damage"])
        except Exception:
            # Best-effort: any failure (missing key, bad roll spec, item is
            # None) yields no weapon rather than propagating the exception.
            return None
        return item
76cda9f0d1ad55dbb78a84411f1ffaec3f3d38f0 | Python | dynasty919/stanford_algorithms | /course3_pa4_knapsack_big.py | UTF-8 | 1,523 | 3.09375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2018/3/7 16:56
# @Author : dynasty919
# @Email : dynasty919@163.com
# @File : course3_pa4_knapsack_big.py
# @Software: PyCharm
def readfile(fn='knapsack_big.txt'):
    """Parse a knapsack instance file into {index: (value, weight)}.

    The first line of the file is a header (skipped); every following
    line holds "value weight" separated by a single space.

    fn -- path of the instance file; the default preserves the original
          hard-coded behaviour.

    Returns a dict mapping 0-based item index to (int value, int weight).
    The item count is taken from the file itself rather than being
    hard-coded to 2000, so shorter instances no longer raise IndexError.
    """
    with open(fn, 'rt') as f:
        lines = f.readlines()
    items = {}
    for idx, raw in enumerate(lines[1:]):  # skip the header line
        value, weight = raw.strip().split(' ')
        items[idx] = (int(value), int(weight))
    return items
def knapsack(stuff=None, capacity=2000000):
    """Solve the 0/1 knapsack problem by bottom-up dynamic programming.

    stuff    -- dict mapping 0-based item index to (value, weight);
                defaults to the instance loaded by readfile().
    capacity -- knapsack weight limit; the default matches the original
                hard-coded 2,000,000.

    Returns the maximum total value achievable within the capacity.

    Replaces the original pair of per-item dicts keyed by
    (item, weight) tuples with a single rolling 1-D table: iterating the
    weights in decreasing order makes the one-row update equivalent to
    the two-row recurrence while using far less memory.  The item count
    is taken from len(stuff) instead of being hard-coded to 2000.
    """
    if stuff is None:
        stuff = readfile()
    # best[w] == best value over the items considered so far with limit w
    best = [0] * (capacity + 1)
    for i in range(len(stuff)):
        value, weight = stuff[i]
        # Reverse order so each item is counted at most once per update.
        for w in range(capacity, weight - 1, -1):
            candidate = best[w - weight] + value
            if candidate > best[w]:
                best[w] = candidate
    return best[capacity]
def main():
    """Print the optimal value for the default knapsack instance."""
    print(knapsack())


# Guard the entry point: the original called main() unconditionally, so
# merely importing this module ran the very expensive solver as a side
# effect.  Direct execution (`python course3_pa4_knapsack_big.py`) is
# unchanged.
if __name__ == '__main__':
    main()
26beefaa416bbee8f44b81891dcb4ed31d07aeac | Python | luvt2019/DecisionTree-Model | /DecisionTree.py | UTF-8 | 2,555 | 3.03125 | 3 | [] | no_license | import pandas as pd
# Exploratory analysis of LendingClub-style loan data followed by a
# decision tree and a random forest predicting the not.fully.paid label.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# NOTE(review): the original file contained "%matplotlib inline" here --
# IPython notebook magic that is a SyntaxError when this file runs as a
# plain Python script.  It has been removed; run inside a notebook or
# call plt.show() to display the figures.

loans = pd.read_csv('loan_data.csv')

# Create a histogram of two FICO distributions on top of each other, one for each credit.policy outcome
plt.figure(figsize = (11,7))
loans[loans['credit.policy'] == 1]['fico'].hist(alpha = 0.5, bins = 30,color='blue',label='Credit Policy = 1')
loans[loans['credit.policy'] == 0]['fico'].hist(alpha = 0.5, bins = 30,color='red',label='Credit Policy = 0')
plt.legend()
plt.xlabel('FICO')

# Create a similar figure, except this time select by the not.fully.paid column
plt.figure(figsize = (11,7))
loans[loans['not.fully.paid'] == 1]['fico'].hist(alpha = 0.5, bins = 30, color = 'blue', label = 'not.fully.paid = 1')
loans[loans['not.fully.paid'] == 0]['fico'].hist(alpha = 0.5, bins = 30, color = 'red', label = 'not.fully.paid = 0')
plt.legend()
plt.xlabel('FICO')

# Create a countplot using seaborn showing the counts of loans by purpose, with the color hue defined by not.fully.paid
plt.figure(figsize = (11,7))
sns.countplot(x = 'purpose',data = loans, hue='not.fully.paid')

# Observe the trend between FICO score and interest rate in a jointplot
sns.jointplot(x='fico',y='int.rate',data=loans)

# Create lmplots to see if the trend differed between not.fully.paid and credit.policy
plt.figure(figsize=(11,7))
sns.lmplot(x='fico',y='int.rate',data=loans,hue='credit.policy', col='not.fully.paid')

# Transform categorical data (purpose column) using dummy variables so sklearn will be able to understand them
cat_feats = ['purpose']
final_data = pd.get_dummies(loans,columns=cat_feats,drop_first=True)

# Split data into training and testing sets
from sklearn.model_selection import train_test_split
X = final_data.drop('not.fully.paid',axis=1)
Y = final_data['not.fully.paid']
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.3,random_state=101)

# Train model
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
dtree.fit(X_train, Y_train)

# Predictions and evaluation of model
predictions = dtree.predict(X_test)
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(Y_test,predictions))
print(confusion_matrix(Y_test,predictions))

# Trying a random forest model
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators = 600)
rfc.fit(X_train,Y_train)
predict2 = rfc.predict(X_test)
print(classification_report(Y_test,predict2))
print(confusion_matrix(Y_test,predict2))
| true |