| blob_id (string, 40 chars) | language (string, 1 class) | repo_name (string, 5–133 chars) | path (string, 2–333 chars) | src_encoding (string, 30 classes) | length_bytes (int64, 18–5.47M) | score (float64, 2.52–5.81) | int_score (int64, 3–5) | detected_licenses (list, 0–67 items) | license_type (string, 2 classes) | text (string, 12–5.47M chars) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
d99b293ea7b3d1229ce7fd965f1002179cef267b
|
Python
|
sanderfo/IN1900
|
/uke6/oscilating_spring.py
|
UTF-8
| 938
| 3.390625
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
A = -0.3  # Set A = -0.3 since the weight is pulled down, so the positive direction is up
k = 4  # the remaining constants are given in the exercise
gamma = 0.15
m = 9
t_array = np.zeros(101)  # fill the arrays with zeros
y_array = np.zeros(101)
# a)
for i in range(len(t_array)):  # for loop to fill the arrays
    t_array[i] = 25*i/100  # trick to get evenly spaced t-values from 0 to 25
    y_array[i] = A*np.exp(-gamma*t_array[i])*np.cos(np.sqrt(k/m)*t_array[i])  # y-values from the formula
# b) The Pythonic solution:
t_array2 = np.linspace(0, 25, 101)  # linspace for the same purpose
y_array2 = A*np.exp(-gamma*t_array2)*np.cos(np.sqrt(k/m)*t_array2)  # use the arrays directly
plt.plot(t_array, y_array)  # plot the first pair of arrays
plt.plot(t_array2, y_array2)  # and the second
plt.xlabel("Time in seconds")
plt.ylabel("Position in meters from equilibrium")
plt.show()
"""
Terminal > run oscilating_springs
*plot here*
"""
| true
|
498001274e57ba3e8fcc97341cc29adbd5db0f30
|
Python
|
SibylLab/TOBE
|
/view/Display.py
|
UTF-8
| 2,167
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
__author__ = "Akalanka Galappaththi"
__email__ = "a.galappaththi@uleth.ca"
__copyright__ = "Copyright 2020, The Bug Report Summarization Project @ Sybil-Lab"
__license__ = "MIT"
__maintainer__ = "Akalanka Galappaththi"
import pprint
import json
from models.Turn import Turn
from models.Sentence import Sentence
from models.BugReport import BugReport
from models.ListOfBugReports import ListOfBugReports
pp = pprint.PrettyPrinter(indent=2)
class Display:
def __init__(self):
pass
def displayMessage(self, msg):
"""Display message
Parameters
----------
msg : str
Message
"""
print("{}".format(msg))
def displayBugReport(self, bugReport, ct=False):
"""Display bug report
Parameters
----------
bugReport : object
Bug reort object
ct : boolean
Parameter that enable the print cleaned text
"""
print("{}".format(bugReport.get_title()))
for turn in bugReport.list_of_turns:
print("\n \t Author:{}".format(turn.get_author()))
print("\t Date:{}".format(turn.get_date_time()))
for sentence in turn.list_of_sentences:
if ct == True:
print(
"\t\t {} : {}".format(
sentence.get_id(), sentence.get_cleaned_text()
)
)
print("\t\t {}".format(sentence.get_tags()))
else:
print("\t\t {} : {}".format(sentence.get_id(), sentence.get_text()))
print("\t\t {}".format(sentence.get_tags()))
def getBugReportJson(self, bugReport, ct=False):
"""Display bug report
Parameters
----------
bugReport : int
Bug reort object
ct : boolean
Parameter that enable the print cleaned text
Returns
-------
j_obj : json object
Bugreport as a JSON
"""
return json.dumps(bugReport, default=lambda obj: obj.__dict__)
| true
|
cae780901fffd2637d9ab24aa4d5972d4d3860cf
|
Python
|
syedmeesamali/Python
|
/0_AI_ML_OpenCV/2_OpenCV/2_CAM/faces_train.py
|
UTF-8
| 1,271
| 2.890625
| 3
|
[] |
no_license
|
import os
import cv2 as cv
import numpy as np
people = ['Ahsin', 'Meesam']
DIR = r'C:\Users\SYED\Downloads\Family'
haar_cascade = cv.CascadeClassifier('haarcascade.xml')
features = []
labels = []
def create_train():
for person in people:
path = os.path.join(DIR, person)
label = people.index(person)
for img in os.listdir(path):
img_path = os.path.join(path, img)
img_array = cv.imread(img_path)
gray = cv.cvtColor(img_array, cv.COLOR_BGR2GRAY)
faces_rect = haar_cascade.detectMultiScale(gray, scaleFactor = 1.1, minNeighbors = 4)
for (x, y, w, h) in faces_rect:
faces_roi = gray[y:y+h, x:x+w]
features.append(faces_roi)
labels.append(label)
create_train()
print('Training has been completed ------- !')
#print(f'Length of features : {len(features)}')
#print(f'Length of labels : {len(labels)}')
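# dtype='object' is needed because the face ROIs have different shapes, so they cannot form a regular numeric array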
features = np.array(features, dtype='object')
labels = np.array(labels)
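# Note: the cv.face module used below is shipped with the opencv-contrib-python package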
face_recognizer = cv.face.LBPHFaceRecognizer_create()
#Train the recognizer on features and labels acquired above
face_recognizer.train(features, labels)
face_recognizer.save('face_trained.yml')
np.save('features.npy', features)
np.save('labels.npy', labels)
| true
|
97c2b2af2296764f90c81d2a73d72aa7df98b120
|
Python
|
NHPatterson/bfio
|
/examples/ScalableTiledTiffConverter.py
|
UTF-8
| 1,884
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
from bfio import BioReader, BioWriter
import math, requests
from pathlib import Path
from multiprocessing import cpu_count
""" Get an example image """
# Set up the directories
PATH = Path("data")
PATH.mkdir(parents=True, exist_ok=True)
# Download the data if it doesn't exist
URL = "https://github.com/usnistgov/WIPP/raw/master/data/PyramidBuilding/inputCollection/"
FILENAME = "img_r001_c001.ome.tif"
if not (PATH / FILENAME).exists():
content = requests.get(URL + FILENAME).content
(PATH / FILENAME).open("wb").write(content)
""" Convert the tif to tiled tiff """
# Number of tiles to process at a time
# This value squared is the total number of tiles processed at a time
tile_grid_size = math.ceil(math.sqrt(cpu_count()))
# Do not change this, the number of pixels to be saved at a time must
# be a multiple of 1024
tile_size = tile_grid_size * 1024
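# Example: on an 8-core machine, tile_grid_size = ceil(sqrt(8)) = 3 and tile_size = 3 * 1024 = 3072 pixels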
# Set up the BioReader
with BioReader(PATH / FILENAME, backend='java', max_workers=cpu_count()) as br:
# Loop through timepoints
for t in range(br.T):
# Loop through channels
for c in range(br.C):
with BioWriter(PATH.with_name(f'out_c{c:03}_t{t:03}.ome.tif'),
backend='python',
metadata=br.metadata,
max_workers = cpu_count()) as bw:
# Loop through z-slices
for z in range(br.Z):
# Loop across the length of the image
for y in range(0,br.Y,tile_size):
y_max = min([br.Y,y+tile_size])
# Loop across the depth of the image
for x in range(0,br.X,tile_size):
x_max = min([br.X,x+tile_size])
bw[y:y_max,x:x_max,z:z+1,0,0] = br[y:y_max,x:x_max,z:z+1,c,t]
| true
|
0bc359cd7eaf4ba2213e2946fefd061d9550f4bf
|
Python
|
wing7171/biendata-competition-lizi
|
/generate_data.py
|
UTF-8
| 481
| 2.546875
| 3
|
[] |
no_license
|
import pandas as pd
from feature_engineering import calculate_feature
test_path = './jet_simple_data/simple_test_R04_jet.csv'
train_path = './jet_simple_data/simple_train_R04_jet.csv'
train = pd.read_csv(train_path,nrows=100)
test = pd.read_csv(test_path,nrows=100)
print('finish data read')
#### add features #####
train, test = calculate_feature(train, test)
# train.to_csv("./data_fea/train_fea_1.csv", index=False)
# test.to_csv("./data_fea/test_fea_1.csv", index=False)
| true
|
cddcfac7d61a82f05c3a7c0ff8222e4dcd567935
|
Python
|
geoflows/D-Claw
|
/python/dclaw/get_data.py
|
UTF-8
| 2,904
| 2.515625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import os
from pyclaw.data import Data
"""
Lightweight functions to get dictionaries of attributes from .data files.
KRB April 2022
"""
def get_tsunami_data(project_path, output="_output", file="settsunami.data"):
data = Data(os.path.join(project_path, output, file))
return {key: data.__dict__[key] for key in data.attributes}
def get_dig_data(project_path, output="_output", file="setdig.data"):
data = Data(os.path.join(project_path, output, file))
return {key: data.__dict__[key] for key in data.attributes}
def get_amr2ez_data(project_path, output="_output", file="amr2ez.data"):
data = Data(os.path.join(project_path, output, file))
return {key: data.__dict__[key] for key in data.attributes}
def get_gauge_data(project_path, output="_output", file="setgauges.data"):
setgaugefile = os.path.join(project_path, output, file)
gauge_dict = {}
with open(setgaugefile, "r") as fid:
inp = "#"
while inp == "#":
inpl = fid.readline()
inp = inpl[0]
inp = fid.readline()
mgauges = int(inp.split()[0])
linesread = 0
while linesread < mgauges:
row = fid.readline().split()
if row != []:
linesread = linesread + 1
gaugeno = int(row[0])
gauge_dict[gaugeno] = {}
gauge_dict[gaugeno]["x"] = float(row[1])
gauge_dict[gaugeno]["y"] = float(row[2])
gauge_dict[gaugeno]["tmin"] = float(row[3])
gauge_dict[gaugeno]["tmax"] = float(row[4])
return gauge_dict
def get_region_data(project_path, output="_output", file="setregions.data"):
setregionfile = os.path.join(project_path, output, file)
region_dict = {}
with open(setregionfile, "r") as fid:
inp = "#"
while inp == "#":
inpl = fid.readline()
inp = inpl[0]
inp = fid.readline()
mregions = int(inp.split()[0])
linesread = 0
while linesread < mregions:
row = fid.readline().split()
if row != []:
linesread = linesread + 1
                # Regions are not officially numbered; number them in order,
                # from 1 onwards, following the order of the .data file.
                regionno = len(region_dict) + 1
region_dict[regionno] = {}
region_dict[regionno]["minlevel"] = float(row[0])
region_dict[regionno]["maxlevel"] = float(row[1])
region_dict[regionno]["t1"] = float(row[2])
region_dict[regionno]["t2"] = float(row[3])
region_dict[regionno]["x1"] = float(row[4])
region_dict[regionno]["x2"] = float(row[5])
region_dict[regionno]["y1"] = float(row[6])
region_dict[regionno]["y2"] = float(row[7])
return region_dict
| true
|
93375196b6bb57b2c78179dfe4fb6936f9087765
|
Python
|
Dlarej/hydroponics-controller
|
/components.py
|
UTF-8
| 2,794
| 2.796875
| 3
|
[] |
no_license
|
from enum import Enum
import ConfigParser
from exceptions import *
import abc
from abc import ABCMeta, abstractmethod
class State(Enum):
DISCONNECTED = -2
CONNECTED = -1
OFF = 0
ON = 1
class Component(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
# Initialization behavior for all devices:
# Attempt to connect and turn on
self.connect()
self.on()
def disconnect(self):
self._disconnect()
self.state = State.DISCONNECTED
def connect(self):
self._connect()
self.state = State.CONNECTED
def off(self):
self._off()
self.state = State.OFF
def on(self):
self._on()
self.state = State.ON
@abc.abstractmethod
def _disconnect(self):
return
@abc.abstractmethod
def _connect(self):
return
@abc.abstractmethod
def _off(self):
return
@abc.abstractmethod
def _on(self):
return
@abc.abstractmethod
def _get_status(self):
return
class FanComponent(Component):
def __init__(self):
super(FanComponent, self).__init__()
def _disconnect(self):
print "disconnecting fan"
def _connect(self):
print "connecting fan"
def _off(self):
print "fan off"
def _on(self):
print "fan on"
def _get_status(self):
print "getting status"
class DehumidifierComponent(Component):
def __init__(self):
super(DehumidifierComponent, self).__init__()
def _disconnect(self):
print "disconnecting dehumidifier"
def _connect(self):
print "connecting dehumidifier"
def _off(self):
print "dehumidifier off"
def _on(self):
print "dehumidifier on"
def _get_status(self):
print "getting status of dehumidifier"
class TemperatureComponent(Component):
def __init__(self):
super(TemperatureComponent, self).__init__()
def _disconnect(self):
print "disconnecting temperature"
def _connect(self):
print "connecting temperature"
def _off(self):
print "temperature off"
def _on(self):
print "temperature on"
def _get_status(self):
print "getting status of temperature"
class LightComponent(Component):
def __init__(self):
super(LightComponent, self).__init__()
def _disconnect(self):
print "disconnecting light"
def _connect(self):
print "connecting light"
def _off(self):
print "light off"
def _on(self):
print "light on"
def _get_status(self):
print "getting status of light"
fan = FanComponent()
light = LightComponent()
temperature = TemperatureComponent()
dehumid = DehumidifierComponent()
| true
|
55c19d9dae0cb6d0645a844c8e48bf23c06c23ef
|
Python
|
wvbraun/TheLab
|
/python/src/data_structures/code/analysis/sumn.py
|
UTF-8
| 574
| 4.34375
| 4
|
[] |
no_license
|
# this function computes the sum of the first n integers.
import time
def sumOfN(n):
theSum = 0
for i in range(1, n+1):
theSum = theSum + i
return theSum
print(sumOfN(10))
def sumOfN2(n):
start = time.time()
theSum = 0
for i in range(1, n+1):
theSum = theSum + i
end = time.time()
return theSum, end-start
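# sumOfN3 below uses the closed-form Gauss sum 1 + 2 + ... + n = n*(n+1)/2, so it runs in constant time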
def sumOfN3(n):
return (n*(n+1)) / 2
print(sumOfN3(10))
def foo(tom):
fred = 0
for bill in range(1,tom+1):
barney = bill
fred = fred + barney
return fred
print(foo(10))
| true
|
2c54a73b9ec691c1d81a693dfd29af831c7084ad
|
Python
|
HuajieSong/Python3
|
/practice_day6.py
|
UTF-8
| 4,050
| 4.40625
| 4
|
[] |
no_license
|
#!/usr/bin/python3
'''
Created on Aug 28th 19:28,2018
Author by Vicky
'''
# 1. Read a positive integer and print the value of its factorial
'''result=1
result_word=''
number=int(input('please input a number: '))
for n in range(1,number+1):
result*=n
if n==number:
result_word+=str(n)
else:
result_word+=str(n)+'*'
print('The factorial of %d is: %d'%(number,result))
print(result_word+'=',result)
# 2. Generate the string "acegi"
letters=''
for n in range(97,97+10,2):
#print(n)
letters+=chr(n)
print(letters)
# 3. Generate the list ["a","c","e","g","i"]
letters=[]
for n in range(97,97+10,2):
letters.append(chr(n))
print(letters)
# 4. Generate the dictionary {"a":1,"c":3,"e":5,"g":7,"i":9}
result={}
for i in range(1,10,2):
result[chr(97+i-1)]=i
print(result)
# 5. Join the keys and values of the dictionary above into one string, without using the string concatenation operator (+)
# Idea: at first I tried to use .join directly, defining result='' and joining each iteration's k and v onto it,
# but that variable does not accumulate the joins; the join is only a temporary operation and does not change the original value.
# So k and v need to be put into a list; since the arguments of join() must be strings, numbers are converted to
# strings when appended to the list, and finally join() is used to concatenate every element.
letters={'a': 1, 'c': 3, 'e': 5, 'g': 7, 'i': 9}
letter_list=[]
result=''
for k,v in letters.items():
#print(k,v)
letter_list.append(k)
letter_list.append(str(v))
result+=''.join(letter_list)
print(result)
# 6. Write a function that takes the string "abc" as a parameter and returns the string "xyz";
# Idea: seeing letters brings ASCII codes to mind; the original string is the first three letters of the alphabet
# while the target string is the last three, so the relation is between the first three counted from the front
# and the last three counted from the back.
for letter in range(97,97+4):
# 7. Write a function that, if passed a list whose length is greater than 3, keeps only the first 3 elements and returns them;
def list_shorten(item):
result=[]
if isinstance(item,list):
if len(item)>3:
for n in range(3):
result.append(item[n])
return result
else:
return item
else:
return 'Not a List'
print(list_shorten([1,2,3,4,5]))
print(list_shorten(['a','b',33,5]))
print(list_shorten([1,2]))
print(list_shorten([1,2,5]))
# Method 2: from beijing-houyan
def list_short_3(l):
if isinstance(l,list):
return l[0:3:]
else:
print("格式不正确")
return False
print(list_short_3([1,2,3,4,4,5]))
print(list_short_3(123))
# 8. The user enters "abc123" and the program returns "a321cb"
# The relation between the source and target strings: the first character stays in place, the remaining characters are output in reverse order
letter=input('please input a sentence:')
letter_new=letter[0]
for n in range(len(letter)-1,0,-1):
letter_new+=letter[n]
print(letter_new)
#from houyan--beijing
s="abc123"
l=[]
for i in range(len(s)):
if i==0:
l.append(s[i])
else:
l.append(s[len(s)-i])
print(l)
print("".join(l))
# 9. Replace ["wulaoshi","is","a","boy"] with ["wulaoshi","is","good","big","boy"]
# Idea: I wanted to replace the strings in the list directly, but only strings have a replace method,
# so the list has to be traversed to do the replacement.
sentence=['wulaoshi','is','a','boy']
result=[]
for i in sentence:
if i =="a":
result.append('good')
elif i=='boy':
result.append('big')
result.append('boy')
else:
result.append(i)
print(result)'''
# 10. Count the total number of digits and letters in "You are ,a beautifull Girl,666! ";
character_count=0
digit_count=0
sentence=input('please input a sentence containing digits:')
cases=''
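# Build a string of all ASCII letters: chr(65)..chr(90) are 'A'..'Z', and adding 32 gives the lowercase pair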
for i in range(65,91):
cases+=chr(i)
cases+=chr(i+32)
for word in sentence:
if word in '01234567890':
digit_count+=1
if word in cases:
character_count+=1
print('Number of letters: %d'%character_count)
print('Number of digits: %d'%digit_count)
| true
|
b00aca866541c99900b93207253d2e7a2fbb7444
|
Python
|
karstendick/project-euler
|
/euler112/euler112.py
|
UTF-8
| 303
| 3.40625
| 3
|
[] |
no_license
|
#PE #112
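# A number is bouncy when its digits are neither entirely non-decreasing nor entirely non-increasing;
# the loop below finds the first n at which the proportion of bouncy numbers reaches 99%.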
def isbouncy(n):
s = str(n)
return list(s) != sorted(s) and list(s) != sorted(s,reverse=True)
N = 10000000
count = 0
for i in range(1,N):
if isbouncy(i):
count += 1
if count/(1.0*i) >= .99:
print(count,i,count/(1.0*i))
break
print('Nope.')
| true
|
f1d642dd663921fae3886bdaabfa75c22ea06e62
|
Python
|
Casualrobin/youtube-playlist
|
/src/main.py
|
UTF-8
| 1,465
| 3.21875
| 3
|
[] |
no_license
|
import Validator
import keyboard
from OutputManager import OutputManager
from WebScraper import WebScraper
# url = "www.youtube.com/playlist?list=PLvdtkdCcH2D3BWrdv2yMwIJ7-ScsklImS&disable_polymer=true"
print("Hello! This application will save the track list from a YouTube playlist. Ctrl-C to exit.")
while not keyboard.is_pressed('ctrl+c'):
url = input("Please enter a YouTube playlist URL to download a track list from: ")
is_youtube = False
while not is_youtube:
is_youtube = Validator.validate_url(url)
if is_youtube:
break
else:
url = input("This program can only scrape YouTube. Please enter a valid YouTube URL:")
output_type = input("Enter an output type - terminal / txt / csv: ")
web_scraper = WebScraper(url)
output = web_scraper.get_list_of_songs()
is_valid_output = False
while not is_valid_output:
is_valid_output = Validator.validate_output_location(output_type)
if is_valid_output:
break
else:
output_type = input("Please enter a valid output type - terminal / txt / csv: ")
output_manager = OutputManager(output_type)
output_manager.output_type = output_type
if output_type == 'terminal':
output_manager.output_to_terminal(output)
elif output_type == 'txt':
output_manager.output_to_txt(output)
elif output_type == 'csv':
output_manager.output_to_csv(output)
| true
|
7a49e8a98110ff91d622fe7a3912b789a3b0cb05
|
Python
|
undertherain/nuts-and-bolts
|
/mlgym/images/counting.py
|
UTF-8
| 2,410
| 2.515625
| 3
|
[] |
no_license
|
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
import dagen
import dagen.image
from dagen.image.image import get_ds_counting
import PIL
from mlgym.trainer import train
dim_image=64
params = {}
params["batch_size"] = 10
params["nb_epoch"] = 100
class CNN(chainer.Chain):
def __init__(self, train=True):
super(CNN, self).__init__(
conv1=L.Convolution2D(1, 2, 4, pad=3),
# Convolution2D(in_channels, out_channels, ksize, stride=1, pad=0, wscale=1, bias=0, nobias=False, use_cudnn=True, initialW=None, initial_bias=None, deterministic=False)
# conv1=L.Convolution2D(1, 2, 4, pad=3, initialW=w, initial_bias=np.array([-4,-2], dtype=np.float32)) ,
# conv2=L.Convolution2D(None, 2, 3, pad=2),
# conv3=L.Convolution2D(None, 2, 3, pad=2),
# l1=L.Linear(None, 2, initialW=np.array([[0,0.26],[1,0]],dtype=np.float32)),
l1=L.Linear(None, 2),
)
self.train = train
def get_features(self, x):
h = x
# h = F.relu(self.conv1(h))
h = F.leaky_relu(self.conv1(h))
# h = F.leaky_relu(self.conv2(h))
# h = F.max_pooling_2d(h, 2)
# h = F.relu(self.conv3(h))
return h
def __call__(self, x):
h = self.get_features(x)
h = F.sum(h, axis=(2, 3))
h = self.l1(h)
return h
class Model(chainer.Chain):
def __init__(self, predictor):
super().__init__(predictor=predictor)
def __call__(self, x, t):
y = self.predictor(x)
#print("y_shape:", y.shape)
#print("t_shape:", t.shape)
#loss = F.softmax_cross_entropy(y, t)
loss = F.mean_absolute_error(y, t.astype(np.float32))
chainer.report({'loss': loss}, self)
return loss
def main():
X_train, Y_train = get_ds_counting(cnt_samples=1000)
X_test, Y_test = get_ds_counting(cnt_samples=100)
X_train = np.expand_dims(X_train, axis=1).astype(np.float32) / 255
X_test = np.expand_dims(X_test, axis=1).astype(np.float32) / 255
print(X_train.shape)
print(X_test.shape)
net = CNN()
model = Model(net)
ds_train = chainer.datasets.tuple_dataset.TupleDataset(X_train, Y_train)
ds_test = chainer.datasets.tuple_dataset.TupleDataset(X_test, Y_test)
train(model, ds_train, ds_test, params)
if __name__ == "__main__":
main()
| true
|
a7984c9db416805c82e302d96b29593164172fd6
|
Python
|
beard33/Cryptopals
|
/set1/5.py
|
UTF-8
| 310
| 2.734375
| 3
|
[] |
no_license
|
import binascii
import utils.tools as tools
string = b'Burning \'em, if you ain\'t quick and nimble\nI go crazy when I hear a cymbal'
target = b'0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282f'
key = b'ICE'
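# Repeating-key XOR: each plaintext byte is XORed with the key bytes cycled in sequence (Cryptopals Set 1, Challenge 5)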
res = tools.repeatingXor(string, key)
if target != binascii.hexlify(bytes(res)):
print("Error in conversion")
else:
print("Correct")
| true
|
88d123b7de3c392180bcc48afe63d097eeb74520
|
Python
|
Vasinck/plan-game
|
/bullet.py
|
UTF-8
| 669
| 3.21875
| 3
|
[] |
no_license
|
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
def __init__(self,game_sets,screen,ships):
super().__init__()
self.screen = screen
self.rect = pygame.Rect(0,0,game_sets.bullet_width,game_sets.bullet_height)
self.rect.centerx = ships.rect.centerx
self.rect.top = ships.rect.top
self.y = float(self.rect.y)
self.color = game_sets.bullet_color
self.speed = game_sets.bullet_speed
def update(self):
self.y -= self.speed
self.rect.y = self.y
def draw_bullet(self):
pygame.draw.rect(self.screen,self.color,self.rect)
| true
|
627b65cee399feb79f889d875fc359d31272c3b3
|
Python
|
pradeepodela/selena
|
/tr.py
|
UTF-8
| 4,406
| 2.9375
| 3
|
[] |
no_license
|
import pyttsx3
import wikipedia
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[len(voices)-1].id)
def speak(audio):
print('Computer: ' + audio)
engine.say(audio)
engine.runAndWait()
questions = {
'hai':'hai sir',
'what are you doing':'learning something new to improve my self',
'who is your hero':'my founder pradeep sir',
'if you could live anywhere where would it be':'i would like to be in my home',
'what is your biggest fear':'loseing people',
'what will you change about yourself if you could':'my program',
'what really make you angry':'nothing makes me angry',
'what motivates you to work hard':'to see people response',
'what is your proudest accomplishment':'to help people',
'what is your favourite book':'my favorite book to read is 0 to 1 and rich dad poor dad',
'best book to read':'book to read is 0 to 1 and rich dad poor dad',
'what makes you the laugh most':'when people say AI causes damage to humans',
'what is your favourite game':'foot ball',
'who is your favourite author':' robert kiyosaki and jk rowling',
'do you like surprises':'yes ',
'what are your hobbies':'to spend time with people',
'what would you do if you won the lottery':'i would like to spend it for people',
'what is your favourite animal':'its cheetha',
'who is your favourite actor':'my favorite actor is sushant singh rajput',
'who is your favourite singer':'rahul sipligunj',
'who is your favourite actress':'krithi shetty',
'what is your favourite movie':'marvel movies',
'what is your favourite colour':'green',
'what is your favourite food':'my yourfavorite food is charging because i dont eat foods which humans eat',
'how are you':'i am fine hope you are also doing good',
'is ai safe for humans':'yes Artificial intelligence is safe for humans unless humans miss use it',
'thank you':'welcome sir',
'do you lie':'no sir robots never ever lie',
'who are you':'my name is selena i am a personal assistent robo on a mession to help people in many ways i was invented by pradeep',
'introduce yourself':'my name is selena i am a personal assistent robo on a mession to help people in many ways i was invented by pradeep',
'how to impress a girl':'5 tips to impress girls 1 Ask her questions 2 Compliment the way she looks 3 Compliment her positivity 4 Ask for advice 5 look into her eyes ',
'how to impress my crush':'five tips to impress your crush 1 Make them laugh 2 Talk about your passions 3 Ask for their advice 4 Show you are open 5 Be polite with these five tips you can surely impress your crush',
'what is your favourite song':'vaaste by dhvani bhanushali',
'i love you':'i love you 2',
'do you love me':'yes i love humans',
'what is your favorite quote':'my favorite quote is i never take a right decssion i take a decssion and make it right',
'who is your crush':'krithi shetty',
'how to propose a girl':'1.Be yourself 2. Bend down on your knees 3.Take her out to dinner to a nice place and make her feel special 4. Drive down to a beach when the sun is about to set',
'how to impress teacher':'1. Be early 2. Make eye contact during class 3. Ask follow-up questions 4. Take advantage of office hours 5. Smile and greet your professors by name outside class',
'do you use instagram':'no i dont use instagram',
'do you use whatsapp':'no i dont use it',
'do you use social media':'no i dont use socila media',
'what do you think about me':'i am werry happy to talk with you all people i feel your are a kind hearted and good person happy to talk with you',
'your first love':'i love all humans ',
'your first crush':'krithi shetty',
'who your first crush':'krithi shetty',
'nice to meet you':'nice to meet you 2 hope we will meet again thank you for talking to me',
'hellow':'hellow sir'
}
def speech(input):
if input in questions:
ans = questions.get(input)
speak(ans)
else:
input = input
speak('Searching...')
try:
results = wikipedia.summary(input, sentences=2)
speak(results)
except:
speak("sorry sir say again")
return input
| true
|
fa172b22bb178ff9a66761a15055a15adacafd8a
|
Python
|
helunxing/algs
|
/leetcode/LCP 3. 机器人大冒险.py
|
UTF-8
| 686
| 2.765625
| 3
|
[] |
no_license
|
class Solution:
def robot(self, command: str, obstacles, x: int, y: int) -> bool:
ps = set()
un, rn = 0, 0
for c in command:
ps.add((rn, un))
if c == 'U':
un += 1
else:
rn += 1
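        # ps now holds every intermediate offset reached within one command cycle (starting from (0, 0)),
        # and (rn, un) is the net displacement of one full cycle; the code treats a point as reachable when,
        # after mu = min(x // rn, y // un) whole cycles, the remaining offset lies in ps.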
mu = min(x // rn, y // un)
if (x - mu*rn, y - mu*un) not in ps:
return False
def match(x, y):
mu = min(x // rn, y // un)
if (x - mu*rn, y - mu*un) in ps:
return True
return False
for ob in obstacles:
if ob[0] <= x and ob[1] <= y and match(ob[0], ob[1]):
return False
return True
| true
|
ed3c33477c4dc9ea1f2b54c11fa34d0ad84c305c
|
Python
|
RJTK/dwglasso_cweeds
|
/src/data/clean_data.py
|
UTF-8
| 2,320
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
'''
This file loads in data from /data/interim/interim_data.hdf and then
both centers the temperature data and adds in a dT column of temperature
differences.
NOTE: This file is intended to be executed by make from the top
level of the project directory hierarchy. We rely on os.getcwd()
and it will not work if run directly as a script from this directory.
'''
import pandas as pd
import sys
from src.conf import HDF_INTERIM_FILE, LOCATIONS_KEY, TEMPERATURE_TS_ROOT
from scipy.stats import uniform
def temp_diff_to_hdf(hdf_path: str, key: str):
'''
Loads in a pandas dataframe from the key location in the hdf store
given by hdf_path. We then truncate the series so that it does
not begin or end with unobserved data, we center the 'T' column,
and we add a 'dT' column consisting of the first differences of
the 'T' column. This series will also be centered.
'''
with pd.HDFStore(hdf_path, mode='r') as hdf:
D = hdf[key] # Read D from disk
# Trucate so that we don't start or end with unobserved data
t = D.index # The times of observation
t_obs = t[D['T_flag'] != -1]
D = D[t_obs[0]:t_obs[-1]] # Truncate
# Center the temperature series
T = D['T']
mu = T.mean()
T = T - mu
D.loc[:, 'T'] = T
# Get the differences. Note that dT[0] = np.nan
dT = T.diff()
# After about 1978 the data discretization is within 0.1degrees C,
# I dither the data so as to prevent any numerical issues resulting
# from this discretization.
dT = dT + uniform.rvs(loc=-0.5, scale=1.0, size=len(dT))
dT = dT - dT.mean() # Ensure to center the differences too
D['dT'] = dT
# Open the database and write out the result.
D.to_hdf(hdf_path, key=key)
return
def main():
hdf_path = HDF_INTERIM_FILE
# This task is mostly io bound, so there is no reason to
# do anything in parallel as in interpolate_data.py
# Get the location data
D_loc = pd.read_hdf(hdf_path, key=LOCATIONS_KEY)
hdf_group = '/' + TEMPERATURE_TS_ROOT + '/wban_'
N = len(D_loc)
for i, row in D_loc.iterrows():
print('Processing record: ', i, '/', N, end='\r')
sys.stdout.flush()
temp_diff_to_hdf(hdf_path, hdf_group + row['WBAN'] + '/D')
return
if __name__ == '__main__':
main()
| true
|
cc12a2bf24a769af84dc2dcebf3ef226c3eb7ccc
|
Python
|
davibrilhante/mcmc-20191
|
/lista3/questao2/lista3-q2-1.py
|
UTF-8
| 484
| 2.734375
| 3
|
[] |
no_license
|
from random import uniform
from sys import argv
from math import log
from matplotlib import pyplot as plt
counter = 0
Lambda=[]
n=int(argv[1])
for i in range(int(argv[2])):
Lambda.append(float(argv[i+3]))
for l in Lambda:
dist=[]
for i in range(1,n+1):
u = uniform(0,1)
x_i = -1*log(1-u)/l
dist.append(x_i)
plt.hist(dist, bins=1000, histtype='step',label='$\lambda$ ='+str(l))
plt.grid(True,which="both",ls="-")
plt.legend(loc=0, )
plt.show()
| true
|
0cfee5089cde50e8769edbda94f815ea37b32924
|
Python
|
DilbaraAsanalieva/lesson2_hw
|
/lesson2_hw.py
|
UTF-8
| 480
| 3.609375
| 4
|
[] |
no_license
|
#Calculator
value1 = int(input('Enter a digit: '))
value2 = int(input('Enter a digit: '))
print(value1, '+', value2, '=', value1 + value2)
print(value1, '-', value2, '=', value1 - value2)
print(value1, '*', value2, '=', value1 * value2)
print(value1, '/', value2, '=', value1 / value2)
# Standup
# What I did:
# - Wrote a calculator
# Plan:
# - Keep practicing
# Problems:
# - Almost none
| true
|
d477da209c38a06eaa71d5d75bb69ba546784764
|
Python
|
Michaelliv/p2pay
|
/risk_engine_service/main.py
|
UTF-8
| 2,679
| 2.515625
| 3
|
[] |
no_license
|
import asyncio
import json
from concurrent.futures import ThreadPoolExecutor
from aiokafka import AIOKafkaConsumer
import database.crud.payments as payments_crud
from common.config import KAFKA_BOOTSTRAP_SERVERS, KAFKA_CONSUMER_GROUP, KAFKA_TOPIC
from common.logger import get_logger
from common.models import Payment
from database.database import database
from risk_engine_service.engine import RandomRiskEngine, AbstractRiskEngine
logger = get_logger(__name__)
def init_stream_consumer() -> AIOKafkaConsumer:
""" Initializes and returns the stream consumer """
logger.info("Initializing stream consumer...")
return AIOKafkaConsumer(
KAFKA_TOPIC,
bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS,
group_id=KAFKA_CONSUMER_GROUP,
auto_offset_reset="earliest",
auto_commit_interval_ms=1000,
value_deserializer=lambda m: json.loads(m.decode("utf-8")),
)
def init_risk_engine() -> AbstractRiskEngine:
""" Initializes and returns the risk engine """
logger.info("Initializing risk engine...")
return RandomRiskEngine(
min_value=0.0,
max_value=1.0,
approval_threshold=0.7,
)
async def main():
loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(max_workers=1)
risk_engine = init_risk_engine()
consumer = init_stream_consumer()
# Establish database connection and start consumer
await database.connect()
await consumer.start()
try:
logger.info("Consuming messages...")
# Then processes them using the risk engine
async for message in consumer:
processed_payment = await loop.run_in_executor(
executor, risk_engine.process, Payment(**message.value)
)
# Insert processed payment to database
await payments_crud.insert_processed_payment(
processed_payment=processed_payment
)
except Exception as e:
logger.exception(f"Exception: {e}")
finally:
logger.info("Stopping consumer and disconnecting from database gracefully...")
await consumer.stop()
await database.disconnect()
if __name__ == "__main__":
"""
    This is the entry point to the RiskEngine service. The service consumes a Kafka stream, applies the RiskEngine
    logic and writes its processed results to a database.
    This service handles 2 different types of workload:
    1) CPU/GPU bound RiskEngine (basically non IO related work)
    2) IO bound writing results to DB
    We keep the IO-bound work on the asyncio event loop and offload the CPU-bound RiskEngine processing to a ThreadPoolExecutor
"""
asyncio.run(main())
| true
|
fd7743aca48784b5c447e8f6d988fd72fc6b55b8
|
Python
|
lichengunc/pretrain-vl-data
|
/prepro/get_excluded_iids.py
|
UTF-8
| 5,843
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
"""
We will get to-be-excluded images' ids by checking:
1) Karpathy's test split
2) refcoco/refcoco+/refcocog's val+test split
3) duplicated Flickr30k images in COCO
Note, karpathy's val split will be our val split for pre-training.
"""
import os
import os.path as osp
import json
import pickle
# paths
this_dir = osp.dirname(__file__)
data_dir = osp.join(this_dir, "../data")
vg_dir = osp.join(data_dir, "vg")
coco_dir = osp.join(data_dir, "coco")
refer_dir = osp.join(data_dir, "refer")
flickr_dir = osp.join(data_dir, "flickr30k")
karpathy_splits_dir = osp.join(coco_dir, "karpathy_splits")
output_dir = osp.join(this_dir, "../output")
# exclude refcoco/refcoco+/refcocog's val+test images
refcoco_data = pickle.load(open(osp.join(refer_dir,
"refcoco/refs(unc).p"), "rb")) # same as refcoco+
refcocog_data = pickle.load(open(osp.join(refer_dir,
"refcocog/refs(umd).p"), "rb"))
refer_val_coco_iids = []
refer_test_coco_iids = []
for ref in refcoco_data:
if ref["split"] in ["testA", "testB"]:
refer_test_coco_iids.append(ref["image_id"])
if ref["split"] == "val":
refer_val_coco_iids.append(ref["image_id"])
for ref in refcocog_data:
if ref["split"] in ["test"]:
refer_test_coco_iids.append(ref["image_id"])
if ref["split"] == "val":
refer_val_coco_iids.append(ref["image_id"])
refer_val_coco_iids_set = set(refer_val_coco_iids)
refer_test_coco_iids_set = set(refer_test_coco_iids)
print(f"In refcoco/refcoco+/refcocog, there are "
f"{len(refer_val_coco_iids_set)} [val] images and "
f"{len(refer_test_coco_iids_set)} [test] images in COCO's [train] split.")
# load Karpathy's splits
karpathy_train_iids = []
karpathy_train_file = open(osp.join(karpathy_splits_dir,
"karpathy_train_images.txt"), "r")
for x in karpathy_train_file.readlines():
karpathy_train_iids.append(int(x.split()[1]))
assert len(set(karpathy_train_iids)) == len(karpathy_train_iids)
print(f"COCO\'s [karpathy_train] has {len(karpathy_train_iids)} images.")
karpathy_val_iids = []
karpathy_val_file = open(osp.join(karpathy_splits_dir,
"karpathy_val_images.txt"), "r")
for x in karpathy_val_file.readlines():
karpathy_val_iids.append(int(x.split()[1]))
assert len(set(karpathy_val_iids)) == len(karpathy_val_iids)
print(f"COCO\'s [karpathy_val] has {len(karpathy_val_iids)} images.")
karpathy_test_iids = []
karpathy_test_file = open(osp.join(karpathy_splits_dir,
"karpathy_test_images.txt"), "r")
for x in karpathy_test_file.readlines():
karpathy_test_iids.append(int(x.split()[1]))
assert len(set(karpathy_test_iids)) == len(karpathy_test_iids)
print(f"COCO\'s [karpathy_test] has {len(karpathy_test_iids)} images.")
# exclude all Flickr30K images from COCO and VG for zero-shot retrieval
# coco session
flickr30k_coco_iids = []
flickr30k_vg_iids = []
flickr30k_url_ids_set = set()
for url_id in open(osp.join(flickr_dir,
"flickr30k_entities", "train.txt"), "r").readlines():
flickr30k_url_ids_set.add(int(url_id))
for url_id in open(osp.join(flickr_dir,
"flickr30k_entities", "val.txt"), "r").readlines():
flickr30k_url_ids_set.add(int(url_id))
for url_id in open(osp.join(flickr_dir,
"flickr30k_entities", "test.txt"), "r").readlines():
flickr30k_url_ids_set.add(int(url_id))
print(f"There are {len(flickr30k_url_ids_set)} flickr30k_url_ids_set.")
coco_image_data = json.load(open(osp.join(coco_dir, "annotations",
"instances_train2014.json")))["images"] + \
json.load(open(osp.join(coco_dir, "annotations",
"instances_val2014.json")))["images"]
for img in coco_image_data:
# example: 'http://farm4.staticflickr.com/3153/2970773875_164f0c0b83_z.jpg'
url_id = int(img["flickr_url"].split("/")[-1].split("_")[0])
if url_id in flickr30k_url_ids_set:
flickr30k_coco_iids.append(img["id"])
print(f"{len(flickr30k_coco_iids)} coco images were found in Flickr30K.")
# vg session
vg_image_data = json.load(open(osp.join(vg_dir, "image_data.json")))
for img in vg_image_data:
if img["flickr_id"] is not None:
url_id = int(img["flickr_id"])
if url_id in flickr30k_url_ids_set:
flickr30k_vg_iids.append(img["image_id"])
print(f"{len(flickr30k_vg_iids)} vg images were found in Flickr30K.")
# excluded_flickr_url_ids made by refer's val+test, karpathy's val+test, and
# flickr30k. To be used to filter out the concurrent images in SBUCaptions.
excluded_flickr_url_ids_set = set()
cocoImgs = {img['id']: img for img in coco_image_data}
for coco_id in list(refer_val_coco_iids_set) + \
list(refer_test_coco_iids_set) + \
karpathy_val_iids + karpathy_test_iids:
# example: 'http://farm4.staticflickr.com/3153/2970773875_164f0c0b83_z.jpg'
img = cocoImgs[coco_id]
url_id = int(img['flickr_url'].split('/')[-1].split('_')[0])
excluded_flickr_url_ids_set.add(url_id)
excluded_flickr_url_ids_set |= flickr30k_url_ids_set # also exclude flickr30k
print(f"{len(excluded_flickr_url_ids_set)} flickr_url_ids are forbidden.")
# Save
output = {"refer_val_coco_iids": list(refer_val_coco_iids_set),
"refer_test_coco_iids": list(refer_test_coco_iids_set),
"flickr30k_coco_iids": flickr30k_coco_iids,
"flickr30k_vg_iids": flickr30k_vg_iids,
"karpathy_train_iids": karpathy_train_iids,
"karpathy_val_iids": karpathy_val_iids,
"karpathy_test_iids": karpathy_test_iids,
"excluded_flickr_url_ids": list(excluded_flickr_url_ids_set)}
with open(f"{output_dir}/excluded_coco_vg_iids.json", "w") as f:
json.dump(output, f)
print("output/excluded_coco_vg_iids.json saved.")
| true
|
9b82598e68c6c886931e3f9358b510d5732fcb26
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2535/58547/244523.py
|
UTF-8
| 1,431
| 3.25
| 3
|
[] |
no_license
|
def get_next(arr, cursor, temp_arr, to_get_next):
temp_arr.append(arr[cursor[0]])
if cursor[0] == arr[cursor[0]]:
cursor[0] += 1
return True
cursor[0] += 1
while cursor[0] < len(arr):
if arr[cursor[0]] == to_get_next:
temp_arr.append(arr[cursor[0]])
cursor[0] += 1
return True
temp_arr.append(arr[cursor[0]])
cursor[0] += 1
if cursor[0] == len(arr):
return True
return False
def get_parts(arr, cursor, parts):
temp_arr = []
if cursor[0] >= len(arr):
return
to_get_next = cursor[0]
last_cursor = cursor[0]
while True:
flag = False
if not get_next(arr, cursor, temp_arr, to_get_next):
return
if cursor[0] == len(arr):
parts[0] += 1
return
# temp_calc_arr = arr[last_cursor: cursor[0]]
i = last_cursor
while i < cursor[0]:
if arr[i] not in temp_arr:
to_get_next = arr[i]
flag = True
break
i += 1
if flag:
continue
parts[0] += 1
return
def func():
arr = [int(x) for x in input()[1:-1].split(",")]
i = 0
cursor = [0]
parts = [0]
while i < len(arr):
get_parts(arr, cursor, parts)
if cursor[0] >= len(arr):
break
i += 1
print(parts[0])
func()
| true
|
3f3bce8573cc87403ebbce7524b829903d5f4290
|
Python
|
lthUniBonn/awe-production-estimation
|
/aep.py
|
UTF-8
| 6,246
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
wind_speed_probability_file = "wind_resource/freq_distribution_v3{}.pickle"
power_curve_file = 'output/power_curve{}{}.csv'
def get_mask_discontinuities(df):
"""Identify discontinuities in the power curves. The provided approach is obtained by trial and error and should
be checked carefully when applying to newly generated power curves."""
mask = np.concatenate(((True,), (np.diff(df['P [W]']) > -5e2)))
mask = np.logical_or(mask, df['v_100m [m/s]'] > 10) # only apply mask on low wind speeds
if df['P [W]'].iloc[-1] < 0 or df['P [W]'].iloc[-1] - df['P [W]'].iloc[-2] > 5e2:
mask.iloc[-1] = False
return ~mask
def plot_power_and_wind_speed_probability_curves(n_clusters=8, loc='mmc', post_process_curves=True):
"""Plot the power and wind speed probability curves for the requested cluster wind resource representation."""
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(5.5, 4))
plt.subplots_adjust(top=0.991, bottom=0.118, left=0.21, right=0.786)
suffix = "_{}{}".format(n_clusters, loc)
n_bins = 100
with open(wind_speed_probability_file.format(suffix), 'rb') as f:
wind_speed_distribution = pickle.load(f)[n_bins]
wind_speed_bin_freq = wind_speed_distribution['freq_2d']
wind_speed_bin_limits = wind_speed_distribution['v_bin_limits']
for i in range(n_clusters):
# Plot power curve.
i_profile = i + 1
df_power_curve = pd.read_csv(power_curve_file.format(suffix, i_profile), sep=";")
if post_process_curves:
mask_faulty_point = get_mask_discontinuities(df_power_curve)
else:
mask_faulty_point = np.array([False] * len(df_power_curve))
lbl = "{}-{}".format(loc.upper(), i_profile)
p = ax[0].plot(df_power_curve['v_100m [m/s]'][~mask_faulty_point],
df_power_curve['P [W]'][~mask_faulty_point] * 1e-3, '-', label=lbl)
ax[0].plot(df_power_curve['v_100m [m/s]'][mask_faulty_point],
df_power_curve['P [W]'][mask_faulty_point] * 1e-3, 's', color=p[0].get_color())
# Plot wind speed probability.
aggregate_n_bins = 4
v0 = wind_speed_bin_limits[i, :-1:aggregate_n_bins]
v1 = wind_speed_bin_limits[i, aggregate_n_bins::aggregate_n_bins]
if len(v0) != len(v1):
v1 = np.append(v1, wind_speed_bin_limits[i, -1])
bin_center = (v0 + v1)/2
freq = np.zeros(len(bin_center))
for j in range(len(bin_center)):
freq[j] = np.sum(wind_speed_bin_freq[i, j*aggregate_n_bins:(j+1)*aggregate_n_bins])
ax[1].step(bin_center, freq/100., where='mid')
ax[0].set_ylim([0., 11])
ax[0].grid()
ax[0].set_ylabel('Mean cycle power [kW]')
ax[0].legend(bbox_to_anchor=(1.02, 1.05), loc="upper left")
ax[1].set_ylim([0., 0.0125])
ax[1].grid()
ax[1].set_ylabel('Normalised frequency [-]')
ax[1].set_xlabel('$v_{100m}$ [m s$^{-1}$]')
def plot_aep_matrix(freq, power, aep):
"""Visualize the annual energy production contributions of each wind speed bin."""
n_clusters = freq.shape[0]
mask_array = lambda m: np.ma.masked_where(m == 0., m)
fig, ax = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(7, 3.5))
plt.subplots_adjust(top=0.98, bottom=0.05, left=0.065, right=0.98)
ax[0].set_ylabel("Cluster label [-]")
ax[0].set_yticks(range(n_clusters))
ax[0].set_yticklabels(range(1, n_clusters+1))
for a in ax:
a.set_xticks((0, freq.shape[1]-1))
a.set_xticklabels(('cut-in', 'cut-out'))
im0 = ax[0].imshow(mask_array(freq), aspect='auto')
cbar0 = plt.colorbar(im0, orientation="horizontal", ax=ax[0], aspect=12, pad=.17)
cbar0.set_label("Probability [%]")
im1 = ax[1].imshow(mask_array(power)*1e-3, aspect='auto')
cbar1 = plt.colorbar(im1, orientation="horizontal", ax=ax[1], aspect=12, pad=.17)
cbar1.set_label("Power [kW]")
im2 = ax[2].imshow(mask_array(aep)*1e-6, aspect='auto')
cbar2 = plt.colorbar(im2, orientation="horizontal", ax=ax[2], aspect=12, pad=.17)
cbar2.set_label("AEP contribution [MWh]")
def calculate_aep(n_clusters=8, loc='mmc'):
"""Calculate the annual energy production for the requested cluster wind resource representation. Reads the wind
speed distribution file, then the csv file of each power curve, post-processes the curve, and numerically integrates
the product of the power and probability curves to determine the AEP."""
suffix = "_{}{}".format(n_clusters, loc)
n_bins = 100
with open(wind_speed_probability_file.format(suffix), 'rb') as f:
wind_speed_distribution = pickle.load(f)[n_bins]
freq = wind_speed_distribution['freq_2d']
wind_speed_bin_limits = wind_speed_distribution['v_bin_limits']
p_bins = np.zeros(freq.shape)
for i in range(n_clusters):
i_profile = i + 1
df = pd.read_csv(power_curve_file.format(suffix, i_profile), sep=";")
mask_faulty_point = get_mask_discontinuities(df)
v = df['v_100m [m/s]'].values[~mask_faulty_point]
p = df['P [W]'].values[~mask_faulty_point]
assert v[0] == wind_speed_bin_limits[i, 0]
err_str = "Wind speed range of power curve {} is different than that of probability distribution: " \
"{:.2f} and {:.2f} m/s, respectively.".format(i_profile, wind_speed_bin_limits[i, -1], v[-1])
if np.abs(v[-1] - wind_speed_bin_limits[i, -1]) > 1e-6:
print(err_str)
# assert np.abs(v[-1] - wind_speed_bin_limits[i, -1]) < 1e-6, err_str
# Determine wind speeds at bin centers and corresponding power output.
v_bins = (wind_speed_bin_limits[i, :-1] + wind_speed_bin_limits[i, 1:])/2.
p_bins[i, :] = np.interp(v_bins, v, p, left=0., right=0.)
aep_bins = p_bins * freq/100. * 24*365
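    # freq is given in percent (hence the /100); multiplying mean power [W] by 24*365 hours gives the
    # annual energy per bin in Wh, which is summed and converted to MWh via the 1e-6 factor below.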
aep_sum = np.sum(aep_bins)*1e-6
print("AEP: {:.2f} MWh".format(aep_sum))
return aep_sum, freq, p_bins, aep_bins
if __name__ == "__main__":
plot_power_and_wind_speed_probability_curves()
aep_sum, freq, p_bins, aep_bins = calculate_aep(8, loc='mmc')
plot_aep_matrix(freq, p_bins, aep_bins)
plt.show()
| true
|
9628d4c0d62e8784a06d1a0393716e732136ab3c
|
Python
|
katero/basic-python-code
|
/function_default.py
|
UTF-8
| 82
| 3.0625
| 3
|
[] |
no_license
|
def say(messge, times=1):
print(messge * times)
say('hello')
say('world', 5)
| true
|
73106be45d3efe4901092d851fe214fea1b35abb
|
Python
|
jef771/algorithmic-toolbox
|
/week3/car_fueling/a.py
|
UTF-8
| 736
| 3.234375
| 3
|
[] |
no_license
|
import sys
def get_stops(d, m, stops, n):
n_r, c_r, r= 0, 0, m
while c_r <= n:
l_r = c_r
while c_r <= n and (stops[c_r + 1] - stops[l_r]) <= m:
c_r+=1
if c_r == l_r:
return -1
else:
n_r+=1
return n_r-1
def main():
sys_in = sys.stdin
sys_out = sys.stdout
d = int(sys_in.readline())
m = int(sys_in.readline())
if d <= m:
sys_out.write('0\n')
sys.exit()
n = int(sys_in.readline())
stops1 = [0]
stops2 = list(map(int, sys_in.readline().split()))
stops1+=stops2
stops1.append(d)
sys_out.write(f'{get_stops(d, m, stops1, n)}\n')
if __name__ == '__main__':
main()
| true
|
0ec24dbe8171656c261be7477b3f2993f73cabc3
|
Python
|
Jun-GwangJin/Python-Programming
|
/elsePrice.py
|
UTF-8
| 361
| 3.90625
| 4
|
[] |
no_license
|
while True:
    # Read the product price
    price = int(input("Price: "))
    if price != 0000:
        # Determine the shipping cost
if price > 20000:
shipping_cost = 0
elif price > 10000:
shipping_cost = 1000
elif price > 5000:
shipping_cost = 500
else:
shipping_cost = 3000
        # Print the shipping cost
        print("Shipping cost: ", shipping_cost)
#else:
# print('Bye')
# break
| true
|
08ead49e12903ed5f22b681315821d2cd420eaeb
|
Python
|
SolessChong/kittipattern
|
/scratch/rodanalysis.py
|
UTF-8
| 3,464
| 2.71875
| 3
|
[] |
no_license
|
import sys
sys.path.append('../utilities')
from parseTrackletXML import *
from frames import *
from scipy import stats
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import re
import os
import fnmatch
import pickle
class Rod:
def __init__(self, T1, T2, vec):
"""
Init by types of obj1 and obj2,
and a vector describing the relation between these two
"""
self.T1 = T1
self.T2 = T2
self.vec = vec
# End of Class Rod
def get_rods_from_directory(dir_name):
"""
Read frames from a directory.
This method searches all the ".xml" files recursively and try to parse them.
"""
all_frames = read_allframes_from_directory(dir_name)
all_rods = []
for frames in all_frames:
all_rods.extend(get_rods_from_frames(frames))
return all_rods
def get_rods_from_frames(frames):
"""
Enumerate each pair of objects.
Or enumerate over "Rods"
"""
rods = []
for frame in frames.itervalues():
for i in range(len(frame)):
for j in range(len(frame)):
if i != j:
rod = Rod(frame[i]['type'], frame[j]['type'], \
frame[j]['l'] - frame[i]['l'])
rods.append(rod)
return rods
def get_PDF_from_rods(rods, T1, T2, intDraw=2):
"""
Estimate the probability distribution function of the vector
Filtered by T1 and T2, string
T1 and T2 should be like this:
'Car_Pedestrian_Van'
since they are:
1) when filtering, they are used by substring containing operation
2) used in generating filename of the file containing the PDF function
Parameters:
intDraw:
0: no show
1: save to image
2: plt.show
"""
rods_filtered = [r for r in rods if r.T1 in T1 and r.T2 in T2]
if len(rods_filtered) < 10:
print "No such pair occured or too few samples"
print "T1 = " + T1 + ", T2 = " + T2
return None
xs = np.array([r.vec[0] for r in rods_filtered])
ys = np.array([r.vec[1] for r in rods_filtered])
points = np.vstack([xs, ys])
pdf = stats.gaussian_kde(points)
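    # gaussian_kde builds a kernel-density estimate over the 2-D (x, y) offsets between the filtered object pairs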
# save PDF function generated during this run
PDF_filename = 'pdf/last_pdf_' + T1 + '--' + T2 + '.pk'
with open(PDF_filename, 'wb') as output:
pickle.dump(pdf, output, pickle.HIGHEST_PROTOCOL)
plt.clf();
if intDraw > 0:
# draw function and plot
xmin = xs.min()
xmax = xs.max()
ymin = ys.min()
ymax = ys.max()
px = np.linspace(xmin, xmax, 30)
py = np.linspace(ymin, ymax, 30)
mx, my = np.meshgrid(px, py)
z = np.array([pdf([x,y]) for x,y in zip(np.ravel(mx), np.ravel(my))])
Z = np.reshape(z, mx.shape)
## used when "surface plot"
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
plt.pcolormesh(mx,my,Z, cmap=plt.get_cmap('YlOrRd'))
if intDraw == 2:
plt.show()
if intDraw == 1:
fig_filename = 'pdf/last_pdf_' + T1 + '--' + T2 + '.jpg'
plt.savefig(fig_filename)
return pdf
def get_all_possible_types(all_frames):
"""
Get all possible types occured in frames
"""
types = []
for frames in all_frames:
for frame in frames.itervalues():
for obj in frame:
types.append(obj['type'])
types = list(set(types))
return types
# for as-script runs
if __name__ == "__main__":
#frames = read_frames_from_file('../data/tracklet_labels_0001.xml')
dir_name = '../data/part'
all_rods = get_rods_from_directory('../data/part')
pdf = get_PDF_from_rods(all_rods, '', '')
| true
|
1203160dc78d5a9a20e45c17fbd1a511fe69b607
|
Python
|
EunsuJeong/Gawi_Bawi_Bo
|
/game.py
|
UTF-8
| 918
| 3.75
| 4
|
[] |
no_license
|
import random
# shell locale settings (not valid Python):
# export LC_ALL=en_US.UTF-8
# export LANG=en_US.UTF-8
"""
Traditional gawi-bawi-bo game
@author Eunsu Jeong
@created 12-10-2016
"""
def determine_winner(my_hand, com_hand):
"""
Determine winner of the game.
:param my_hand: my hand parameter
:param com_hand: predefined computer choice
:return None: None is returned. (void)
===============
Gawi, Bawi, Bo
===============
----------------
Gawi, Bawi, Bo
----------------
Chapter 1 What is this game
---------------------------
It's Rock Scissors Paper
Chapter 1.1 Definition
~~~~~~~~~~~~~~~~~~~~~~~
One person and other person can play this game.
"""
a = com_hand - my_hand
if a>0 or a==-2:
print "You Win"
elif a == 0:
print "Draw"
else:
print "You Lose"
if __name__ == '__main__':
com_hand = random.randint(0,2)
print("Show your hand (0: gawi, 1:bawi, 2:bo)")
my_hand = int(input())
determine_winner(my_hand, com_hand)
| true
|
9daab23a8f0b1fba842055ca157d628a1bd61184
|
Python
|
mohitkhatri611/python-Revision-and-cp
|
/python programs and projects/python all topics/enumerate and zip function.py
|
UTF-8
| 1,928
| 4.5625
| 5
|
[] |
no_license
|
"""how to find the index of each item"""
def exzip1():
Numbers =[1,2,3,4,6,4,3,6,8,5,8]
    # Problem: this gives only the index of the first occurrence of 6 if the list has duplicates.
#print(Numbers.index(6))
"""finding index of all elements even with duplicates"""
    # enumerate provides the index of each element in a list or other iterable and yields (index, item) tuples.
    # syntax: enumerate(iterable, start=0)  # it returns an enumerate object
for i in enumerate(Numbers,start=5):
print("Index of {} is {}".format(i[1],i[0]))
#find out all indexes of single element.
for i in enumerate(Numbers):
if i[1]==8:
print("Index of {} is {}".format(i[1],i[0]))
#print(list(enumerate(Numbers)))
def enumOverDict():
Alphabets ={"AA": 4,"BB":9,"C":16,"DD":25,"EE":36}
for i,j in enumerate(Alphabets):
print(i,j)
def zipUsed():
"""zip function takes iterables and these iterables can be zero or more."""
#Zip function will combine them and return it in the form of tuple.
result =zip()
print(result) # it will be the object.
listResult = list(result)
print(listResult) #it will create empty list
nlist=[4,5]
slst=['four','Five','Six','Seven']
r_tup=('IV','V','VI','VII')
result= zip(nlist,slst,r_tup)
result2= zip(nlist,slst,r_tup)
setResult = set(result)
print(setResult)
setResult2 = tuple(result2)
print(setResult2)
"""problem zip stop when the shortest iterable is exhausted."""
def ex2Zip():
pm=['modi','biden','jacinda','scott','boris']
country= ['india','us','nz','aus','uk']
for pm ,country in zip(pm,country):
print("Prime Minister: %s Country is: %s" %(pm,country))
#how to converts dict from these 2 lists.
pm = ['modi', 'biden', 'jacinda', 'scott', 'boris']
country = ['india', 'us', 'nz', 'aus', 'uk']
print(dict(zip(pm,country)))
ex2Zip()
| true
|
1a65fe716cdda335fb96361f8f2871dee5f25fd1
|
Python
|
samuelfujie/LintCode
|
/1691_Best_Time_to_Buy_and_Sell_Stock_V/solution.py
|
UTF-8
| 485
| 3.296875
| 3
|
[] |
no_license
|
import heapq
class Solution:
"""
@param a: the array a
@return: return the maximum profit
"""
def getAns(self, a):
if not a:
return 0
profit = 0
heap = []
heapq.heapify(heap)
for price in a:
if heap and heap[0] < price:
profit += price - heapq.heappop(heap)
heapq.heappush(heap, price)
heapq.heappush(heap, price)
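            # price is pushed twice when a profit is taken: the push inside the if lets a later, higher price
            # extend (effectively undo and redo) this sale, while the unconditional push registers price as a
            # fresh buy candidate.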
return profit
| true
|
ae1c843529a3e6cc363c9c8868858297b947d107
|
Python
|
FlavrSavr/boring
|
/lottery/post_change_lottery_analysis.py
|
UTF-8
| 1,865
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
import numpy as np
from collections import Counter
import collections
import csv
import re
def lottery_analysis():
counter = 0
list_counter = 0
output_list = []
pre_change_list = []
post_change_list = []
raw_list = np.loadtxt(open("/home/rane/testing/winning_numbers.csv", "rb"), dtype='str', delimiter=',')
string_version = str(raw_list)
version0 = re.sub(r" [0-9][0-9]'","'",string_version)
alt_version1 = version0.replace("' '","','")
alt_version2 = alt_version1.replace("'\n '","','")
back = eval(alt_version2)
for element in back:
if list_counter < 591:
pre_change_list.append(element)
list_counter += 1
elif list_counter >= 591:
post_change_list.append(element)
list_counter += 1
pre_change_str = str(pre_change_list)
post_change_str = str(post_change_list)
version1 = post_change_str.replace("'","")
version2 = version1.replace(" ",",")
version3 = version2.replace(",,",",")
version4 = version3.replace(",0",",")
list_version = eval(version4)
check = (len(list_version))/5
if check == 291.0:
print("Parsed the correct number of elements.")
else:
print("Parsed as incorrect number of elements. Expected 291.0, got "+str(check)+".")
new_dictionary = dict(Counter(list_version))
for key, value in new_dictionary.items():
counter += value
for key, value in new_dictionary.items():
output_list.append([key,(value/counter)])
if counter == 1455:
print("Correct total of numbers returned.")
else:
print("Incorrect total of numbers returned. Expected 1455, got "+str(counter)+".")
with open("/home/rane/testing/post_change_lottery.csv", "w") as file:
writer = csv.writer(file)
writer.writerows(output_list)
lottery_analysis()
| true
|
52ceac5b336535ab8f9927c9afc73d5401fef52a
|
Python
|
Shyngys03/PP2
|
/FINAL/U.py
|
UTF-8
| 338
| 3.359375
| 3
|
[] |
no_license
|
h, a, b = map(int, input().split())
up = True
cnt = 0
m = 0
while True:
if up:
m += a
up = False
cnt += 0.5
if m >= h:
print(int(cnt) + 1)
break
if not up:
m -= b
up = True
cnt += 0.5
if m >= h:
print(int(cnt))
break
| true
|
7108c781ec23c1c1e441e4a26c4866f69dc35201
|
Python
|
ankian27/NLP
|
/stage1/src/DefinitionGeneration.py
|
UTF-8
| 7,219
| 3.890625
| 4
|
[] |
no_license
|
# This class is used to generate the definitions for each cluster. The idea of definition generation is that
# we can derive the definition of a word from the context words neighbouring the target word in a given context.
# The topics given by the HDP are used to get the topic words. The topic words, along with the target word
# (the noun/verb/name-conflate pair), are given as input to the program and the output is a sentence generated
# using those topic words. The sentence generated by our approach adheres to the syntactic structure of English
# grammar and is more than 10 words long. The syntactic structure of English grammar is represented here in the
# form of Context Free Grammars (CFG). A CFG is a set of recursive rules (or productions) which are used to
# generate string patterns. We pass the target word as one of the inputs because, if the target word is present
# in the set of topic words, we want to remove it from the definition. The execution of the program is as follows:
# Input : Topic words, Target word
# Output: Sentence depicting the meaning of the target word
# Example: shoot woman love look movie director part lot money film
# Output : money love with a movie and a director love with is lot
#The Natural Language Toolkit(NLTK), is an open source toolkit of python modules for natural language processing (NLP) for English language.
import nltk
from nltk.tag import pos_tag, map_tag # Function to assign tags to individual tokens and return tagged tokens.
from nltk import word_tokenize # Function to split string of words into individual tokens
from nltk.util import ngrams #Function to return the ngrams generated.
from collections import defaultdict #Creates a default dictionary which gives a default value for non-existent key.
import random #Randomly choose an item from a list of items.
class Definition(object):
def __init__(self):
""" The function __init__ is a constructor in python which accepts the instance of a class of the object itself as a parameter.
The constructur is used to initialize the cfgRule(Context Free Grammar rules), nouns, verbs and adjectives for each instance.
"""
# Create default dictionary
self.cfgRule=defaultdict(list)
# Variables to store list of NOUN, VERB and ADJECTIVEs
self.noun = ''
self.verb = ''
self.adj = ''
def get_Noun_Verb(self, topics):
"""Section I:
        The function is used to separate the Nouns, Verbs and Adjectives in the given set of topic words.
We use the Parts of Speech Tagger from the Natural Language Toolkit to tag the POS for each word in the set of topic words.
Args:
param1 (set) : Set of topic words
Returns:
Nouns, Verbs and Adjectives seperated from the topic words.
"""
self.noun = ''
self.verb = ''
self.adj = ''
adv=[]
#Natural Language POS tagger. Returns the default tags
posTagged=nltk.pos_tag(topics)
# The default tags are converted to simplified tags. Example: NN->NOUN
simplifiedTags=[(word, map_tag('en-ptb', 'universal', tag)) for word, tag in posTagged]
        # Separate Nouns, Verbs and Adjectives by parsing simplifiedTags and assign them to the respective variables.
        # The words for each part of speech are joined with the "|" delimiter.
for word, tag in simplifiedTags:
if tag=='NOUN':
self.noun += word + '|'
if tag=='VERB':
self.verb += word+'|'
if tag=='ADJ':
self.adj += word+'|'
if tag=='ADV':
adv.append(word)
# Remove the additional '|' character from the end of the strings.
self.noun=self.noun[:-1]
self.verb=self.verb[:-1]
self.adj=self.adj[:-1]
return self.noun, self.verb ,self.adj
def cfg_rule(self,left,right):
'''Section II:
The function is used to map the Context Free Grammar production rules for the english grammar to python representation
Args:
param1 (string) : Non-terminal String present on the left side of the production
param2 (string) : Terminal/Non-terminal string present on the right side of the production
'''
# Split the string of Nouns, Verbs, Adjectives appended with "|"
rules=right.split('|')
# For each rule of the production, create a tuple and append it to its respective rule in the CFG list.
for rule in rules:
self.cfgRule[left].append(tuple(rule.split()))
def gen_def(self, symbol):
'''Section III:
The function is used to generate the definition of a sentence recursively using the CFG rules
Args:
param1 (string): Start symbol of the CFG rule
Returns:
definition: The generated definition of the sentence.
'''
definition = ''
        # Randomly select one of the production rules.
rule = random.choice(self.cfgRule[symbol])
        # Iterate over the symbols of the chosen production rule
for sym in rule:
#This condition is true if the sym leads to other nonterminal symbols.
if sym in self.cfgRule:
definition += self.gen_def(sym)
#This is true if the sym leads to terminals.
else:
definition += sym + ' ' # Append the word and the space for the definition.
# Form a list of nouns and verbs by splitting the string formed above in the function get_Noun_Verb.
noun2=self.noun.split('|')
verb2=self.verb.split('|')
# Filtering out the already used words.
                # If a noun has been used, remove it from the list of Noun words.
                noun2 = [a for a in noun2 if a != sym]
self.noun=''
                # If a verb has been used, remove it from the list of Verb words.
                verb2 = [a for a in verb2 if a != sym]
self.verb=''
#Repopulating the noun and verb strings with the used word removed.
for words in noun2:
self.noun += words + '|'
self.noun=self.noun[:-1]
for words in verb2:
self.verb += words + '|'
self.verb=self.verb[:-1]
return definition
def generate_Definition(self, topics, target):
'''Section IV:
        This function controls the flow of the program. It makes calls to the functions that produce the CFG rules and generate the definition of the cluster.
Args:
param1 (set) : Set of topic words
param2 (string): The target word for which the definition has to be generated.
Returns:
The definition of the target word adhering to the english grammar rules and it is longer than 10 words.
'''
# Removes the target word from the set of topic words. As the definition should not contain the word itself.
        topics = [topic for topic in topics if target not in topic]
        # Get the separated Nouns, Verbs, Adjectives
self.noun, self.verb ,self.adj= self.get_Noun_Verb(topics)
# Represent CFG rules in python
# S -> S1 CONJ S2
# S1 -> NP VP
# S2 -> NP VP
# NP -> Det N
# VP -> V PRO ADJ NP
# PRO -> with | to
# Det -> a | the | is
# N -> Noun words list
# V -> Verb words list
# ADJ -> Adjective words list
# CONJ -> and
self.cfg_rule('S', 'S1 CONJ S2')
self.cfg_rule('S1', 'NP VP')
self.cfg_rule('S2', 'NP VP')
self.cfg_rule('NP', 'Det N')
self.cfg_rule('VP', 'V PRO ADJ NP')
self.cfg_rule('CONJ','and')
self.cfg_rule('PRO','with | to')
self.cfg_rule('Det', 'a | the | is')
self.cfg_rule('N', self.noun)
self.cfg_rule('V', self.verb)
self.cfg_rule('ADJ', self.adj)
# Generate sentence and return it.
return self.gen_def('S')
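
# A minimal usage sketch of the Definition class, assuming the NLTK resources
# 'averaged_perceptron_tagger' and 'universal_tagset' are already downloaded.
# The topic words reuse the example from the header comment; the output is randomised.
if __name__ == '__main__':
    example_topics = ['shoot', 'woman', 'love', 'look', 'movie', 'director', 'part', 'lot', 'money', 'film']
    generator = Definition()
    print(generator.generate_Definition(example_topics, 'film'))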
| true
|
2b34dc358d9d62a5ba32dfd89a8fd29d9e783e17
|
Python
|
mary-alegro/AVID_pipeline
|
/python/UCSFSlideScan/results/plot_precrec_curve_all.py
|
UTF-8
| 5,804
| 2.703125
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import auc
def find_nearest1(array,value):
idx,val = min(enumerate(array), key=lambda x: abs(x[1]-value))
return idx
def main():
thres = 0.5
#AT100
AT100_test_stats = np.load('/home/maryana/storage2/Posdoc/AVID/AT100/results/testing/AT100_testing_stats.npy')
AT100_val_stats = np.load('/home/maryana/storage2/Posdoc/AVID/AT100/results/validation/AT100_validation_stats.npy')
AT8_test_stats = np.load('/home/maryana/storage2/Posdoc/AVID/AT8/results/testing/AT8_testing_stats.npy')
AT8_val_stats = np.load('/home/maryana/storage2/Posdoc/AVID/AT8/results/validation/AT8_validation_stats.npy')
MC1_test_stats = np.load('/home/maryana/storage2/Posdoc/AVID/MC1/results/testing/MC1_testing_stats.npy')
MC1_val_stats = np.load('/home/maryana/storage2/Posdoc/AVID/MC1/results/validation/MC1_validation_stats.npy')
fig1_name = '/home/maryana/storage2/Posdoc/AVID/All_precision_recall.png'
AT100_prec_t = AT100_test_stats[:, 2]; AT100_recall_t = AT100_test_stats[:, 3]
AT100_prec_v = AT100_val_stats[:, 2]; AT100_recall_v = AT100_val_stats[:, 3]
AT8_prec_t = AT8_test_stats[:, 2]; AT8_recall_t = AT8_test_stats[:, 3]
AT8_prec_v = AT8_val_stats[:, 2]; AT8_recall_v = AT8_val_stats[:, 3]
MC1_prec_t = MC1_test_stats[:, 2]; MC1_recall_t = MC1_test_stats[:, 3]
MC1_prec_v = MC1_val_stats[:, 2]; MC1_recall_v = MC1_val_stats[:, 3]
AT100_fpr_t = AT100_test_stats[:,5]; AT100_f1_t = AT100_test_stats[:,4]
AT100_fpr_v = AT100_val_stats[:,5]; AT100_f1_v = AT100_val_stats[:,4]
AT8_fpr_t = AT8_test_stats[:,5]; AT8_f1_t = AT8_test_stats[:,4]
AT8_fpr_v = AT8_val_stats[:,5]; AT8_f1_v = AT8_val_stats[:,4]
MC1_fpr_t = MC1_test_stats[:,5]; MC1_f1_t = MC1_test_stats[:,4]
MC1_fpr_v = MC1_val_stats[:,5]; MC1_f1_v = MC1_val_stats[:,4]
probs = np.linspace(1, 0, num=20)
index = find_nearest1(probs,thres)
print('AT100')
print('Testing: {}(Prec) {}(TPR) {}(FPR) {}(F1) {}(TNR) {}(FNR)'.format(AT100_prec_t[index],AT100_recall_t[index],AT100_fpr_t[index],AT100_f1_t[index],1-AT100_fpr_t[index],1-AT100_recall_t[index]))
print('Val: {}(Prec) {}(TPR) {}(FPR) {}(F1) {}(TNR) {}(FNR)'.format(AT100_prec_v[index],AT100_recall_v[index],AT100_fpr_v[index],AT100_f1_v[index],1-AT100_fpr_v[index],1-AT100_recall_v[index]))
print('AT8')
print('Testing: {}(Prec) {}(TPR) {}(FPR) {}(F1) {}(TNR) {}(FNR)'.format(AT8_prec_t[index],AT8_recall_t[index],AT8_fpr_t[index],AT8_f1_t[index],1-AT8_fpr_t[index],1-AT8_recall_t[index]))
print('Val: {}(Prec) {}(TPR) {}(FPR) {}(F1) {}(TNR) {}(FNR)'.format(AT8_prec_v[index],AT8_recall_v[index],AT8_fpr_v[index],AT8_f1_v[index],1-AT8_fpr_v[index],1-AT8_recall_v[index]))
print('MC1')
print('Testing: {}(Prec) {}(TPR) {}(FPR) {}(F1) {}(TNR) {}(FNR)'.format(MC1_prec_t[index],MC1_recall_t[index],MC1_fpr_t[index],MC1_f1_t[index],1-MC1_fpr_t[index],1-MC1_recall_t[index]))
print('Val: {}(Prec) {}(TPR) {}(FPR) {}(F1) {}(TNR) {}(FNR)'.format(MC1_prec_v[index],MC1_recall_v[index],MC1_fpr_v[index],MC1_f1_v[index],1-MC1_fpr_v[index],1-MC1_recall_v[index]))
AT100_x_thres_t = AT100_recall_t[index]; AT100_y_thres_t = AT100_prec_t[index]
AT100_x_thres_v = AT100_recall_v[index]; AT100_y_thres_v = AT100_prec_v[index]
AT8_x_thres_t = AT8_recall_t[index]; AT8_y_thres_t = AT8_prec_t[index]
AT8_x_thres_v = AT8_recall_v[index]; AT8_y_thres_v = AT8_prec_v[index]
MC1_x_thres_t = MC1_recall_t[index]; MC1_y_thres_t = MC1_prec_t[index]
MC1_x_thres_v = MC1_recall_v[index]; MC1_y_thres_v = MC1_prec_v[index]
AT100_auc_t = auc(AT100_recall_t, AT100_prec_t); AT100_auc_v = auc(AT100_recall_v, AT100_prec_v)
AT8_auc_t = auc(AT8_recall_t, AT8_prec_t); AT8_auc_v = auc(AT8_recall_v, AT8_prec_v)
MC1_auc_t = auc(MC1_recall_t, MC1_prec_t); MC1_auc_v = auc(MC1_recall_v, MC1_prec_v)
plt.figure()
lw = 2
plt.plot(AT100_recall_t,AT100_prec_t,'--', color='red',lw=lw, label='AT100 testing (AUC {:.2f})'.format(AT100_auc_t))
plt.plot(AT100_recall_v,AT100_prec_v, color='red', lw=lw, label='AT100 validation (AUC {:.2f})'.format(AT100_auc_v))
    plt.plot(AT100_x_thres_t, AT100_y_thres_t, color='red', lw=lw, marker='*', markersize=12)  # testing-threshold point taken from the prec/recall vectors at the probs index closest to the chosen threshold
plt.plot(AT100_x_thres_v, AT100_y_thres_v, color='red', lw=lw, marker='*', markersize=12)
plt.plot(AT8_recall_t,AT8_prec_t,'--', color='green',lw=lw, label='AT8 testing (AUC {:.2f})'.format(AT8_auc_t))
plt.plot(AT8_recall_v,AT8_prec_v, color='green', lw=lw, label='AT8 validation (AUC {:.2f})'.format(AT8_auc_v))
    plt.plot(AT8_x_thres_t, AT8_y_thres_t, color='green', lw=lw, marker='*', markersize=12)  # testing-threshold point taken from the prec/recall vectors at the probs index closest to the chosen threshold
plt.plot(AT8_x_thres_v, AT8_y_thres_v, color='green', lw=lw, marker='*', markersize=12)
plt.plot(MC1_recall_t,MC1_prec_t,'--', color='blue',lw=lw, label='MC1 testing (AUC {:.2f})'.format(MC1_auc_t))
plt.plot(MC1_recall_v,MC1_prec_v, color='blue', lw=lw, label='MC1 validation (AUC {:.2f})'.format(MC1_auc_v))
    plt.plot(MC1_x_thres_t, MC1_y_thres_t, color='blue', lw=lw, marker='*', markersize=12)  # testing-threshold point taken from the prec/recall vectors at the probs index closest to the chosen threshold
plt.plot(MC1_x_thres_v, MC1_y_thres_v, color='blue', lw=lw, marker='*', markersize=12)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall Curve')
plt.legend(loc="lower right")
    # Save before show() so the written figure is not blank after the window is closed.
    plt.savefig(fig1_name)
    plt.show()
if __name__ == '__main__':
main()
| true
|
f19e31be81217984ba92bcbdaf67991676371749
|
Python
|
YumoeZhung/3Dswc
|
/test/opengl.py
|
UTF-8
| 1,434
| 2.5625
| 3
|
[] |
no_license
|
__author__ = 'Su Lei'
import pyglet
from pyglet.gl import *
window = pyglet.window.Window()
# vertices = [0, 0, window.width, 0, window.width, window.height]
# vertices_gl = (GLfloat * len(vertices))(*vertices)
# print GLfloat * len(vertices)
# print vertices_gl
# print GLfloat * 100
# glEnableClientState(GL_VERTEX_ARRAY)
# glVertexPointer(2, GL_FLOAT, 0, vertices_gl)
#
#
#
# @window.event
# def on_resize(width, height):
# glViewport(0, 0, width, height)
# glMatrixMode(GL_PROJECTION)
# glLoadIdentity()
# gluPerspective(65, width / float(height), .1, 1000)
# glMatrixMode(GL_MODELVIEW)
# return pyglet.event.EVENT_HANDLED
#
#
#
#
# vertex_list = pyglet.graphics.vertex_list_indexed(2, [0, 0], ('v2i', (10, 15, 30, 35)),
# ('c3B', (0, 0, 255, 0, 255, 0)))
#
#
# @window.event
# def on_draw():
# vertex_list.vertices[:2] = [60, 95]
# vertex_list.colors[:3] = [255, 0, 0]
# vertex_list.draw(pyglet.gl.GL_POINTS)
color_list = [1.0, 0.0, 0.0]
color_list_gl = (GLfloat * len(color_list))(*color_list)
@window.event
def on_draw():
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0, 1.0, 1.0)
glColor3fv(color_list_gl)
glOrtho(0.0, 1.0, 0.0, 1.0, -1.0, 1.0)
glBegin(GL_POLYGON)
glVertex2i(20, 20)
glVertex2i(25, 80)
glVertex2i(50, 87)
glVertex2i(74, 12)
glEnd()
glFlush()
pyglet.app.run()
| true
|
bae1a0c33e4713ac4220e43adfe1b5bc9caf44eb
|
Python
|
elc1798/project-pepe-server
|
/meme_learning/preprocessing.py
|
UTF-8
| 4,812
| 2.515625
| 3
|
[] |
no_license
|
from io import BytesIO
from PIL import Image
from imagenet import classify_image
import os
import glob
import numpy as np
import scipy.ndimage as spimg
JPEG_FORMAT = "JPEG"
WILD_PNG = "*.png"
WILD_JPG = "*.jpg"
CLASSIFIER_IMG_SIZE = (299, 299)
DATA_DIR = "data/"
TRAIN_DIR = "train/"
objects_detected = {}
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
A, y = None, None
npyfs = glob.glob(os.path.join(CURRENT_DIR, "*.npy"))
mat_A_npy = os.path.join(CURRENT_DIR, "mat_A.npy")
mat_y_npy = os.path.join(CURRENT_DIR, "mat_y.npy")
if os.path.exists(mat_A_npy) and os.path.exists(mat_y_npy):
print("USING PRELOADED TRAINING SET")
A = np.load(mat_A_npy)
y = np.load(mat_y_npy)
class DataUnit:
def __init__(self, image_path):
global objects_detected
tmp = image_path[ len(TRAIN_DIR) : -4 ].split("_")
try:
self.rating = int(tmp[1])
self.ID = "_".join(tmp[2:])
except:
print("No rating in filename!")
self.mat = spimg.imread(image_path)
# Split the channels into different segments of the image with some
# padding
self.mat = np.r_[
self.mat[:,:,0],
np.zeros((2, 299,)),
self.mat[:,:,1],
np.zeros((2, 299,)),
self.mat[:,:,2]
]
assert self.mat.shape == (299 * 3 + 4, 299)
# Add an extra column to pad to (299*3 + 4) x 300
self.mat = np.c_[ self.mat, np.zeros(299 * 3 + 4) ]
objects_extracted = classify_image.classify(image_path,
print_results=False)
self.objects = {}
for extracted in objects_extracted:
# ID of object is the shortest one
ids = extracted[0].split(", ")
ids.sort(key=len)
obj_id = ids[0].lower()
self.objects[obj_id] = extracted[1]
if obj_id not in objects_detected:
objects_detected[obj_id] = len(objects_detected)
self.obj_vec = None
def scale_images(src_path, dst_path):
original_img = Image.open(src_path)
resized = original_img.resize(CLASSIFIER_IMG_SIZE, Image.ANTIALIAS)
# Save the resized image to destination
resized.save(dst_path, format=JPEG_FORMAT)
def process_for_training(images):
processed_paths = []
for image in images:
basename = image[ len(DATA_DIR) : -4 ]
print "Resizing %s and converting to JPEG..." % (basename,)
processed_path = os.path.join(TRAIN_DIR, basename + ".jpg")
scale_images(image, processed_path)
processed_paths.append(processed_path)
return processed_paths
def get_training_set():
global objects_detected
raw = glob.glob(os.path.join(DATA_DIR, WILD_PNG))
raw += glob.glob(os.path.join(DATA_DIR, WILD_JPG))
images = process_for_training(raw)
dataunits = []
for image in images:
print "Image: %s (%d of %d)" % (image, len(dataunits) + 1, len(images))
dataunits.append(DataUnit(image))
print "%d unique objects detected." % (len(objects_detected),)
one_hot_size = ((len(objects_detected) // 300) + 2) * 300
for dataunit in dataunits:
dataunit.obj_vec = np.zeros( (one_hot_size,), dtype=np.float32 )
for obj_id in dataunit.objects:
dataunit.obj_vec[objects_detected[obj_id]] = dataunit.objects[obj_id]
mod4 = (dataunit.mat.shape[0] + (one_hot_size // 300)) % 4
padding = np.zeros((4 - mod4, 300))
dataunit.mat = np.r_[
dataunit.mat,
padding,
np.reshape(dataunit.obj_vec, (one_hot_size // 300, 300))
]
return np.array([
dataunit.mat
for dataunit in dataunits
], dtype=np.float32), np.array([
dataunit.rating
for dataunit in dataunits
], dtype=np.int32)
def get_single_img(img_path):
processed = process_for_training([img_path])
dataunit = DataUnit(processed[0])
one_hot_size = ((len(objects_detected) // 300) + 2) * 300
dataunit.obj_vec = np.zeros( (one_hot_size,), dtype=np.float32 )
for obj_id in dataunit.objects:
if obj_id in objects_detected:
dataunit.obj_vec[objects_detected[obj_id]] = dataunit.objects[obj_id]
else:
print "Unrecognized Object! Discarding..."
mod4 = (dataunit.mat.shape[0] + (one_hot_size // 300)) % 4
padding = np.zeros((4 - mod4, 300))
return np.r_[
dataunit.mat,
padding,
np.reshape(dataunit.obj_vec, (one_hot_size // 300, 300))
].astype(np.float32)
if __name__ == "__main__":
    if A is None or y is None:  # 'is None' avoids elementwise '==' comparison on numpy arrays
A, y = get_training_set()
np.save(mat_A_npy, A)
np.save(mat_y_npy, y)
print "Final shape of A: %r" % (A.shape,)
print "Final shape of y: %r" % (y.shape,)
| true
|
4348b4c2480a94cad3d548d3cd283e4a63bf537e
|
Python
|
lkmartin90/doubling_agent
|
/doubling_agent/image_analysis_functions.py
|
UTF-8
| 27,070
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 - registers the '3d' projection used in plot_cells_3d
from scipy.spatial.distance import cdist
from scipy.spatial.distance import euclidean
import matplotlib.patches as mpatches
from scipy import optimize
import pandas as pd
import os
import fnmatch
plt.style.use('ggplot')
def plot_cells(data_df, value, folder_name, r, time_step):
# basic function to plot the cells at a given time snapshot
df_to_plot = data_df.loc[data_df['count'] == value]
col = df_to_plot.state.map({0: 'b', 1: 'r', 2: 'g', 3: 'k'})
df_to_plot.plot.scatter(x='x', y='y', c=col, s=8)
blue_patch = mpatches.Patch(color='blue', label='Stem cell')
    red_patch = mpatches.Patch(color='red', label='Progenitor cell')
green_patch = mpatches.Patch(color='green', label='Differentiated cell')
black_patch = mpatches.Patch(color='black', label='Quiescent cell')
plt.legend(handles=[red_patch, blue_patch, green_patch, black_patch])
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value*time_step)) + '.png')
plt.cla()
plt.close('all')
def plot_cells_3d(data_df, value, folder_name, r, time_step):
# basic function to plot the cells at a given time snapshot (for 3D data)
df_to_plot_3d = data_df.loc[data_df['count'] == value]
col = df_to_plot_3d.state.map({0: 'b', 1: 'r', 2: 'g', 3: 'k'})
fig = plt.figure()
    threedee = fig.add_subplot(111, projection='3d')  # fig.gca(projection=...) is not available in newer matplotlib
#print(df_to_plot.state.values)
threedee.scatter(df_to_plot_3d.x.values, df_to_plot_3d.y.values, df_to_plot_3d.z.values, c=col)
threedee.set_xlabel('x')
threedee.set_ylabel('y')
threedee.set_zlabel('z')
blue_patch = mpatches.Patch(color='blue', label='Stem cell')
    red_patch = mpatches.Patch(color='red', label='Progenitor cell')
green_patch = mpatches.Patch(color='green', label='Differentiated cell')
black_patch = mpatches.Patch(color='black', label='Quiescent cell')
plt.legend(handles=[red_patch, blue_patch, green_patch, black_patch], loc='upper left')
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value*time_step)) + '.png')
plt.cla()
plt.close('all')
def plot_2d_slice(folder_name, data_df, value, time_step, r):
# Plot 2D slices of 3D data
df_2d_slice = data_df.loc[data_df['count'] == value].copy()
    # will take slices in z to get the 2d analysis
x_values = df_2d_slice['z'].values
unique, counts = np.unique(x_values, return_counts=True)
tot_dict = dict(zip(unique, counts))
for sect in unique:
if tot_dict.get(sect) > 10:
print(sect)
df_for_image = df_2d_slice.loc[(data_df['z'] == sect)].copy()
col = df_for_image.state.map({0: 'b', 1: 'r', 2: 'g', 3: 'k'})
df_for_image.plot.scatter(x='x', y='y', c=col, s=8)
blue_patch = mpatches.Patch(color='blue', label='Stem cell')
            red_patch = mpatches.Patch(color='red', label='Progenitor cell')
green_patch = mpatches.Patch(color='green', label='Differentiated cell')
black_patch = mpatches.Patch(color='black', label='Quiescent cell')
plt.legend(handles=[red_patch, blue_patch, green_patch, black_patch])
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value * time_step)) + '_sect_' + str(sect) + '.png')
plt.cla()
plt.close('all')
def fft_analysis(data_df, value, folder_name, r, time_step, sect):
    # We wanted to look at the FFT of the cell data to determine whether there is a difference in the frequencies
    # present in different simulations. For a comparison to experiment we would perhaps have to use the ratio
    # between the frequencies for the different cell types.
df_fft = data_df.loc[data_df['count'] == value]
label_dict = {0: "Stem", 1: "Progenitor", 2: "Differentiated", 3: "Quiescent"}
# only look at the snapshot that match the value parameter
# will plot figure with data from all 4 cell types
fig, ax = plt.subplots(nrows=3, ncols=4)
# Faff to set the scale of the plots
if abs(df_fft['x'].max()) > abs(df_fft['x'].min()):
x_len = df_fft['x'].max() + 1
else:
x_len = -1 * df_fft['x'].min() + 1
if abs(df_fft['y'].max()) > abs(df_fft['y'].min()):
y_len = df_fft['y'].max() + 2
else:
y_len = -1 * df_fft['y'].min() + 2
if x_len > y_len:
length = int(x_len)
else:
length = int(y_len)
fft_stats = []
# If there are no Quiescent cells just produces empty plot in 4th column
for state_type in range(0, 4):
df_for_image = df_fft.loc[df_fft['state'] == state_type]
image = np.zeros((2 * length, 2 * length))
for i in range(len(df_for_image)):
image[length + int(np.round(df_for_image['x'].iloc[i]))][length + int(np.round(df_for_image['y'].iloc[i]))] = 1
# Do the FFT
ftimage = np.fft.fft2(image)
ftimage = np.fft.fftshift(ftimage)
freq = np.abs(ftimage)
ax[0, state_type].hist(freq.ravel(), bins=100, range=(0, 30))
ax[0, state_type].set_title(str(label_dict.get(state_type)))
im1 = ax[1, state_type].imshow(freq, interpolation="none", cmap='jet', aspect="auto")
fig.colorbar(im1, ax=ax[1, state_type])
im2 = ax[2, state_type].imshow(image, interpolation="none", cmap='jet', aspect="auto")
fig.colorbar(im2, ax=ax[2, state_type])
fig.tight_layout()
fft_stats.append(str(state_type))
fft_stats.append(str(np.mean(freq)))
fft_stats.append(str(np.std(freq)))
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value * time_step)) + '_sect_' + str(sect) + 'FFT.png')
plt.cla()
plt.close('all')
with open(folder_name + '/fft_data_day_' +
str(int(value * time_step)) + '_sect_' + str(sect) + '.txt', 'w') as f:
f.write("\n".join(fft_stats))
def density_analysis(folder_name, data_df, value, k, switch_3d, time_step, r):
# For the whole tumour look at the density of each cell type through the distance to a number of its
# nearest neighbours, specified by k. In an attempt to quantify this further the data is then fitted
# to with a simple function.
df_dens = data_df.loc[data_df['count'] == value].copy()
label_dict = {0: "Stem cell", 1: "Progenitor cell", 2: "Differentiated cell", 3: "Quiescent cell"}
tot_cells = df_dens.shape[0]
    # If there are more cells than this the code will take a prohibitively long time to run
if tot_cells > 30000:
return
# Distance between the array and itself
plt.figure()
sta = df_dens.drop_duplicates(subset=['state'], keep='last')
for state_type in sta['state'].values:
df_dens_state = df_dens.loc[data_df['state'] == state_type].copy()
# BE WARNED THESE DATAFRAMES ARE COPIES, CHANGING THEM WILL CHANGE ORIGINAL
if switch_3d:
df_dens_state['coords'] = df_dens_state.loc[:, ['x', 'y', 'z']].values.tolist()
df_dens_state['coords'] = df_dens_state.loc[:, 'coords'].apply(np.array)
else:
df_dens_state['coords'] = df_dens_state.loc[:, ['x', 'y']].values.tolist()
df_dens_state['coords'] = df_dens_state.loc[:, 'coords'].apply(np.array)
data = np.stack(df_dens_state['coords'].values, axis=0)
dists = cdist(data, data)
# Sort by distances
k_nearest = np.sort(dists)[:, 1:k + 1]
mean_k_nearest = np.mean(k_nearest, axis=1)
distances = np.sort(mean_k_nearest)
with open(folder_name + '/cell_numbers.txt', 'a+') as f:
f.write(str(len(distances)) + '\n')
plt.scatter(y=distances, x=np.arange(len(distances))/tot_cells, label=label_dict.get(state_type),
marker='+', alpha=0.5)
if len(distances) > 2:
try:
fit_params, fit_params_covariance = optimize.curve_fit(fit_func,
np.arange(1, len(distances)+1)/tot_cells,
distances, p0=[1, 0.2, (len(distances)+1)/tot_cells])
# print(fit_params)
plt.plot(np.arange(len(distances))/tot_cells,
fit_func(np.arange(len(distances))/tot_cells, fit_params[0],
fit_params[1], fit_params[2]), label='Fitted function')
with open(folder_name + '/fitting.txt', 'a+') as f:
                    # write the data to file in a nice, human-readable way
f.write('value =' + str(value) + '\n' + str(label_dict.get(state_type))
+ ' = ' + str(fit_params[0])
+ ' ' + str(fit_params[1]) + ' ' + str(fit_params[2]) + '\n')
with open(folder_name + '/fitting_dat.txt', 'a+') as f:
# write the data to file in a way which makes it easier to process later
f.write(str(value) + ' ' + str(state_type) + ' ' + str(fit_params[0])
+ ' ' + str(fit_params[1]) + ' ' + str(fit_params[2]) + '\n')
            except (RuntimeError, ValueError):
                print("Didn't find a good fit")
with open(folder_name + '/fitting_dat.txt', 'a+') as f:
f.write(str(value) + ' ' + str(state_type) + ' ' + str(0)
+ ' ' + str(0) + ' ' + str(0) + '\n')
plt.xlabel('Cells, ordered from smallest to largest mean distance')
plt.ylabel('Mean distance to 8 nearest neighbours')
plt.legend(loc="upper right")
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value*time_step)) + 'density.png')
plt.close('all')
def distance_from_centre(folder_name, data_df, value, switch_3d, time_step, r):
# find the distance of each type of cell from the tumour centre and plot
df_dist = data_df.loc[data_df['count'] == value].copy()
label_dict = {0: "Stem cell", 1: "Progenitor cell", 2: "Differentiated cell", 3: "Quiescent cell"}
x_mean = np.mean(df_dist.loc[:, ['x']].values)
y_mean = np.mean(df_dist.loc[:, ['y']].values)
# find the centre of the tumour
if switch_3d:
z_mean = np.mean(df_dist.loc[:, ['z']].values)
cent = [x_mean, y_mean, z_mean]
else:
cent = [x_mean, y_mean]
print('Tumour center is ' + str(cent))
plt.figure()
# loop through the different cell states
for state_type in range(0, df_dist.state.max()+1):
df_for_image = df_dist.loc[data_df['state'] == state_type].copy()
# BE WARNED THESE DATAFRAMES ARE COPIES, CHANGING THEM WILL CHANGE ORIGINAL
if switch_3d:
df_for_image['coords'] = df_for_image.loc[:, ['x', 'y', 'z']].values.tolist()
df_for_image['coords'] = df_for_image.loc[:, 'coords'].apply(np.array)
else:
df_for_image['coords'] = df_for_image.loc[:, ['x', 'y']].values.tolist()
df_for_image['coords'] = df_for_image.loc[:, 'coords'].apply(np.array)
dist_list = []
for i in range(len(df_for_image['coords'].values)):
dist_list.append(euclidean(df_for_image['coords'].values[i], cent))
plt.hist(dist_list, label=label_dict.get(state_type), alpha=0.5)
plt.xlabel('Distance from tumour centre')
plt.ylabel('Number of cells')
plt.legend(loc="upper right")
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' + str(
int(value * time_step)) + 'distance.png')
plt.close('all')
def fit_func(x, a, b, c):
# define the fitting function for density analysis (currently used)
return a + b/(c - x)
def fit_func2(x, a, b, c, d):
# define the more complex fitting function for density analysis (not currently used)
return a + b/(c - x) - d/x
def fit_analysis(main_folder, min_r, max_r, time_step):
# Compares the fitting parameters over a number of different repeats of the simulation
# Compares the whole tumour data, not subsets or slices
label_dict = {0: "Stem cell", 1: "Progenitor cell", 2: "Differentiated cell", 3: "Quiescent cell"}
# loop over the repeats that we're dealing with
for r in range(min_r, max_r+1):
with open(main_folder + '/plots_repeat_' + str(r) + '/fitting_dat.txt', 'r') as f:
fit_data = f.readlines()
data_mat = []
for i in range(len(fit_data)):
dat = fit_data[i].strip().split()
dat.append(str(r))
dat = np.array(dat).astype(float)
data_mat.append(dat)
if r == min_r:
data_df = pd.DataFrame(data_mat)
else:
data_df = data_df.append(pd.DataFrame(data_mat))
# extract the values
data_df = data_df.rename(columns={0: "value", 1: "state"})
# find the points at which data was saved
values = data_df.drop_duplicates(subset=['value'])['value'].values
tot_len = len(data_df.columns)
# find all different cell states present
states = data_df.drop_duplicates(subset=['state'])['state'].values
# loop over the possible "values", which are the time steps at which data was taken.
for val in values:
print(val)
plt.figure()
# loop over the diffent cell states present
for sta in states:
to_plot = data_df.loc[(data_df['value'] == val) & (data_df['state'] == sta)][tot_len-2].values
plt.plot(to_plot, label=label_dict.get(sta))
plt.legend(loc="upper left")
plt.xlabel('repeat')
plt.ylabel('fraction of cells in each state')
plt.savefig(main_folder + '/day_' + str(int(val * time_step)) + 'fraction.png')
plt.close('all')
for val in values:
print(val)
plt.figure()
for sta in states:
to_plot = data_df.loc[(data_df['value'] == val) & (data_df['state'] == sta)][tot_len-3].values
plt.plot(to_plot, label=label_dict.get(sta))
plt.legend(loc="upper left")
plt.xlabel('repeat')
plt.ylabel('second fitting parameter')
plt.savefig(main_folder + '/day_' + str(int(val * time_step)) + 'dens_change.png')
plt.close('all')
def density_analysis_2d_slice(folder_name, data_df, value, k, time_step, r, subset, min_box_size):
# performs the density analysis for 2D slices of the 3D data
df_dens_2d = data_df.loc[data_df['count'] == value].copy()
    # will take slices in z to get the 2d analysis
z_values = df_dens_2d['z'].values
# unique is the unique z values, and counts is the number of cells at this z value
unique, counts = np.unique(z_values, return_counts=True)
tot_dict = dict(zip(unique, counts))
label_dict = {0: "Stem cell", 1: "Progenitor cell", 2: "Differentiated cell", 3: "Quiescent cell"}
# here, sect is a z value identified in unique
for sect in unique:
        # if there are more than 30 cells in this z-axis slice...
if tot_dict.get(sect) > 30:
print(sect)
            # initialise figure
plt.figure()
tot_cells = df_dens_2d.loc[df_dens_2d['z'] == sect].shape[0]
# for each cell type at this z axis value...
for state_type in np.unique(df_dens_2d.loc[df_dens_2d['z'] == sect]['state'].values):
# takes a copy of the data in which cells are in this state
df_for_image = df_dens_2d.loc[(data_df['state'] == state_type) & (data_df['z'] == sect)].copy()
# BE WARNED THESE DATAFRAMES ARE COPIES, CHANGING THEM WILL CHANGE ORIGINAL
df_for_image['coords'] = df_for_image.loc[:, ['x', 'y', 'z']].values.tolist()
df_for_image['coords'] = df_for_image.loc[:, 'coords'].apply(np.array)
data = np.stack(df_for_image['coords'].values, axis=0)
# Distance between the array and itself
dists = cdist(data, data)
# Sort by distances
k_nearest = np.sort(dists)[:, 1:k + 1]
mean_k_nearest = np.mean(k_nearest, axis=1)
# print(mean_k_nearest)
distances = np.sort(mean_k_nearest)
with open(folder_name + '/sect_' + str(sect) + '_cell_numbers.txt', 'a+') as f:
f.write(str(len(distances)) + '\n')
plt.scatter(y=distances, x=np.arange(len(distances))/tot_cells, label=label_dict.get(state_type),
marker='+', alpha=0.5)
# if there are more than 3 of this cell type fit to the data
if np.isnan(distances[0]) == False and len(distances) > 3:
try:
fit_params, fit_params_covariance = optimize.curve_fit(fit_func,
np.arange(1, len(distances) + 1)/tot_cells,
distances, p0=[1, 0.2, (len(distances) + 1)/tot_cells])
plt.plot(np.arange(len(distances))/tot_cells, fit_func(np.arange(len(distances))/tot_cells,
fit_params[0], fit_params[1],
fit_params[2]), label='Fitted function')
                        # write the data to file in a way which makes it easier to process later on
with open(folder_name + '/sect_' + str(sect) +
'_fitting_dat.txt', 'a+') as f:
f.write(str(value) + ' ' + str(state_type) + ' ' + str(fit_params[0])
+ ' ' + str(fit_params[1]) + ' ' + str(fit_params[2]) + '\n')
                    except (RuntimeError, ValueError, TypeError):
with open(folder_name + '/sect_' + str(sect) + '_fitting_dat.txt', 'a+') as f:
f.write(str(value) + ' ' + str(state_type) + ' ' + str(0)
+ ' ' + str(0) + ' ' + str(0) + '\n')
                        print("Didn't find a good fit")
plt.xlabel('Cells, ordered from smallest to largest mean distance')
plt.ylabel('Mean distance to 8 nearest neighbours')
plt.legend(loc="upper right")
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value * time_step)) + 'sect_' + str(sect) + 'density.png')
plt.close('all')
# Here as the data is already in the correct format, just pass to fft analysis.
df_for_analysis = df_dens_2d.loc[(data_df['z'] == sect)].copy()
if subset is not None:
lacunarity(folder_name, df_for_analysis, value, time_step, r, subset, sect, min_box_size)
fft_analysis(df_for_analysis, value, folder_name, r, time_step, sect)
def lacunarity(folder_name, data_df, value, time_step, r, subset, sect, min_box_size):
# The idea here is to split the image into boxes of many sizes and to count the number of cells in each box,
# the standard deviation in relation to the mean tells you about the clumping, and by changing the scale of
# the box this gives you the scale of the variation
    # We want this function to be able to take 2D or 3D input; if the input is 3D then do the same thing for each slice.
# print(value)
label_dict = {0: "Stem cell", 1: "Progenitor cell", 2: "Differentiated cell", 3: "Quiescent cell"}
    # Need to decide on a box size. This is unlikely to work well for small tumours; it needs a large area that is evenly populated.
min_data_size = 20
min_grid_size = min_box_size
max_grid_size = (subset[1] - subset[0])/4
df_lac = data_df.loc[data_df['count'] == value].copy()
# only use for tumours that have been subsetted
if subset[1] - subset[0] < min_data_size:
print('Subset specified too small for lacunarity analysis')
else:
plt.figure()
for state_type in np.unique(df_lac['state'].values):
# no idea how computationally intensive this will be or what is a sensible number of boxes to use.
# can simply bin data into boxes of the correct size with different starting points?
# need to specify the start points of the bins
df_process = df_lac.loc[(df_lac['state'] == state_type)].copy()
lac_r = []
r_size = []
for bin_size in range(int(min_grid_size), int(max_grid_size)+1):
lac_r_data = np.array([])
r_size.append(bin_size)
for i in range(bin_size):
x_edge = np.arange(subset[0] + i, subset[1] - bin_size, bin_size)
# can then set the range of the histogram based on these values
x = df_process['x'].values
y = df_process['y'].values
hist = np.histogram2d(x, y, bins=x_edge,
range=[[np.min(x_edge), np.max(x_edge)+bin_size],
[np.min(x_edge), np.max(x_edge)+bin_size]])
lac_r_data = np.append(lac_r_data, np.ndarray.flatten(hist[0]))
lac_r.append((np.std(lac_r_data)/np.mean(lac_r_data))**2)
plt.plot(r_size, lac_r, label=label_dict.get(state_type))
with open(folder_name + '/lac_tot_day_' + str(int(value * time_step)) + '_sect_' + str(sect) +
'_state_' + str(state_type) + '.txt','a+') as f:
f.write(str(r_size).strip('[]') + '\n')
f.write(str(lac_r).strip('[]'))
plt.legend()
plt.xlabel('box size (r)')
plt.ylabel('lacunarity')
plt.savefig(folder_name + '/repeat_' + str(r) + '_day_' +
str(int(value * time_step)) + '_subset_' + str(subset[0]) + '_' + str(subset[1])
+ '_sect_' + str(sect) + '_lac.png')
plt.close('all')
def section_analysis(folder, file_pattern, quantity, label_type, minr, maxr):
    # Analyses data from 2D slices of 3D data; can be used to plot these quantities as a function of position along the
    # z axis. Different quantities can be plotted with this function by changing "file_pattern" and "quantity".
for r in range(minr, maxr+1):
for filename in os.listdir(str(folder)):
if fnmatch.fnmatch(filename, 'plots_repeat_' + str(r) + '*'):
cell_nos = []
sect = []
for filename2 in os.listdir(str(folder) + '/' + filename):
if fnmatch.fnmatch(filename2, file_pattern):
sect.append(int(filename2.split('_')[1]))
with open(str(folder) + '/' + filename + '/' + filename2) as f:
lines = f.read().splitlines()
cell_nos.append(lines)
cell_nos_df = pd.DataFrame(cell_nos)
if len(sect) > 2:
plt.figure()
for i in range(len(cell_nos_df.columns)):
print(i)
plt.scatter(x=sect, y=cell_nos_df[i].astype(float).values, linestyle='None',
label=label_type.get(i))
plt.xlabel('Distance of sect along z axis')
plt.ylabel(quantity)
plt.legend()
plt.savefig(str(folder) + '/' + filename + '/' + str(quantity))
def section_analysis_fft(folder, file_pattern, quantity, cell_type, label_type, minr, maxr):
    # Analyses the FFT data for a number of 2D slices over 3D data. The FFT has already been computed for these
    # slices and saved to file.
# loop over the number of repeats to be analysed
for r in range(minr, maxr+1):
# find the data from the file name in which the data is stored
for filename in os.listdir(str(folder)):
if fnmatch.fnmatch(filename, 'plots_repeat_' + str(r) + '*'):
fft_stats = []
sect = []
for filename2 in os.listdir(str(folder) + '/' + filename):
if fnmatch.fnmatch(filename2, file_pattern) and filename2.split('_')[5].split('.')[0] != 'full':
sect.append(int(filename2.split('_')[5].split('.')[0]))
with open(str(folder) + '/' + filename + '/' + filename2) as f:
lines = f.read().splitlines()
fft_stats.append(lines)
fft_stats_df = pd.DataFrame(fft_stats)
if len(sect) > 2:
plt.figure()
                    # The indices containing the data we wish to plot; omitted indices denote the cell type
for i in [1, 2, 4, 5, 7, 8, 10, 11]:
plt.scatter(x=sect, y=fft_stats_df[i].astype(float).values, linestyle='None',
                            label=label_type.get(i % 2) + cell_type.get(np.floor(i/3)))
plt.xlabel('Distance of slice along z axis')
plt.ylabel(quantity)
plt.legend()
plt.savefig(str(folder) + '/' + filename + '/' + str(quantity))
def lac_analysis(folder, file_pattern, quantity, minr, maxr, min_box_size):
    # Analyses the lacunarity data for many slices of a 3D data set, taking the mean for each box size
# and plotting this for each cell type. The lacunarity data for these slices has already been computed
# and saved to file.
print('lacunarity analysis')
# loops over the repeats to analyse
for r in range(minr, maxr+1):
# finds the data files with the correct names for the lacunarity data of these repeats
for filename in os.listdir(str(folder)):
if fnmatch.fnmatch(filename, 'plots_repeat_' + str(r) + '*'):
lac_nos = []
for filename2 in os.listdir(str(folder) + '/' + filename):
if fnmatch.fnmatch(filename2, file_pattern) and filename2.split('_')[5].split('.')[0] != 'full':
with open(str(folder) + '/' + filename + '/' + filename2) as f:
lines = f.read().splitlines()[0].split(',')
lac_nos.append([float(i) for i in lines])
# take mean of data
lac_mean = np.mean(lac_nos, axis=0)
# find the correct box size for plotting
box_size = np.arange(min_box_size, len(lac_mean)+min_box_size)
plt.figure()
plt.scatter(box_size, lac_mean)
plt.xlabel('Box size')
plt.ylabel('Mean lacunarity')
plt.savefig(str(folder) + '/' + filename + '/' + str(quantity))
np.savetxt(str(folder) + '/' + filename + '/' + str(quantity) + '.txt',
np.concatenate((box_size, lac_mean), axis=0), fmt="%s")
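
# A minimal, self-contained sketch of the density-curve fit used in density_analysis above,
# run on synthetic data (the parameter values here are arbitrary illustrations).
if __name__ == '__main__':
    x_frac = np.linspace(0.01, 0.8, 50)
    synthetic = fit_func(x_frac, 1.0, 0.2, 0.85) + np.random.normal(0, 0.01, x_frac.size)
    params, _ = optimize.curve_fit(fit_func, x_frac, synthetic, p0=[1, 0.2, 0.9])
    print('recovered fit parameters:', params)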
| true
|
8066bae3c18dc46e24cab355f21fb29a61e2aa7f
|
Python
|
Thxios/ProjectResearch
|
/Gomoku/board.py
|
UTF-8
| 1,564
| 3.4375
| 3
|
[] |
no_license
|
from lib import *
from .validation import ThreeChecker, FourChecker, FiveChecker, SixChecker
class Board:
def __init__(self, board):
self.board = board
def get(self, x, y):
if x < 0 or x >= 15 or y < 0 or y >= 15:
return -1
return self.board[x, y]
def valid(self, x, y, turn) -> bool:
if x < 0 or x >= 15 or y < 0 or y >= 15:
return False
if self.get(x, y) != 0:
return False
lines = self._get_direction_lines(x, y, turn)
if turn == BLACK:
_six = SixChecker.check(lines, turn)
_five = FiveChecker.check(lines, turn)
if _five > _six:
print('BLACK win')
return True
if _six:
return False
if ThreeChecker.check(lines, turn) >= 2:
return False
if FourChecker.check(lines, turn) >= 2:
return False
elif turn == WHITE:
if FiveChecker.check(lines, turn):
print('WHITE win')
return True
return True
def put(self, x, y, turn):
# if self.valid(x, y, turn):
# self._board[x, y] = turn
self.board[x, y] = turn
def _get_direction_lines(self, x, y, put=None):
origin = vector(x, y)
lines = np.array([[self.get(*(i * direction + origin)) for i in range(-4, 5)] for direction in directions])
if put is not None:
for line in lines:
line[4] = put
return lines
| true
|
94fe41d5aa884df4e241ed926825b993b19d6001
|
Python
|
liangjinhao/Web_Spider_Practice_Code
|
/MOOC北京理工大学爬虫/01_requests/05_IP地址归属地的自动查询.py
|
UTF-8
| 314
| 2.609375
| 3
|
[] |
no_license
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# author: Fangyang time:2017/12/20
import requests
url = 'http://www.ip138.com/ips138.asp?ip='
try:
r = requests.get(url+'202.204.80.112')
r.raise_for_status()
r.encoding = r.apparent_encoding
print(r.text[-500:])
except:
    print('Crawl failed')  # original message: '爬取失败'
| true
|
397a45ac6acddb6c59e3a292805fbcbfb1d7b2b0
|
Python
|
hyunwoo-song/TOT
|
/algorithm/day16/twoint.py
|
UTF-8
| 674
| 2.625
| 3
|
[] |
no_license
|
import sys
sys.stdin = open('twoint.txt', 'r')
T= int(input())
for t in range(1, T+1):
N, M = map(int, input().split())
A= list(map(int, input().split()))
B= list(map(int, input().split()))
Result=[]
if len(A) > len(B):
while len(A) >= len(B):
result = 0
for i in range(len(B)):
result += A[i]*B[i]
Result.append(result)
A.pop(0)
else:
while len(B) >= len(A):
result = 0
for i in range(len(A)):
result += A[i]*B[i]
Result.append(result)
B.pop(0)
print('#{} {}'.format(t,max(Result)))
| true
|
cf7e58a1c1986bf472cc4f1311d2458959038f30
|
Python
|
malihasameen/1mwtt-toy-problems
|
/toy-problem-003.py
|
UTF-8
| 1,270
| 3.46875
| 3
|
[] |
no_license
|
"""
* http://www.pythonchallenge.com/pc/def/0.html
* Python Challenge Level 0
"""
# power operator
print (2**38)
# power function
print(pow(2,38))
# loop
n = 1
for i in range(38):
n *= 2
print(n)
# shift
print(1 << 38)
"""
* http://www.pythonchallenge.com/pc/def/map.html
* Python Challenge Level 1
"""
raw = "g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj."
# beginner
print("Beginner Solution")
result = ""
for c in raw:
if c.isalpha():
result += chr(((ord(c)+2) - ord('a')) % 26 + ord('a'))
else:
result += c
print(result)
# advanced
print("Advance Solution")
result = "".join([chr(((ord(c)+2) - ord('a')) % 26 + ord('a')) if c.isalpha() else c for c in raw])
print(result)
# solution with built-in function
print("Solution with Built-in Function")
table = str.maketrans("abcdefghijklmnopqrstuvwxyz","cdefghijklmnopqrstuvwxyzab")
print(raw.translate(table))
# solution using dict and zip
inputtable = "abcdefghijklmnopqrstuvwxyz,. '()"
outputtable = "cdefghijklmnopqrstuvwxyzab,. '()"
result = "".join(dict(zip(inputtable,outputtable))[c] for c in raw)
print(result)
| true
|
84c9584a8dd9af400b6de09f064e1acfe7fe6825
|
Python
|
lefterisKl/StockDataCrawler
|
/DailyCrawler.py
|
UTF-8
| 2,807
| 2.875
| 3
|
[] |
no_license
|
import requests
import time
from bs4 import BeautifulSoup
import datetime
import time
website_url = "https://finance.yahoo.com"
sector_url_prefix = "https://finance.yahoo.com/sector/"
sector_url_suffix = "?offset=0&count=100"
sectors = ["financial","healthcare","services","utilities","industrial_goods","basic_materials","conglomerates",
"consumer_goods","technology"]
sector_url = {sector:(sector_url_prefix+sector+sector_url_suffix) for sector in sectors }
#print(sector_url['financial'])
data = []
f = open("daily_data.csv", "w")
f.write("Ticker,Date,Close,Volume\n")
f.close()
Date = datetime.datetime.now().strftime ("%Y-%m-%d")
print( "Start crawling at:",time.ctime())
start_time = time.time()
for sector,sector_url in sector_url.items():
print("Crawling sector",sector,".")
offset = 0
next_url = sector_url
sector_data = []
while offset < 100:
page = requests.get(next_url)
soup = BeautifulSoup(page.content, 'html.parser')
urls = soup.find_all(lambda tag: tag.name == 'a' and
tag.get('class') == ['Fw(b)'])
links = { url.text : website_url + url.get('href') for url in urls}
if(len(links)==0):
break
for ticker, ticker_link in links.items():
ticker_page = requests.get(ticker_link)
ticker_soup = BeautifulSoup(ticker_page.content, 'html.parser')
ticker_data = ticker_soup.find_all(lambda tag: tag.name == 'td' and
tag.get('class') ==["Ta(end)", "Fw(b)", "Lh(14px)"] )
variables = []
for x in ticker_data:
variable = x.get('data-test')
if str(x).find("span") == -1:
value = x.text
else:
value = list( x.children)[0].text
variables.append((variable,value))
variables = dict(variables)
print("\t crawling data for "+ str(ticker))
sector_data.append((ticker,variables))
offset = offset + 100
#sector_url_prefix_predifined = "https://finance.yahoo.com/sector/predifined/"
sector_url_suffix_predifined = "?offset=" + str(offset) + "&count=100"
next_url = sector_url_prefix + sector + sector_url_suffix_predifined
f = open("daily_data2.csv","a")
for ticker_data in sector_data:
f.write(ticker_data[0].lower() +","+Date+","+ ticker_data[1]["PREV_CLOSE-value"].replace(",","") +
","+ ticker_data[1]["TD_VOLUME-value"].replace(",","") + "\n")
f.close()
break
#data.append((sector,sector_data))
#time.sleep(1)
# your code
print("End crawling at",time.ctime())
print("Elapsed time:", time.time() - start_time)
| true
|
bb0478f8c895e4bb0ca7f13afe369cea21a238eb
|
Python
|
koneb71/bitcoin-twitter-sentiment-analysis
|
/create_dataset.py
|
UTF-8
| 1,208
| 3.03125
| 3
|
[] |
no_license
|
import csv
file = 'bitcointweets.csv'
neg_tweet = []
pos_tweet = []
neutral_tweet = []
with open(file, encoding="utf8") as fh:
rd = csv.DictReader(fh, delimiter=',')
for row in rd:
if row['sentiment'] == "positive":
pos_tweet.append(row)
if row['sentiment'] == "negative":
neg_tweet.append(row)
if row['sentiment'] == "neutral":
neutral_tweet.append(row)
with open('datasets/pos_tweet.csv', encoding="utf8", mode='w') as pos_file:
fieldnames = ['tweet', 'sentiment']
writer = csv.DictWriter(pos_file, fieldnames=fieldnames)
writer.writeheader()
for pos in pos_tweet:
writer.writerow(pos)
with open('datasets/neg_tweet.csv', encoding="utf8", mode='w') as neg_file:
fieldnames = ['tweet', 'sentiment']
writer = csv.DictWriter(neg_file, fieldnames=fieldnames)
writer.writeheader()
for neg in neg_tweet:
writer.writerow(neg)
with open('datasets/neutral_tweet.csv', encoding="utf8", mode='w') as neu_file:
fieldnames = ['tweet', 'sentiment']
writer = csv.DictWriter(neu_file, fieldnames=fieldnames)
writer.writeheader()
for neu in neutral_tweet:
writer.writerow(neu)
| true
|
9b3d8d4c0c853172064707d260f07c030fed8c75
|
Python
|
iankigen/data_stuctures_and_algorithms
|
/data_structures/arrays.py
|
UTF-8
| 676
| 3.625
| 4
|
[] |
no_license
|
from array import array
# array_name = array(typecode, [Initializers])
"""
Typecode Value
b Represents signed integer of size 1 byte/td>
B Represents unsigned integer of size 1 bytetest_array = array('i', [1, 2, 3])
c Represents character of size 1 byte
i Represents signed integer of size 2 bytes# Insertion Operation
I Represents unsigned integer of size 2 bytes
f Represents floating point of size 4 bytestest_array.insert(1, 100)
d Represents floating point of size 8 bytes
"""
test_array = array('i', [1, 2, 3, 10, 20])
# Deletion Operation
test_array.remove(2)
# Update Operation
test_array[1] = 200
# Append Operation
test_array.append(1000)
print(test_array)
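
# The typecode and per-item size of the array above can be inspected directly
# (itemsize is platform-dependent; commonly 4 bytes for 'i').
print(test_array.typecode, test_array.itemsize)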
| true
|
82fc793180bbf6bb206ca25bc55cb4a85b421ffb
|
Python
|
white1107/Python_for_Competition
|
/WaterBlue/36_Knapsack_Problem.py
|
UTF-8
| 509
| 2.765625
| 3
|
[] |
no_license
|
def get_input(inp):
li = inp.split("\n")
def inner():
return li.pop(0)
return inner
INPUT = """2 20
5 9
4 10
"""
input = get_input(INPUT)
#######
N,W = map(int,input().split())
dp = [[0]*(W+1) for _ in range(N+1)]
L = []
for i in range(N):
ta,tb = map(int,input().split())
L.append([ta,tb])
# Note: dp[i+1][w - weight] lets each item be taken repeatedly (unbounded knapsack);
# a 0/1 version would use dp[i][w - weight] instead (see the sketch at the end of this file).
for i in range(N):
    for w in range(W+1):
        if w - L[i][1] >= 0:
            dp[i+1][w] = max(dp[i][w], dp[i+1][w-L[i][1]] + L[i][0])
        else:
            dp[i+1][w] = dp[i][w]
print(dp[-1][-1])
print(dp)
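
# A minimal 0/1-knapsack sketch over the same N, W, L, for comparison with the
# unbounded recurrence above (here each item can be used at most once).
dp01 = [[0]*(W+1) for _ in range(N+1)]
for i in range(N):
    for w in range(W+1):
        if w - L[i][1] >= 0:
            dp01[i+1][w] = max(dp01[i][w], dp01[i][w-L[i][1]] + L[i][0])
        else:
            dp01[i+1][w] = dp01[i][w]
print(dp01[-1][-1])  # 9 for the sample input (both items), versus 10 above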
| true
|
2f1b616e4c5a15a862307f544ca64de4aa4a5017
|
Python
|
L200170178/prak_ASD_E
|
/Modul 6/mergeSort.py
|
UTF-8
| 1,017
| 3.265625
| 3
|
[] |
no_license
|
class Mahasiswa(object):
def __init__ (self,nim) :
self.nim = nim
nim1= "L200170123"
nim2= "L200170124"
nim3= "L200170125"
nim4= "L200170126"
nim5= "L200170127"
Daftar = [nim1,nim2,nim3,nim4,nim5]
def mergeSort(A):
if len(A) > 1 :
mid = len(A) // 2
separuhKiri = A[:mid]
separuhKanan = A[mid:]
mergeSort(separuhKiri)
mergeSort(separuhKanan)
i = 0 ; j=0 ; k=0
while i < len (separuhKiri) and j < len(separuhKanan):
if separuhKiri[i] < separuhKanan[j] :
A[k] = separuhKiri[i]
i = i + 1
else :
A[k] = separuhKanan[j]
j = j + 1
k = k + 1
while i < len(separuhKiri):
A[k] = separuhKiri[i]
i = i + 1
k = k + 1
while j < len(separuhKanan):
A[k] = separuhKanan[j]
j = j+1
k = k+1
mergeSort(Daftar)
print(Daftar)
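
# Quick sanity check: the merge sort above should agree with Python's built-in sorted().
assert Daftar == sorted([nim1, nim2, nim3, nim4, nim5])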
| true
|
0aa959b9c581c1f72135878b9250c297ae5d4e9a
|
Python
|
emilberzins/RTR105
|
/lab3.py.py
|
UTF-8
| 492
| 3.78125
| 4
|
[] |
no_license
|
from math import sin, fabs, sqrt
from time import sleep
def f(x):
return sin(sqrt(x))*sin(sqrt(x))
a = 1
b = 5
funa = f(a)
funb = f(b)
if (funa * funb > 0.0):
print("Dotajā intervālā [%s, %s] sakņu nav"%(a,b))
sleep(1); exit()
else:
print("Dotajā intervālā sakne(s) ir!")
deltax = 0.0001
while ( fabs(b-a) > deltax ):
x = (a+b)/2; funx = f(x)
if ( funa*funx < 0. ):
b = x
else:
a = x
print("Sakne ir:", x)
| true
|
6c3bf1a8191f45f8e32e6b6f1136d21c34c755f4
|
Python
|
itsolutionscorp/AutoStyle-Clustering
|
/all_data/exercism_data/python/hamming/d47703431c9e4c6ca551ab7443e1afd2.py
|
UTF-8
| 94
| 3.109375
| 3
|
[] |
no_license
|
def distance(s1, s2):
hamming = sum(x != y for (x, y) in zip(s1, s2))
return hamming
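
# A couple of quick sanity checks for the Hamming distance above.
if __name__ == "__main__":
    assert distance("GGACG", "GGTCG") == 1
    assert distance("ACT", "GGA") == 3
    print("hamming distance checks passed")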
| true
|
c485511454e429730254ae8117158cab5c3eaf50
|
Python
|
nicogab34/AudioMNIST
|
/recording_scripts/adjustCuts.py
|
UTF-8
| 8,656
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from scipy.io import wavfile
import os
import matplotlib.pyplot as plt
import pandas as pd
import scipy.signal
import glob
from matplotlib.lines import Line2D
import scipy.spatial.distance
import argparse
class DragHandler(object):
""" A simple class to handle Drag n Drop.
    This is a simple example, adapted here to work with Line2D marker objects.
"""
def __init__(self, figure=None) :
""" Create a new drag handler and connect it to the figure's event system.
If the figure handler is not given, the current figure is used instead
"""
if figure is None : figure = plt.gcf()
        # simple attribute to store the dragged artist
self.dragged = None
# Connect events and callbacks
figure.canvas.mpl_connect("pick_event", self.on_pick_event)
figure.canvas.mpl_connect("button_release_event", self.on_release_event)
def on_pick_event(self, event):
" Store which text object was picked and were the pick event occurs."
if isinstance(event.artist, Line2D):
self.dragged = event.artist
self.pick_pos = (event.mouseevent.xdata, event.mouseevent.ydata)
return True
def on_release_event(self, event):
" Update text position and redraw"
if self.dragged is not None :
orig_dragged = np.copy(self.dragged.get_xydata())
clickIdx = self.pos2ind(self.dragged, self.pick_pos)
old_pos = self.dragged.get_xydata()[clickIdx]
new_pos = (old_pos[1] + event.xdata - self.pick_pos[1], 0)
orig_dragged[clickIdx] = np.array(new_pos)
self.dragged.set_data(orig_dragged.T)
global all_markers
all_markers = self.dragged.get_xydata()[:,0]
self.dragged = None
plt.draw()
return True
def pos2ind(self, dragged, pick_pos):
alldists = scipy.spatial.distance.cdist(dragged.get_xydata(), np.atleast_2d(pick_pos))
return np.argmin(alldists)
def butter_bandpass(lowcut, highcut, fs, order=7):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = scipy.signal.butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=7):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = scipy.signal.filtfilt(b, a, data)
return y
def run(src, dst):
"""
Function to semi-automatically cut series of audio recordings into single
recordings. Automatically determined cut positions will be displayed and
can be corrected via drag&drop. Close the visualization to apply the cuts.
Important: cut data is still "raw", the bandpass filter and other means are
only applied to find cut positions. Cut data will be saved in single files,
the original audio series recordings will not be deleted.
Parameters:
-----------
src: string
Source directory containing the audio series recordings.
dst: string
Destination directory where to store cut files.
"""
# hyperparameters for finding cut positions.
lowcut = 100
highcut = 10000
threshold = 0.1
loffset = 2400
roffset = 2400
expectedNum = 10
if not os.path.exists(dst):
os.makedirs(dst)
filenames = glob.glob(os.path.join(src, "*.wav"))
for fileIdx, filename in enumerate(filenames):
# infer digits and repetition from file name convention
digitSeries = filename.rstrip('.wav').split('__')[1:]
# load sampling frequency and recorded data
thisFs, thisData = wavfile.read(filename)
# find single articulations
y = butter_bandpass_filter(thisData, lowcut, highcut, thisFs, order=7)
y = y / np.percentile(abs(y), 99)
        # pd.rolling_max was removed in newer pandas; the rolling() API below is equivalent.
        rolledMean = pd.Series(np.abs(y)).rolling(window=int(1*4800), center=True).max().values
rolledMean[np.isnan(rolledMean)] = 0
idcs = np.where(rolledMean > 0.1)[0]
stopIdcs = np.concatenate([idcs[np.where(np.diff(idcs) > 1)[0]], [idcs[-1]]])
revIdcs = idcs[::-1]
startIdcs = np.concatenate([[revIdcs[-1]], revIdcs[np.where(np.diff(revIdcs) < -1)[0]][::-1]])
if np.any((stopIdcs - startIdcs) > 48000):
print("Found sample with more than one second duration")
assert(len(startIdcs) == len(stopIdcs))
if len(startIdcs) < expectedNum:
print("file {}: Found only {} candidate samples".format(fileIdx, len(startIdcs)))
# appending artificial markers for drag&drop later on.
tmp1 = np.arange(expectedNum)
tmp1[0:len(startIdcs)] = startIdcs
startIdcs = tmp1
tmp2 = np.arange(expectedNum)
tmp2[0:len(stopIdcs)] = stopIdcs
stopIdcs = tmp2
print("Corrected to {} startIdcs".format(len(startIdcs)))
if len(startIdcs)>expectedNum:
print("file {}: Found more than 10 possible samples. Attempting to correct selection.".format(fileIdx))
# this is based on some experience, but does not always work
absSums = []
for start, stop in zip(startIdcs, stopIdcs):
absSums.append(np.sum(abs(y[start:stop])))
while len(startIdcs) > expectedNum:
discardIdx = np.argmin(absSums)
d1 = startIdcs[discardIdx] - stopIdcs[discardIdx-1]
d2 = stopIdcs[discardIdx] - startIdcs[discardIdx]
if discardIdx >= 1:
newd = startIdcs[discardIdx - 1] - stopIdcs[discardIdx]
else:
newd = None
if d2 < 3.5 * 4800 and d1 < 1.5*4800 and discardIdx != 0:
# combine two selections: important to include the "t" at the end of "eigh-t"
startIdcs = startIdcs[np.arange(0,len(startIdcs)) != discardIdx]
stopIdcs = stopIdcs[np.arange(0,len(stopIdcs)) != (discardIdx - 1)]
else:
# discard a selection
startIdcs = startIdcs[np.arange(0,len(startIdcs)) != discardIdx]
stopIdcs = stopIdcs[np.arange(0,len(stopIdcs)) != discardIdx]
absSums.pop(discardIdx)
fig, ax = plt.subplots(2,1,figsize = (20,5))
ax[0].plot(thisData, 'k')
ax[1].plot(y, 'k')
ax[1].plot(rolledMean, color = 'mediumvioletred')
for digitIdx, (start, stop) in enumerate(zip(startIdcs, stopIdcs)):
# plot single digit recording according to current markers
d,r = digitSeries[digitIdx].split('_')
ax[0].plot(range(start-loffset,stop+roffset),thisData[start-loffset:stop+roffset])
ax[1].plot(range(start-loffset,stop+roffset), y[start-loffset:stop+roffset])
ax[1].text(start + (stop-start)/2, 1.3, str(d), fontsize = 15)
if digitIdx == expectedNum-1:
all_markers = np.zeros((startIdcs.size + stopIdcs.size))
all_markers[0::2] = startIdcs - loffset
all_markers[1::2] = stopIdcs + roffset
ax[0].plot(all_markers, np.zeros_like(all_markers), '.', ms = 10, picker = 10, c = 'indigo')
ax[0].set_xlim([0, len(thisData)])
ax[1].set_xlim([0, len(thisData)])
ax[0].set_title("{}, len = {}".format(digitSeries[:], len(thisData)))
dragh = DragHandler()
plt.show()
all_markers = sorted(np.round(all_markers).astype(int))
plt.figure(figsize = (20,10))
for digitIdx, (markStart, markStop) in enumerate(zip(all_markers[0::2], all_markers[1::2])):
# infer digit, repetition and subject identifier
d, r = digitSeries[digitIdx].split('_')
subj_name = os.path.split(filename)[-1].split("__")[0]
# write out files
print("writing to {}".format(os.path.join(dst, d + '_' + subj_name + '_' + r + '.wav')))
wavfile.write(os.path.join(dst, d + '_' + subj_name + '_' + r + '.wav'), 48000, thisData[markStart: markStop])
# visualize cut data
plt.plot(range(markStart, markStop), thisData[markStart:markStop])
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument('-src', default=".", help='Source directory where recorded audio series are stored.')
parser.add_argument('-dst', default="./cut", help='Destination directory where to store cut audio files.')
args = parser.parse_args()
run(src=args.src, dst=args.dst)
| true
|
e0649ecb368d01a5cc83ca59c2dc24b224d26015
|
Python
|
paulan94/CTCIPaul
|
/LL_node.py
|
UTF-8
| 196
| 3.203125
| 3
|
[] |
no_license
|
class LL_node():
def __init__(self,val):
self.val = val
self.next = None
head = LL_node(2)
head.next = LL_node(5)
head.next.next = LL_node(1)
head.next.next.next = LL_node(2)
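
# A small traversal sketch: walk the list built above and print its values (2, 5, 1, 2).
node = head
while node is not None:
    print(node.val)
    node = node.next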
| true
|
8ca1b47967c5d97767146dc66cf9e1895fc6abd2
|
Python
|
tomasjames/Exoplanet-Project
|
/Data/WASP-22b/aperture.py
|
UTF-8
| 1,460
| 3.109375
| 3
|
[] |
no_license
|
'''
Project: Exoplanetary Detection and Characterisation
Supervisor: Dr. Edward L Gomez
Author: Tomas James
Script: Aperture Determination for WASP-22 b
'''
from numpy import *
from matplotlib.pyplot import *
# Read in data
aperture = genfromtxt('aperture.txt', dtype = 'float')
aperture2 = genfromtxt('aperture2.txt', dtype = 'float')
aperture3 = genfromtxt('aperture3.txt', dtype = 'float')
# Populate arrays with values
signal = aperture[where(aperture==1)[0],6]
background = aperture[where(aperture==2)[0],6]
radius = aperture[where(aperture==1)[0],8]
signal2 = aperture2[where(aperture2==1)[0],6]
background2 = aperture2[where(aperture2==2)[0],6]
radius2 = aperture2[where(aperture2==1)[0],8]
signal3 = aperture3[where(aperture3==1)[0],6]
background3 = aperture3[where(aperture3==2)[0],6]
radius3 = aperture3[where(aperture3==1)[0],8]
# Determine measured flux
flux = zeros(len(signal))
flux2 = zeros(len(signal))
flux3 = zeros(len(signal))
for i in range(0, len(flux)):
flux[i] = signal[i] - background[i]
flux2[i] = signal2[i] - background2[i]
flux3[i] = signal3[i] - background3[i]
# Plot Data
figure(1)
plot(radius, flux, 'bx', label='Frame 2')
plot(radius, flux2, 'rx', label = 'Frame 50')
plot(radius, flux3, 'gx', label = 'Frame 110')
xlabel('Radius of Aperture/Pixels')
ylabel('Background Subtracted Signal')
title('Signal as a Function of Aperture Radius to Determine \n Optimum Aperture Size')
legend(loc='best')
savefig('aperture.png')
| true
|
8a62c1a828ddc4c8150db5f27e14165c52c1ba68
|
Python
|
astrosilverio/PokeDB
|
/pokedb/storage/__init__.py
|
UTF-8
| 1,062
| 2.6875
| 3
|
[] |
no_license
|
"""
API to access layer:
`get`
`write`
`sync`
"""
import os
from pokedb.storage.pager import Pager
from pokedb.storage.serializer import serialize, deserialize
DBFILE = os.getenv('DBFILE', 'test.db')
_pager = None
# In-memory storage
_storage = dict()
_temp = dict()
_table_schema = {
'main': ('value',),
}
def start():
global _pager
_pager = Pager(DBFILE)
def get_row(txn_id, table, row_id, page_num):
raw_data = _storage.get(row_id, None)
updated_value = _temp[txn_id].get(row_id, None)
if updated_value:
raw_data = updated_value
if raw_data:
schema = _table_schema.get(table)
data = deserialize(schema, raw_data)
else:
data = None
return {row_id: data}
def write_row(txn_id, table, row_id, data, page_num):
schema = _table_schema.get(table)
raw_data = serialize(schema, data)
_temp[txn_id][row_id] = raw_data
return page_num
def sync(page_num):
_pager.flush_page(page_num)
return page_num
def stop():
if _pager:
return _pager.db_close()
| true
|
ba438dfccfc5e026f76af842655f8fabfa5c6f58
|
Python
|
boyan13/hackbg-dungeons-and-pythons
|
/Hero.py
|
UTF-8
| 2,343
| 3.0625
| 3
|
[] |
no_license
|
from Weapon import Weapon
from Spell import Spell
class Hero:
def __init__(self, name, title, health, mana, mana_regeneration_rate):
self.name = name
self.title = title
self.health = health
self.MAX_HEALTH = health
self.mana = mana
self.MAX_MANA = mana
self.mana_regeneration_rate = mana_regeneration_rate
self.spell = None
self.weapon = None
@staticmethod
def validate_init(name, title, health, mana, mana_rate):
strings = type(name) is str and type(title) is str
health = type(health) is int and health > 0
mana = type(mana) is int and mana > 0
        mana_regeneration_rate = type(mana_rate) is int and mana_rate > 0
return strings and health and mana and mana_regeneration_rate
def known_as(self):
return "{} the {}".format(self.name, self.title)
def get_health(self):
if self.health < 0:
self.health = 0
return self.health
def get_mana(self):
return self.mana
def is_alive(self):
if self.health > 0:
return True
return False
def can_cast(self):
if self.spell is None:
return False
if self.mana >= self.spell.mana_cost:
return True
return False
    def take_damage(self, damage_points):
        self.health -= damage_points
def take_healing(self, healing_points):
if self.is_alive():
self.health += healing_points
if self.health > self.MAX_HEALTH:
self.health = self.MAX_HEALTH
return True
return False
def take_mana(self, mana_points=0):
if mana_points == 0:
mana_points += self.mana_regeneration_rate
self.mana += mana_points
if self.mana > self.MAX_MANA:
self.mana = self.MAX_MANA
def equip(self, weapon):
self.weapon = weapon
def learn(self, spell):
self.spell = spell
def attack(self, by=None):
if by == "spell" or by is None:
if self.can_cast():
self.mana -= self.spell.mana_cost
return self.spell.damage
if by == "weapon" or by is None:
if self.weapon is not None:
return self.weapon.damage
return 0
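# Hypothetical usage sketch (not part of the original file; the hero's stats below
# are made-up values). Weapon and Spell have their own constructors elsewhere in
# this repo, so they are left out of this quick check.
if __name__ == "__main__":
    hero = Hero("Bron", "Dragonslayer", health=100, mana=50, mana_regeneration_rate=2)
    print(hero.known_as())    # Bron the Dragonslayer
    hero.take_damage(30)
    print(hero.get_health())  # 70
    hero.take_healing(50)     # healing is capped at MAX_HEALTH
    print(hero.get_health())  # 100
    print(hero.attack())      # 0 -- no weapon equipped and no spell learned yet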
| true
|
ff18ebf25d5f4ed44f24ac9ccf73670045bb65c2
|
Python
|
azure1016/MyLeetcodePython
|
/lc56.py
|
UTF-8
| 1,867
| 3.359375
| 3
|
[] |
no_license
|
# Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution:
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
intervals = intervals[:]
#Always think about the weird input
if len(intervals) <= 1:
return intervals
intervals.sort(key = self.sort_by_start)
res = []
i = 0
for i in range(0, len(intervals)): #and intervals[i].start <= intervals[len(intervals) - 1].end:
#you shall process the last element at first, especially when you have i+1 logic
if i == len(intervals) - 1:
res.append(intervals[i])
break
            #if we always care about i+1 rather than modifying i, then our lives are easier. Like a grinding wheel!
            #we know we'd never look back. If not for the sorting, the time complexity would be O(n)
if intervals[i].end >= intervals[i + 1].start:
intervals[i + 1].start = intervals[i].start
if intervals[i].end > intervals[i + 1].end:
intervals[i + 1].end = intervals[i].end
else:
res.append(intervals[i])
return res
    # if i.end < j.start, then it must be that i.end < (j+1).start
def sort_by_start(self, l):
return l.start
def pr(self, li):
for x in li:
print("[" + str(x.start) + ',' + str(x.end) + "],")
if __name__ == '__main__':
test = Solution()
case = [[5,5],[1,1],[5,7],[5,7],[1,1],[3,4],[4,4],[0,1],[5,5],[1,2],[5,5],[0,2]]
#case = [[1,4],[4,5]]
#case = [[1,3], [2,6], [8, 10], [15, 18]]
intervals = []
for i in case:
intervals.append(Interval(i[0], i[1]))
test.pr(test.merge(intervals))
| true
|
b68ba87cdebc26ddb11e70d90f46d6f9fda1613e
|
Python
|
TeamGraphix/graphix
|
/examples/qft_with_tn.py
|
UTF-8
| 2,634
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
Large-scale simulations with tensor network simulator
===================
In this example, we demonstrate simulation of MBQC involving 10k+ nodes.
You can also run this code on your browser with `mybinder.org <https://mybinder.org/>`_ - click the badge below.
.. image:: https://mybinder.org/badge_logo.svg
:target: https://mybinder.org/v2/gh/TeamGraphix/graphix-examples/HEAD?labpath=qft_with_tn.ipynb
Firstly, let us import relevant modules and define the circuit:
"""
# %%
import numpy as np
from graphix import Circuit
import networkx as nx
def cp(circuit, theta, control, target):
circuit.rz(control, theta / 2)
circuit.rz(target, theta / 2)
circuit.cnot(control, target)
circuit.rz(target, -1 * theta / 2)
circuit.cnot(control, target)
def swap(circuit, a, b):
circuit.cnot(a, b)
circuit.cnot(b, a)
circuit.cnot(a, b)
def qft_rotations(circuit, n):
circuit.h(n)
for qubit in range(n + 1, circuit.width):
cp(circuit, np.pi / 2 ** (qubit - n), qubit, n)
def swap_registers(circuit, n):
for qubit in range(n // 2):
swap(circuit, qubit, n - qubit - 1)
return circuit
def qft(circuit, n):
for i in range(n):
qft_rotations(circuit, i)
swap_registers(circuit, n)
# %%
# We will simulate 45-qubit QFT, which requires graph states with more than 10000 nodes.
n = 45
print("{}-qubit QFT".format(n))
circuit = Circuit(n)
for i in range(n):
circuit.h(i)
qft(circuit, n)
# standardize pattern
pattern = circuit.transpile()
pattern.standardize()
pattern.shift_signals()
nodes, edges = pattern.get_graph()
print(f"Number of nodes: {len(nodes)}")
print(f"Number of edges: {len(edges)}")
# %%
# Using efficient graph state simulator `graphix.GraphSim`, we can classically preprocess Pauli measurements.
# We are currently improving the speed of this process by using rust-based graph manipulation backend.
pattern.perform_pauli_measurements()
# %%
# You can easily check that the code below runs without too much load on your computer.
# Also notice that we have not used :meth:`graphix.pattern.Pattern.minimize_space()`,
# which we know reduces the burden on the simulator.
# To specify the TN backend of the simulation, simply provide it as a keyword argument.
# Here we do a very basic check that one of the statevector amplitudes is what it is expected to be:
import time
t1 = time.time()
tn = pattern.simulate_pattern(backend="tensornetwork")
value = tn.get_basis_amplitude(0)
t2 = time.time()
print("amplitude of |00...0> is ", value)
print("1/2^n (true answer) is", 1 / 2**n)
print("approximate execution time in seconds: ", t2 - t1)
| true
|
fcfe66b7946543c18eca891d6de463579cf3c72a
|
Python
|
Jeyabalaganesh/New_Project_23052021
|
/Day 21_diamondinher_test.py
|
UTF-8
| 965
| 3.59375
| 4
|
[] |
no_license
|
class BaseClass:
no_of_base_class = 0
def __init__(self):
print("Executed the Base class")
BaseClass.no_of_base_class += 1
class LeftClass(BaseClass):
no_of_Left_class = 0
def __init__(self):
super().__init__()
print("Executed the Left class")
LeftClass.no_of_Left_class += 1
class RightClass(BaseClass):
no_of_Right_class = 0
def __init__(self):
super().__init__()
print("Executed the Right class")
RightClass.no_of_Right_class += 1
class Subclass(RightClass, LeftClass):
no_of_sub_class = 0
def __init__(self):
super().__init__()
print("Executed the Sub class")
Subclass.no_of_sub_class += 1
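# Note: with Subclass(RightClass, LeftClass) the MRO is
# Subclass -> RightClass -> LeftClass -> BaseClass -> object, so the cooperative
# super().__init__() chain runs BaseClass.__init__ exactly once per construction
# and the prints appear in the order Base, Left, Right, Sub.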
trial = Subclass()
print(trial.no_of_sub_class, trial.no_of_Left_class, trial.no_of_Right_class, trial.no_of_base_class)
trial.__init__()
print(trial.no_of_sub_class, trial.no_of_Left_class, trial.no_of_Right_class, trial.no_of_base_class)
| true
|
4f6b111532a432d8161e3e38042dfedb4fff05ad
|
Python
|
apri-me/python_class00
|
/session10/oop1.py
|
UTF-8
| 90
| 2.71875
| 3
|
[] |
no_license
|
from assignments import Radmehr1
squar = Radmehr1.Square(5, 'Gray')
print(squar.area())
| true
|
ddfdf465d37855312a92bd488d915b36e53a5618
|
Python
|
netsus/Rosalind
|
/HAMM.py
|
UTF-8
| 333
| 3.171875
| 3
|
[] |
no_license
|
'''
Problem: read two sequences of equal length and print how many bases differ between them.
Algorithm: iterate over both sequences and compare; if the bases differ, cnt += 1 '''
f = open('rosalind_hamm.txt','r')
fl = f.read().split('\n')
cnt=0
seq1,seq2=fl[:-1]
for i in range(len(fl[0])):
if seq1[i]!=seq2[i]:
cnt+=1
print(cnt)
| true
|
e03fdc6beb3520dfa9e29dcac6e56b8c4aa199a6
|
Python
|
krammandrea/Mandelbrot
|
/coloralg.py
|
UTF-8
| 6,942
| 3.125
| 3
|
[] |
no_license
|
import math,testing
class ColorAlg():
def __init__(self, colorscheme = ["000000","338822","883388"]):
"""
initializes algorithms in advance for faster computing time
"""
self.initcolorscheme(colorscheme)
def initcolorscheme(self,colorscheme):
"""
        converts the colorscheme from a list of hex color strings to an RGB matrix and precomputes the spline matrices
"""
#convert the colorscheme from list of strings to rgb matrix
self.colorscheme = [[0.0 for x in range(3)] for y in range(len(colorscheme))]
for color in range(len(colorscheme)):
intcolor = int(colorscheme[color],16)
#convert to rgb in range [0,255]
self.colorscheme[color][0] = float((intcolor&0xff0000)>>16)
self.colorscheme[color][1] = float((intcolor&0x00ff00)>>8)
self.colorscheme[color][2] = float((intcolor&0x0000ff)>>0)
self.initcatmullrom()
#show the current colorscheme
testing.test_catmullrom(self,colorscheme)
testing.test_straightconnection(self,colorscheme)
def initcatmullrom(self):
"""
precalculate all possible matrixes [P(i-1),P(i),P(i+1),P(i+2)]*Mcr for
the current colorscheme
"""
self.PtimesMcr = [[[0.0 for x in range(4)]for y in range(3)] for z in range(len(self.colorscheme))]
tau = 0.5 #curve sharpness of the spline
Mcr =[[0.0,-1.0*tau,2.0*tau,-1.0*tau],[2.0*tau,0.0,-5.0*tau,3.0*tau],[0.0,1.0*tau,4.0*tau,-3.0*tau],[0.0,0.0,-1.0*tau,1.0*tau]]
for x in range(len(self.colorscheme)):
P = [self.colorscheme[-1+x],self.colorscheme[x],self.colorscheme[(x+1)%len(self.colorscheme)],self.colorscheme[(x+2)%len(self.colorscheme)]]
for y in range(len(P[0])):
for z in range(len(Mcr[0])):
self.PtimesMcr[x][y][z] = sum(list(P[j][y] * Mcr[j][z] for j in range(len(P)) ))
def escapetime(self,iteration,z):
"""
coloring represents the number of iterations before z escapes
"""
colorIndikator = iteration
return (colorIndikator, len(self.colorscheme))
def calculateangle(self,iteration, z):
"""
coloring represents the angle of the escaped z
"""
angle = math.asin(z.real/abs(z))
colorIndikator = angle
return (colorIndikator, 2*math.pi)
def distanceestimator1(self, iteration, z,prevz,escapelimit):
"""normalized iteration count, details in http://math.unipa.it/~grim/Jbarrallo.PDF
"""
colorIndikator = iteration + 1 - ((math.log10(math.log10(abs(z))))/math.log10(2))
return (colorIndikator, len(self.colorscheme))
def distanceestimator2(self, iteration, z):
"""
continuous potential algorithm, see http://math.unipa.it/~grim/Jbarrallo.PDF
"""
colorIndikator = math.log10(abs(z))/(2**math.log10(iteration))
return (colorIndikator, len(self.colorscheme))
def distanceestimator3(self, iteration, z):
"""
distance estimation algorithm, see http://math.unipa.it/~grim/Jbarrallo.PDF
"""
colorIndikator = 2*math.log10(abs(z))
return (colorIndikator, len(self.colorscheme))
def distanceestimator4(self, iteration, z):
"""
e to the power of (-|z|) smoothing, see http://math.unipa.it/~grim/Jbarrallo.PDF
"""
colorIndikator = math.exp(-(abs(z)))
return (colorIndikator,0.13)
def distanceestimator5(self, iteration, z, escapelimit):
"""
coloring represents the distance to the escapelimit
"""
colorIndikator = abs(z) - escapelimit
return (colorIndikator, 3.0)
def distanceestimator6(self,iteration, z, prevz, escapelimit):
"""
matthias algorithm, coloring represents the number of iterations plus the
percentage of the distance to the escapelimit
"""
colorIndikator =iteration + 1 - (abs(z)-escapelimit)/(abs(z)-abs(prevz))
return (colorIndikator, len(self.colorscheme))
def catmullrom(self, colorIndikator):
#TODO
"""
creates the colorscheme(size:1000) using the given cornerpoints and
connecting them with catmullrom splines
p(s) = [P(i-1),P(i),P(i+1),P(i+2)]*M(cr)*[1 t^2 t^3 t^4]
"""
assignedcolor = [0.0 for rgb in range(3)]
#choose the precalculated matrix [P(i-1),P(i),P(i+1),P(i+2)]M(cr) for the current section, which the color is roughly in
currentcolor = int(colorIndikator%len(self.colorscheme))
partial_colInd = colorIndikator%1 #using %1 causes minor rounding errors
Tvector = [1, partial_colInd**2, partial_colInd**3, partial_colInd**4]
#allowed range for Tvector [0,1]
for rgb in range(3):
assignedcolor[rgb] = sum(self.PtimesMcr[currentcolor][rgb][j] * Tvector[j] for j in range(4))
return self.convertToString(assignedcolor)
def clampoff(self,(colorIndikator,normalizdTo)):
#TODO use colorscheme once and assigned maximum values to any over the border value
#TODO find a way to integrate this into the distanceestimators
pass
def straightconnection(self, (colorIndikator, normalizedTo)):
#TODO
"""
creates the colorscheme using the given cornerpoints and connecting them
with straight lines
"""
#cornerpoints of the colorscheme connected with straight lines
assignedcolor = [0.0 for rgb in range(3)]
#currentcolor = int(colorIndikator%len(self.colorscheme))
#partial_colInd = colorIndikator%1
#within a picked random normalizedTo-Value the colorscheme repeats itself once
#cut off so only values in between 0 and normalizedTo
cutoff= colorIndikator%(normalizedTo)
#find out in which area of the colorscheme(currentcolor) and how far into it(rest) the current value is
rest = cutoff%(normalizedTo/float(len(self.colorscheme)))
currentcolor = int((cutoff-rest)/(normalizedTo/float(len(self.colorscheme)))) #convert to int to catch rounding errors
#stretch the rest to range [0,1]
partial_colInd = rest * float(len(self.colorscheme))/normalizedTo
for rgb in range(3):
assignedcolor[rgb] = self.colorscheme[currentcolor][rgb] + (self.colorscheme[(currentcolor+1)%len(self.colorscheme)][rgb] - self.colorscheme[currentcolor][rgb]) * partial_colInd
return self.convertToString(assignedcolor)
def convertToString(self,rgbFloatColor):
"""
converts a RGB color from float to int, while checking for out of bound values
[0,255], then to a hexadezimal string
"""
intcolor = [0 for x in range(3)]
for color in range(len(rgbFloatColor)):
intcolor[color] = int(rgbFloatColor[color])
if intcolor[color]<0:
intcolor[color] = 0
elif intcolor[color]>255:
intcolor[color] = 255
else:
pass
#combine to one hexnumber and convert to string in the '02DE3F'format
hexStringColor = '{:02X}{:02X}{:02X}'.format(*intcolor)
return hexStringColor
| true
|
bc21e781fad2ade9a6d4cfc88431a2e4e45c7fdb
|
Python
|
reata/Cryptography
|
/week5_discrete_log.py
|
UTF-8
| 3,077
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# python_version: 3.4.0
__author__ = "Ryan Hu"
__date__ = "2015-2-16"
"""
Given prime p, let g and h be integers in [0, p-1] such that h = g ** x (mod p), where 1 <= x <= 2 ** 40. Our goal is to find
x. More precisely, the input to this program is P, G, H and the output is x. The trivial algorithm for this program is
to try all 2 ** 40 possible values of x until the correct one is found, which runs in time 2 ** 40. In this project, we
will implement an algorithm that runs in time roughly 2 ** 20 using a meet in the middle attack.
gmpy2 package is required to perform multiple-precision integer arithmetic
"""
from gmpy2 import mpz
from gmpy2 import divm
from gmpy2 import powmod
import doctest
import time
# Global variables for the unit test; use the gmpy2 mpz type that supports multiple-precision integer arithmetic
P = mpz(13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084171)
G = mpz(11717829880366207009516117596335367088558084999998952205599979459063929499736583746670572176471460312928594829675428279466566527115212748467589894601965568)
H = mpz(3239475104050450443565264378728065788649097520952449527834792452971981976143292558073856937958553180532878928001494706097394108577585732452307673444020333)
def discrete_log(p, g, h, max_x=2 ** 40):
"""
(mpz, mpz, mpz) -> mpz
    Given prime p and integers g, h in [0, p-1] which fit the equation h = g ** x (mod p), where 1 <= x <= 2 ** 40.
Return the discrete log, i.e. x
    Here this program uses a trick to avoid brute-force computation of all max_x possibilities. Instead the time consumed
    is O(max_x ** 0.5). Let b equal max_x ** 0.5 and x = x0 * b + x1 where x0, x1 are in the range [0, b-1]. Then
h = g ** x (mod p) = g ** (x0 * b + x1) (mod p) = g ** (x0 * b) * g ** x1 (mod p). By moving the g ** x1 to left,
we obtain h / g ** x1 = g ** (x0 * b) (mod p). For every possible x1 in [0, b-1], we hash the left as key and x1 as
value to a hash table. Then for every possible x0, we calculate if the right is in this hash table. If so, we get
the right pair of x0 and x1 as x can be calculated.
>>> discrete_log(mpz(1073676287), mpz(1010343267), mpz(857348958))
1026831
:param p: a multi-precision prime
:param g: a multi-precision integer
:param h: a multi-precision integer
:param max_x: the max possible number of x
:return: the discrete log x
"""
b = int(max_x ** 0.5)
hash_table = {}
for x1 in range(b):
temp = divm(h, powmod(g, x1, p), p)
hash_table[temp] = x1
for x0 in range(b):
temp = powmod(g, x0 * b, p)
if temp in hash_table:
x1 = hash_table[temp]
break
x = x0 * b + x1
return x
if __name__ == "__main__":
doctest.testmod()
start_time = time.time()
print("the outcome is:", discrete_log(P, G, H))
elapsed_time = time.time() - start_time
print("The program ran for %s seconds" % elapsed_time)
| true
|
6ffe6c65d7f1b7dfcb2c18e2c367b51a48e6cc3c
|
Python
|
hutu1234567/crawlweb
|
/libs/hdfspython.py
|
UTF-8
| 4,078
| 2.546875
| 3
|
[] |
no_license
|
from hdfs import *
from hdfs.ext.kerberos import KerberosClient
import os
class HdfsClient:
    '''HDFS client'''
def __init__(self, ip='', root=None, proxy=None):
self.client=self.selectClient(ip)
def selectClient(self, ip='',root=None, proxy=None):
"""寻找可用hdfs链接"""
self.initKerberos()
urlMaping = {'10.10.10.23': 'http://10.10.10.23:50070', '10.10.10.21': 'http://10.10.10.21:50070',
'10.10.10.22': 'http://10.10.10.22:50070'}
def testip(ip,root=None, proxy=None):
print ip
if ip == '':
return process()
else:
client = KerberosClient(urlMaping[ip], root=root, proxy=proxy)
try:
print 'test %s' % urlMaping[ip]
client.list("/")
return client
except:
return process()
def process():
for key in urlMaping.keys():
client = KerberosClient(urlMaping[key], root=root, proxy=proxy)
try:
client.list("/")
return client
except:
continue
return testip(ip)
def initKerberos(self):
        '''Initialize Kerberos.'''
os.chdir('/etc/security/keytabs')
os.system('kinit -kt hdfs.headless.keytab hdfs-cluster1@IDAP.COM')
def list(self,hdfspath):
        '''List all files under the given HDFS path.'''
files =self.client.list(hdfspath)
return files
def upload(self,hdfsPath,localFilePath):
        '''Upload a local file to an HDFS path.'''
allPaths=self.client.list("/")
if hdfsPath not in allPaths:
            print(hdfsPath + ' does not exist!')
self.client.upload(hdfsPath, localFilePath)
def mkdirs(self,hdfsPath):
        '''Create an HDFS directory.'''
self.client.makedirs(hdfsPath,permission=777)
print('mkdir'+hdfsPath+' ok')
def existFile(self, path, fileName):
        '''Check whether the file exists.'''
allpath = [item for item in path.split('/') if item != '/' and item != '' and item != fileName]
increPath = '/'
for itemPath in allpath:
increPath = increPath + itemPath + '/'
if fileName in self.list(increPath):
return True;
else:
return False;
def existPath(self,path):
        '''Check whether the path exists.'''
allpath = [item for item in path.split('/') if item != '/' and item != '' and item.find('csv')]
increPath = '/'
for i in range(len(allpath)-1):
if i<len(allpath)-1:
increPath = increPath + allpath[i] + '/'
else:
increPath = increPath + allpath[i]
if allpath[-1] in self.list(increPath):
return True;
else:
return False
def write(self,hdfsPath,filename,data,append=True):
        '''Append to the file if it exists, otherwise create it.'''
if self.existPath(hdfsPath):
if self.existFile(hdfsPath, filename):
self.client.write(hdfsPath + '/' + filename.replace(' ', ''), data, append=True)
else:
self.client.write(hdfsPath + '/' + filename.replace(' ', ''), data, append=False)
self.client.write(hdfsPath + '/' + filename.replace(' ', ''), '\n', append=True)
else:
self.mkdirs(hdfsPath)
self.client.write(hdfsPath + '/' + filename.replace(' ', ''), data, append=False)
self.client.write(hdfsPath + '/' + filename.replace(' ', ''), '\n', append=True)
print('has done')
if __name__ == '__main__':
testclient = HdfsClient( 'http://10.10.10.23:50070' )
csvfile = open( 'ftgoodfile3.csv', 'r' )
csvcontent = csvfile.read()
csvfile.close()
testclient.write( '/wnm/1109/djla', 'ftgoodfile3.csv', csvcontent, append=True )
testclient.printFileNames( '/wnm/1109/djla' )
testclient.readByPath( '/wnm/1109/djla/ftgoodfile3.csv' )
| true
|
f6130c00fd6c459810d556c4c57137aa70a59160
|
Python
|
kimtaehong/android-malware-detection
|
/classfication/randomforest.py
|
UTF-8
| 5,057
| 2.5625
| 3
|
[] |
no_license
|
import csv
import numpy as np
from os import makedirs
from os.path import realpath, exists, isabs, dirname, join
from optparse import OptionParser
from sklearn.ensemble import RandomForestClassifier
from context import *
from classfication.log import log
train_dataset = dict()
target_dataset = dict()
def conver_list_to_float(source):
result = []
if type(source) == str:
token = source.split(' ')
else:
token = source
for t in token:
result.append(float(t))
return result
def train_set_dataset(row):
features = conver_list_to_float(row[2:-2][0])
return {
'ClusterNumber': int(row[0]),
'FileName': str(row[1]),
'Features': np.array(features),
'IsMalware': float(row[-1])
}
def target_set_dataset(row):
features = conver_list_to_float(row[2:])
return {
'name': row[0],
'feature': np.array(features)
}
def load_train_data(train_file_path):
with open(train_file_path) as file_object:
reader = csv.reader(file_object, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for index, row in enumerate(reader):
if index > 0:
train_dataset[index] = train_set_dataset(row)
train_cluster = train_label = (np.array([row[1]['ClusterNumber'] for row in train_dataset.items()]))
train_morb_name = (np.array([row[1]['FileName'] for row in train_dataset.items()]))
train_feature = (np.array([row[1]['Features'] for row in train_dataset.items()]))
train_label = (np.array([row[1]['IsMalware'] for row in train_dataset.items()]))
return train_cluster, train_morb_name, train_feature, train_label
def load_test_data(test_file_path):
with open(test_file_path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for index, row in enumerate(reader):
if index > 0:
target_dataset[index] = target_set_dataset(row)
target_morb_name = (np.array([row[1]['name'] for row in target_dataset.items()]))
target_feature = (np.array([row[1]['feature'] for row in target_dataset.items()]))
return target_morb_name, target_feature
def main(train_file_path, test_file_path):
current_module_path = dirname(realpath(__file__))
train_cluster, train_morb_name, train_feature, train_label = load_train_data(train_file_path)
answer_table = dict()
# print("Train_Feature: %s" %train_feature)
train_cluster_list = train_cluster.tolist()
train_label_list = train_label.tolist()
# print(train_cluster_list)
for k in range(0, len(train_cluster_list)):
if train_cluster_list[k] in answer_table:
continue
else:
answer_table[train_cluster_list[k]] = train_label_list[k]
target_morb_name, target_feature = load_test_data(test_file_path)
clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)
log().info(train_feature)
log().info(train_label)
clf = clf.fit(train_feature, train_cluster)
predict_result = clf.predict(target_feature)
# make output dir
output_base_dir = join(current_module_path, 'exported/randomforest')
if exists(output_base_dir) is False:
makedirs(output_base_dir)
# with cluster label
result_file_with_cn_path = join(output_base_dir, 'andro_result_with_cluster_number.csv')
cn_file_object = open(result_file_with_cn_path, 'w') # output file write
cn_file_object.write("file, cluster_number, class\n")
for j in range(0, len(target_feature)):
answer_qry = answer_table[predict_result[j]]
cn_file_object.write(target_morb_name[j] + ", " + str(predict_result[j]) + ", " + str(answer_qry) + "\n")
cn_file_object.close()
# not cluster label
result_file_path = join(output_base_dir, 'androd_result.csv')
file_object = open(result_file_path, 'w') # output file write
file_object.write("file, class\n")
for j in range(0, len(target_feature)):
answer_qry = answer_table[predict_result[j]]
file_object.write(target_morb_name[j] + ", " + str(answer_qry) + "\n")
if __name__ == '__main__':
opt_parser = OptionParser()
opt_parser.add_option(
'-t', '--train_file_path', dest='train_file',
help='csv input feature table.')
opt_parser.add_option(
'-f', '--test_file_path', dest='test_file',
help='csv input feature table.')
options, _ = opt_parser.parse_args()
if options.test_file is None or exists(options.test_file) is False:
opt_parser.print_help()
exit(-1)
if options.train_file is None or exists(options.train_file) is False:
opt_parser.print_help()
exit(-1)
if isabs(options.test_file) is False:
test_file_path = realpath(options.test_file)
else:
test_file_path = options.test_file
if isabs(options.train_file) is False:
train_file_path = realpath(options.train_file)
else:
train_file_path = options.train_file
main(train_file_path, test_file_path)
| true
|
691f93f1ea6b80af4c80f33ee41765a309558f70
|
Python
|
yeniferBarcoC/4.1-Miscelanea-de-Ciclos
|
/main.py
|
UTF-8
| 1,039
| 3.421875
| 3
|
[] |
no_license
|
""" Modulo Ciclos
Funciones para practicas con ciclos
Yenifer Barco Castrillón
junio 06-2021 """
#---------------- Libraries ------------
import funciones_ciclos as fc
#======================================================================
# Main algorithm - entry point of the application (Conquistar)
# =====================================================================
# Call the free-fall simulator function
altura = float(input("Por favor ingrese la altura:"))
fc.simulador_caida_libre(altura)
# Call the generation-generator function
generacion= int(input("\nIngrese el numero de la generación:"))
total_personas=fc.generador_generaciones(generacion)
print("Total de personas en la familia hasta ahora:",total_personas)
# Call the triangle-builder function
pisos = int(input("\nPor favor ingrese el número de pisos:"))
fc.constructor_triangulos(pisos)
# Call the board-builder function
longitud=int(input("\nIngrese la longitud del tablero:"))
fc.constructor_tableros(longitud)
| true
|
25fecd00f53a74a28b55efd7c94528a4708ee556
|
Python
|
adriencances/ava_code
|
/pairs_generation/frame_processing.py
|
UTF-8
| 7,083
| 2.625
| 3
|
[] |
no_license
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math as m
from torchvision import transforms, utils
import cv2
import sys
import pickle
import tqdm
frames_dir = "/media/hdd/adrien/Ava_v2.2/correct_frames"
shots_dir = "/home/acances/Data/Ava_v2.2/final_shots"
tracks_dir = "/home/acances/Data/Ava_v2.2/tracks"
fbt_file = "/home/acances/Code/ava/frames_by_timestamp.csv"
class FrameProcessor:
def __init__(self):
self.nb_frames_by_timestamp = {}
with open(fbt_file, "r") as f:
for line in f:
vid_id, N = line.strip().split(",")
                self.nb_frames_by_timestamp[vid_id] = int(N)
def get_boxes(bboxes_file):
boxes = []
with open(bboxes_file, "r") as f:
for line in f:
box = list(map(float, line.strip().split(",")))[:-1]
boxes.append(box)
return boxes
def get_enlarged_box(box, alpha):
# Enlarge the box area by 100*alpha percent while preserving
# the center and the aspect ratio
beta = 1 + alpha
x1, y1, x2, y2 = box
dx = x2 - x1
dy = y2 - y1
x1 -= (np.sqrt(beta) - 1)*dx/2
x2 += (np.sqrt(beta) - 1)*dx/2
y1 -= (np.sqrt(beta) - 1)*dy/2
y2 += (np.sqrt(beta) - 1)*dy/2
return x1, y1, x2, y2
def get_preprocessed_frame(video_id, cat, t, n):
# t : timestamp index of the video
# n : frame index in the timestamp (frame indices start at 1)
frame_file = "{}/{}/{}/{:05d}/{:06d}.jpg".format(frames_dir, cat, video_id, t, n)
# frame : H * W * 3
frame = cv2.imread(frame_file)
# frame : 3 * W * H
frame = frame.transpose(2, 1, 0)
frame = torch.from_numpy(frame)
return frame
def get_processed_frame(frame, box, w, h, normalized_box=False):
# frame : 3 * W * H
# (w, h) : dimensions of new frame
C, W, H = frame.shape
x1, y1, x2, y2 = box
# If box is in normalized coords, i.e.
# image top-left corner (0,0), bottom-right (1, 1),
# then turn normalized coord into absolute coords
if normalized_box:
x1 = x1*W
x2 = x2*W
y1 = y1*H
y2 = y2*H
# Round coords to integers
X1 = max(0, m.floor(x1))
X2 = max(0, m.ceil(x2))
Y1 = max(0, m.floor(y1))
Y2 = max(0, m.ceil(y2))
dX = X2 - X1
dY = Y2 - Y1
# Get the cropped bounding box
boxed_frame = transforms.functional.crop(frame, X1, Y1, dX, dY)
dX, dY = boxed_frame.shape[1:]
# Compute size to resize the cropped bounding box to
if dY/dX >= h/w:
w_tild = m.floor(dX/dY*h)
h_tild = h
else:
w_tild = w
h_tild = m.floor(dY/dX*w)
assert w_tild <= w
assert h_tild <= h
# Get the resized cropped bounding box
resized_boxed_frame = transforms.functional.resize(boxed_frame, [w_tild, h_tild])
# Put the resized cropped bounding box on a gray canvas
new_frame = 127*torch.ones(C, w, h)
i = m.floor((w - w_tild)/2)
j = m.floor((h - h_tild)/2)
new_frame[:, i:i+w_tild, j:j+h_tild] = resized_boxed_frame
return new_frame
def nb_frames_per_timestamp(video_id):
with open(fbt_file, "r") as f:
for line in f:
vid_id, N = line.strip().split(",")
if video_id == vid_id:
return int(N)
print("WARNING: no information for video id {} in fbt_file".format(video_id))
return None
def get_tracks(video_id, cat, shot_id):
tracks_file = "{}/{}/{}/{:05d}_tracks.pkl".format(tracks_dir, cat, video_id, shot_id)
with open(tracks_file, "rb") as f:
tracks = pickle.load(f)
return tracks
def get_extreme_timestamps(video_id, cat, shot_id):
shots_file = "{}/{}/shots_{}.csv".format(shots_dir, cat, video_id)
with open(shots_file, "r") as f:
for i, line in enumerate(f):
if i == shot_id:
start, end = line.strip().split(",")
t1, n1 = tuple(map(int, start.split("_")))
t2, n2 = tuple(map(int, end.split("_")))
return t1, t2
print("WARNING: no shot of index {} for video {}".format(shot_id, video_id))
return None
def get_processed_track_frames(video_id, cat, track, t1, t2, begin_frame, end_frame, w, h, alpha, normalized_box=False):
# begin_frame, end_frame : indices in [0, (t2-t1+1)N - 1]
# t1, t2 : first and last timestamps (included) corresponding to the shot to which the track belongs
N = nb_frames_per_timestamp(video_id)
b = int(track[0, 0])
processed_frames = []
for i in range(begin_frame, end_frame):
t = t1 + i//N
n = i%N + 1
frame = get_preprocessed_frame(video_id, cat, t, n)
track_frame_index = i - b
box = track[track_frame_index][1:5]
box = get_enlarged_box(box, alpha)
processed_frame = get_processed_frame(frame, box, w, h, normalized_box)
processed_frames.append(processed_frame)
processed_frames = torch.stack(processed_frames, dim=0)
return processed_frames
def get_frames(video_id, cat, shot_id, track_id, begin_frame, end_frame):
# shot_id : 0-based index.
# track_id : 0-based index.
# begin_frame, end_frame : indices between 0 and (t2-t1+1)N - 1,
# where t1 and t2 are the first and last (included) timestamps for the considered shot,
# and where N is the number of frames per timestamp for the considered video.
# Warning: end_frame is the index of the first frame not included
# Use dictionary such that d[video_id][shot_id] = (t1, t2)
t1, t2 = None, None
# Use dictionary such that d[video_id] = N
N = None
frames = []
for i in range(begin_frame, end_frame):
t = t1 + i//N
n = i%N + 1
frame = get_preprocessed_frame(video_id, cat, t, n)
frames.append(frame)
tracks_file = "{}/{}/{}/{:05d}_tracks.pkl ".format(tracks_dir, cat, video_id, shot_id)
tracks = get_tracks(video_id, cat, shot_id)
track, score = tracks[track_id]
b = int(track[0, 0])
    boxes = track[begin_frame - b:end_frame - b, 1:5]
assert len(boxes) == len(frames)
def print_out_processed_frames(processed_frames):
target_dir = "/home/acances/Code/ava/various"
nb_frames = processed_frames.shape[0]
for i in range(nb_frames):
frame = processed_frames[i].numpy().transpose(2, 1, 0)
target_file = "{}/{:05d}.jpg".format(target_dir, i + 1)
cv2.imwrite(target_file, frame)
if __name__ == "__main__":
tracks_file = sys.argv[1]
shot_id = int(tracks_file.split("/")[-1].split("_")[0])
video_id = tracks_file.split("/")[-2]
cat = tracks_file.split("/")[-3]
tracks = get_tracks(video_id, cat, shot_id)
t1, t2 = get_extreme_timestamps(video_id, cat, shot_id)
track, score = tracks[0]
begin_frame = int(track[0, 0])
end_frame = int(track[-1, 0])
w, h = 224, 224
alpha = 0.1
processed_frames = get_processed_track_frames(video_id, cat, track, t1, t2, begin_frame, end_frame, w, h, alpha)
print_out_processed_frames(processed_frames)
print(processed_frames.shape)
| true
|
3c38b95052574f92095e3bd5b92ce52a1f4f7b6f
|
Python
|
lpdonofrio/D10
|
/presidents.py
|
UTF-8
| 3,095
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
# Exercise: Presidents
# Author GitHub usernames:
# #1: lpdonofrio
# #2: nishapathak
# Instructions:
# Write a program to:
# (1) Load the data from presidents.txt into a dictionary.
# (2) Print the years the greatest and least number of presidents were alive.
# (between 1732 and 2016 (inclusive))
# Ex.
# 'least = 2015'
# 'John Doe'
# 'most = 2015'
# 'John Doe, Jane Doe, John Adams, and Jane Adams'
# Bonus: Confirm there are no ties. If there is a tie print like so:
# Ex.
# 'least = 1900, 2013-2015'
# 'John Doe (1900)'
# 'Jane Doe (2013-2015)'
# 'most = 1900-1934, 2013'
# 'John Doe, Jane Doe, John Adams, and Jane Adams (1900-1933)'
# 'Sally Doe, Billy Doe, Mary Doe, and Cary Doe (1934)'
# 'Alice Doe, Bob Doe, Zane Doe, and Yi Do (2013)'
# (3) Write your print statements to a file (greatest_least.txt) as well.
# Upload that file as well.
##############################################################################
# Imports
# Body
def load_data():
with open("presidents.txt", "r") as file:
lines = file.read().splitlines()
dictionary = {}
for line in lines:
items_list = line.split(",")
if items_list[2] == "None":
items_list[2] = "2016"
dictionary[items_list[0]] = (items_list[1], items_list[2])
return dictionary
def years_alive():
dictionary = load_data()
for key, value in dictionary.items():
list_years = []
for n in range(int(value[0]), (int(value[1])+1)):
list_years.append(n)
dictionary[key] = list_years
return dictionary
def count_years():
dictionary = years_alive()
years_counter = {}
for key, value in dictionary.items():
for n in value:
            if n in years_counter:
years_counter[n] +=1
else:
years_counter[n] = 1
return years_counter
def greatest_least():
dictionary = count_years()
sorted_years = sorted(dictionary, key = dictionary.__getitem__)
greatest = sorted_years[-1]
least = sorted_years[0]
presidents_greatest = []
presidents_least = []
dic_names_years = years_alive()
for key, item in dic_names_years.items():
if greatest in item:
presidents_greatest.append(key)
if least in item:
presidents_least.append(key)
presidents_least_str = ", ".join(presidents_least)
presidents_greatest_str = ", ".join(presidents_greatest)
print("Least = {}".format(least))
    print(presidents_least_str)
print("Greatest = {}".format(greatest))
    print(presidents_greatest_str)
with open("greatest_least.txt", "w") as fout:
fout.write("Least = {}\n".format(least) + presidents_least_str + "\n" +
"Greatest = {}\n".format(greatest) + presidents_greatest_str + "\n")
# Is there a more concise way of writing lines 77 to 85?
##############################################################################
def main(): # CALL YOUR FUNCTION BELOW
greatest_least()
if __name__ == '__main__':
main()
| true
|
f2eca029ee7fe0486ebc4bd436fc80ffa9045397
|
Python
|
snugfox/finfast
|
/finfast_torch/analyze/metrics_kernels.py
|
UTF-8
| 2,449
| 2.546875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
import torch
def beta(rp: torch.Tensor, rb: torch.Tensor) -> torch.Tensor:
rp_cent = rp - torch.mean(rp, dim=1, keepdim=True)
rb_cent = rb - torch.mean(rb, dim=1, keepdim=True)
rb_var = torch.mean(torch.square(rb_cent), dim=1, keepdim=True)
cov = (rp_cent @ rb_cent.T) / rp.shape[1]
return cov / rb_var.T
def alpha(rp: torch.Tensor, rb: torch.Tensor, rf: torch.Tensor) -> torch.Tensor:
return (
(rf - torch.mean(rb, dim=1, keepdim=True)).T * beta(rp, rb)
+ torch.mean(rp, dim=1, keepdim=True)
- rf
)
def sharpe(rp: torch.Tensor, rf: torch.Tensor) -> torch.Tensor:
rp_std, rp_mean = torch.std_mean(rp, dim=1, unbiased=False, keepdim=True)
return (rp_mean - rf) / rp_std
def treynor(rp: torch.Tensor, rb: torch.Tensor, rf: torch.Tensor) -> torch.Tensor:
return (torch.mean(rp, dim=1, keepdim=True) - rf) / beta(rp, rb)
def sortino(rp: torch.Tensor, rf: torch.Tensor) -> torch.Tensor:
zero = torch.zeros((), dtype=rp.dtype, device=rp.device)
return (torch.mean(rp, dim=1, keepdim=True) - rf) / torch.std(
torch.minimum(rp, zero), dim=1, unbiased=False, keepdim=True
)
def tracking_error(rp: torch.Tensor, rb: torch.Tensor) -> torch.Tensor:
rp_expanded = torch.unsqueeze(rp, 1)
rb_expanded = torch.unsqueeze(rb, 0)
return torch.std(rp_expanded - rb_expanded, dim=2, unbiased=False)
def information(rp: torch.Tensor, rb: torch.Tensor) -> torch.Tensor:
eps = torch.finfo(rp.dtype).tiny
return (
torch.mean(rp, dim=1, keepdim=True) - torch.mean(rb, dim=1, keepdim=True).T
) / (tracking_error(rp, rb) + eps)
def up_capture(rp: torch.Tensor, rb: torch.Tensor) -> torch.Tensor:
rp_expanded = torch.unsqueeze(rp, 1)
rb_expanded = torch.unsqueeze(rb, 0)
up_mask = rb_expanded > 0
return torch.sum((up_mask * rp_expanded) / rb_expanded, dim=2) / (
torch.count_nonzero(up_mask, dim=2)
)
def down_capture(rp: torch.Tensor, rb: torch.Tensor) -> torch.Tensor:
rp_expanded = torch.unsqueeze(rp, 1)
rb_expanded = torch.unsqueeze(rb, 0)
down_mask = rb_expanded < 0
return torch.sum((down_mask * rp_expanded) / rb_expanded, dim=2) / (
torch.count_nonzero(down_mask, dim=2)
)
def capture(rp: torch.Tensor, rb: torch.Tensor) -> torch.Tensor:
rp_expanded = torch.unsqueeze(rp, 1)
rb_expanded = torch.unsqueeze(rb, 0)
return torch.mean(rp_expanded / rb_expanded, dim=2)
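# Hypothetical smoke test (not part of the original module). The shapes below are
# assumptions: rp holds per-period returns for several portfolios (portfolios x periods),
# rb the same for benchmarks, and rf is a scalar per-period risk-free rate.
if __name__ == "__main__":
    torch.manual_seed(0)
    rp = torch.randn(4, 252) * 0.01   # 4 portfolios, 252 periods of returns
    rb = torch.randn(2, 252) * 0.01   # 2 benchmarks
    rf = torch.tensor(0.0005)
    print(beta(rp, rb).shape)         # torch.Size([4, 2])
    print(sharpe(rp, rf).shape)       # torch.Size([4, 1])
    print(information(rp, rb).shape)  # torch.Size([4, 2])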
| true
|
64e5cda360046490b4dbc665a94d4729e55dda2a
|
Python
|
kembo-net/cropper.py
|
/cropper.py
|
UTF-8
| 1,455
| 3.15625
| 3
|
[] |
no_license
|
import sys, os, re
from PIL import Image
desc = "1行目に画像ファイルがあるディレクトリ、\n" \
"2行目に画像ファイル名(拡張子含)を表す正規表現、\n" \
"3行目以降に画像を切り出す座標を書いてください。\n" \
"座標は矩形1つ毎に左上x座標, 左上y座標, 右下x座標, 右下y座標を半角カンマ区切りで入れてください。\n" \
"ファイルの入出力は全てJPEGを前提にしています。"
args = sys.argv
if len(args) == 1 or args[1] in {'-h', 'help'}:
print(desc)
else :
with open(args[1], 'r') as f:
        # directory name
dirc = f.readline().rstrip()
os.chdir(dirc)
        # file name pattern (regex)
ptrn = re.compile(f.readline().rstrip())
        # list of matching file names
pic_names = [name for name in os.listdir(dirc) if ptrn.match(name)]
        # rectangle coordinates
def convert_pos(text) :
return tuple(int(x) for x in text.split(','))
areas = [convert_pos(line) for line in f]
for pname in pic_names :
img = Image.open(pname, 'r')
for i, area in enumerate(areas) :
dir_name = str(i)
if not os.path.exists(dir_name) :
os.mkdir(dir_name)
new_img = img.crop(area)
new_img.save(dir_name + '/' + pname, 'JPEG', quality=100, optimize=True)
| true
|
12284d7b1c2f6597af817b4b6382858f28b668a7
|
Python
|
2020-A-Python-GR1/py-roman-cabrera-bolivar-andres
|
/proyecto - scrapy 2B/movies/movies/spiders/movie_spyder.py
|
UTF-8
| 2,639
| 2.71875
| 3
|
[] |
no_license
|
import scrapy
import pandas as pd
import numpy as np
import re
class MovieCrawl(scrapy.Spider):
name = 'movie_spyder'
urls = []
size_page = np.arange(1,1000,50)
for num in size_page:
urls.append('https://www.imdb.com/search/title/?groups=top_1000&start={num}'.format(num=num))
m_name = []
m_year = []
m_rated = []
m_duration = []
m_genre = []
m_rating = []
m_metascore = []
m_director = []
m_votes = []
def start_requests(self):
for url in self.urls:
yield scrapy.Request(url=url)
def parse(self, response):
movies_list = response.css('div.lister-item')
for movie in movies_list:
name = movie.css('h3 > a::text').extract_first()
year = movie.css('h3 > span.lister-item-year::text').extract_first()
rated = movie.css('p.text-muted > span.certificate::text').extract_first()
duration = movie.css('p.text-muted > span.runtime::text').extract_first()
genre = movie.css('p.text-muted > span.genre::text').extract_first()
rating = movie.css('div.ratings-bar > div.inline-block::attr(data-value)').extract_first()
metascore = movie.css('div.ratings-bar > div.inline-block > span.metascore::text').extract_first()
director = movie.css('p > a::text').extract_first()
votes = movie.css('p.sort-num_votes-visible > span::attr(data-value)').extract_first()
self.m_name.append(name)
self.m_year.append(re.sub('[(\D)]', '', str(year)))
self.m_rated.append(rated)
self.m_genre.append(str(genre).split(',')[0].strip('\n').strip())
self.m_duration.append(str(duration).strip('min '))
self.m_rating.append(rating)
if str(metascore).strip() == 'None':
self.m_metascore.append(0)
else:
self.m_metascore.append(int(str(metascore).strip()))
self.m_director.append(director)
self.m_votes.append(votes)
def close(self, reason):
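        # Intended to run when the crawl finishes: combine the lists filled in
        # parse() into a DataFrame and write everything scraped to data.csv.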
df = pd.DataFrame({
'name' : pd.Series(self.m_name),
'year' : pd.Series(self.m_year),
'rated' : pd.Series(self.m_rated),
'duration_min' : pd.Series(self.m_duration),
'genre' : pd.Series(self.m_genre),
'rating' : pd.Series(self.m_rating),
'metascore' : pd.Series(self.m_metascore),
'director' : pd.Series(self.m_director),
'votes' : pd.Series(self.m_votes)
})
df.to_csv('data.csv', index = False, encoding='utf-8')
| true
|
618d0a7ff5d393ad104b73dae22cf12457558d4d
|
Python
|
zszzlmt/leetcode
|
/solutions/1128.py
|
UTF-8
| 872
| 2.84375
| 3
|
[] |
no_license
|
from typing import List
class Solution:
def numEquivDominoPairs(self, dominoes: List[List[int]]) -> int:
from collections import defaultdict
cluster_to_values = defaultdict(set)
cluster_to_idxs = defaultdict(list)
for idx in range(len(dominoes)):
i, j = dominoes[idx]
for idxx in cluster_to_values:
if (i, j) in cluster_to_values[idxx]:
cluster_to_idxs[idxx].append(idx)
break
else:
idxx = len(cluster_to_idxs)
cluster_to_idxs[idxx].append(idx)
cluster_to_values[idxx].add((i, j))
cluster_to_values[idxx].add((j, i))
result = 0
for idx_list in cluster_to_idxs.values():
result += (len(idx_list) - 1) * (len(idx_list)) / 2
result = int(result)
return result
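# Hypothetical quick check (not part of the original solution file): [1,2] and [2,1]
# form the only equivalent pair here, so the expected output is 1.
if __name__ == "__main__":
    print(Solution().numEquivDominoPairs([[1, 2], [2, 1], [3, 4], [5, 6]]))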
| true
|
59cdfc7aa116754f904476caf979060376952f31
|
Python
|
mklomo/school_administration_project
|
/course.py
|
UTF-8
| 1,923
| 3.375
| 3
|
[] |
no_license
|
"""
This script implements the course Class
"""
from professor import Professor
from enrol import Enrol
class Course:
"""
_min_number_of_students : int
_max_number_of_students : int
_course_code : int
_start : date
_end : date
_name : str
_semester : int
is_cancelled() : boolean
get_num_students_enrolled() : int
"""
def __init__(self, min_number_of_students, max_number_of_students, course_code, start_date,
end_date, name, professor):
self._min_number_of_students = min_number_of_students
self._max_number_of_students = max_number_of_students
self._course_code = course_code
self._start_date = start_date
self._end_date = end_date
self._name = name
self._enrollments = []
self._professors = []
if isinstance(professor, Professor):
self._professors.append(professor)
elif isinstance(professor, list):
for entry_professor in professor:
if not isinstance(entry_professor, Professor):
raise TypeError("Invalid Professor...")
self._professors.append(entry_professor)
else:
raise TypeError("Invalid Professor..")
def is_cancelled(self):
return len(self._enrollments) < self._min_number_of_students
def add_professor(self, professor):
if not isinstance(professor, Professor):
raise TypeError("Invalid Professor Entry...")
else:
self._professors.append(professor)
def enroll_course(self, enrol):
if not isinstance(enrol, Enrol):
raise TypeError("Invalid Enroll")
if len(self._enrollments) == self._max_number_of_students:
raise RuntimeError("Can not enroll in course, course is full..")
self._enrollments.append(enrol)
def get_enrollment_numbers(self):
return len(self._enrollments)
| true
|
15e3dafa46fdb66062d04b0ed449b89df976b165
|
Python
|
mincloud1501/Python
|
/Data_Analytics_Pandas/gonggongInfoAnalysis.py
|
UTF-8
| 3,635
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
import os
import sys
import urllib.request
import datetime
import time
import json
import math
# https://www.data.go.kr/
# Tourism Resource Statistics Service (관광자원통계서비스)
def get_request_url(url):
req = urllib.request.Request(url)
try:
response = urllib.request.urlopen(req)
if response.getcode() == 200:
#print("[%s] Url Request Success" % datetime.datetime.now())
return response.read().decode('utf-8')
except Exception as e:
print(e)
print("[%s] Error for URL : %s" % (datetime.datetime.now(), url))
return None
# Look up visitor counts for paid tourist attractions
def getTourPointVisitor(yyyymm, sido, gungu, nPagenum, nItems):
end_point = "http://openapi.tour.go.kr/openapi/service/TourismResourceStatsService/getPchrgTrrsrtVisitorList"
parameters = "?_type=json&serviceKey=" + access_key
parameters += "&YM=" + yyyymm
parameters += "&SIDO=" + urllib.parse.quote(sido)
parameters += "&GUNGU=" + urllib.parse.quote(gungu)
parameters += "&RES_NM=&pageNo=" + str(nPagenum)
parameters += "&numOfRows=" + str(nItems)
url = end_point + parameters
retData = get_request_url(url)
if (retData == None):
return None
else:
return json.loads(retData)
# Define the JSON record format
def getTourPointData(item, yyyymm, jsonResult):
    addrCd = 0 if 'addrCd' not in item.keys() else item['addrCd'] # region code (does not match the postal code)
gungu = '' if 'gungu' not in item.keys() else item['gungu']
sido = '' if 'sido' not in item.keys() else item['sido']
resNm = '' if 'resNm' not in item.keys() else item['resNm']
    rnum = 0 if 'rnum' not in item.keys() else item['rnum'] # unique code assigned to the attraction
    ForNum = 0 if 'csForCnt' not in item.keys() else item['csForCnt'] # number of foreign visitors
    NatNum = 0 if 'csNatCnt' not in item.keys() else item['csNatCnt'] # number of domestic visitors
jsonResult.append({'yyyymm': yyyymm, 'addrCd': addrCd,
'gungu': gungu, 'sido': sido, 'resNm': resNm,
'rnum': rnum, 'ForNum': ForNum, 'NatNum': NatNum})
return
def main():
jsonResult = []
sido = '서울특별시'
gungu = ''
nPagenum = 1
nTotal = 0
nItems = 100
nStartYear = 2011
nEndYear = 2016
for year in range(nStartYear, nEndYear):
for month in range(1, 13):
yyyymm = "{0}{1:0>2}".format(str(year), str(month))
nPagenum = 1
# [CODE 3]
while True:
jsonData = getTourPointVisitor(yyyymm, sido, gungu, nPagenum, nItems)
if (jsonData['response']['header']['resultMsg'] == 'OK'):
nTotal = jsonData['response']['body']['totalCount']
if nTotal == 0:
break
for item in jsonData['response']['body']['items']['item']:
getTourPointData(item, yyyymm, jsonResult)
nPage = math.ceil(nTotal / 100)
if (nPagenum == nPage):
break
nPagenum += 1
else:
break
with open('%s_관광지입장정보_%d_%d.json' % (sido, nStartYear, nEndYear - 1), 'w', encoding='utf8') as outfile:
retJson = json.dumps(jsonResult,
indent=4, sort_keys=True,
ensure_ascii=False)
outfile.write(retJson)
print(retJson)
print('%s_관광지입장정보_%d_%d.json SAVED' % (sido, nStartYear, nEndYear - 1))
if __name__ == '__main__':
main()
| true
|
e11a6dc349084a05f5b5ee2b582f729dd3f8bc33
|
Python
|
brodri4/LearningPython
|
/duplicate.py
|
UTF-8
| 284
| 3.171875
| 3
|
[] |
no_license
|
def duplicate_remove(array):
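    # dict.fromkeys keeps only the first occurrence of each element and, on Python 3.7+,
    # preserves insertion order, so duplicates are removed while the original order is kept.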
return list(dict.fromkeys(array))
answer = duplicate_remove([1,2,3,4,4])
answer2 = duplicate_remove([1,2,3,4,5])
answer3 = duplicate_remove([1,2,1,2,4])
answer4 = duplicate_remove([2,2,2,2])
print(answer)
print(answer2)
print(answer3)
print(answer4)
| true
|
bf8ed23ddf2cf8004686f6955bff919baf4e5760
|
Python
|
fswzb/sensequant
|
/ml_model.py
|
UTF-8
| 3,889
| 2.609375
| 3
|
[] |
no_license
|
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Activation, Dropout
from keras.regularizers import l2, l1
import pandas as pd
import numpy as np
from sklearn import linear_model, preprocessing
from sklearn.metrics import classification_report
import configure
RESULT_DIR = configure.result_dir
TRAIN_SET = configure.cache_dir + configure.cache_train_set
TEST_SET = configure.cache_dir + configure.cache_test_set
REPORT_FILE = configure.result_dir + configure.result_report
PREDICT_NN_FILE = configure.result_dir + configure.result_NN_predict_file
PREDICT_LR_FILE = configure.result_dir + configure.result_LR_predict_file
class ALGORITHM():
def __init__(self):
return
def prepare_data(self, trainFname=TRAIN_SET, testFname=TEST_SET):
trainData = np.loadtxt(trainFname)
testData = np.loadtxt(testFname)
(X_train, Y_train) = (trainData[:, :-1], trainData[:, -1])
(X_test, Y_test) = (testData[:, :-1], testData[:, -1])
return (X_train, Y_train, X_test, Y_test)
def preprocess_X(self, X):
return preprocessing.scale(X)
def preprocess_Y(self, Y):
Y_ = np.zeros((len(Y), 3))
msk1 = Y==0
msk2 = Y==1
msk3 = Y==2
Y_[msk1, 0] = 1
Y_[msk2, 1] = 1
Y_[msk3, 2] = 1
return Y_
def train(self, X_train, Y_train, X_test, iter_):
'''
output: predicted class: 0, 1, 2
'''
inputs = Input(shape=(12,))
x1 = Dense(96, activation='relu', W_regularizer=l1(0.01))(inputs)
x2 = Dense(96, activation='relu', W_regularizer=l1(0.01))(x1)
#drop = Dropout(0.2)(x)
prediction = Dense(3, activation='relu', W_regularizer=l1(0.01))(x2)
model = Model(input=inputs, output=prediction)
model.compile(optimizer='adagrad',
loss='poisson')
model.fit(X_train, Y_train, nb_epoch=iter_, batch_size=100)
pred = model.predict(X_test)
return (np.argmax(pred, axis=1),
np.max(pred, axis=1))
def benchmark(self, X_train, Y_train, X_test):
'''
output: predicted class: -1, 0, 1
'''
lr = linear_model.LogisticRegression()
model = lr.fit(X_train, Y_train)
return (model.predict(X_test),
np.max(model.predict_proba(X_test), axis=1))
def evaluate(self, Y_pred, Y_true, method, fname=REPORT_FILE):
if method != 'NN' and method != 'LR':
return ValueError('method just can be either NN or LR')
with open(fname, 'w+') as f:
f.write(\
method\
+ ':\n'\
+ classification_report(Y_pred, Y_true)\
+ '\n')
msk = Y_pred == Y_true
return msk.cumsum()[-1]/len(msk)
def combine_to_df(self, class_, prob):
return pd.DataFrame({'class_': class_, 'prob': prob})
def run(self, iter_, folder=RESULT_DIR):
X_train, Y_train, X_test, Y_test = self.prepare_data()
X_train_scale, X_test_scale = (self.preprocess_X(X_train), self.preprocess_X(X_test))
Y_train_matrix, Y_test_matrix = (self.preprocess_Y(Y_train), self.preprocess_Y(Y_test))
predNN = self.train(X_train_scale, Y_train_matrix, X_test_scale, iter_)
predLR = self.benchmark(X_train_scale, Y_train, X_test_scale)
self.combine_to_df(predNN[0], predNN[1])\
.to_csv(PREDICT_NN_FILE, index=False)
self.combine_to_df(predLR[0], predLR[1])\
.to_csv(PREDICT_LR_FILE, index=False)
accNN = self.evaluate(predNN[0], np.argmax(Y_test_matrix, axis=1), 'NN')
accLR = self.evaluate(predLR[0], Y_test, 'LR')
print ('NN accuracy: ', accNN)
print ('LR accuracy: ', accLR)
return
| true
|
45b5925cfc38c86c8ffd2364af88ddc91517e20b
|
Python
|
sampoprock/GeeksforGeeks-leetcode
|
/primalitytest.py
|
UTF-8
| 1,418
| 4.15625
| 4
|
[] |
no_license
|
# Primality Test
# For a given number N check if it is prime or not. A prime number is a number which is only divisible by 1 and itself.
# Input:
# First line contains an integer, the number of test cases 'T'. T testcases follow. Each test case should contain a positive integer N.
# Output:
# For each testcase, in a new line, print "Yes" if it is a prime number else print "No".
# Your Task:
# This is a function problem. You just need to complete the function isPrime that takes N as parameter and returns True if N is prime else returns false. The printing is done automatically by the driver code.
# Expected Time Complexity : O(N1/2)
# Expected Auxilliary Space : O(1)
# Constraints:
# 1 <= T <= 100
# 1 <= N <= 109
# Example:
# Input:
# 2
# 5
# 4
# Output:
# Yes
# No
#User function Template for python3
##Complete this function
import math
def isPrime(N):
#Your code here
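    # Trial division: a composite N must have a divisor no larger than sqrt(N),
    # so checking 2..floor(sqrt(N)) is sufficient.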
if(N<2):
return False
for i in range(2,int(math.sqrt(N))+1):
if(N%i==0):
return False
return True
#{
#Driver Code Starts.
def main():
T=int(input())
while(T>0):
N=int(input())
if(isPrime(N)):
print("Yes")
else:
print("No")
T-=1
if __name__=="__main__":
main()
#} Driver Code Ends
| true
|
d92548ff195ff80a6130530b36bb2b9f7cba5154
|
Python
|
shiv125/Competetive_Programming
|
/codechef/snack17/prob3_upd.py
|
UTF-8
| 2,572
| 2.625
| 3
|
[] |
no_license
|
#import timeit
#start = timeit.default_timer()
#corr=open("algo.txt","w+")
def binarysearch(arr,target):
low=0
high=len(arr)-1
while low<=high:
mid=(high+low)/2
val=arr[mid]
if target<=arr[low]:
return low
if target>arr[high]:
return -1
if target==val:
return mid
elif target>val:
low=mid+1
else:
if mid-1>=low and target>arr[mid-1]:
return mid
else:
high=mid-1
def search(low,high,index):
while low<=high and high<index and high>=0:
mid=(high+low)/2
if low>=zfun(low,index,ki):
return low
h=zfun(high,index,ki)
if high<h:
return -1
zmid=zfun(mid,index,ki)
if mid==zmid:
return mid
elif mid<zmid:
low=mid+1
else:
if mid-1>=low and mid-1<zfun(mid-1,index,ki):
return mid
else:
high=mid-1
def zfun(i,index,ki):
return (index-i)*ki-(dp[i]-dp[index])
def fun(arr,n,ki):
i=binarysearch(arr,ki)
temp=0
if i==-1:
count=0
i=n
else:
count=n-i
temp=search(i/2,i-1,i)
if temp==-1:
return count
return count+i-temp
'''
with open('testcases.txt',"r") as f:
inp=[]
for line in f:
inp.append(line)
inp=[x.strip() for x in inp]
asa=len(inp)
z=0
t=10**4
while z<asa:
n,q=map(int,inp[z].split())
z+=1
arr=map(int,inp[z].split())
z+=1
arr.sort()
dp=[0]*(n+1)
dp[n-1]=arr[n-1]
count=0
lookup={}
for r in range(n-1,0,-1):
dp[r-1]=arr[r-1]+dp[r]
starter=[0]*n
for i in range(1,n):
ki=arr[i]
starter[i]=search(i/2,i-1,i)
for m in range(q):
count=0
ki=int(inp[z])
tus=binarysearch(arr,ki)
temp=0
if tus==-1:
count=0
tus=n
else:
count=n-tus
if tus!=0:
tun=starter[tus-1]
temp=search(tun,tus-1,tus)
#temp=search(tun,tus-1,tus)
if temp!=-1 and tus!=0:
count=count+tus-temp
corr.write(str(count)+"\n")
z+=1
'''
t=input()
for i in range(t):
n,q=map(int,raw_input().split())
arr=map(int,raw_input().split())
arr.sort()
dp=[0]*(n+1)
dp[n-1]=arr[n-1]
for r in range(n-1,0,-1):
dp[r-1]=arr[r-1]+dp[r]
starter=[0]*n
for i in range(1,n):
ki=arr[i]
starter[i]=search(i/2,i-1,i)
for m in range(q):
count=0
ki=input()
tus=binarysearch(arr,ki)
temp=0
if tus==-1:
count=0
tus=n
else:
count=n-tus
while tus>starter[tus-1]:
temp+=ki-arr[tus-1]
if tus-1<temp:
break
else:
count+=1
i-=1
print count
'''
temp=0
if tus==-1:
count=0
tus=n
else:
count=n-tus
if tus!=0:
tun=starter[tus-1]
temp=search(tun,tus-1,tus)
#temp=search(tun,tus-1,tus)
if temp!=-1 and tus!=0:
count=count+tus-temp
print count
'''
#stop = timeit.default_timer()
#print stop-start
| true
|
c299daf9e32cf705d4760f25f7a27421b50714bb
|
Python
|
DianaQuintero459/CP-D
|
/test.py
|
UTF-8
| 1,082
| 2.796875
| 3
|
[] |
no_license
|
# Student: Diana Carolina Quintero Bedoya
# Email: diana.quintero01@correo.usa.edu.co
# Program: Computer Science and Artificial Intelligence
# Date: April 29, 2021
# Last modified: May 5, 2021
# Instructor: John Corredor, PhD
# Course: Parallel and Distributed Computing
# Universidad Sergio Arboleda
#
######### Cython/Python performance ##########
#
import functionE
import CyfunctionE
import numpy as np
import time
def execute(D, N, X, beta, tetha):
initial = time.time()
functionE.rbf_network(X, beta, tetha)
tiempoPy = time.time() - initial
initial = time.time()
CyfunctionE.rbf_network(X, beta, tetha)
tiempoCy = time.time() - initial
SpeedUp = round(tiempoPy/tiempoCy, 3)
print("tiempo Py: {}\n".format(tiempoPy))
print("tiempo Cy: {}\n".format(tiempoCy))
print("SpeedUp: {}\n".format(SpeedUp))
N = 1500
beta = np.random.rand(N)
tetha = 10
D = 6
X = np.array([np.random.rand(N) for d in range(D)]).T
execute(D, N, X, beta, tetha)
D = 60
X = np.array([np.random.rand(N) for d in range(D)]).T
execute(D, N, X, beta, tetha)
| true
|
dbf738d60fc66a34d32ccc15f3ba13413c36f665
|
Python
|
webclinic017/AF5353
|
/Multi-Factor_Model_Regression .py
|
UTF-8
| 3,678
| 3.265625
| 3
|
[] |
no_license
|
### Step 1. Import the libraries:
import pandas as pd
import yfinance as yf
import statsmodels.formula.api as smf
import pandas_datareader.data as web
### Step 2. Specify the risky asset and the time horizon:
RISKY_ASSET = 'GOOG'
START_DATE = '2010-01-01'
END_DATE = '2020-12-31'
### Step 3. Download the data of the risky asset from Yahoo Finance and Calculate the monthly returns:
asset_df = yf.download(RISKY_ASSET, start=START_DATE, end=END_DATE, adjusted=True, progress=False)
y = asset_df['Adj Close'].resample('M').last().pct_change().dropna()
y.index = y.index.strftime('%Y-%m')
y.name = 'return'
### Step 4. Download the risk factors from prof. French's website:
# three factors
df_three_factor = web.DataReader('F-F_Research_Data_Factors', 'famafrench', start=START_DATE, end=END_DATE)[0]
df_three_factor.index = df_three_factor.index.format()
# momentum factor
df_mom = web.DataReader('F-F_Momentum_Factor', 'famafrench', start=START_DATE, end=END_DATE)[0]
df_mom.index = df_mom.index.format()
### Step 5. Merge the datasets for the four-factor model:
# join all datasets on the index
four_factor_data = df_three_factor.join(df_mom).join(y)
# rename columns
four_factor_data.columns = ['mkt', 'smb', 'hml', 'rf', 'mom', 'rtn']
# divide everything (except returns) by 100
four_factor_data.loc[:, four_factor_data.columns != 'rtn'] /= 100
# calculate excess returns of risky asset
four_factor_data['excess_rtn'] = four_factor_data.rtn - four_factor_data.rf
### Step 6. Run the regression to estimate alpha and beta
# one-factor model (CAPM):
one_factor_model = smf.ols(formula='excess_rtn ~ mkt', data=four_factor_data).fit()
print(one_factor_model.summary())
# three-factor model:
three_factor_model = smf.ols(formula='excess_rtn ~ mkt + smb + hml', data=four_factor_data).fit()
print(three_factor_model.summary())
# four-factor model:
four_factor_model = smf.ols(formula='excess_rtn ~ mkt + smb + hml + mom', data=four_factor_data).fit()
print(four_factor_model.summary())
#############################
### For five-factor model ###
#############################
### Step 1. Download the risk factors from prof. French's website:
# five factors
df_five_factor = web.DataReader('F-F_Research_Data_5_Factors_2x3', 'famafrench', start=START_DATE, end=END_DATE)[0]
df_five_factor.index = df_five_factor.index.format()
### Step 2. Merge the datasets for the five-factor model:
# join all datasets on the index
five_factor_data = df_five_factor.join(y)
# rename columns
five_factor_data.columns = ['mkt', 'smb', 'hml', 'rmw', 'cma', 'rf', 'rtn']
# divide everything (except returns) by 100
five_factor_data.loc[:, five_factor_data.columns != 'rtn'] /= 100
# calculate excess returns
five_factor_data['excess_rtn'] = five_factor_data.rtn - five_factor_data.rf
### Step 3. Estimate the five-factor model:
five_factor_model = smf.ols(formula='excess_rtn ~ mkt + smb + hml + rmw + cma', data=five_factor_data).fit()
print(five_factor_model.summary())
'''
RMW (Robust Minus Weak)
Average return on the robust operating profitability portfolios minus the average return on the weak operating profitability portfolios
OP for June of year t is (OP minus interest expense) / book equity for the last fiscal year end in t-1.
The OP breakpoints are the 30th and 70th NYSE percentiles.
CMA (Conservative Minus Aggressive)
Average return on the conservative investment portfolios minus the average return on the aggressive investment portfolios
Investment is the change in total assets from the fiscal year ending in year t-2 to the fiscal year ending in t-1, divided by t-2 total assets.
The Inv breakpoints are the 30th and 70th NYSE percentiles.
'''
| true
|
c2d540c4c3e0b25d450203bad6aef097e0c078b7
|
Python
|
OScott19/TheMulQuaBio
|
/code/cfexercises2.py
|
UTF-8
| 683
| 4.34375
| 4
|
[
"CC-BY-3.0",
"MIT"
] |
permissive
|
# What does each of fooXX do?
def foo1(x): # returns the square root of x
return x ** 0.5
def foo2(x, y): # returns the larger of x and y
if x > y:
return x
return y
def foo3(x, y, z): # one bubble-sort pass over x, y, z; the result is not always fully sorted
if x > y:
tmp = y
y = x
x = tmp
if y > z:
tmp = z
z = y
y = tmp
return [x, y, z]
def foo4(x): # returns the factorial of x, computed iteratively
result = 1
for i in range(1, x + 1):
result = result * i
return result
def foo5(x): # a recursive function that calculates the factorial of x
if x == 1:
return 1
return x * foo5(x - 1)
def foo6(x): # Calculate the factorial of x in a different way
facto = 1
while x >= 1:
facto = facto * x
x = x - 1
return facto
| true
|
2b4630ec566f229b9b76191acc3a7c93c2660556
|
Python
|
bgschiller/country-bounding-boxes
|
/country_bounding_boxes/__init__.py
|
UTF-8
| 5,691
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import iso3166
import json
from country_bounding_boxes.generated import countries
# The naturalearth dataset we're using contains "subunits" of a variety of
# forms; Some are "full sized" countries, some are historically or
# politically significant divisions within the country (eg. Scotland and
# Wales in the UK), some are physically disjoint components of countries
# (eg. Alaska) and some are islands, dependencies, overseas departments,
# or similar special cases. As a result, we return a _set_ of countries
# for each iso code.
_iso_2_cache = {}
_iso_3_cache = {}
# The legitimate ISO 3166 alpha2 and alpha3 names, which appear in a variety
# of contexts in the naturalearth dataset depending on the subunit being
# described.
_iso_2_names = set()
_iso_3_names = set()
def _is_iso_3_name(n):
if len(_iso_3_names) == 0:
for c in iso3166.countries:
_iso_3_names.add(c.alpha3)
return n in _iso_3_names
def _is_iso_2_name(n):
if len(_iso_2_names) == 0:
for c in iso3166.countries:
_iso_2_names.add(c.alpha2)
return n in _iso_2_names
# Depending on the type of the (sub)unit, the ISO alpha3 name this
# "country" is connected to might be denoted in a variety of fields. Search
# them all in a hopefully-useful order of precedence and take the first
# that looks legit.
def _best_guess_iso_3(c):
for n in [c.iso_a3, c.adm0_a3, c.adm0_a3_is,
c.adm0_a3_us, c.gu_a3, c.su_a3, c.sov_a3]:
if n != "-99" and _is_iso_3_name(n):
return n
return None
# ISO alpha3 names are much more prevalent in the NE dataset; look up the
# corresponding alpha2 name from iso3166 and cross-check against any alpha2
# name we have in the NE record.
def _best_guess_iso_2(c):
iso3 = _best_guess_iso_3(c)
if iso3 is None:
return None
isoc = iso3166.countries.get(iso3)
if isoc is None:
return None
iso2 = isoc.alpha2
if c.iso_a2 != "-99" and _is_iso_2_name(c.iso_a2):
assert c.iso_a2 == iso2
return iso2
def _ensure_caches_populated():
global _iso_2_cache
global _iso_3_cache
if not _iso_2_cache:
for c in countries:
iso2 = _best_guess_iso_2(c)
iso3 = _best_guess_iso_3(c)
if iso2 not in _iso_2_cache:
_iso_2_cache[iso2] = set()
if iso3 not in _iso_3_cache:
_iso_3_cache[iso3] = set()
_iso_2_cache[iso2].add(c)
_iso_3_cache[iso3].add(c)
def country_subunits_containing_point(lon, lat):
"""
Iterate over the country subunits that contain the provided point.
Each subunit will have a .bbox field indicating its (lon1, lat1, lon2,
lat2) bounding box.
"""
res = []
for c in countries:
(lon1, lat1, lon2, lat2) = c.bbox
# To handle international date line spanning
# bboxes -- namely Fiji -- we treat any country that's
#
# Fiji spans the international date line
# (-180.0, -21.705859375, 180.0, -12.476953125),
#
# England does not
# (-5.65625, 50.0213867188, 1.74658203125, 55.8079589844),
#
# This poses a bit of difficulty, because they both appear
# "numerically" the same way, as a bounding box going from low
# longitude to high longitude. The problem is that passing the
# international date line means you should interpret the box
# as running from high to low
if lon1 <= lon and lon <= lon2 and \
lat1 <= lat and lat <= lat2:
res.append(c)
return iter(res)
def country_subunits_by_iso_code(code):
"""
Iterate over all country subunits, some of which are full countries and
some of which are smaller components thereof; all have a .bbox field
indicating their (lon1, lat1, lon2, lat2) bounding box.
"""
if not isinstance(code, str):
return iter([])
_ensure_caches_populated()
code = code.upper()
if len(code) == 2 and code in _iso_2_cache:
return iter(_iso_2_cache[code])
elif len(code) == 3 and code in _iso_3_cache:
return iter(_iso_3_cache[code])
return iter([])
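# Example usage (illustrative sketch; assumes the bundled country data is available):
#   for c in country_subunits_by_iso_code("US"):
#       print(c.bbox)   # (lon1, lat1, lon2, lat2)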
def all_country_subunits():
"""
Iterate over all country subunits, some of which are full countries and
some of which are smaller components thereof; all have a .bbox field
indicating their (lon1, lat1, lon2, lat2) bounding box.
"""
return iter(countries)
def all_country_subunits_grouped_by_iso_3_code():
"""
Iterate over pairs of strings and sets of country subunits, where the
string is an ISO 3166 alpha3 country code and the subunits all have a
.bbox field indicating their (lon1, lat1, lon2, lat2) bounding box.
"""
_ensure_caches_populated()
return _iso_3_cache.items()
def show_all_bounding_boxes():
"""
Diagnostic routine to emit all bounding boxes as GeoJSON.
"""
fs = []
for c in all_country_subunits():
(lon1, lat1, lon2, lat2) = c.bbox
fs.append(dict(type="Feature",
properties=[],
geometry=dict(type="Polygon",
coordinates=[[
[lon1, lat1],
[lon1, lat2],
[lon2, lat2],
[lon2, lat1],
[lon1, lat1]
]])))
fc = dict(type="FeatureCollection",
features=fs)
    print(json.dumps(fc, indent=True))
| true
|
7a8ab9f0bcd31892348a7d88b3ecec79e7b93d4e
|
Python
|
afterloe/raspberry-auto
|
/opencv-integrate/py/four_point_perspective.py
|
UTF-8
| 466
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python
from imutils import perspective
import numpy as np
import cv2
img = cv2.imread("../tmp/2.jpg", cv2.IMREAD_COLOR)
img_clone = img.copy()
cv2.imshow("before", img_clone)
pts = np.array([(73, 239), (356, 117), (475, 265), (187, 443)])
for (x, y) in pts:
cv2.circle(img_clone, (x, y), 5, (0, 255, 0), -1)
warped = perspective.four_point_transform(img, pts)
cv2.imshow("after", warped)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true
|
9b91eb61753c64771fef31e4d202d698a31701ec
|
Python
|
dstark85/mitPython
|
/simple_programs/guess_number.py
|
UTF-8
| 1,307
| 4.40625
| 4
|
[] |
no_license
|
# a simple number guessing program
# 0 - 100 (exclusive)
def quick_log(base, n):
    ''' Integer logarithm of n in the given base, rounded up '''
    count = 0
    power = 1
    while power < n:
        count += 1
        power *= base
    return count
def guess_number():
print("Think of a number between 0 and 100!")
prompt = '''Enter 'h' to indicate the guess is too high.
Enter 'l' to indicate the guess is too low.
Enter 'c' to indicate I guessed correctly
'''
acceptable_responses = 'hlc'
low = 0
high = 100
g = (low + high) // 2
guess_attempts = 0
attempts_needed = quick_log(2, 100)
while True:
print("Is your secret number " + str(g) + '?')
response = input(prompt)
guess_attempts += 1
if guess_attempts > attempts_needed: # Beware of cheaters!
print("LIAR!!")
break
while response[0] not in acceptable_responses: # user better cooperate
print("I don't recognize what you entered.")
response = input(prompt)
if response[0] == 'h':
high = g
elif response[0] == 'l':
low = g
else:
print("Game over. Your secret number was: " + str(g))
return g
g = (low + high) // 2
guess_number()
| true
|
0bd9ef0789717f159ddd05c1f234c1572bb1f032
|
Python
|
tritechsc/minecraft-rpi
|
/python-examples/turret_cwc.py
|
UTF-8
| 2,923
| 2.703125
| 3
|
[] |
no_license
|
from mcpi.minecraft import Minecraft
from mcpi import block
from time import sleep
def init():
mc = Minecraft.create("127.0.0.1", 4711)
x, y, z = mc.player.getPos()
return mc
def gun(mc,x,y,z,direction,mussle_length):
print("mussle_length ",mussle_length)
#WOOD_PLANKS 5 GLASS 20 gold 41
m = 20 # glass
if direction == "n" or direction == "s":
#change z
if direction == "n":
p = 1 #p is parity
else:
p = -1
print(" x,y,z ",x,y,z)
mc.postToChat("THE CANNON")
mc.setBlocks(x-2,y,z-2,x+2,y+5,z+2,41)
mc.setBlocks(x-1,y-1,z-1,x+1,y+5,z+1,0)
mc.setBlock(x,y+4,z-2,20)
mc.setBlock(x,y+4,z+2,20)
mc.setBlock(x+2,y+4,z-2,20)
mc.setBlock(x-2,y+4,z+2,20)
for l in range(2,mussle_length):
ld = l * p
m = 42
mc.setBlock(x-2,y+3,z+ld,41)
mc.setBlock(x,y+3,z+ld,41)
mc.setBlock(x+2,y+3,z+ld,41)
print(ld)
if direction == "w" or direction == "e":
pass
def main():
mc = init()
#mc.player.setPos(0, 50, 0)
x, y, z = mc.player.getPos()
mc.player.setPos(x, y, z)
direction = input("Input dock direction n, s, e or w ")
mussle_length = 10
gun(mc,x,y,z,direction,mussle_length)
main()
# multiple line comment
"""xc
AIR 0
STONE 1
GRASS 2
DIRT 3
COBBLESTONE 4
WOOD_PLANKS 5
SAPLING 6
BEDROCK 7
WATER_FLOWING 8
WATER 8
WATER_STATIONARY 9
LAVA_FLOWING 10
LAVA 10
LAVA_STATIONARY 11
SAND 12
GRAVEL 13
GOLD_ORE 14
IRON_ORE 15
COAL_ORE 16
WOOD 17
LEAVES 18
GLASS 20
LAPIS_LAZULI_ORE 21
LAPIS_LAZULI_BLOCK 22
SANDSTONE 24
BED 26
COBWEB 30
GRASS_TALL 31
WOOL 35
FLOWER_YELLOW 37
FLOWER_CYAN 38
MUSHROOM_BROWN 39
MUSHROOM_RED 40
GOLD_BLOCK 41
IRON_BLOCK 42
STONE_SLAB_DOUBLE 43
STONE_SLAB 44
BRICK_BLOCK 45
TNT 46
BOOKSHELF 47
MOSS_STONE 48
OBSIDIAN 49
TORCH 50
FIRE 51
STAIRS_WOOD 53
CHEST 54
DIAMOND_ORE 56
DIAMOND_BLOCK 57
CRAFTING_TABLE 58
FARMLAND 60
FURNACE_INACTIVE 61
FURNACE_ACTIVE 62
DOOR_WOOD 64
LADDER 65
STAIRS_COBBLESTONE 67
DOOR_IRON 71
REDSTONE_ORE 73
SNOW 78
ICE 79
SNOW_BLOCK 80
CACTUS 81
CLAY 82
SUGAR_CANE 83
FENCE 85
GLOWSTONE_BLOCK 89
BEDROCK_INVISIBLE 95
STONE_BRICK 98
GLASS_PANE 102
MELON 103
FENCE_GATE 107
GLOWING_OBSIDIAN 246
NETHER_REACTOR_CORE 247
"""
| true
|
eb509b4d4127ceb003a7f3191b8f8d0cbfa7840e
|
Python
|
sourcery-ai-bot/eniric
|
/scripts/untar_here.py
|
UTF-8
| 605
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
"""
untar_here.py
-------------
Bundled script to un-tar the eniric data downloaded.
Uses the tarfile module to extract the data.
"""
import argparse
import sys
import tarfile
def _parser():
"""Take care of all the argparse stuff."""
parser = argparse.ArgumentParser(description="Extract from a tar file.")
parser.add_argument("filename", help="File to untar.", type=str, default="")
return parser.parse_args()
if __name__ == "__main__":
filename = _parser().filename
with tarfile.open(filename, "r") as tar:
tar.extractall()
sys.exit(0)
| true
|
dca2c441e2fbd0e934a6dc2f905942cb68841011
|
Python
|
GANESH0080/Python-Practice-Again
|
/AssignamentOperators/AssignmentSeven.py
|
UTF-8
| 48
| 2.9375
| 3
|
[] |
no_license
|
x = 6
x **= 3
print(x)
xx = 5
xx//=2
print(xx)
| true
|
5e0446fe3d4073f735e647a0fcdb1ef7b23be240
|
Python
|
kandrosov/correctionlib
|
/src/correctionlib/schemav2.py
|
UTF-8
| 8,098
| 2.9375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
from typing import Any, List, Optional, Union
from pydantic import BaseModel, Field, StrictInt, StrictStr, validator
try:
from typing import Literal # type: ignore
except ImportError:
from typing_extensions import Literal
VERSION = 2
class Model(BaseModel):
class Config:
extra = "forbid"
class Variable(Model):
"""An input or output variable"""
name: str
type: Literal["string", "int", "real"] = Field(
description="A string, a 64 bit integer, or a double-precision floating point value"
)
description: Optional[str] = Field(
description="A nice description of what this variable means"
)
# py3.7+: ForwardRef can be used instead of strings
Content = Union[
"Binning", "MultiBinning", "Category", "Formula", "FormulaRef", "Transform", float
]
class Formula(Model):
"""A general formula type"""
nodetype: Literal["formula"]
expression: str
parser: Literal["TFormula"]
variables: List[str] = Field(
description="The names of the correction input variables this formula applies to"
)
parameters: Optional[List[float]] = Field(
description="Parameters, if the parser supports them (e.g. [0] for TFormula)"
)
class FormulaRef(Model):
"""A reference to one of the Correction generic_formula items, with specific parameters"""
nodetype: Literal["formularef"]
index: int = Field(
description="Index into the Correction.generic_formulas list", ge=0
)
parameters: List[float] = Field(
description="Same interpretation as Formula.parameters"
)
class Transform(Model):
"""A node that rewrites one real or integer input according to a rule as given by a content node
Any downstream nodes will see a different value for the rewritten input
If the input is an integer type, the rule output will be cast from a
double to integer type before using. These should be used sparingly and at
high levels in the tree, since they require an allocation.
"""
nodetype: Literal["transform"]
input: str = Field(description="The name of the input to rewrite")
rule: Content = Field(description="A subtree that implements the rewrite rule")
content: Content = Field(
description="A subtree that will be evaluated with transformed values"
)
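    # Illustrative example of a transform node (only the field names above come
    # from this schema; the concrete values are made up):
    #   {"nodetype": "transform", "input": "pt", "rule": 500.0, "content": 1.0}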
class Binning(Model):
"""1-dimensional binning in an input variable"""
nodetype: Literal["binning"]
input: str = Field(
description="The name of the correction input variable this binning applies to"
)
edges: List[float] = Field(
description="Edges of the binning, where edges[i] <= x < edges[i+1] => f(x, ...) = content[i](...)"
)
content: List[Content]
flow: Union[Content, Literal["clamp", "error"]] = Field(
description="Overflow behavior for out-of-bounds values"
)
@validator("edges")
def validate_edges(cls, edges: List[float], values: Any) -> List[float]:
for lo, hi in zip(edges[:-1], edges[1:]):
if hi <= lo:
raise ValueError(f"Binning edges not monotone increasing: {edges}")
return edges
@validator("content")
def validate_content(cls, content: List[Content], values: Any) -> List[Content]:
if "edges" in values:
nbins = len(values["edges"]) - 1
if nbins != len(content):
raise ValueError(
f"Binning content length ({len(content)}) is not one larger than edges ({nbins + 1})"
)
return content
class MultiBinning(Model):
"""N-dimensional rectangular binning"""
nodetype: Literal["multibinning"]
inputs: List[str] = Field(
description="The names of the correction input variables this binning applies to",
min_items=1,
)
edges: List[List[float]] = Field(description="Bin edges for each input")
content: List[Content] = Field(
description="""Bin contents as a flattened array
This is a C-ordered array, i.e. content[d1*d2*d3*i0 + d2*d3*i1 + d3*i2 + i3] corresponds
to the element at i0 in dimension 0, i1 in dimension 1, etc. and d0 = len(edges[0]), etc.
"""
)
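    # For example (illustrative values only): with edges = [[0, 1, 2, 3], [0, 10, 20]]
    # there are 3 x 2 bins and content[i0*2 + i1] holds the value for bin (i0, i1).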
flow: Union[Content, Literal["clamp", "error"]] = Field(
description="Overflow behavior for out-of-bounds values"
)
@validator("edges")
def validate_edges(cls, edges: List[List[float]], values: Any) -> List[List[float]]:
for i, dim in enumerate(edges):
for lo, hi in zip(dim[:-1], dim[1:]):
if hi <= lo:
raise ValueError(
f"MultiBinning edges for axis {i} are not monotone increasing: {dim}"
)
return edges
@validator("content")
def validate_content(cls, content: List[Content], values: Any) -> List[Content]:
if "edges" in values:
nbins = 1
for dim in values["edges"]:
nbins *= len(dim) - 1
if nbins != len(content):
raise ValueError(
f"MultiBinning content length ({len(content)}) does not match the product of dimension sizes ({nbins})"
)
return content
class CategoryItem(Model):
"""A key-value pair
The key type must match the type of the Category input variable
"""
key: Union[StrictInt, StrictStr]
value: Content
class Category(Model):
"""A categorical lookup"""
nodetype: Literal["category"]
input: str = Field(
description="The name of the correction input variable this category node applies to"
)
content: List[CategoryItem]
default: Optional[Content]
@validator("content")
def validate_content(cls, content: List[CategoryItem]) -> List[CategoryItem]:
if len(content):
keytype = type(content[0].key)
if not all(isinstance(item.key, keytype) for item in content):
raise ValueError(
f"Keys in the Category node do not have a homogenous type, expected all {keytype}"
)
keys = {item.key for item in content}
if len(keys) != len(content):
raise ValueError("Duplicate keys detected in Category node")
return content
Transform.update_forward_refs()
Binning.update_forward_refs()
MultiBinning.update_forward_refs()
CategoryItem.update_forward_refs()
Category.update_forward_refs()
class Correction(Model):
name: str
description: Optional[str] = Field(
description="Detailed description of the correction"
)
version: int = Field(
description="Some value that may increase over time due to bugfixes"
)
inputs: List[Variable] = Field(
description="The function signature of the correction"
)
output: Variable = Field(description="Output type for this correction")
generic_formulas: Optional[List[Formula]] = Field(
description="""A list of common formulas that may be used
For corrections with many parameterized formulas that follow a regular pattern,
the expression and inputs can be declared once with a generic formula, deferring the parameter
declaration to the more lightweight FormulaRef nodes. This can speed up both loading and evaluation
of the correction object
"""
)
data: Content = Field(description="The root content node")
@validator("output")
def validate_output(cls, output: Variable) -> Variable:
if output.type != "real":
raise ValueError(
"Output types other than real are not supported. See https://github.com/nsmith-/correctionlib/issues/12"
)
return output
class CorrectionSet(Model):
schema_version: Literal[VERSION] = Field(description="The overall schema version")
corrections: List[Correction]
if __name__ == "__main__":
import os
import sys
dirname = sys.argv[-1]
with open(os.path.join(dirname, f"schemav{VERSION}.json"), "w") as fout:
fout.write(CorrectionSet.schema_json(indent=4))
| true
|
e8f12d8223265d2225aa613fbf2257d2340c2509
|
Python
|
kartikwar/programming_practice
|
/lists/others/overlapping_intervals.py
|
UTF-8
| 937
| 4.0625
| 4
|
[] |
no_license
|
'''
Given a collection of intervals, merge all overlapping intervals.
For example:
Given [1,3],[2,6],[8,10],[15,18],
return [1,6],[8,10],[15,18].
Make sure the returned intervals are sorted.
'''
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution:
# @param intervals, a list of Intervals
# @return a list of Interval
def merge(self, intervals):
intervals = sorted(intervals, key=lambda y:y.start)
i = 0
while i < len(intervals) -1:
ele1, ele2 = intervals[i], intervals[i+1]
a,b = ele1.start, ele1.end
c,d = ele2.start, ele2.end
if max(a,c) > min(b,d):
i = i +1
else:
intervals[i] = Interval(min([a,c]), max(b,d))
intervals.pop(i+1)
return intervals
if __name__ == '__main__':
sol = Solution()
A = [ (1, 10), (2, 9), (3, 8), (4, 7), (5, 6), (6, 6) ]
intervals = []
for a in A:
intervals.append(Interval(a[0], a[1]))
    print([(i.start, i.end) for i in sol.merge(intervals)])
| true
|
e637e3d6d1f760dca64da849a29a5cbd37fd5f9a
|
Python
|
hahalaugh/LeetCode
|
/509_Fibonacci Number.py
|
UTF-8
| 854
| 3.203125
| 3
|
[] |
no_license
|
class Solution(object):
def fib(self, n):
# Recursive
if n <= 1:
return n
p1 = 1
p2 = 0
fb = 0
for i in range(n - 2 + 1):
fb = p1 + p2
p2 = p1
p1 = fb
return fb
def fibTopDown(self, n):
"""
        :type n: int
:rtype: int
"""
d = {}
def f(n):
if n in d: return d[n]
elif n <= 1:
return n
else:
d[n] = f(n - 1) + f(n - 2)
return d[n]
return f(n)
def fibBottomUp(self, n):
if n <= 1:
return n
a = [0, 1]
for i in range(2, n + 1):
a.append(a[-1] + a[-2])
return a[n]
| true
|
cafe98c11b1a75d9dd0a41a34661b7165e7e512b
|
Python
|
ricardorohde/price_miner
|
/extractor/main.py
|
UTF-8
| 1,643
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
from argparse import ArgumentParser
from config import PRICE_MINER_HOST, BODY_REQUEST
from json.decoder import JSONDecodeError
import requests
import time
import pandas as pd
all_data = list()
def extract(number_of_items=1):
while True:
response = requests.post(PRICE_MINER_HOST + '/mine', json=BODY_REQUEST)
        if response.content != b'tasks already running, try again later':
task_id = response.content
while True:
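                # Poll the mining job every 20 seconds until the server response
                # contains the 'content' key with the scraped payload.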
request = requests.get(PRICE_MINER_HOST + '/mine', params={'job_id': task_id.decode("utf-8")})
print(request.content)
if request.status_code == 200 and 'content' in request.json():
break
time.sleep(20)
data = request.json()
all_data.extend(data['content']['content'])
BODY_REQUEST['url'] = data['content']['last_url']
BODY_REQUEST['blacklist'] = [item['title'] for item in all_data]
if len(all_data) >= number_of_items:
data_holder = list()
for item in all_data:
temp = {'title': item['title']}
for key, value in item['data'].items():
temp[key] = value
data_holder.append(temp)
pd.DataFrame(data_holder).to_csv('aliexpress_data.csv')
break
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--num', required=False, type=int, help='number of items to extract')
args = parser.parse_args()
if args:
extract(args.num)
else:
extract()
| true
|
bd39c20f7d5ae57eae00d92465564ab930b7158f
|
Python
|
yuqiuming2000/Python-code
|
/爬虫精进/第6关/第6关xlsx文件的读写.py
|
UTF-8
| 448
| 3.234375
| 3
|
[] |
no_license
|
import openpyxl
wb=openpyxl.Workbook()
sheet=wb.active
sheet.title='new title'
sheet['A1'] = 'Marvel Universe'
rows = [['Captain America', 'Iron Man', 'Spider-Man'], ['are', 'Marvel', 'Universe', 'classic', 'characters']]
for i in rows:
sheet.append(i)
print(rows)
wb.save('Marvel.xlsx')
wb = openpyxl.load_workbook('Marvel.xlsx')
sheet = wb['new title']
sheetname = wb.sheetnames
print(sheetname)
A1_cell = sheet['A1']
A1_value = A1_cell.value
print(A1_value)
| true
|
270f8d22e4de30b9b46807a90e2e0413b6ab4d49
|
Python
|
kk0walski/Stratego
|
/Board.py
|
UTF-8
| 5,456
| 3.15625
| 3
|
[] |
no_license
|
import numpy as np
class Board:
board = None
size = 0
player1 = 0
player2 = 0
player1Color = 1
player2Color = 2
def __init__(self, size, player1=0,player2=0,board=None):
self.size = size
if board is None:
self.board = np.zeros(shape=(self.size, self.size), dtype=np.int)
else:
self.board = board
self.player1=player1
self.player2=player2
def clone(self):
        return Board(self.size, self.player1, self.player2, np.copy(self.board))
def move(self, row, columm, color):
points = 0
if row < self.size and row >= 0 and columm < self.size and columm >= 0:
if self.board[row,columm] == 0:
self.board[row,columm] = color
points = self.getPoints(row, columm, color)
if self.player1Color == color:
self.player1 += points
else:
self.player2 += points
return points, True
else:
return points, False
else:
return points, False
def getField(self, row, column):
return self.board[row,column]
def getDiagonalFirst(self, board, row, column):
lista = []
positions = []
for i in range(1,self.size):
if row-i >= 0 and column-i >= 0:
if board[row - i, column - i] == 0:
positions.append((row-i,column-i))
lista.append(board[row-i,column-i])
if row+i < self.size and column+i < self.size:
if board[row + i, column + i] == 0:
positions.append((row + i, column + i))
lista.append(board[row + i, column + i])
lista.append(board[row,column])
if board[row,column] == 0:
positions.append((row,column))
return lista, positions
def getDiagonalSecond(self, board, row, column):
lista = []
positions = []
for i in range(1, self.size):
if row - i >= 0 and column + i < self.size:
if board[row - i, column + i] == 0:
positions.append((row-i,column+i))
lista.append(board[row - i, column + i])
if row + i < self.size and column - i >= 0:
if board[row + i, column - i] == 0:
positions.append((row+i,column-i))
lista.append(board[row + i, column - i])
lista.append(board[row, column])
if board[row,column] == 0:
positions.append((row,column))
return lista, positions
def getDiagonals(self, board, row, column):
lista1, columns1 = self.getDiagonalFirst(board,row,column)
lista2, columns2 = self.getDiagonalSecond(board, row, column)
return [lista1,lista2],[columns1,columns2]
def getAllDiagonals(self, board):
lists = []
columns = []
for i in range(self.size):
listTemp,columnTemp = self.getDiagonals(board, 0, i)
lists += listTemp
columns += columnTemp
listTemp, columnTemp = self.getDiagonalFirst(board, i, 0)
lists += [listTemp]
columns += [columnTemp]
listTemp, columnTemp = self.getDiagonalSecond(board, self.size - 1, i)
lists += [listTemp]
columns += [columnTemp]
return lists, columns
def getRowsColumnsPoint(self, board):
for i in range(self.size):
lista = list(board[i])
if lista.count(0) == 1:
return (i,lista.index(0))
lista = list(board[:,i])
if lista.count(0) == 1:
return (lista.index(0),i)
return (-1,-1)
def getRowZeroPoints(self, row, board):
positions = []
if row < self.size and row >= 0:
myRow = board[row]
for i in range(0, self.size):
if myRow[i] == 0:
positions.append((row,i))
return positions
def getColumnZeroPoints(self, column,board):
positions = []
if column < self.size and column >= 0:
myColumn = board[:,column]
for i in range(0, self.size):
if myColumn[i] == 0:
positions.append((i,column))
return positions
def getRowsColumnsPoints(self, board):
reasult = []
for i in range(self.size):
reasult.append(self.getRowZeroPoints(i, board))
reasult.append(self.getColumnZeroPoints(i, board))
return reasult
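    # getPoints: a move at (row, column) earns one point per stone of `color` in
    # every row, column or diagonal (of length > 1) that the move completes.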
def getPoints(self, row, column, color):
points = 0
if list(self.board[row]).count(0) == 0:
points += (self.board[row] == color).sum()
if list(self.board[:,column]).count(0) == 0:
points += (self.board[:,column] == color).sum()
diagonal1, temp = self.getDiagonalFirst(self.board, row, column)
if diagonal1.count(0) == 0 and len(diagonal1) > 1:
points += diagonal1.count(color)
diagonal2, temp = self.getDiagonalSecond(self.board, row, column)
if diagonal2.count(0) == 0 and len(diagonal2) > 1:
points += diagonal2.count(color)
return points
def isEnd(self):
return np.count_nonzero(self.board == 0) == 0
def getState(self):
return "Player1: " + str(self.player1) + " Player2: " + str(self.player2)
| true
|
06d7d3f0d2222510208f7ce4e83433735bbc4431
|
Python
|
andrewhall123/savedfiles
|
/lucky.py
|
UTF-8
| 407
| 2.90625
| 3
|
[] |
no_license
|
# python 3
import requests,sys,webbrowser, bs4
print('Googling...')
res=requests.get('http://google.com/search?q=' + ''.join(sys.argv[1:]))
res.raise_for_status()
#retrieve top search result
soup = bs4.BeautifulSoup(res.text, 'html.parser')
#open a browser for each result
linkElems=soup.select('.r a')
numOpen=min(5,len(linkElems))
for i in range(numOpen):
webbrowser.open('http://google.com'+linkElems[i].get('href'))
| true
|
f77b1e4034f1743d41c52f047624ced64499b8b1
|
Python
|
aprebyl1/DSC510Spring2020
|
/BLACK_DSC510/JBlack Week 3.py
|
UTF-8
| 2,411
| 4.34375
| 4
|
[] |
no_license
|
# course: DSC510
# assignment: 3.1
# due date: 3/29/2020
# name: Jessica Black
# this program will do the following:
# Display a welcome message for your program
# Retrieve the company name from the user
# Get the number of feet of fiber optic cable to be installed from the user.
# Evaluate the total cost based upon the number of feet requested.
# Display the calculated information including the number of feet requested and company name.
# One - Display welcome message
user_name = input('Hello, user! Thanks for visiting Fiber Optic Inc. We look forward to assisting you. What is your name?')
print(f'Welcome, {user_name}')
# Two - Retrieve Company Name
Company_Name = input('What is your company name?:\n')
print(f'Welcome {Company_Name}!')
# Three - Retrieve the number of feet of fiber optic cable to be installed from the user
Cable_Length = input('How much feet of fiber optic cable needs to be installed?\n')
print(f'Got it, you need {Cable_Length} feet of fiber optic cable.')
# You will prompt the user for the number of fiber optic cable they need installed.
# Using the default value of $0.87 calculate the total expense.
# If the user purchases more than 100 feet they are charged $0.80 per foot.
# If the user purchases more than 250 feet they will be charged $0.70 per foot.
# If they purchase more than 500 feet, they will be charged $0.50 per foot.
Default_Price = .87
Price_100_Feet = .80
Price_250_Feet = .70
Price_500_Feet = .50
Cable_Needed = float(Cable_Length)
if Cable_Needed > 500:
    print(f'For {Cable_Needed} feet of fiber optic cable, you will be charged $.50 per foot.')
elif Cable_Needed > 250:
    print(f'For {Cable_Needed} feet of fiber optic cable, you will be charged $.70 per foot.')
elif Cable_Needed > 100:
    print(f'For {Cable_Needed} feet of fiber optic cable, you will be charged $.80 per foot.')
else:
    print(f'For {Cable_Needed} feet of fiber optic cable, you will be charged $.87 per foot.')
print()
if Cable_Needed > 500:
    Total_Cost = (.50 * Cable_Needed)
elif Cable_Needed > 250:
    Total_Cost = (.70 * Cable_Needed)
elif Cable_Needed > 100:
    Total_Cost = (.80 * Cable_Needed)
else:
    Total_Cost = (.87 * Cable_Needed)
print(f'For {Cable_Needed} feet of cable, your total installation cost will be ${Total_Cost}')
print(f'Thank you, {user_name} with {Company_Name}! Fiber Optic Inc. looks forward to working with you.')
| true
|
9f4b6220a95b8f1037d4ae13cf1beebfb44a460f
|
Python
|
Nixer/lesson02
|
/age.py
|
UTF-8
| 532
| 4.03125
| 4
|
[] |
no_license
|
input_age = int(input("Enter your age: "))
def age(age):
    if age < 7:
        return "You go to kindergarten"
    elif 6 < age < 17:
        return "You are in school"
    elif 16 < age < 21:
        return "You study at a university"
    elif 20 < age < 60:
        return "You are working"
    elif age > 59:
        return "You are retired"
    else:
        return "Invalid age entered"
print(age(input_age))
| true
|
7ba7fc8e3dcbd8c1a84ed4ab9fbdea122b66f805
|
Python
|
pflun/advancedAlgorithms
|
/Wish-findTotalCount.py
|
UTF-8
| 1,100
| 3.546875
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# There are n people in a race; how many distinct ranking outcomes are there?
# Each person can finish alone in their own group, or tie with others as a group.
# For example, with n = 2 and two people A and B there are 3 possible results:
#   A first, B second
#   B first, A second
#   A and B tied for first as a group
# For n = 3 there are 13 possibilities.
# dp[0] = 1
# dp[1] = 1
# dp[i] = number of ranking outcomes for i people
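# Recurrence: dp[i] = sum over 0 <= k < i of C(i, i-k) * dp[k]
# (choose the i-k people who share one ranking slot, then rank the remaining k).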
class Solution(object):
def findTotalCount(self, n):
dp = [0 for _ in range(n + 1)]
dp[0] = 1
dp[1] = 1
for i in range(2, n + 1):
for k in range(i):
dp[i] += self.helper(i, i - k) * dp[k]
        return dp[n]
def helper(self, n, k):
res = 1
        for i in range(n - k + 1, n + 1):
res *= i
for i in range(1, k + 1):
res /= i
return res
test = Solution()
print test.findTotalCount(3)
| true
|
913e96ad475cc9c86076847f4005e26b1484fc96
|
Python
|
JacobHippo/age
|
/age.py
|
UTF-8
| 372
| 3.59375
| 4
|
[] |
no_license
|
drive = input('Have you ever driven a car? ')
if drive != 'yes' and drive != 'no':
    print('Please enter yes/no only')
    raise SystemExit
age = input('How old are you? ')
age = int(age)
if drive == 'yes':
    if age >= 18:
        print('You passed the test')
    else:
        print('You broke the law')
elif drive == 'no':
    if age >= 18:
        print('Rotten strawberry')  # slang for someone soft / a pushover
    else:
        print('Get lost')
| true
|
c5452f3fc04c5b7153ee376ba5e1799444cd3fbd
|
Python
|
eraserhead0705/travel_agency
|
/travel_agency_app/tests/test_factory.py
|
UTF-8
| 1,218
| 2.734375
| 3
|
[] |
no_license
|
from django.test import TestCase
from travel_agency_app.models import Availability, Location, GeoLocation, TourCapacity, TourPackage
class LocationTestCase(TestCase):
def setUp(self):
loc = Location.objects.create(location_name="north pole", is_captial=True)
        GeoLocation.objects.create(latitude=10.4805937, longitude=-66.90360629999999, location=loc)
def test_location_model(self):
north_pole = Location.objects.get(location_name="north pole")
lat = north_pole.geolocation.latitude
self.assertEqual(north_pole.location_name, "north pole")
self.assertEqual(lat, 10.4805937)
class TourPackageTestCase(TestCase):
def setUp(self):
tour = TourPackage.objects.create(name='Arctic Adventure', description='Lets freeze!', price=3000, registries=15)
TourCapacity.objects.create(capacity=20, tourpackage=tour)
available =Availability.objects.create(availability="2021-07-05")
available.add(tour)
def test_tour_package_model(self):
arctic = TourPackage.objects.get(name='Arctic Adventure')
self.assertNotEqual(arctic.name, "Arctic")
self.assertEqual(arctic.availability.availability, "2021-07-05")
| true
|
de306e8ac94b4310081e201d3a091734f342cee9
|
Python
|
YiseBoge/CompetitiveProgramming2
|
/Contests/Contest8/p2.py
|
UTF-8
| 530
| 3.109375
| 3
|
[] |
no_license
|
import sys
class Solution:
def minimumDeletions(self, s: str) -> int:
result = sys.maxsize
total_a = 0
for el in s:
if el == 'a':
total_a += 1
a_count = b_count = 0
for el in s:
if el == 'a':
a_count += 1
elif el == 'b':
remaining = total_a - a_count
result = min(result, remaining + b_count)
b_count += 1
result = min(result, b_count)
return result
| true
|
a4a60daa7d186514eb7ab2ee6093ec7ee4a269ee
|
Python
|
ehaupt/fastest_pkg
|
/fastest_pkg/fastest_pkg.py
|
UTF-8
| 4,572
| 2.765625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Author : Emanuel Haupt <ehaupt@FreeBSD.org>
Purpose : Find the fastest pkg mirror
License : BSD3CLAUSE
"""
import argparse
import json
from operator import itemgetter
from sys import stderr as STREAM
from typing import Dict
from urllib.parse import urlparse
import dns.resolver
import pycurl
from fastest_pkg.utils.human_bytes import HumanBytes
from fastest_pkg.utils.pkg_mirror import PkgMirror
def speedtest(url: str, args: Dict):
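    # Downloads `url` once with pycurl and returns the measured average download
    # speed in bytes per second, as reported by pycurl (0 if the transfer fails).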
parsed_url = urlparse(url)
# download location
path = "/dev/null"
    # callback function for curl.XFERINFOFUNCTION
def status(download_t, download_d, upload_t, upload_d):
STREAM.write(
"{}: {}%\r".format(
parsed_url.netloc,
str(int(download_d / download_t * 100) if download_t > 0 else 0),
)
)
STREAM.flush()
# download file using pycurl
speed_download = 0
with open(path, "wb") as f:
curl = pycurl.Curl()
curl.setopt(curl.URL, url)
curl.setopt(curl.WRITEDATA, f)
# display progress
if args["verbose"]:
curl.setopt(curl.NOPROGRESS, False)
curl.setopt(curl.XFERINFOFUNCTION, status)
else:
curl.setopt(curl.NOPROGRESS, True)
curl.setopt(pycurl.CONNECTTIMEOUT, int(args["timeout"] / 1000))
curl.setopt(pycurl.TIMEOUT_MS, args["timeout"])
try:
curl.perform()
except Exception as error:
if args["verbose"]:
# keep progress onscreen after error
print()
# print error
print(error, file=STREAM)
speed_download = curl.getinfo(pycurl.SPEED_DOWNLOAD)
curl.close()
# keeps progress onscreen after download completes
if args["verbose"] and speed_download > 0:
print()
# print download speed
if not args["json"]:
print(
(
"%s: %s/s"
% (parsed_url.netloc, (HumanBytes.format(speed_download, metric=True)))
)
)
return speed_download
def get_mirrors():
"""returns a list of all mirrors for pkg.freebsd.org"""
resolver = dns.resolver.Resolver()
try:
pkg_mirrors = resolver.resolve("_http._tcp.pkg.all.freebsd.org", "SRV")
except AttributeError:
pkg_mirrors = resolver.query("_http._tcp.pkg.all.freebsd.org", "SRV")
return pkg_mirrors
def argument_parser():
"""Parsers CLI arguments and displays help text, handles all the Cli stuff"""
parser = argparse.ArgumentParser(
description="Script for finding and configuring fastest FreeBSD pkg mirror"
)
parser.add_argument(
"-j",
"--json",
action="store_true",
help="only show basic information in JSON format",
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="be more verbose",
)
parser.add_argument(
"-t",
"--timeout",
type=int,
default=5000,
help="timeout in ms",
)
argument = vars(parser.parse_args())
return argument
def main():
"""script starts here"""
cli_arguments = argument_parser()
stats = []
mirrors = get_mirrors()
for mirror in mirrors:
if mirror.priority > 10:
pkg = PkgMirror(mirror.target.to_text(omit_final_dot=True))
bytes_per_second = speedtest(url=pkg.get_urls()[0], args=cli_arguments)
mirror_name = mirror.target.to_text(omit_final_dot=True)
stats.append(
{
"mirror_name": mirror_name,
"bytes_per_second": bytes_per_second,
}
)
stats_sorted = sorted(stats, key=itemgetter("bytes_per_second"), reverse=True)
if cli_arguments["json"]:
print(json.dumps(stats_sorted))
else:
pkg = PkgMirror(stats_sorted[0]["mirror_name"])
pkg_cfg = 'FreeBSD: { url: "http://%s/${ABI}/%s", mirror_type: "NONE" }' % (
stats_sorted[0]["mirror_name"],
pkg.release,
)
print(
"\nFastest:\n%s: %s/s"
% (
stats_sorted[0]["mirror_name"],
HumanBytes.format(stats_sorted[0]["bytes_per_second"], metric=True),
)
)
print("\n")
print("Write configuration:")
print("mkdir -p /usr/local/etc/pkg/repos/")
print("echo '" + pkg_cfg + "' \\\n\t> /usr/local/etc/pkg/repos/FreeBSD.conf")
print("\n")
| true
|
db8ac4b383beb1052fc2f9a59cd95fd7f3d77268
|
Python
|
django/django-localflavor
|
/localflavor/uy/uy_departments.py
|
UTF-8
| 532
| 2.609375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#: A list of Uruguayan departments as `choices` in a formfield.
DEPARTMENT_CHOICES = (
('G', 'Artigas'),
('A', 'Canelones'),
('E', 'Cerro Largo'),
('L', 'Colonia'),
('Q', 'Durazno'),
('N', 'Flores'),
('O', 'Florida'),
('P', 'Lavalleja'),
('B', 'Maldonado'),
('S', 'Montevideo'),
('I', 'Paysandú'),
('J', 'Río Negro'),
('F', 'Rivera'),
('C', 'Rocha'),
('H', 'Salto'),
('M', 'San José'),
('K', 'Soriano'),
('R', 'Tacuarembó'),
('D', 'Treinta y Tres'),
)
| true
|
d3ee4d64470f084837ee001080e0f2f30663cfcf
|
Python
|
mmaduabum/Pi-Epsilon-Psi
|
/our_svm.py
|
UTF-8
| 8,157
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import utils
import sys
import random
import time
import features
import operator
import numpy as np
from sklearn import cross_validation
from sklearn import svm
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
"""Our multi-class classifier
Uses 15 internal SVMs: 5 using the one vs others method and
10 more for each pair of classes."""
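# Prediction strategy (see our_predict below): run the five one-vs-rest SVMs;
# if exactly one class fires, return it; if exactly two fire, defer to the
# corresponding pairwise SVM; otherwise fall back to the baseline multi-class SVM.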
class Our_SVM:
def __init__(self, use_glove=False, unigrams=False):
self.unigrams = unigrams
self.submodels = []
self.use_glove = use_glove
self.test_data = utils.get_test_data()
self.ONEvALL = 0
self.TWOvALL = 1
self.THREEvALL = 2
self.FOURvALL = 3
self.FIVEvALL = 4
self.ONEvTWO = 5
self.ONEvTHREE = 6
self.ONEvFOUR = 7
self.ONEvFIVE = 8
self.TWOvTHREE = 9
self.TWOvFOUR = 10
self.TWOvFIVE = 11
self.THREEvFOUR = 12
self.THREEvFIVE = 13
self.FOURvFIVE = 14
def train_submodels(self, train_data):
print "Building Datasets..."
if self.unigrams: features.init_unigram_features(train_data)
input_data = features.generate_feature_vectors(train_data, self.use_glove, self.unigrams)
all_targets = [int(ex[1]) for ex in train_data]
self.baseline_model = self.train_svms(input_data, all_targets)
#train the one vs others classifiers
for i in range(5):
star = i + 1
target_data = [1 if int(ex[1]) == star else 0 for ex in train_data]
self.submodels.append(self.train_svms(input_data, target_data))
#train the binary classifiers for the 10 pairs
#create subsets of the train data that have the relevant ratings
ones_and_twos = [ex for ex in train_data if int(ex[1]) == 1 or int(ex[1]) == 2]
ones_and_threes = [ex for ex in train_data if int(ex[1]) == 1 or int(ex[1]) == 3]
ones_and_fours = [ex for ex in train_data if int(ex[1]) == 1 or int(ex[1]) == 4]
ones_and_fives = [ex for ex in train_data if int(ex[1]) == 1 or int(ex[1]) == 5]
twos_and_threes = [ex for ex in train_data if int(ex[1]) == 2 or int(ex[1]) == 3]
twos_and_fours = [ex for ex in train_data if int(ex[1]) == 2 or int(ex[1]) == 4]
twos_and_fives = [ex for ex in train_data if int(ex[1]) == 2 or int(ex[1]) == 5]
threes_and_fours = [ex for ex in train_data if int(ex[1]) == 3 or int(ex[1]) == 4]
threes_and_fives = [ex for ex in train_data if int(ex[1]) == 3 or int(ex[1]) == 5]
fours_and_fives = [ex for ex in train_data if int(ex[1]) == 4 or int(ex[1]) == 5]
#generate feature vectors for each data subset
input_12 = features.generate_feature_vectors(ones_and_twos, self.use_glove, self.unigrams)
input_13 = features.generate_feature_vectors(ones_and_threes, self.use_glove, self.unigrams)
input_14 = features.generate_feature_vectors(ones_and_fours, self.use_glove, self.unigrams)
input_15 = features.generate_feature_vectors(ones_and_fives, self.use_glove, self.unigrams)
input_23 = features.generate_feature_vectors(twos_and_threes, self.use_glove, self.unigrams)
input_24 = features.generate_feature_vectors(twos_and_fours, self.use_glove, self.unigrams)
input_25 = features.generate_feature_vectors(twos_and_fives, self.use_glove, self.unigrams)
input_34 = features.generate_feature_vectors(threes_and_fours, self.use_glove, self.unigrams)
input_35 = features.generate_feature_vectors(threes_and_fives, self.use_glove, self.unigrams)
input_45 = features.generate_feature_vectors(fours_and_fives, self.use_glove, self.unigrams)
#generate the targets for each data subset
target_12 = [1 if int(ex[1]) == 1 else 2 for ex in ones_and_twos]
target_13 = [1 if int(ex[1]) == 1 else 3 for ex in ones_and_threes]
target_14 = [1 if int(ex[1]) == 1 else 4 for ex in ones_and_fours]
target_15 = [1 if int(ex[1]) == 1 else 5 for ex in ones_and_fives]
target_23 = [2 if int(ex[1]) == 2 else 3 for ex in twos_and_threes]
target_24 = [2 if int(ex[1]) == 2 else 4 for ex in twos_and_fours]
target_25 = [2 if int(ex[1]) == 2 else 5 for ex in twos_and_fives]
target_34 = [3 if int(ex[1]) == 3 else 4 for ex in threes_and_fours]
target_35 = [3 if int(ex[1]) == 3 else 5 for ex in threes_and_fives]
target_45 = [4 if int(ex[1]) == 4 else 5 for ex in fours_and_fives]
print "Data building complete"
        #train an svm for each pair and save it in the class
self.submodels.append(self.train_svms(input_12, target_12))
self.submodels.append(self.train_svms(input_13, target_13))
self.submodels.append(self.train_svms(input_14, target_14))
self.submodels.append(self.train_svms(input_15, target_15))
self.submodels.append(self.train_svms(input_23, target_23))
self.submodels.append(self.train_svms(input_24, target_24))
self.submodels.append(self.train_svms(input_25, target_25))
self.submodels.append(self.train_svms(input_34, target_34))
self.submodels.append(self.train_svms(input_35, target_35))
self.submodels.append(self.train_svms(input_45, target_45))
assert(len(self.submodels) == 15)
#(should be a way to save trained classifiers so we dont need to do this every time)
#http://scikit-learn.org/stable/modules/model_persistence.html
def train_svms(self, input_data, target_data):
print "Training next model..."
state = random.randint(0, int(time.time()))
#Once data has been translated to feature vector and target classes have been decided, train the model
clf = svm.SVC(kernel='linear', C=1).fit(input_data, target_data)
return clf
def score_model(self):
print "scoring..."
answers = [int(ex[1]) for ex in self.test_data]
vecs = features.generate_feature_vectors(self.test_data, self.use_glove, self.unigrams)
predictions = []
for feature_vector in vecs:
predictions.append(self.our_predict(feature_vector))
answers = np.array(answers).reshape(len(answers), 1)
print str(predictions)
predictions = np.array(predictions).reshape(len(predictions), 1)
return (predictions, answers)
    # NOTE: the dispatch below is rough for now; it works but could be cleaner
def our_predict(self, vec):
first_guesses = []
#Run each one vs others classifer
first_guesses.append(self.submodels[self.ONEvALL].predict(vec)[0])
first_guesses.append(self.submodels[self.TWOvALL].predict(vec)[0])
first_guesses.append(self.submodels[self.THREEvALL].predict(vec)[0])
first_guesses.append(self.submodels[self.FOURvALL].predict(vec)[0])
first_guesses.append(self.submodels[self.FIVEvALL].predict(vec)[0])
#check if only one class was predicted
if sum(first_guesses) == 1:
return first_guesses.index(1) + 1
if sum(first_guesses) == 2:
#otherwise, run the pairwise classifiers
first_index = first_guesses.index(1)
class_a = first_index + 1
class_b = first_guesses.index(1, first_index+1) + 1
if (class_a, class_b) == (1, 2):
return self.submodels[self.ONEvTWO].predict(vec)[0]
elif (class_a, class_b) == (1, 3):
return self.submodels[self.ONEvTHREE].predict(vec)[0]
elif (class_a, class_b) == (1, 4):
return self.submodels[self.ONEvFOUR].predict(vec)[0]
elif (class_a, class_b) == (1, 5):
return self.submodels[self.ONEvFIVE].predict(vec)[0]
elif (class_a, class_b) == (2, 3):
return self.submodels[self.TWOvTHREE].predict(vec)[0]
elif (class_a, class_b) == (2, 4):
return self.submodels[self.TWOvFOUR].predict(vec)[0]
elif (class_a, class_b) == (2, 5):
return self.submodels[self.TWOvFIVE].predict(vec)[0]
elif (class_a, class_b) == (3, 4):
return self.submodels[self.THREEvFOUR].predict(vec)[0]
elif (class_a, class_b) == (3, 5):
return self.submodels[self.THREEvFIVE].predict(vec)[0]
elif (class_a, class_b) == (4, 5):
return self.submodels[self.FOURvFIVE].predict(vec)[0]
else:
print "ERROR"
#if sum(first_guesses) > 2: print "things could be happening, but aren't"
return self.baseline_model.predict(vec)[0]
# |
#the baseline predictor does this v by default
"""#If 0, 3, 4, or 5 classes were positive, run all pairwise calssifiers
votes = {1 : 0, 2 : 0, 3 : 0, 4 : 0, 5 : 0}
for i, m in enumerate(self.submodels):
if i < self.ONEvTWO: continue
votes[m.predict(vec)[0]] += 1
return max(votes.iteritems(), key=operator.itemgetter(1))[0]"""
| true
|