blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
313a96903c5efd5d62f7f6d9fc0cfb710f3734d8 | Python | nahyland/RPi---Class | /VelotoPosit.py | UTF-8 | 742 | 3.328125 | 3 | [] | no_license |
## Variable setup ##
min_posit = 0    # Lower extreme position, from input voltage
max_posit = 5    # Upper extreme position, from input voltage
des_posit = 3    # Desired position, based on voltage
dc = 0           # Duty cycle of PWM output to motor

# Range calculation
posit_range = max_posit - min_posit

#- Loop for speed control -#
# NOTE(review): `curr_posit` is never defined or updated anywhere in this
# file -- presumably it should be read from the position-sensing input each
# iteration.  TODO: wire up the actual position source before running.
# NOTE(review): dc is decremented every pass through the loop while in the
# "slowing" bands, so it can go negative -- confirm intended clamping.
while True:
    # Check position relative to lower extreme, as a fraction of full travel.
    dist_check = (curr_posit - des_posit) / posit_range
    if dist_check > 0.5:  # If motor has over half of the max distance to cover
        dc = 95           # Sets speed high to cover more distance
    # Fixed: the original used bitwise `&` instead of `and`, the invalid
    # operator `=/=` instead of `!=`, and omitted the trailing colons.
    elif 0.25 < dist_check <= 0.5:  # Slowing on approach
        dc -= 10
    elif dist_check <= 0.25 and dist_check != 0:  # Tapering velocity to 0
        dc -= 5
    else:
        # At the desired position: hold the current duty cycle.
        pass
#- End Loop -#
f3e45c07f3367b2985b029baab9c4a23086d8820 | Python | westminsterandrew/pyclass | /helloWrld.py/друг.py | UTF-8 | 337 | 4.40625 | 4 | [] | no_license |
# Prompt for the user's name and salary, then award a random percentage raise.
import random

name = input("Enter your name: ")
salary = int(input("Enter your salary: "))

# Pick a raise percentage between 1 and 100 (inclusive).
raise_per = random.randint(1, 100)
raise_amount = salary + salary * (raise_per / 100)

# Report the before/after figures.
print(f"{name}, your current salary is ${salary}")
print(f"Your raise is %{raise_per}")
print(f"{name}, your new salary is ${raise_amount}")
6704ae2d0b5cd980dfb33378f957a7e16a694d31 | Python | milan001/BlindPeopleAssistant | /VehicleDetTrak/Vehicle.py | UTF-8 | 5,609 | 2.5625 | 3 | [] | no_license |
import cv2
import sys
import os
import re
# Default tracker instance; note the main loop below rebinds `tracker` to a
# list of per-detection trackers, so this initial object is never used.
tracker = cv2.TrackerMedianFlow_create()
def bb_intersection_over_union(boxA, boxB):
    """Return the intersection-over-union (IoU) of two boxes.

    Both boxes are ``(x, y, w, h)`` -- top-left corner plus width/height,
    matching the tuples stored in ``prevBbox`` by the tracking loop below.
    The result is a float in ``[0.0, 1.0]``.
    """
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[0] + boxA[2], boxB[0] + boxB[2])
    yB = min(boxA[1] + boxA[3], boxB[1] + boxB[3])
    # Intersection area, clamped to zero: the original could compute a bogus
    # positive area for boxes that do not overlap at all.
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    # Areas of the two input boxes.  For (x, y, w, h) boxes the area depends
    # only on the width/height; the original subtracted the corner coordinate
    # (treating the box as corner-format), which skewed the areas and made the
    # IoU threshold used by the caller unreliable.
    boxAArea = (boxA[2] + 1) * (boxA[3] + 1)
    boxBArea = (boxB[2] + 1) * (boxB[3] + 1)
    # IoU = intersection / union, union = areaA + areaB - intersection.
    return interArea / float(boxAArea + boxBArea - interArea)
# Read video
#video = cv2.VideoCapture("t3.mp4")
# The input video path is taken from the command line.
if(len(sys.argv)<2):
    print("Input Video Name as command line argument.")
    exit()
video=cv2.VideoCapture(sys.argv[1])
# Exit if video not opened.
if not video.isOpened():
    print ("Could not open video")
    sys.exit()
#Get number of frames in video
length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
# Run darknet from its own directory so its relative cfg/weights paths resolve.
os.chdir("darknet")
# Detection runs once every `nframes` frames; tracking covers the gap.
nframes=20
# Boxes from the previous detection round, used to carry over "approaching"
# counters between rounds.
prevBbox=[]
# Main loop: run the darknet detector on one frame, then track the detected
# cars for the next `nframes` frames before detecting again.
for i in range(int(length/nframes)):
    # Read frame.
    ok, frame = video.read()
    if not ok:
        print ('Cannot read video file')
        sys.exit()
    # Hand the frame to darknet via a temp image, capturing stdout to a file.
    cv2.imwrite('tem.jpg',frame)
    os.system("./darknet detector test cfg/voc.data cfg/tiny-yolo-voc.cfg tiny-yolo-voc.weights "+"tem.jpg"+" > tmp.txt")
    f = open("tmp.txt", "r")
    temp=0
    crop=[]
    s=0
    bbox=[]
    # Parse darknet's text output: a "car <confidence>" line is followed by a
    # line carrying the box numbers, which we collect into `bbox`.
    # NOTE(review): `wordList[0]` will raise IndexError on a blank output
    # line, and `map(int, ...)` is a lazy iterator on Python 3 -- confirm
    # this script targets the intended Python version.
    for line in f:
        wordList = re.sub("[^\w]", " ", line).split()
        if temp==1:
            # The previous line announced a car; this line has its numbers.
            bbox.append(map(int, re.findall(r'\d+', line)))
            s=s+1
            temp=0
        if wordList[0]=="car" and int(wordList[1])>=30:
            temp=1
    if(s==0):
        # No car detected: just play through the next `nframes` frames.
        for i in range(nframes):
            ok, frame = video.read()
            cv2.imshow("Tracking", frame)
            k = cv2.waitKey(1) & 0xff
            if k == 27 :
                break
        continue
    # One tracker (plus bookkeeping slot) per detected car.
    tracker=[None]*len(bbox)
    center=[None]*len(bbox)
    marked=[None]*len(bbox)
    area=[None]*len(bbox)
    arBool=[None]*len(bbox)
    for i in range(len(bbox)):
        marked[i]=0
        arBool[i]=0
        bbox[i]=list(bbox[i])
        # Carry over the "approaching" counter from the previous round when
        # the new box overlaps an old one strongly enough.
        # NOTE(review): at this point bbox[i] still holds darknet's numbers
        # while prevBbox holds tracker-format boxes -- confirm the two are in
        # the same (x, y, w, h) convention before trusting this IoU test.
        for j in range(len(prevBbox)):
            if(bb_intersection_over_union(bbox[i],prevBbox[j])>0.9):
                marked[i]=prevMark[j]
                if(prevBbox[j][2]*prevBbox[j][3] < bbox[i][2]*bbox[i][3]):
                    arBool[i]=1
        # Convert corner coordinates to (x, y, w, h) for the OpenCV tracker.
        bbox[i][2]=bbox[i][2]-bbox[i][0]
        bbox[i][3]=bbox[i][3]-bbox[i][1]
        bbox[i]=tuple(bbox[i])
        area[i]=bbox[i][2]*bbox[i][3]
        center[i]=(bbox[i][0]+(bbox[i][2]/2),bbox[i][1]+(bbox[i][3]/2))
        # Draw the detection rectangle on the frame.
        p1 = (int(bbox[i][0]), int(bbox[i][1]))
        p2 = (int(bbox[i][0] + bbox[i][2]), int(bbox[i][1] + bbox[i][3]))
        cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
        print(bbox[i])
        # Initialize tracker with first frame and bounding box
        tracker[i] = cv2.TrackerMedianFlow_create()
        ok = tracker[i].init(frame, bbox[i])
        print(marked[i],".........................")
    # Warn the user about any car that has kept approaching for 3+ rounds.
    for i in range(len(bbox)):
        if(marked[i]>=3):
            os.system("say A car is coming towards you. Please move aside.")
            # tts = gTTS("A car is coming towards you. Please move aside.", lang='en')
            # tts.save("audio.mp3")
            # os.system("mpg321 audio.mp3")
    cv2.imshow("Tracking", frame)
    coun=0
    # `m` limits the approach counter to one increment per tracking window.
    m=0
    # Track the detected boxes over the next `nframes` frames.
    while coun<nframes:
        coun=coun+1
        # Read a new frame
        ok1, frame = video.read()
        if not ok1:
            break
        # Start timer
        timer = cv2.getTickCount()
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);
        bbox = [None] * len(tracker)
        ok = [None] * len(tracker)
        centerFin=[None]*len(bbox)
        areaFin=[None]*len(bbox)
        # Update tracker
        for i in range(len(tracker)):
            ok[i], bbox[i] = tracker[i].update(frame)
            # NOTE(review): this tests the whole list `ok` (truthy whenever
            # non-empty) rather than `ok[i]` -- confirm intended.
            if ok:
                centerFin[i]=(bbox[i][0]+(bbox[i][2]/2),bbox[i][1]+(bbox[i][3]/2))
                areaFin[i]=bbox[i][2]*bbox[i][3]
                # A box that grows while staying roughly on the same column
                # is treated as a car approaching the camera.
                if (abs(centerFin[i][0]-center[i][0])<50 and areaFin[i]>area[i]):
                    if(m==0):
                        m=1
                        marked[i]=marked[i]+1
                    # Tracking success
                    p1 = (int(bbox[i][0]), int(bbox[i][1]))
                    p2 = (int(bbox[i][0] + bbox[i][2]), int(bbox[i][1] + bbox[i][3]))
                    cv2.rectangle(frame, p1, p2, (100,0,0), 2, 1)
                else:
                    # Tracking success
                    marked[i]=0
                    p1 = (int(bbox[i][0]), int(bbox[i][1]))
                    p2 = (int(bbox[i][0] + bbox[i][2]), int(bbox[i][1] + bbox[i][3]))
                    cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
        # Remember this round's boxes/counters for the next detection pass.
        prevBbox=[None] * len(tracker)
        prevBbox=bbox
        prevMark=[None] * len(tracker)
        prevMark=marked
        # Display result
        cv2.imshow("Tracking", frame)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27 :
            break
| true |
2a725a4366abec6d646a3e3df9c9f6c8b28746fb | Python | DanHefrman/stuff | /RANDOM/limnpy/limnpy/dashboard.py | UTF-8 | 1,705 | 2.5625 | 3 | [] | no_license | import csv, yaml, json
import os, logging
import datetime
from operator import itemgetter
from collections import Sequence, MutableSequence
import codecs
#import colorbrewer
import itertools
import pandas as pd
import pprint
import copy
# Module-level logger; INFO so progress messages are visible by default.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Dashboard(object):
    """In-memory representation of a Limn dashboard document.

    The dashboard is a plain dict (name/headline/subhead/tabs) that can be
    serialized to ``dashboards/<id>.json`` with :meth:`write`.
    """

    # Template for a fresh dashboard document (deep-copied per instance).
    default_dashboard = {
        'name' : '',
        'headline' : '',
        'subhead' : '',
        'tabs': []}

    # Template for a fresh tab entry (deep-copied before mutation).
    default_tab = {'name' : '',
                   'graph_ids' : []}

    def __init__(self, id, name, headline='', subhead='', tabs=None):
        """Create a dashboard with the given id, display name and metadata."""
        self.id = id
        # Deep-copy the template so instances never share its mutable lists.
        self.dashboard = copy.deepcopy(Dashboard.default_dashboard)
        self.dashboard['name'] = name
        self.dashboard['headline'] = headline
        self.dashboard['subhead'] = subhead
        self.dashboard['tabs'] = tabs if tabs is not None else []

    def add_tab(self, name, graphs=None):
        """Append a tab holding the slugs of `graphs` (objects exposing
        ``.graph['slug']``)."""
        # `None` sentinel instead of the original mutable `[]` default, so
        # repeated calls cannot accidentally share one list across calls.
        graphs = graphs if graphs is not None else []
        tab = {
            "name" : name,
            "graph_ids" : [g.graph['slug'] for g in graphs]}
        self.dashboard['tabs'].append(tab)

    def add_graph(self, tab_name, graph):
        """Append `graph`'s slug to tab `tab_name`, creating it if missing."""
        # The original indexed `self['tabs']` (TypeError: Dashboard is not
        # subscriptable) and, when the tab was missing, appended the *shared*
        # class-level template with an empty name -- so the lookup below could
        # never find it.  Search existing tab names and create a properly
        # named, independent tab instead.
        tabs = self.dashboard['tabs']
        if tab_name not in [t['name'] for t in tabs]:
            new_tab = copy.deepcopy(Dashboard.default_tab)
            new_tab['name'] = tab_name
            tabs.append(new_tab)
        tab = [t for t in tabs if t['name'] == tab_name][0]
        tab['graph_ids'].append(graph.graph['slug'])

    def write(self, basedir='.'):
        """Serialize this dashboard to ``<basedir>/dashboards/<id>.json``."""
        db_dir = os.path.join(basedir, 'dashboards')
        if not os.path.exists(db_dir):
            os.makedirs(db_dir)
        db_path = os.path.join(db_dir, self.id + '.json')
        # Context manager closes the handle (the original leaked it).
        with open(db_path, 'w') as outfile:
            json.dump(self.dashboard, outfile, indent=2)

    def __str__(self):
        return json.dumps(self.dashboard, indent=2)
| true |
20606b644116cafa3503f00e2c20afb46ba053f5 | Python | rstaniek/taxi-dataset-analysis | /csv-splitter/tmanager.py | UTF-8 | 5,704 | 2.90625 | 3 | [] | no_license | import threading
import multiprocessing
import time
from datetime import datetime
#from utils import Executable
class ThreadRunException(Exception):
    """Raised when an operation is attempted on a thread that is running.

    The optional ``errors`` payload carries context (e.g. the thread name).
    """

    def __init__(self, message, errors=None):
        # The original called super(Exception, self).__init__, which skips
        # Exception itself in the MRO; plain super() targets the right class.
        super().__init__(message)
        self.errors = errors
class ProcessThread(threading.Thread):
    """Worker thread that runs an injected executable against one file.

    ``executable`` must expose ``run(kwargs)``; ``callback(threadID)`` is
    invoked when the work finishes so the owning manager can recycle the
    thread slot.
    """

    def __init__(self, threadID, name, file, callback):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.file = file          # path handed to the executable
        self.callback = callback  # completion-notification hook
        self.executable = None    # object with a run(kwargs) method
        self.method_args = None   # extra kwargs forwarded to run()

    def set_file(self, file):
        """Assign a new source file; only legal while the thread is idle."""
        # check if thread is not executing now.  `isAlive` was removed in
        # Python 3.9; `is_alive` is the supported spelling.
        if not self.is_alive():
            self.file = file
        else:
            raise ThreadRunException('Thread is currently running! Cannot change the source file.', self.name)

    def method_set(self, value):
        """Inject the executable object whose run() will be called."""
        self.executable = value

    def method_args_set(self, kwargs):
        """Set the kwargs dict forwarded to the executable."""
        self.method_args = kwargs

    def run(self):
        """Execute the injected method, then notify the owner via callback."""
        if self.method_args is None:
            self.method_args = dict()
        # NOTE: intentionally mutates the (possibly shared) kwargs dict so the
        # executable can see which file/thread it was given.
        self.method_args['file'] = self.file
        self.method_args['thread'] = self.name
        # execute the injected method with a parameters passed as a dictionary
        self.executable.run(self.method_args)
        # when the work is done
        self.callback(self.threadID)
class ThreadManager:
    """Distributes a stack of files across a pool of ProcessThread workers."""

    # Seconds to sleep between polls of the thread pool in run().
    THREAD_CHECK_INTERVAL = 0.5

    def __init__(self, files, method_to_invoke=None, method_args=None, core_c=None, iterable_args=None):
        """Create one idle worker per CPU core (or `core_c` workers)."""
        print('Initliailzing thread manager...')
        if core_c is None:
            self.CORE_COUNT = int(multiprocessing.cpu_count())
        else:
            self.CORE_COUNT = core_c
        self.avail_threads = list()    # idle, reusable worker threads
        self.working_threads = list()  # workers currently processing a file
        self.file_stack = files        # pending work items (popped as assigned)
        self.method_to_invoke = method_to_invoke  # executable run by workers
        if iterable_args is None:
            self.iterable_args = None
        else:
            self.iterable_args = iterable_args
        if method_args is None:
            self.method_args = dict()
        else:
            self.method_args = method_args
        #self.method_args['files'] = self.file_stack
        for _i in range(self.CORE_COUNT):
            self.avail_threads.append(
                ProcessThread(_i,
                              'processThread-%d' % _i,
                              None,
                              self.__on_thread_finished__))
        print('Thread Manager initialized.')

    def set_method_to_invoke(self, method):
        """Set the executable object whose run() each worker will call."""
        self.method_to_invoke = method

    def set_method_args(self, kwargs):
        """Set the kwargs dict shared by all workers."""
        self.method_args = kwargs

    def set_iterable_args(self, args):
        """Set the per-task argument stack (one entry consumed per file)."""
        self.iterable_args = args

    @property
    def task_count(self):
        """Number of files still waiting to be assigned."""
        return len(self.file_stack)

    def __str__(self):
        return '[{}] Process running...\nThread count: {}\nThreads active: {}\nTasks to process: {}'.format(str(datetime.now()), self.CORE_COUNT, len(self.working_threads), self.task_count)

    #a callback invoked when a certain thread is finished with its tasks.
    #sends back ID for further identification in the manager
    def __on_thread_finished__(self, id):
        #release a thread from the worker list and push it to idle list
        thread = next(t for t in self.working_threads if id == t.threadID)
        self.working_threads.remove(thread)
        del thread
        #reinstantiate a new thread with the same ID after deleting the previous one
        # (a threading.Thread object cannot be start()ed twice).
        self.avail_threads.append(ProcessThread(id, 'processThread-%d' % id, None, self.__on_thread_finished__))
        print('[{}] Thread #{} has exited its process\nTHREADS IDLE: {}\nTHREADS ACTIVE: {}\nTasks left: {}'.format(str(datetime.now()), id, len(self.avail_threads), len(self.working_threads), self.task_count))

    def __assign_tasks__(self):
        """Hand out pending files (and optional per-task args) to idle workers."""
        #while there are still files on the stack
        if len(self.file_stack) > 0:
            #while there are still threads available
            while len(self.avail_threads) > 0 and self.task_count > 0:
                thread = self.avail_threads.pop()
                thread.method_set(self.method_to_invoke)
                if self.iterable_args is not None:
                    arg = self.iterable_args.pop()
                    # NOTE(review): every worker shares this same dict, so a
                    # later assignment may overwrite 'iterable' before a
                    # worker reads it -- confirm intended.
                    self.method_args['iterable'] = arg
                thread.method_args_set(self.method_args)
                try:
                    #pop a file from a stack and assign it to a process
                    file = self.file_stack.pop()
                    thread.set_file(file)
                    print('File assigned to thread: {}'.format(thread.name))
                except ThreadRunException as ex:
                    print(ex)
                #start the thread
                thread.start()
                #add it to the list of working threads
                self.working_threads.append(thread)

    def run(self):
        """Block until every file is assigned and every worker has finished."""
        #run a loop until we processed all the requests
        while self.task_count > 0:
            #print info status
            #if int(datetime.now().strftime('%S')) % 30 == 0:
            #print(self.__str__())
            time.sleep(ThreadManager.THREAD_CHECK_INTERVAL)
            #assign tasks to the lazy idle bastards
            self.__assign_tasks__()
        print('All tasks have been assigned')
        print(self.__str__())
        while len(self.working_threads) > 0:
            time.sleep(ThreadManager.THREAD_CHECK_INTERVAL)
        print('All threads have finished')
        print(self.__str__())
| true |
6672a6e77f872e38fe8c0518ecf0f89cd807cb7a | Python | BenjaminUJun/slick | /pox/ext/slick/elements/DnsDpi/loadcache.py | UTF-8 | 978 | 2.90625 | 3 | [] | no_license | import glob
from collections import defaultdict
class LoadCache:
"""LoadCache for laoding the blocked domain names."""
def __init__(self):
DNS_BLOCK_LIST_DIR = "/tmp/blacklists" # Make it programmable.
self.data = defaultdict(list) # A dictionary with Domain Name as key and IP address list as resolved addresses.
self.block_dir = DNS_BLOCK_LIST_DIR
self.block_domain_dict = {}
def _list_files(self):
file_list = []
for infile in glob.glob(self.block_dir+"/*/domains"):
file_list.append(infile)
if (len(file_list) == 0 ):
raise Exception,"ERROR: Could not find the domain names to block"
return file_list
# This function loads the blacklists file.
def load_files(self):
for item in self._list_files():
f = open(item,'r')
for domain in f.readlines():
self.block_domain_dict[domain.rstrip()] = True
def is_blocked_domain(self,domain_name):
if(self.block_domain_dict.has_key(domain_name)):
return True
else:
return False
| true |
04c5817fa8347b5b6d5c97ae68fdba75e212c873 | Python | V1cK1m/The-Group-Forex | /Forex/simple_forecast.py | UTF-8 | 7,213 | 3.671875 | 4 | [] | no_license | # import necessary libraries
import sys
import matplotlib.pyplot as plt
import pandas as pd
# import necessary modules
sys.path.append("/home/excviral/Pycharm/PycharmProjects/Adaptive-forex-forecast/Adaptive filters/")
sys.path.append("/home/excviral/Pycharm/PycharmProjects/Adaptive-forex-forecast/Feature extractors/")
from lms import lms
from feature_extractior_functions import simple_amv_extractor
from normalization_functions import simple_normalize
# import dataset
# The CSV is expected alongside this script with a 'Price' column holding the
# exchange-rate time series.
dataset = pd.read_csv('data.csv')
data = list(dataset['Price'].values)
# =====================================================================================================================
# ===== Helper functions ==============================================================================================
# =====================================================================================================================
def predict(inputs, weights):
    '''
    Model to predict the exchange rate based on inputs and weights
    This function takes in features as input, in this case the features are actual value, mean value, and variance, and
    then the features are multiplied with its respective weight and added up to compute the predicted value.
    Mathematically: y_k = a*w0 + m*w1 + v*w2
    :param inputs: (list) containing features of the dataset
    :param weights: (list) containing weights for each feature of the dataset
    :return: (float) predicted outcome based on input and weights
    '''
    # Dot product of features and weights; a generator expression avoids the
    # intermediate list the original built inside sum().
    return sum(i * j for i, j in zip(inputs, weights))
def plot_convergence_characteristics(errors):
    '''
    Function to plot the convergence characteristics viz. plot of (error)^2 v/s pattern number
    Convergence characteristics shows the rate of convergence of the error of prediction v/s actual value, to 0.
    :param errors: (list) containing errors corresponding to each pattern
    :return: none
    '''
    # Square each error so the sign is ignored; convergence shows as the
    # curve flattening onto the x-axis.
    errors_squared = [i * i for i in errors]
    plt.plot(errors_squared)
    plt.xlabel('Pattern number')
    plt.ylabel('(Error)^2')
    # Blocks until the plot window is closed.
    plt.show()
# =====================================================================================================================
# ===== Forecast Algorithm ============================================================================================
# =====================================================================================================================
# first we normalize the data, we use simple normalization technique of dividing each value in data by max value of data
n_data = simple_normalize(data)
# define window size
# Each feature pattern is built from a sliding window of this many samples.
window_size = 10
# extract the feature-patterns (a,m,v,tv) from the data, and store it in a list
# The last sample is held back so every pattern has a "next value" target.
feature_table = simple_amv_extractor(window_size, n_data[:(len(n_data) - 1)])
# We will be using 80% of the feature-patterns from the feature table for training the model, and the remaining 20% of
# the feature-patters will be used for testing the model
# separating training data and test data and storing them in respective lists for later use
training_data = feature_table[:int(len(feature_table) * 0.8)]
testing_data = feature_table[int(len(feature_table) * 0.8):]
# Now that the data is ready, we train our model to compute optimum weights
# Model trainer
def train_model(training_data, mu):
    '''
    This function trains the prediction model to find the optimum weights for which prediction error is minimum
    Algorithm: Initially the model starts with weights = [0,0,0], the model then predicts some value, then it computes
    error vs the actual value/desired value and adjusts the weights accordingly. This is repeated until all patterns in
    the training set are exhausted. At the end, the error should have converged to zero, this can be seen in the
    convergence characteristics plot generated at the end of training.
    NOTE: The convergence of error is highly dependent on the choice of 'mu' - the convergence coefficient.
    Theoretically, its value should lie between 0 and 1, when mu is closer towards zero learning rate will be slow,
    but accuracy will be more, when it is closer to 1, learning rate will be faster, but accuracy will be poor.
    :param training_data: (list) containing features selected to train the model
    :param mu: (float) convergence coefficient
    :return: (list) containing optimized weights, which can be used for prediction
    '''
    # This list will store weights, initially the weights will be zeros
    weights = [0, 0, 0]
    # This list will store errors corresponding to each pattern
    errors = []
    # This loop optimizes the weights, such that error converges to zero
    for i in training_data:
        # Inputs to the predictor model [a, m, v]
        x_k = i[:len(i) - 1]
        # Desired value or target value, that is to be predicted
        d_k = i[len(i) - 1]
        # Predict the output price based on the input and current weights
        y_k = predict(x_k, weights)
        # Compare the predicted price to the desired price, and compute the error
        e_k = d_k - y_k
        # Store the error to the list
        errors.append(e_k)
        # Compute new weights based on the previous weights, mu, previous input, previous error using lms algorithm
        weights = lms(weights, mu, x_k, e_k)
    # Visual check that the squared error actually converged to zero.
    plot_convergence_characteristics(errors)
    return weights
# Train on 80% of the patterns; mu was tuned by hand for this dataset.
weights = train_model(training_data, 0.000195)
# Now that we have computed the optimum weights, we test it against the feature-patterns that we had stored earlier
# start testing
def test_model(testing_data, weights):
    '''
    This function tests the performance of the model so that we can know the accuracy of the model
    :param testing_data: (list) containing features that are to be tested
    :param weights: (list) containing optimum weights generated during training
    :return: none
    '''
    # Lists to store errors, desired/target value, and predicted value for each test-pattern
    errors = []
    desired = []
    predicted = []
    # This loop predicts a value for each pattern, computes error against desired value and stores them in lists above
    for i in testing_data:
        # Inputs to the predictor model
        x_k = i[:len(i) - 1]
        # Desired value or target value, that is to be predicted
        d_k = i[len(i) - 1]
        # Predict the output price based on the input and optimum weights generated during training
        y_k = predict(x_k, weights)
        # Store the predicted value and desired value to the respectivs lists
        predicted.append(y_k)
        desired.append(d_k)
        # Compute the error of prediction against the desired value
        e_k = d_k - y_k
        # Store the error to the list
        errors.append(e_k)
    # Plot the predicted and desired values to compare the error
    plt.plot(desired, 'g-', label="Desired Values")
    plt.plot(predicted, 'r-', label="Predicted Values")
    plt.xlabel('Pattern number')
    plt.ylabel('Normalized Exchange rate')
    plt.legend(loc='best')
    plt.show()
# Evaluate the trained weights on the held-out 20% of patterns.
test_model(testing_data, weights)
# To predict exchange rate against a new feature after training, simply plug the features into 'predict' function
| true |
b32a2e9e81eb335e8e522be23a5910f47b3f699f | Python | breylee/PythonPractice | /04-Sequences.py | UTF-8 | 903 | 3.90625 | 4 | [] | no_license | #sequences
#lists
print("list demo")
x = [5,12,13,200]
print(x)
x.append(-2) #[5,12,13,200,-2]
print(x)
del x[2] #[5,12,200,-2]
print(x)
z = x[1:3] #[12,200]
print(z)
yy = [3,4,5,12,13]
print(yy[3:]) #[12,13]
print(yy[:3]) #[3,4,5]
print(yy[-1]) #[13]
x.insert(2,28) #[5,12,28,200,-2]
print(x)
print(28 in x) #True
print(13 in x) #False
print(x.index(28)) #2
x.remove(200)
print(x) #[5,12,28,-2] #removes first only
w = x + [1,"ghi"] #[5,12,28,-2,1,'ghi']
print(w)
qz = 3*[1,2,3] #[1,2,3,1,2,3,1,2,3]
print(qz)
x=[1,2,3] #[1,2,3]
print(x)
x.extend([4,5]) #[1,2,3,4,5]
print(x)
g = [1,2,3]
g.append([4,5]) #[1,2,3,[4,5]]
print(g)
y = x.pop(0) #1
print(y)
print(x) #[2,3,4,5]
t = [5,12,13]
t.reverse() #[13,12,5]
print(t)
#append vs extend
#append [ 1 , 2 , 3 , 8 , 88 , [ 8 , 8 8] ]
#extend [ 1 , 2 , 3 , 8 , 88]
#swapping
x = 5
y = 10
z = 15
print(x,y,z)
[x,y,z] = [y,z,x]
print(x,y,z) | true |
3e51f3048d015aed080e9d1d80e5901f5d4d5612 | Python | JConwayAWT/pgss15cb | /spec/other/Testing2.py | UTF-8 | 3,664 | 2.703125 | 3 | [] | no_license |
#updated version, using better data structure
import os
from lottka_volterra_sim import *
class Result():
    """Outcome of a single test: pass/fail flag plus reporting metadata."""

    def __init__(self, result=False, name=None, description=None, status=None):
        self.result = result            # True when the test passed
        self.name = name                # human-readable test name
        self.description = description  # what the test checks
        self.status = status            # detail message (esp. on failure)

    def run_test(self, test_function):
        """Run `test_function` (no args, returns a Result) and print a report.

        Note: the returned Result object is stored on self.result, so after
        this call self.result is a Result rather than a bool.
        """
        self.result = test_function()
        # Python 3 print calls; the original used Python 2 print statements,
        # which are SyntaxErrors on Python 3.
        if self.result.result:
            print(self.result.name)
            print("PASS / ")
        else:
            print(self.result.name)
            print(self.result.description)
            print(self.result.status)
def check_for_negatives(list_molecule_numbers, result_object):
    """Flag `result_object` as failed if any molecule count is negative.

    Sets result_object.result / .status and returns result_object.  Records
    the index of the first offending value in the FAIL status message.  An
    empty input counts as a pass.

    Fix: the original overwrote result/status on *every* element, so a
    trailing non-negative value silently erased an earlier failure.
    """
    result_object.result = True
    result_object.status = "PASS / "
    for counter, number in enumerate(list_molecule_numbers):
        if number < 0:
            result_object.result = False
            result_object.status = "FAIL: Test failed at iteration {} in output {}.".format(counter, list_molecule_numbers)
            # First failure wins; stop so later values cannot overwrite it.
            break
    return result_object
def neg_output():
    """Run the simulation and verify neither output series goes negative.

    Runs main() (from lottka_volterra_sim), which writes R.dat and W.dat,
    parses the last column of each file as floats, and returns a Result
    describing whether any value was negative.
    """
    result = Result()
    result.name = "Negative Output"
    result.description = "Checks to see if the output has negative numbers. If they are negative, the output will describe where."
    main()
    # Context managers close the handles (the original leaked both).
    with open('R.dat', 'r') as LottkaVolterraR:
        lv_textR = LottkaVolterraR.read()
    with open('W.dat', 'r') as LottkaVolterraW:
        lv_textW = LottkaVolterraW.read()
    list_molecule_numbersR = []
    list_molecule_numbersW = []
    # Fixes: the original iterated the file objects *after* read() had
    # already exhausted them, so no lines were ever parsed; it also
    # referenced an undefined `all_data` in the R loop (NameError).
    for line in lv_textR.splitlines():
        all_dataR = line.split()
        molecule_numbersR = float(all_dataR[-1])
        list_molecule_numbersR.append(molecule_numbersR)
    for line in lv_textW.splitlines():
        all_dataW = line.split()
        molecule_numbersW = float(all_dataW[-1])
        list_molecule_numbersW.append(molecule_numbersW)
    # Check the R series first, then the W series; stop on the first failure.
    result = check_for_negatives(list_molecule_numbersR, result)
    if result.result == False:
        return result
    result = check_for_negatives(list_molecule_numbersW, result)
    if result.result == False:
        return result
    else:
        result.result = True
        result.status = "PASS / "
        return result
# Execute the negative-output check and print its PASS/FAIL report.
r = Result()
r.run_test( neg_output )
# list_molecule_numbers = []
# for line in LottkaVolterra:
# all_data = line.split()
# molecule_numbers = float(all_data[-1])
# list_molecule_numbers.append(molecule_numbers)
# def check_for_negatives(list_molecule_numbers):
# counter = 0
# for number in list_molecule_numbers:
# if number < 0:
# print "Test failed at {}".format(counter)
# counter = counter + 1
# print check_for_negatives(list_molecule_numbers)
# except Exception, e:
# print str(e)
# def closeFile():
# try:
# os.system('TASKKILL /F /IM python.exe')
# except Exception, e:
# print str(e)
# LottkaVolterra = open('C:\Users\Me\Desktop\R.dat', 'r')
# print LottkaVolterra
# list_molecule_numbers = []
# for line in LottkaVolterra:
# all_data = line.split()
# molecule_numbers = float(all_data[-1])
# list_molecule_numbers.append(molecule_numbers)
# def check_for_negatives(list_molecule_numbers):
# counter = 0
# for number in list_molecule_numbers:
# if number < 0:
# print "Test failed at {}".format(counter)
# counter = counter + 1
# print check_for_negatives(list_molecule_numbers)
| true |
9c0360b9dd7512c46916dd6a12d528aefe3572d4 | Python | CoreSheep/python | /python/daySixteen/ArgsAndKwargs.py | UTF-8 | 751 | 3.546875 | 4 | [] | no_license | """
using **args and **kwargs in python function definition
"""
def args_test(Id, *args):
    """Print a student id followed by a numbered listing of profile items."""
    print("Student Id: %d" % Id)
    print("profile:")
    # enumerate replaces the manually maintained Index counter.
    for Index, arg in enumerate(args, start=1):
        print("info{}: {}".format(Index, arg))
    # Blank separator line after each profile.
    print()
def kwargs_test(Id, **kwargs):
    """Print the positional id, then each keyword argument as 'key: value'."""
    print("the first param is: ", Id)
    # .items() yields key/value pairs directly instead of re-indexing kwargs.
    for key, value in kwargs.items():
        print("%s: %s" % (key, value))
if __name__ == "__main__":
    # Demonstrate *args unpacking from a sequence and **kwargs from a dict.
    # both list and tuple are fine
    profile = ('sheepcore', 'class 1606', 'male', 'handsome')
    args_test(20164586, *profile)
    args_test(20161111, 'marshall', 'class 1508', 'male')
    profile2 = {'name': 'Anna Hathaway', 'class': 'class 1709', 'gender':
                'female'}
    kwargs_test(Id=20184434, **profile2)
| true |
8bf0bd75b696d3993b527653fbdc80665e13f397 | Python | Vishvas-Enjamuri/MQTools | /mqtools/examples/testit.py | UTF-8 | 827 | 2.625 | 3 | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import sys
from ruamel.yaml import YAML
import json
def dumpData(data):
    # Write an MQ object definition to a YAML file named after it: queue
    # definitions go under ./queues/, channels under ./channel/; anything
    # else is dumped to stdout.  Relies on the module-level ruamel `yaml`
    # round-trip instance defined below the function.
    if "Q_NAME" in data:
        qname = data["Q_NAME"]
        fn = "./queues/"+qname+".yml"
        with open(fn, 'w') as outfile:
            print("file name",fn)
            yaml.dump(data, outfile)
    elif "CHANNEL_NAME" in data:
        name = data["CHANNEL_NAME"]
        fn = "./channel/"+name+".yml"
        with open(fn, 'w') as outfile:
            print("file name",fn)
            yaml.dump(data, outfile)
    else: yaml.dump(data, sys.stdout)
# Round-trip YAML handler used by dumpData above.
yaml=YAML()
print (sys.version_info)
# Read up to 100 JSON lines from stdin and pretty-print each record.
for x in range(100):
    line = sys.stdin.readline()
    if not line: break
    # print("==line==",line,"!")
    j = json.loads(line)
    print(json.dumps(j, indent=2,separators=(',', ':')))
| true |
100fe3cf99cb94ed159c3034105e417041269a3d | Python | robertodevpython/analytics-scripts | /v2/etl/user.py | UTF-8 | 1,455 | 2.5625 | 3 | [] | no_license | import sqlalchemy as db
from utils import log
from v2.etl import ETL
from v2.models import User
class ETLUser(ETL):
    """ETL step that copies newly joined users from the core database into
    the analytics database, skipping ids that already exist there."""

    def extract(self):
        """Fetch id/email of users who joined on/after self.date_limit."""
        from resources.database import connection
        log.info("Carregando usuários do sistema...")
        # Bound parameter :created guards against SQL injection and lets the
        # driver handle date formatting.
        statement = db.sql.text(
            """
            SELECT
                core_user.id as id
                , core_user.email as email
            FROM core_user
            WHERE
                date_joined >= :created
            ORDER BY
                date_joined DESC
            """
        )
        self.data = connection.execute(statement, created=self.date_limit)

    def transform(self):
        # No transformation needed; rows are loaded as-is.
        pass

    def load(self):
        """Bulk-insert users not yet present in the analytics database."""
        from v2.database import session
        users = [item for item in self.data]
        loaded_ids = [item["id"] for item in users]
        log.info(f"Verificando quais dos {len(users)} usuários existem no analytics...")
        # Query only the ids we just extracted; rows come back as 1-tuples.
        qs = session.query(User.id).filter(User.id.in_(loaded_ids))
        existing_ids = [id[0] for id in qs.all()]
        log.info(f"Ignorando {len(existing_ids)} registros existentes...")
        items_to_add = []
        for item in users:
            if item["id"] not in existing_ids:
                items_to_add.append(User(id=item["id"], email=item["email"]))
        log.info(f"Inserindo {len(items_to_add)} novos usuários no analytics...")
        session.bulk_save_objects(items_to_add)
        session.commit()
| true |
6debb860bc24ae0b8b0d2ee6daa9c8c68863a6f0 | Python | roobiuli/Serv_cl | /Server.py | UTF-8 | 1,414 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python
import socket
import sys
import os
# TCP server socket bound to localhost; the port comes from the command line.
# NOTE: this file uses Python 2 syntax (print statements).
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_port = int(sys.argv[1])
server_address = ('localhost', server_port)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
# Backlog of 1: handle one pending connection at a time.
sock.listen(1)
def listCurrentDir():
    # Send the current directory listing to the client as one
    # comma-separated string.  Uses the module-level `connection`
    # accepted in the serve loop below.
    data = os.listdir('.')
    stringData = ', '.join(data)
    connection.sendall(stringData)
def catFile(data):
    # `data` is the raw client command, e.g. "cat somefile": stream the named
    # file back in 1 KB chunks, or reply 'File not found'.  Uses the
    # module-level `connection` accepted in the serve loop below.
    filePath = data.split()
    if os.path.isfile(filePath[1]):
        print 'Sending', filePath[1]
        fileContent = open(filePath[1], 'rb')
        l = fileContent.read(1024)
        while (l):
            connection.send(l)
            l = fileContent.read(1024)
        fileContent.close()
        print "Done Sending"
    else:
        connection.sendall('File not found')
##
# Serve loop: accept one client and dispatch its commands ('exit', 'ls',
# 'cat <file>'); unknown commands get a '404' reply.  The `break` in the
# finally clause means the server exits after serving a single connection.
while True:
    connection, client_address = sock.accept()
    try:
        print >>sys.stderr, 'A new connection established:', client_address
        while True:
            data = connection.recv(1024)
            if data == 'exit':
                connection.sendall('exit')
                connection.close()
                break
            #elif data == 'ls':
            elif data == 'ls':
                listCurrentDir()
                continue
            elif 'cat ' in data:
                catFile(data)
                continue
            else:
                connection.sendall('404')
                continue
    finally:
        print 'Server closing'
        break
7304d48b9f4d7f8efc5979cf83741e2f32c0e1bf | Python | Bondzio/rmqClient | /util/functions.py | UTF-8 | 632 | 2.671875 | 3 | [] | no_license | import re
def is_cidr(str_cidr):
    """Return True if `str_cidr` is IPv4 CIDR notation a.b.c.d/n
    (octets 0-255, prefix length 0-32)."""
    pattern='^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))$'
    # `is not None` instead of comparing with `== None`, collapsing the
    # original four-line if/else into one boolean expression.
    return re.match(pattern, str_cidr) is not None
def is_ipv4(str_ipv4):
    """Return True if `str_ipv4` is a dotted-quad IPv4 address
    (four octets, each 0-255)."""
    pattern='^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$'
    # `is not None` instead of comparing with `== None`, collapsing the
    # original four-line if/else into one boolean expression.
    return re.match(pattern, str_ipv4) is not None
if __name__ == "__main__":
    # Quick manual checks (Python 2 print statements).
    print is_cidr('192.168.0.0/24')
    print is_ipv4('192.167.1.333')
| true |
8425b5e7701ada1d44e8d79627e24b5daddbb0f6 | Python | takeshisToCoding/Covid19-CSSE | /graph_covid.py | UTF-8 | 2,756 | 2.90625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from scipy.signal import savgol_filter
import sys
import argparse
if __name__ == '__main__':
    # Command-line interface.
    parser = argparse.ArgumentParser(description='Graph country curves.')
    parser.add_argument('--country', action='store',
                        default='Mexico',
                        help='Which country to plot, defaul Mexico')
    parser.add_argument('--file', action='store',
                        help='Preprocessed csv dataset file',
                        required=True)
    parser.add_argument('--log', action='store_true',
                        help='Use logarithmic scale',
                        default=False)
    parser.add_argument('--infected', action='store_true', default=False,
                        help='Plot infected, instead of confirmed cases ')
    parser.add_argument('--no_plot', action='store_true', default=False,
                        help='Do not plot anything')
    args = parser.parse_args()
    print("Reading file: " + args.file)
    try:
        covid_df = pd.read_csv(args.file)
    except pd.errors.ParserError:
        print("Error reading file")
        exit(1)
    except FileNotFoundError:
        print("Error file "+ args.file + " Not found")
        exit(1)
    # Column sets: raw cumulative columns vs. the derived "Infected" series.
    data_cols = ['Confirmed', 'Deaths', 'Recovered']
    model_cols = ['Infected', 'Deaths', 'Recovered']
    if args.infected:
        plot_cols = model_cols
    else:
        plot_cols = data_cols
    # covid_df.set_index('Date',inplace=True)
    # Active infections = confirmed minus resolved (deaths + recoveries).
    covid_df['Infected'] = covid_df['Confirmed'] - \
        covid_df['Deaths'] - covid_df['Recovered']
    country_df = covid_df.loc[covid_df['Country'] == args.country]
    # Drop leading days before the first confirmed case.
    country_df = country_df.loc[country_df['Confirmed'] > 0]
    # Some countries have more than one region so use groupby
    coalesced_df = country_df.groupby('Date').sum()
    # Daily new cases = first difference of the cumulative series.
    coalesced_diff = coalesced_df.diff()
    # First difference is noisy, use Savitzky–Golay to smooth it out
    data_np = coalesced_diff[plot_cols[0]].to_numpy()
    # print(data_np)
    filt_data = savgol_filter(data_np, 13, 2, axis=0)
    coalesced_diff['Smoothed'] = filt_data
    # plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))
    # Figure 1: cumulative cases.
    coalesced_df[plot_cols].plot(logy=args.log)
    cases_fig = plt.gcf()
    cases_fig.autofmt_xdate()
    plt.title(args.country + " cases")
    # Figure 2: daily new cases plus the smoothed curve.
    ext_cols = plot_cols + ['Smoothed']
    coalesced_diff[ext_cols].plot(logy=args.log)
    diff_fig = plt.gcf()
    diff_fig.autofmt_xdate()
    plt.title(args.country + " new cases")
    # Show interactively, or save PNGs when --no_plot was given.
    if not args.no_plot:
        plt.show()
    else:
        cases_fig.savefig(args.country + '_cases.png')
        diff_fig.savefig(args.country + '_new_cases.png')
| true |
689df451760b70af53f2f055bf16a965353e7ad8 | Python | KalinHar/OOP-Python-SoftUni | /exams/apr2020/tests/test_battlefield.py | UTF-8 | 2,558 | 2.875 | 3 | [] | no_license | from project.battle_field import BattleField
from project.player.beginner import Beginner
from project.player.advanced import Advanced
from project.card.trap_card import TrapCard
from project.card.magic_card import MagicCard
from unittest import TestCase, main
class TestBattleField(TestCase):
    """Unit tests for ``BattleField.fight``.

    Covers the dead-player guard, the health-bonus underflow failure for
    either side, and one full successful round with exact post-fight state.
    """

    def setUp(self):
        # BattleField is used as a plain namespace for the fight logic;
        # fresh players are created before every test.
        self.field = BattleField
        self.atk_beg = Beginner("atb")
        self.atk_adv = Advanced("ata")
        self.eny_beg = Beginner("enb")
        self.eny_adv = Advanced("ena")

    def test_fight_dead_attacker(self):
        self.atk_beg.health = 0
        with self.assertRaises(Exception) as ex:
            self.field.fight(self.atk_beg, self.eny_adv)
        self.assertEqual("Player is dead!", str(ex.exception))

    def test_fight_dead_enemy(self):
        self.eny_adv.health = 0
        with self.assertRaises(Exception) as ex:
            self.field.fight(self.atk_beg, self.eny_adv)
        self.assertEqual("Player is dead!", str(ex.exception))

    def test_fight_dead_both(self):
        self.atk_beg.health = 0
        self.eny_adv.health = 0
        with self.assertRaises(Exception) as ex:
            self.field.fight(self.atk_beg, self.eny_adv)
        self.assertEqual("Player is dead!", str(ex.exception))

    def test_fight_with_attacker_lose(self):
        magic = MagicCard("mag")
        trap = TrapCard("tra")
        self.atk_beg.card_repository.cards = [trap]
        self.eny_adv.card_repository.cards = [magic, trap]
        with self.assertRaises(Exception) as ex:
            self.field.fight(self.atk_beg, self.eny_adv)
        self.assertEqual("Player's health bonus cannot be less than zero.", str(ex.exception))

    def test_fight_with_enemy_lose(self):
        magic = MagicCard("mag")
        trap = TrapCard("tra")
        self.atk_adv.card_repository.cards = [trap, magic, trap]
        self.eny_beg.card_repository.cards = [magic]
        with self.assertRaises(Exception) as ex:
            self.field.fight(self.atk_adv, self.eny_beg)
        self.assertEqual("Player's health bonus cannot be less than zero.", str(ex.exception))

    def test_fight_correct(self):
        magic = MagicCard("mag")
        trap = TrapCard("tra")
        self.atk_adv.card_repository.cards = [magic]
        self.eny_beg.card_repository.cards = [trap]
        # Pre-fight state.
        self.assertEqual(250, self.atk_adv.health)
        self.assertEqual(50, self.eny_beg.health)
        self.field.fight(self.atk_adv, self.eny_beg)
        # Post-fight state of both players and both cards.
        self.assertEqual(180, self.atk_adv.health)
        self.assertEqual(90, self.eny_beg.health)
        self.assertEqual(150, trap.damage_points)
        self.assertEqual(5, magic.damage_points)
if __name__ == "__main__":
    # unittest.main discovers and runs the Test* classes in this module.
    main()
| true |
0d7b630647a8c73c6176ebc7c61dd0a713336bfb | Python | tomlxq/ps_py | /huawei/test_FindMinNum.py | UTF-8 | 363 | 2.546875 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from FindMinNum import FindMinNum
class test_FindMinNum(unittest.TestCase):
    """Tests for FindMinNum.func (smallest number after removing digits).

    NOTE(review): ``FindMinNum.func(self, ...)`` passes the TestCase
    instance as ``self``, suggesting ``func`` ignores its ``self``
    argument — confirm against the FindMinNum implementation.
    """

    def test_func(self):
        # Fixed: assertEquals is a deprecated alias of assertEqual and was
        # removed in Python 3.12.
        self.assertEqual("0", FindMinNum.func(self, "10", 1))
        self.assertEqual("200", FindMinNum.func(self, "10200", 1))
        self.assertEqual("131", FindMinNum.func(self, "2615371", 4))
| true |
cd008dfa45e0db6d06f111dc6b5864c80aa07cb7 | Python | Cheribat/6.189Python | /assignment/hw2/OPT2_1.py | UTF-8 | 340 | 4.09375 | 4 | [] | no_license | # author : Will Fu
# date : 2016-09-09
def list_int(a_list):
    """Return a new list containing only the int elements of *a_list*.

    Note that bools pass ``isinstance(..., int)`` because ``bool`` is a
    subclass of ``int``.
    """
    return [value for value in a_list if isinstance(value, int)]
# Demo: filter a mixed-type list down to its int elements.
list1 = [0, 'A', 12.5, 8, "hello", 583]
print(list_int(list1))
| true |
3ca09dc5d7cf73755ac138160008e88756ad8a09 | Python | DaMacho/data-science-school-5th | /day20 advanced topics/argparse_example/argparse_ex1.py | UTF-8 | 353 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# help text example (Python 2 script — note the `print i,` statement)
# If no argument is given, argparse automatically shows the help text.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("num", help="number you want to print from 1")
args = parser.parse_args()
# Positional args arrive as strings; convert before using as a range bound.
num = int(args.num)
for i in range(1, num+1):
    print i,
# python argparse_ex1.py 10
| true |
20ecb45b6fc385b2769a550106036809c1d46e36 | Python | matthewssullivan/CybersecurityFinalProject | /returnPortDescription.py | UTF-8 | 1,108 | 3.21875 | 3 | [] | no_license | #CSCI 5742
#Cybersecurity Programming
#Final Project
#Portscanner interface with CVE
#Jonathan Trejo and Matt Sullivan
#11/27/2018
#returnPortDescription.py
import csv
def returnPortDescription(portNum):
    """Return the service description for *portNum* from the port CSV.

    Each row's "Port Number" field is either a single port ("80") or a
    dash-separated inclusive range ("100-120").

    Parameters
    ----------
    portNum : int
        The port number to look up.

    Returns
    -------
    str or None
        The matching row's "Description", or None when no row covers
        *portNum*.
    """
    with open("service-names-port-numbers (1).csv", mode='r') as f:
        csvDict = csv.DictReader(f)  # one dict per CSV row
        for row in csvDict:
            workNums = row["Port Number"].split("-")  # single port or "lo-hi"
            if len(workNums) < 2:
                # Exact single-port match
                if portNum == int(row["Port Number"]):
                    return row["Description"]
            else:
                # Inclusive range match
                if int(workNums[0]) <= portNum <= int(workNums[1]):
                    return row["Description"]
    # Fixed: the original called f.close() after the with-block, which had
    # already closed the file; also make the no-match result explicit.
    return None
| true |
61368c0ece44a700ea1bae0040cccdee858389fb | Python | timokoch/porespy | /porespy/filters/__funcs__.py | UTF-8 | 26,839 | 2.859375 | 3 | [] | no_license | from collections import namedtuple
import numpy as np
import scipy as sp
import scipy.ndimage as spim
import scipy.spatial as sptl
from scipy.signal import fftconvolve
from tqdm import tqdm
from numba import jit
from skimage.segmentation import clear_border
from skimage.morphology import ball, disk, square, cube
from skimage.morphology import reconstruction, watershed
from porespy.tools import randomize_colors
from porespy.tools import get_border, extend_slice
from porespy.tools import fftmorphology
def snow_partitioning(im, r_max=4, sigma=0.4, return_all=False):
    r"""
    Partition the void space into pore regions using a marker-based
    watershed, with markers found by the SNOW algorithm.

    The SNOW algorithm (Sub-Network of an Over-segmented Watershed) finds
    the *true* local maxima of the distance transform by trimming various
    kinds of extraneous peaks.  It was designed for the peculiarities of
    high-porosity materials but applies to other materials as well.

    Parameters
    ----------
    im : array_like
        Either (a) a boolean image of the domain with ``True`` indicating
        the pore space, or (b) a distance transform of the domain computed
        externally by the user (faster if one is already available).
    r_max : scalar
        Radius of the spherical structuring element used by the maximum
        filter that locates peaks.  The default is 4.
    sigma : scalar
        Standard deviation of the Gaussian blur applied to the distance
        transform.  The default is 0.4; pass 0 to skip blurring (useful
        when a pre-processed distance transform is supplied as ``im``).
    return_all : boolean (default is False)
        If ``True`` a named tuple is returned containing the original
        image, the distance transform, the filtered peaks, and the final
        pore regions.

    Returns
    -------
    An image the same shape as ``im`` with the void space partitioned into
    pores, or (if ``return_all``) a named tuple with attributes ``im``,
    ``dt``, ``peaks`` and ``regions``.

    References
    ----------
    [1] Gostick, J. "A versatile and efficient network extraction algorithm
    using marker-based watershed segmenation". Physical Review E. (2017)
    """
    # NOTE(review): attributes below are assigned onto the namedtuple
    # *class* itself rather than an instance; it works as a simple
    # namespace, but is unconventional.
    tup = namedtuple('results', field_names=['im', 'dt', 'peaks', 'regions'])
    im = im.squeeze()
    print('_'*60)
    print("Beginning SNOW Algorithm")
    if im.dtype == 'bool':
        print('Peforming Distance Transform')
        dt = spim.distance_transform_edt(input=im)
    else:
        # A distance transform was supplied; recover the boolean image.
        dt = im
        im = dt > 0
    tup.im = im
    tup.dt = dt
    if sigma > 0:
        print('Applying Gaussian blur with sigma =', str(sigma))
        dt = spim.gaussian_filter(input=dt, sigma=sigma)
    # Find candidate peaks, then prune saddle points and near-duplicates.
    peaks = find_peaks(dt=dt)
    print('Initial number of peaks: ', spim.label(peaks)[1])
    peaks = trim_saddle_points(peaks=peaks, dt=dt, max_iters=500)
    print('Peaks after trimming saddle points: ', spim.label(peaks)[1])
    peaks = trim_nearby_peaks(peaks=peaks, dt=dt)
    peaks, N = spim.label(peaks)
    print('Peaks after trimming nearby peaks: ', N)
    tup.peaks = peaks
    # Watershed on the negated distance transform, seeded by the peaks.
    regions = watershed(image=-dt, markers=peaks, mask=dt > 0)
    regions = randomize_colors(regions)
    if return_all:
        tup.regions = regions
        return tup
    else:
        return regions
def find_peaks(dt, r=4, footprint=None):
    r"""
    Returns all local maxima in the distance transform.

    Parameters
    ----------
    dt : ND-array
        The distance transform of the pore space. This may be calculated
        and filtered using any means desired.
    r : scalar
        The size of the structuring element used in the maximum filter.
        This controls the localness of any maxima. The default is 4 voxels.
    footprint : ND-array
        Specifies the shape of the structuring element used to define the
        neighborhood when looking for peaks. If none is specified then a
        spherical shape is used (or circular in 2D).

    Returns
    -------
    An ND-array of booleans with ``True`` values at the location of any
    local maxima.

    Notes
    -----
    ``skimage.feature.peak_local_max`` can be used instead; its square
    structuring element is significantly faster than a circular or
    spherical one.
    """
    dt = dt.squeeze()
    im = dt > 0
    if footprint is None:
        if im.ndim == 2:
            footprint = disk
        elif im.ndim == 3:
            footprint = ball
        else:
            raise Exception("only 2-d and 3-d images are supported")
    # NOTE(review): solid voxels (dt == 0) are raised by 2 before
    # max-filtering, presumably so shallow void voxels adjacent to the
    # solid are not flagged as maxima — confirm.
    mx = spim.maximum_filter(dt + 2*(~im), footprint=footprint(r))
    # A voxel is a peak if it equals the local max, restricted to the void.
    peaks = (dt == mx)*im
    return peaks
def reduce_peaks(peaks):
    r"""
    Any peaks that are broad or elongated are replaced with a single voxel
    that is located at the center of mass of the original voxels.

    Parameters
    ----------
    peaks : ND-image
        An image containing True values indicating peaks in the distance
        transform.

    Returns
    -------
    An array with the same number of isolated peaks as the original image,
    but fewer total voxels.

    Notes
    -----
    The center of mass of a group of voxels is used as the new single
    voxel, so if the group has an odd shape (like a horse shoe), the new
    voxel may *not* lie on top of the original set.
    """
    if peaks.ndim == 2:
        strel = square
    else:
        strel = cube
    markers, N = spim.label(input=peaks, structure=strel(3))
    # BUG FIX: index must span all N labels (1..N inclusive); the original
    # used sp.arange(1, N), which silently dropped the last peak.
    inds = spim.measurements.center_of_mass(input=peaks,
                                            labels=markers,
                                            index=sp.arange(1, N + 1))
    inds = sp.floor(inds).astype(int)  # truncate fractional centroids
    # Centroid may not be on old pixel, so create a new peaks image
    peaks = sp.zeros_like(peaks, dtype=bool)
    peaks[tuple(inds.T)] = True
    return peaks
def trim_saddle_points(peaks, dt, max_iters=10):
    r"""
    Removes peaks that were mistakenly identified because they lied on a
    saddle or ridge in the distance transform that was not actually a true
    local peak.

    Parameters
    ----------
    peaks : ND-array
        A boolean image containing True values to mark peaks in the
        distance transform (``dt``)
    dt : ND-array
        The distance transform of the pore space for which the true peaks
        are sought.
    max_iters : int
        The maximum number of iterations to run while eroding the saddle
        points. The default is 10, which is usually not reached; however,
        a warning is issued if the loop ends prior to removing all saddle
        points.

    Returns
    -------
    An image with fewer peaks than was received.
    """
    # Alias so the same code handles 2D (square) and 3D (cube) images.
    if dt.ndim == 2:
        from skimage.morphology import square as cube
    else:
        from skimage.morphology import cube
    labels, N = spim.label(peaks)
    slices = spim.find_objects(labels)
    for i in range(N):
        # Work on a padded sub-image around each peak for speed.
        s = extend_slice(s=slices[i], shape=peaks.shape, pad=10)
        peaks_i = labels[s] == i+1
        dt_i = dt[s]
        im_i = dt_i > 0
        iters = 0
        peaks_dil = sp.copy(peaks_i)
        while iters < max_iters:
            iters += 1
            # Grow the peak, then keep only voxels equal to the local max
            # of dt under the grown region; a true peak reproduces itself.
            peaks_dil = spim.binary_dilation(input=peaks_dil,
                                             structure=cube(3))
            peaks_max = peaks_dil*sp.amax(dt_i*peaks_dil)
            peaks_extended = (peaks_max == dt_i)*im_i
            if sp.all(peaks_extended == peaks_i):
                break  # Found a true peak
            elif sp.sum(peaks_extended*peaks_i) == 0:
                peaks_i = False
                break  # Found a saddle point
        peaks[s] = peaks_i
        if iters >= max_iters:
            print('Maximum number of iterations reached, consider' +
                  'running again with a larger value of max_iters')
    return peaks
def trim_nearby_peaks(peaks, dt):
    r"""
    Finds pairs of peaks that are nearer to each other than to the solid
    phase, and removes the peak that is closer to the solid.

    Parameters
    ----------
    peaks : ND-array
        A boolean image containing True values to mark peaks in the
        distance transform (``dt``)
    dt : ND-array
        The distance transform of the pore space for which the true peaks
        are sought.

    Returns
    -------
    An array the same size as ``peaks`` containing a subset of the peaks
    in the original image.

    Notes
    -----
    Each pair of peaks is considered simultaneously, so for a triplet of
    peaks each pair is considered. This ensures that only the single peak
    that is furthest from the solid is kept. No iteration is required.
    """
    # Alias so the same code handles 2D (square) and 3D (cube) images.
    if dt.ndim == 2:
        from skimage.morphology import square as cube
    else:
        from skimage.morphology import cube
    peaks, N = spim.label(peaks, structure=cube(3))
    crds = spim.measurements.center_of_mass(peaks, labels=peaks,
                                            index=sp.arange(1, N+1))
    crds = sp.vstack(crds).astype(int)  # Convert to numpy array of ints
    # Get distance between each peak as a distance map
    tree = sptl.cKDTree(data=crds)
    temp = tree.query(x=crds, k=2)  # k=2: self plus nearest other peak
    nearest_neighbor = temp[1][:, 1]
    dist_to_neighbor = temp[0][:, 1]
    del temp, tree  # Free-up memory
    # BUG FIX: fancy-indexing with a *list* of index arrays is deprecated
    # (an error in modern NumPy); a tuple performs the intended
    # one-coordinate-per-axis lookup.
    dist_to_solid = dt[tuple(crds.T)]  # Get distance to solid for each peak
    hits = sp.where(dist_to_neighbor < dist_to_solid)[0]
    # Drop peak that is closer to the solid than it's neighbor
    drop_peaks = []
    for peak in hits:
        if dist_to_solid[peak] < dist_to_solid[nearest_neighbor[peak]]:
            drop_peaks.append(peak)
        else:
            drop_peaks.append(nearest_neighbor[peak])
    drop_peaks = sp.unique(drop_peaks)
    # Remove peaks from image
    slices = spim.find_objects(input=peaks)
    for s in drop_peaks:
        peaks[slices[s]] = 0
    return (peaks > 0)
def find_disconnected_voxels(im, conn=None):
    r"""
    This identifies all pore (or solid) voxels that are not connected to
    the edge of the image. This can be used to find blind pores, or remove
    artifacts such as solid phase voxels that are floating in space.

    Parameters
    ----------
    im : ND-image
        A Boolean image, with True values indicating the phase for which
        disconnected voxels are sought.
    conn : int
        For 2D the options are 4 and 8 for square and diagonal neighbors,
        while for 3D the options are 6 and 26, similarly for square and
        diagonal neighbors. The default (None) is the maximal connectivity
        (8 in 2D, 26 in 3D).

    Returns
    -------
    An ND-image the same size as ``im``, with True values indicating
    voxels of the phase of interest (i.e. True values in the original
    image) that are not connected to the outer edges.

    Notes
    -----
    The returned array (e.g. ``holes``) can be used to trim blind pores
    from ``im`` using: ``im[holes] = False``
    """
    # Fixed: an unsupported ``conn`` (or ndim) used to leave ``strel``
    # unbound, crashing later with a confusing NameError; raise a clear
    # ValueError up front instead.
    if im.ndim == 2:
        if conn == 4:
            strel = disk(1)
        elif conn in [None, 8]:
            strel = square(3)
        else:
            raise ValueError('conn must be 4 or 8 for a 2D image')
    elif im.ndim == 3:
        if conn == 6:
            strel = ball(1)
        elif conn in [None, 26]:
            strel = cube(3)
        else:
            raise ValueError('conn must be 6 or 26 for a 3D image')
    else:
        raise ValueError('only 2-d and 3-d images are supported')
    labels, N = spim.label(input=im, structure=strel)
    # Regions touching the border are cleared, so what remains is exactly
    # the set of disconnected (blind) regions.
    holes = clear_border(labels=labels) > 0
    return holes
def fill_blind_pores(im):
    r"""
    Fills all pores that are not connected to the edges of the image.

    Parameters
    ----------
    im : ND-array
        The image of the porous material

    Returns
    -------
    A version of ``im`` but with all the disconnected pores removed.

    See Also
    --------
    find_disconnected_voxels
    """
    holes = find_disconnected_voxels(im)
    # NOTE(review): this modifies *im* in place and returns the same
    # array; pass a copy if the original must be preserved.
    im[holes] = False
    return im
def trim_floating_solid(im):
    r"""
    Removes all solid that that is not attached to the edges of the image.

    Parameters
    ----------
    im : ND-array
        The image of the porous material

    Returns
    -------
    A version of ``im`` but with all the disconnected solid removed.

    See Also
    --------
    find_disconnected_voxels
    """
    # Invert so the *solid* phase is True, then find its floating regions.
    holes = find_disconnected_voxels(~im)
    # NOTE(review): modifies *im* in place and returns the same array.
    im[holes] = True
    return im
def trim_nonpercolating_paths(im, inlet_axis=0, outlet_axis=0):
    r"""
    Removes all nonpercolating paths between specified edges.

    This function is essential when performing transport simulations on an
    image, since image regions that do not span between the desired inlet
    and outlet do not contribute to the transport.

    Parameters
    ----------
    im : ND-array
        The image of the porous material with ``True`` values indicating
        the phase of interest
    inlet_axis : int
        Inlet axis of boundary condition. For a three dimensional image
        the number ranges from 0 to 2; for a two dimensional image the
        range is 0 to 1.
    outlet_axis : int
        Outlet axis of boundary condition. Same range as ``inlet_axis``.

    Returns
    -------
    A copy of ``im`` but with all the nonpercolating paths removed.

    See Also
    --------
    find_disconnected_voxels
    trim_floating_solid
    trim_blind_pores
    """
    # Work on the inverted image: trim_floating_solid(~im) removes void
    # regions of the original image that are disconnected from any edge.
    im = trim_floating_solid(~im)
    labels = spim.label(~im)[0]
    # Marker arrays with 1's on the inlet / outlet face of the chosen axis.
    inlet = sp.zeros_like(im, dtype=int)
    outlet = sp.zeros_like(im, dtype=int)
    if im.ndim == 3:
        if inlet_axis == 0:
            inlet[0, :, :] = 1
        elif inlet_axis == 1:
            inlet[:, 0, :] = 1
        elif inlet_axis == 2:
            inlet[:, :, 0] = 1
        if outlet_axis == 0:
            outlet[-1, :, :] = 1
        elif outlet_axis == 1:
            outlet[:, -1, :] = 1
        elif outlet_axis == 2:
            outlet[:, :, -1] = 1
    if im.ndim == 2:
        if inlet_axis == 0:
            inlet[0, :] = 1
        elif inlet_axis == 1:
            inlet[:, 0] = 1
        if outlet_axis == 0:
            outlet[-1, :] = 1
        elif outlet_axis == 1:
            outlet[:, -1] = 1
    # Labels present at each face; the symmetric difference is the set of
    # labels touching only one face (i.e. non-percolating regions).
    IN = sp.unique(labels*inlet)
    OUT = sp.unique(labels*outlet)
    new_im = sp.isin(labels, list(set(IN) ^ set(OUT)), invert=True)
    im[new_im == 0] = True
    return ~im
def trim_extrema(im, h, mode='maxima'):
    r"""
    This trims local extrema in greyscale values by a specified amount,
    essentially decapitating peaks or flooding valleys, or both.

    Parameters
    ----------
    im : ND-array
        The image whose extrema are to be removed
    h : scalar
        The height to remove from each peak or fill in each valley
    mode : string {'maxima' | 'minima' | 'extrema'}
        Specifies whether to remove maxima or minima or both

    Returns
    -------
    A copy of the input image with all the peaks and/or valleys removed.

    Notes
    -----
    This function is referred to as **imhmax** or **imhmin** in Matlab.
    """
    result = im
    # Fixed: the original used if/elif here, so mode='extrema' only ever
    # ran the maxima branch.  Two independent ifs (operating on the
    # running ``result``) apply both steps for 'extrema'.
    if mode in ['maxima', 'extrema']:
        result = reconstruction(seed=result - h, mask=result,
                                method='dilation')
    if mode in ['minima', 'extrema']:
        result = reconstruction(seed=result + h, mask=result,
                                method='erosion')
    return result
@jit(forceobj=True)
def flood(im, regions=None, mode='max'):
    r"""
    Floods/fills each region in an image with a single value based on the
    specific values in that region. The ``mode`` argument is used to
    determine how the value is calculated.

    Parameters
    ----------
    im : array_like
        An ND image with isolated regions containing 0's elsewhere.
    regions : array_like
        An array the same shape as ``im`` with each region labeled. If
        None is supplied (default) then ``scipy.ndimage.label`` is used
        with its default arguments.
    mode : string
        Specifies how to determine which value should be used to flood
        each region. Options are:

        *'max'* : Floods each region with the local maximum in that region

        *'min'* : Floods each region the local minimum in that region

        *'size'* : Floods each region with the size of that region

    Returns
    -------
    An ND-array the same size as ``im`` with new values placed in each
    forground voxel based on the ``mode``.

    See Also
    --------
    props_to_image
    """
    mask = im > 0
    if regions is None:
        labels, N = spim.label(mask)
    else:
        labels = sp.copy(regions)
        N = labels.max()
    # Flatten so the per-voxel loops below (accelerated by numba's object
    # mode via @jit(forceobj=True)) can accumulate into V per label.
    I = im.flatten()
    L = labels.flatten()
    if mode.startswith('max'):
        V = sp.zeros(shape=N+1, dtype=float)
        for i in range(len(L)):
            if V[L[i]] < I[i]:
                V[L[i]] = I[i]
    elif mode.startswith('min'):
        V = sp.ones(shape=N+1, dtype=float)*sp.inf
        for i in range(len(L)):
            if V[L[i]] > I[i]:
                V[L[i]] = I[i]
    elif mode.startswith('size'):
        V = sp.zeros(shape=N+1, dtype=int)
        for i in range(len(L)):
            V[L[i]] += 1
    # Broadcast each label's value back onto its voxels, keep void at 0.
    im_flooded = sp.reshape(V[labels], newshape=im.shape)
    im_flooded = im_flooded*mask
    return im_flooded
def apply_chords(im, spacing=0, axis=0, trim_edges=True):
    r"""
    Adds chords to the void space in the specified direction. The chords
    are separated by 1 voxel plus the provided spacing.

    Parameters
    ----------
    im : ND-array
        An image of the porous material with void marked as True.
    spacing : int (default = 0)
        Chords are automatically separated by 1 voxel and this argument
        increases the separation.
    axis : int (default = 0)
        The axis along which the chords are drawn.
    trim_edges : bool (default = True)
        Whether or not to remove chords that touch the edges of the image.
        These chords are artificially shortened, so skew the chord length
        distribution.

    Returns
    -------
    An ND-array of the same size as ``im`` with True values indicating the
    chords.

    See Also
    --------
    apply_chords_3D
    """
    if spacing < 0:
        raise Exception('Spacing cannot be less than 0')
    # BUG FIX: the dimensionality must be captured *before* atleast_3d
    # pads the image; the original tested im.ndim afterwards, so the 2D
    # spacing branch was unreachable and 2D images got the 3D spacing.
    ndim_in = im.ndim
    dims1 = np.arange(0, im.ndim)
    dims2 = np.copy(dims1)
    dims2[axis] = 0
    dims2[0] = axis
    # Rotate the requested axis into position 0, then pad to 3D so one
    # slicing pattern covers both cases.
    im = np.moveaxis(a=im, source=dims1, destination=dims2)
    im = np.atleast_3d(im)
    ch = np.zeros_like(im, dtype=bool)
    if ndim_in == 2:
        ch[:, ::2+spacing, ::2+spacing] = 1
    elif ndim_in == 3:
        ch[:, ::4+2*spacing, ::4+2*spacing] = 1
    chords = im*ch
    chords = np.squeeze(chords)
    if trim_edges:
        # Drop chords touching the image border (artificially short).
        temp = clear_border(spim.label(chords == 1)[0]) > 0
        chords = temp*chords
    # Undo the axis rotation (moveaxis with swapped dims is its own inverse).
    chords = np.moveaxis(a=chords, source=dims1, destination=dims2)
    return chords
def apply_chords_3D(im, spacing=0, trim_edges=True):
    r"""
    Adds chords to the void space in all three principle directions. The
    chords are seprated by 1 voxel plus the provided spacing. Chords in
    the X, Y and Z directions are labelled 1, 2 and 3 resepctively.

    Parameters
    ----------
    im : ND-array
        A 3D image of the porous material with void space marked as True.
    spacing : int (default = 0)
        Chords are automatically separed by 1 voxel on all sides, and this
        argument increases the separation.
    trim_edges : bool (default = True)
        Whether or not to remove chords that touch the edges of the image.
        These chords are artifically shortened, so skew the chord length
        distribution.

    Returns
    -------
    An ND-array of the same size as ``im`` with values of 1 indicating
    x-direction chords, 2 indicating y-direction chords, and 3 indicating
    z-direction chords.

    Notes
    -----
    The chords are separated by a spacing of at least 1 voxel so that
    tools that search for connected components, such as
    ``scipy.ndimage.label``, can detect individual chords.

    See Also
    --------
    apply_chords
    """
    if im.ndim < 3:
        raise Exception('Must be a 3D image to use this function')
    if spacing < 0:
        raise Exception('Spacing cannot be less than 0')
    # The three direction grids are offset from each other so chords with
    # different labels never touch.
    ch = sp.zeros_like(im, dtype=int)
    ch[:, ::4+2*spacing, ::4+2*spacing] = 1   # X-direction
    ch[::4+2*spacing, :, 2::4+2*spacing] = 2  # Y-direction
    ch[2::4+2*spacing, 2::4+2*spacing, :] = 3  # Z-direction
    chords = ch*im
    if trim_edges:
        # Drop chords touching the image border (artificially short).
        temp = clear_border(spim.label(chords > 0)[0]) > 0
        chords = temp*chords
    return chords
def local_thickness(im, sizes=25):
    r"""
    For each voxel, compute the radius of the largest sphere that both
    engulfs the voxel and fits entirely within the foreground.

    This differs from a plain distance transform, which finds the largest
    sphere that could be *centered* on each voxel.

    Parameters
    ----------
    im : array_like
        A binary image with the phase of interest set to True.
    sizes : array_like or scalar
        The sizes to invade. A list of values is used directly; a scalar
        requests that many points spanning the min and max of the distance
        transform.

    Returns
    -------
    An image with the pore size value in each voxel.

    Notes
    -----
    "Foreground" can be either the pore space or the solid, whichever is
    set to True.  This is identical to ``porosimetry`` with
    ``access_limited=False``.
    """
    # Porosimetry without access limitation IS the local thickness.
    return porosimetry(im=im, sizes=sizes, access_limited=False)
def porosimetry(im, sizes=25, inlets=None, access_limited=True,
                mode='fft'):
    r"""
    Performs a porosimetry simulution on the image.

    Parameters
    ----------
    im : ND-array
        An ND image of the porous material containing True values in the
        pore space.
    sizes : array_like or scalar
        The sizes to invade. If a list of values is provided they are used
        directly. If a scalar is provided then that number of points
        spanning the min and max of the distance transform are used.
    inlets : ND-array, boolean
        A boolean mask with True values indicating where the invasion
        enters the image. By default all faces are considered inlets,
        akin to a mercury porosimetry experiment. Users can also apply
        solid boundaries to their image externally before passing it in,
        allowing for complex inlets like circular openings, etc. This
        argument is only used if ``access_limited`` is ``True``.
    access_limited : Boolean
        This flag indicates if the intrusion should only occur from the
        surfaces (``access_limited`` is True, which is the default), or
        if the invading phase should be allowed to appear in the core of
        the image. The former simulates experimental tools like mercury
        intrusion porosimetry, while the latter is useful for comparison
        to gauge the extent of shielding effects in the sample.
    mode : string
        Controls which method is used to compute the result. Options are:

        *'fft'* - (default) Performs a distance tranform of the void
        space, thresholds to find voxels larger than ``sizes[i]``, trims
        the resulting mask if ``access_limitations`` is ``True``, then
        dilates it using the efficient fft-method to obtain the
        non-wetting fluid configuration.

        *'dt'* - Same as 'fft', except uses a second distance transform,
        relative to the thresholded mask, to find the invading fluid
        configuration. The choice of 'dt' or 'fft' depends on speed, which
        is system and installation specific.

        *'mio'* - Uses a single morphological image opening step to obtain
        the invading fluid confirguration directly, *then* trims if
        ``access_limitations`` is ``True``. This method is not ideal and
        is included mostly for comparison purposes.

    Returns
    -------
    An ND-image with voxel values indicating the sphere radius at which it
    becomes accessible from the inlets. This image can be used to find
    invading fluid configurations as a function of applied capillary
    pressure by applying a boolean comparison: ``inv_phase = im > r``
    where ``r`` is the radius (in voxels) of the invading sphere. Of
    course, ``r`` can be converted to capillary pressure using your
    favorite model.

    See Also
    --------
    fftmorphology
    """
    def trim_blobs(im, inlets):
        # Remove invaded regions that are not connected to the inlets:
        # blobs touching the (temporarily marked) inlet voxels survive.
        temp = sp.zeros_like(im)
        temp[inlets] = True
        labels, N = spim.label(im + temp)
        im = im ^ (clear_border(labels=labels) > 0)
        return im
    dt = spim.distance_transform_edt(im > 0)
    if inlets is None:
        inlets = get_border(im.shape, mode='faces')
    inlets = sp.where(inlets)  # convert mask to index tuple
    if isinstance(sizes, int):
        # Logarithmically spaced radii from the largest dt value down to 1.
        sizes = sp.logspace(start=sp.log10(sp.amax(dt)), stop=0, num=sizes)
    else:
        sizes = sp.sort(a=sizes)[-1::-1]  # invade largest radius first
    if im.ndim == 2:
        strel = disk
    else:
        strel = ball
    imresults = sp.zeros(sp.shape(im))
    # In every mode, each voxel keeps the FIRST (largest) radius at which
    # it was invaded, because only zero-valued voxels are updated.
    if mode == 'mio':
        for r in tqdm(sizes):
            imtemp = fftmorphology(im, strel(r), mode='opening')
            if access_limited:
                imtemp = trim_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imresults[(imresults == 0)*imtemp] = r
    if mode == 'dt':
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_blobs(imtemp, inlets)
            if sp.any(imtemp):
                # Second distance transform dilates the seeds by r.
                imtemp = spim.distance_transform_edt(~imtemp) < r
                imresults[(imresults == 0)*imtemp] = r
    if mode == 'fft':
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_blobs(imtemp, inlets)
            if sp.any(imtemp):
                # FFT convolution with the structuring element dilates the
                # seeds; >0.1 thresholds away numerical noise.
                imtemp = fftconvolve(imtemp, strel(r), mode='same') > 0.1
                imresults[(imresults == 0)*imtemp] = r
    return imresults
| true |
46df3b31f74e0a4595c2f4124610f43c8b3e8f37 | Python | houdinis/wsgi | /app.py | UTF-8 | 2,060 | 2.78125 | 3 | [] | no_license | from wsgiref.simple_server import make_server
from webob import Response, Request
from webob.dec import wsgify
from webob.exc import HTTPNotFound
import re
# The application() function below is no longer used; it is kept only for comparison with the class-based app.
# def application(environ: dict, start_response):
# # 请求处理
# request = Request(environ)
# print(request.method)
# print(request.path)
# print(request.query_string)
# print(request.GET)
# print(request.POST)
# print("params = {}".format(request.params))
#
# # 响应处理
# res = Response() # [("Content-Tyep", "text/html; charset=UTF-8"), ("Content-Length", "0")]
# res.status_code = 200
# print(res.content_type)
# res.body = "<h1>this is my web root!</h1>".encode("utf-8")
# return res(environ, start_response)
class Application:
    """Minimal regex-routed WSGI application.

    Handlers are registered onto the class-wide route table via the
    ``register`` decorator; dispatch tries patterns in registration order
    and returns the first match.
    """

    ROUTETABLE = []

    @classmethod
    def register(cls, pattern):
        """Decorator factory: map the regex *pattern* to a handler."""
        def decorator(handler):
            # Store the pre-compiled pattern together with its handler.
            cls.ROUTETABLE.append((re.compile(pattern), handler))
            return handler
        return decorator

    @wsgify
    def __call__(self, request: Request) -> Response:
        """Dispatch *request* to the first handler whose pattern matches."""
        for regex, handler in self.ROUTETABLE:
            if regex.match(request.path):
                return handler(request)
        raise HTTPNotFound('<h1>你访问的页面被外星人劫持了</h1>')
@Application.register('^/$')
def index(request: Request):
    # Handler for '/': returns a small UTF-8 HTML page.
    res = Response()
    res.status_code = 200
    res.content_type = "text/html"
    res.charset = "utf-8"
    res.body = "<h1>root!</h1>".encode()
    return res
@Application.register('^/python$')
def index(request: Request):
    # Handler for '/python': returns a plain-text page in gb2312.
    # NOTE(review): this function is also named ``index`` and shadows the
    # '/' handler at module level; routing still works because the route
    # table captured the first function object at registration time.
    res = Response()
    res.status_code = 200
    res.content_type = "text/plain"
    res.charset = "gb2312"
    res.body = "<h1>python</h1>".encode()
    return res
if __name__ == "__main__":
    # Serve the app on all interfaces; Ctrl-C triggers a clean shutdown.
    ip = "0.0.0.0"
    port = 8000
    server = make_server(ip, port, Application())
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # serve_forever has already exited via the exception, so
        # shutdown() returns immediately; server_close() then releases
        # the listening socket.
        server.shutdown()
        server.server_close()
0529ea5f892352530e49c139081872d2473b463d | Python | aenshtyn/safepass2 | /user_test.py | UTF-8 | 462 | 2.734375 | 3 | [] | no_license | import unittest
from user import User
class TestUser(unittest.TestCase):
def setUp(self):
self.new_user = User("NewUser", "12345")
def test_init(self):
self.assertEqual(self.new_user.username, "NewUser")
self.assertEqual(self.new_user.password, "12345")
def test_save_user(self):
self.new_user.save_user()
self.assertEqual(len(User.user_list), 1)
if __name__ == '__main__':
unittest.main()
| true |
32c8bc84431d6224d8f34642cbf5b928f45ed941 | Python | Freshield/LEARN_TENSORFLOW | /19_help_figure_error/model0916.py | UTF-8 | 7,118 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 16 14:41:31 2017
@author: Linstancy
"""
import tensorflow as tf
import numpy as np
import preprocessing as prepro
import cv2
import csv
import os
import skimage.data
import skimage.transform
from tensorflow.contrib.layers import flatten
from sklearn.utils import shuffle
# Root folders of the GTSRB training / final-test image sets (one
# sub-directory per class label).
train_data_dir = os.path.normpath("GTSRB/Final_Training/Images")
test_data_dir = os.path.normpath("GTSRB/Final_Test/Images")
def load_data(data_dir):
    """Loads a data set and returns two lists:

    images: a list of Numpy arrays, each representing an image.
    labels: a list of numbers that represent the images labels.
    """
    # Get all subdirectories of data_dir. Each represents a label.
    directories = [d for d in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, d))]
    # Loop through the label directories and collect the data in
    # two lists, labels and images.
    y_train = []
    x_train = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f)
                      for f in os.listdir(label_dir) if f.endswith(".ppm")]
        # For each label, load its images and add them to the images list,
        # and add the label number (i.e. directory name) to the labels list.
        for f in file_names:
            x_train.append(skimage.data.imread(f))
            y_train.append(int(d))
    return x_train, y_train
    # Fixed: the original continued with 12 lines of copy-pasted
    # "test set" code after this return; it was unreachable and has been
    # removed.  Callers load the test set by passing test_data_dir.
def lsy_model(x, train):
    """Conv net for 43-class GTSRB classification: 4 conv blocks + 4 FC layers.

    x: float32 tensor [batch, 48, 48, 3]; train: apply dropout when True.

    Bug fix: the fc3 dropout previously assigned its result to ``fc2``
    (``fc2=tf.nn.dropout(fc3,0.5)``), so the dropout on fc3 was silently
    discarded before the logit layer.
    NOTE(review): dropout is also applied to the logits themselves when
    training, which is unusual — confirm that is intentional. Weights use
    truncated_normal with the default stddev of 1.0.
    """
    with tf.name_scope('conv_1'):
        w1 = tf.Variable(tf.truncated_normal(shape=[1, 1, 3, 32]))
        b1 = tf.Variable(tf.constant(0.0, shape=[32], dtype=tf.float32))
        conv_1 = tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='SAME') + b1
        relu1 = tf.nn.relu(conv_1)
    with tf.name_scope('conv_2'):
        w2 = tf.Variable(tf.truncated_normal(shape=[3, 3, 32, 64]))
        b2 = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32))
        conv_2 = tf.nn.conv2d(relu1, w2, strides=[1, 1, 1, 1], padding='SAME') + b2
        pool2 = tf.nn.max_pool(conv_2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        relu2 = tf.nn.relu(pool2)
    with tf.name_scope('conv_3'):
        w3 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 192]))
        b3 = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32))
        conv_3 = tf.nn.conv2d(relu2, w3, strides=[1, 1, 1, 1], padding='SAME') + b3
        pool3 = tf.nn.max_pool(conv_3, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        relu3 = tf.nn.relu(pool3)
    with tf.name_scope('conv4'):
        w4 = tf.Variable(tf.truncated_normal(shape=[3, 3, 192, 192]))
        b4 = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32))
        conv_4 = tf.nn.conv2d(relu3, w4, strides=[1, 1, 1, 1], padding='SAME') + b4
        pool4 = tf.nn.max_pool(conv_4, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        relu4 = tf.nn.relu(pool4)
    fc0 = flatten(relu4)
    with tf.name_scope('fc1'):
        w5 = tf.Variable(tf.truncated_normal(shape=[6912, 2048]))
        b5 = tf.Variable(tf.constant(0.0, shape=[2048], dtype=tf.float32))
        fc1 = tf.matmul(fc0, w5) + b5
        fc1 = tf.nn.relu(fc1)
        if train: fc1 = tf.nn.dropout(fc1, 0.5)
    with tf.name_scope('fc2'):
        w6 = tf.Variable(tf.truncated_normal(shape=[2048, 1024]))
        b6 = tf.Variable(tf.constant(0.0, shape=[1024], dtype=tf.float32))
        fc2 = tf.matmul(fc1, w6) + b6
        fc2 = tf.nn.relu(fc2)
        if train: fc2 = tf.nn.dropout(fc2, 0.5)
    with tf.name_scope('fc3'):
        w7 = tf.Variable(tf.truncated_normal(shape=[1024, 256]))
        b7 = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32))
        fc3 = tf.matmul(fc2, w7) + b7
        fc3 = tf.nn.relu(fc3)
        if train: fc3 = tf.nn.dropout(fc3, 0.5)  # was mis-assigned to fc2
    with tf.name_scope('fc4'):
        w8 = tf.Variable(tf.truncated_normal(shape=[256, 43]))
        b8 = tf.Variable(tf.constant(0.0, shape=[43], dtype=tf.float32))
        logit = tf.matmul(fc3, w8) + b8
        if train: logit = tf.nn.dropout(logit, 0.5)
    return logit
# Load both splits and resize every image to 48x48 RGB.
x_train,y_train=load_data(train_data_dir)
x_train= np.array([skimage.transform.resize(image, (48, 48)) for image in x_train])
x_test,y_test=load_data(test_data_dir)
x_test= np.array([skimage.transform.resize(image, (48, 48)) for image in x_test])
with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 48, 48, 3], name='x-input')
    # NOTE(review): y is declared [None, 43] but the feed below passes plain
    # integer labels (batch_y / y_test) — this looks shape-mismatched; confirm.
    y = tf.placeholder(tf.int32, [None,43],name='y-input')
    one_hot_y = tf.one_hot(y, 43, name='y-one-hot')
rate = 0.001
global_step = tf.placeholder(tf.int32)
logit=lsy_model(x,train=True)
EPOCHS=1000
BATCH_SIZE=100
with tf.name_scope('optimizer'):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=one_hot_y)
    loss_operation = tf.reduce_mean(cross_entropy)
    # Exponential learning-rate decay driven by the fed global_step.
    learning_rate = tf.train.exponential_decay(rate, global_step, 1, 0.99, staircase=True, name=None)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logit, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#confusion_matrix = tf.contrib.metrics.confusion_matrix(tf.argmax(one_hot_y, 1), tf.argmax(logits, 1), num_classes)
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    print("Training...")
    for i in range(EPOCHS):
        # Reshuffle the training set every epoch and iterate mini-batches.
        x_train_epoch, y_train_epoch = shuffle(x_train, y_train)
        num_examples = len(x_train)
        #start_time = time.time()
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x,batch_y= x_train_epoch[offset:end],y_train_epoch[offset:end]
            # NOTE(review): the fetch list is [loss, train_op, learning_rate],
            # so `_` receives the loss and `loss` receives the train op's None;
            # the unpacking names look swapped. Also global_step is never fed.
            _, loss, learning_rate_val = sess.run([loss_operation,training_operation,learning_rate], feed_dict={x:batch_x,y:batch_y})
        '''if i % 100 == 0 or i + 1 == EPOCHS:
            valid_acc=sess.run(accuracy_operation,feed_dict=valid_feed)
            print(" %d training step(s),validation accuracy is %g" % (i,valid_acc))'''
        test_acc=sess.run(accuracy_operation,feed_dict={x:x_test, y:y_test})
        print("%d training step(s), test accuracy is %g" % (i, test_acc))
| true |
4c337846f33b1aba300d3eee789dfa6e391450f6 | Python | aqueed-shaikh/submissions | /7/duda_justin/helloflask.py | UTF-8 | 660 | 3.359375 | 3 | [] | no_license |
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/")
def home():
    """Serve the index route with a static HTML greeting."""
    markup = "<h2>Hello World!</h2>"
    return markup
# NOTE(review): this __main__ guard sits above the helper functions below and
# app.run() blocks, so fact/fib/isPrime only matter when the module is imported.
if __name__ == '__main__':
    app.run()
def fact(n):
    """Return n! for positive integers; 1 for any n <= 1."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def fib(n):
    """Return the n-th Fibonacci number (fib(1) == fib(2) == 1; n < 3 gives 1)."""
    if n < 3:
        return 1
    prev, curr = 1, 1
    for _ in range(n - 2):
        prev, curr = curr, prev + curr
    return curr
def isPrime(n):
    """Return True iff n is prime.

    Fixes: the original tested every divisor down from n-1 (O(n)), and its
    loop condition made every negative number report True. Trial division now
    stops at sqrt(n) and anything below 2 is rejected up front.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        # 2 is the only even prime.
        return n == 2
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
| true |
f90a62337ccd421b2231fd0679859c05921b1bdc | Python | MilesCranmer/bnn_chaos_model | /figures/spock/modelfitting.py | UTF-8 | 5,354 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | import pandas as pd
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_curve, confusion_matrix, auc
from sklearn import metrics
import numpy as np
def hasnull(row):
    """Return 1 if the row contains at least one null value, else 0."""
    return 1 if row.isnull().sum() > 0 else 0
def train_test_split(trainingdatafolder, features=None, labelname='Stable', filter=False, filtertimes=False):
    """Load trainingdata.csv/labels.csv from *trainingdatafolder* and return an
    80/20 (trainX, trainy, testX, testy) split taken in file order.

    filter: keep rows with instability_time > 1e4 AND no null features.
    filtertimes: keep rows with instability_time > 1e4 only.
    labelname: which labels column to use as y.
    """
    dataset = pd.read_csv(trainingdatafolder+"trainingdata.csv", index_col = 0)
    if features is None:
        # Default to every feature column present in the CSV.
        features = dataset.columns.values
    dataset['hasnull'] = dataset.apply(hasnull, axis=1)
    labels = pd.read_csv(trainingdatafolder+"labels.csv", index_col=0)
    if filter:
        y = labels[(labels['instability_time'] > 1.e4) & (dataset['hasnull'] == 0)][labelname]
        X = dataset[(labels['instability_time'] > 1.e4) & (dataset['hasnull'] == 0)][features]
    elif filtertimes:
        y = labels[labels['instability_time'] > 1.e4][labelname]
        X = dataset[labels['instability_time'] > 1.e4][features]
    else:
        y = labels[labelname]
        X = dataset[features]
    # Deterministic split: first 80% of rows train, the rest test.
    Nrows = int(0.8*X.shape[0])
    trainX = X.iloc[:Nrows, :]
    trainy = y.iloc[:Nrows]
    testX = X.iloc[Nrows:, :]
    testy = y.iloc[Nrows:]
    return trainX, trainy, testX, testy
def ROC_curve(trainingdatafolder, model, features=None, filter=False, filtertimes=False):
    """Return (auc, fpr, tpr, thresholds) of the ROC curve on the test split."""
    _, _, testX, testy = train_test_split(trainingdatafolder, features, filter=filter, filtertimes=filtertimes)
    scores = model.predict_proba(testX)[:, 1]
    fpr, tpr, ROCthresholds = roc_curve(testy, scores)
    return metrics.roc_auc_score(testy, scores), fpr, tpr, ROCthresholds
def PR_curve(trainingdatafolder, model, features=None, filter=False, filtertimes=False):
    """Return (auc, precision, recall, thresholds) of the PR curve on the test split."""
    _, _, testX, testy = train_test_split(trainingdatafolder, features, filter=filter, filtertimes=filtertimes)
    scores = model.predict_proba(testX)[:, 1]
    precision, recall, PRthresholds = precision_recall_curve(testy, scores)
    return metrics.auc(recall, precision), precision, recall, PRthresholds
def tnr_npv_curve(trainingdatafolder, model, features=None, filter=False, filtertimes=False, N=1000):
    """Sweep N thresholds over [0, 1] and return (auc, npv, tnr, thresholds),
    where npv is the negative predictive value and tnr the true-negative rate
    at each threshold."""
    trainX, trainy, testX, testy = train_test_split(trainingdatafolder, features, filter=filter, filtertimes=filtertimes)
    preds = model.predict_proba(testX)[:,1]
    npv, tnr = np.zeros(N), np.zeros(N)
    thresholds = np.linspace(0, 1, N)
    for i, thresh in enumerate(thresholds):
        predy = (preds >= thresh)
        # sklearn's confusion_matrix ravels to (tn, fp, fn, tp).
        tn, fp, fn, tp = confusion_matrix(testy, predy).ravel()
        npv[i] = tn/(tn+fn)
        tnr[i] = tn/(tn+fp)
    # When thresh is 0, we don't predict any negs, so npv is nan
    npv[np.isnan(npv)] = 1
    aucval = auc(tnr, npv)
    return aucval, npv, tnr, thresholds
def stable_unstable_hist(trainingdatafolder, model, features=None, filter=False, filtertimes=False):
    """Return (stable_preds, unstable_preds): test-set probabilities split by true label."""
    _, _, testX, testy = train_test_split(trainingdatafolder, features, filter=filter, filtertimes=filtertimes)
    probs = model.predict_proba(testX)[:, 1]
    return probs[np.where(testy == 1)], probs[np.where(testy == 0)]
def calibration_plot(trainingdatafolder, model, features=None, bins=10, filter=False, filtertimes=False):
    """Bin the test-set probabilities and return (bin_centers, frac_stable,
    error_bars) for a reliability/calibration plot.

    NOTE(review): a bin with zero stable systems makes 1./nstable divide by
    zero — presumably the data never produces such bins; confirm.
    """
    trainX, trainy, testX, testy = train_test_split(trainingdatafolder, features, filter=filter, filtertimes=filtertimes)
    preds = model.predict_proba(testX)[:,1]
    hist, edges = np.histogram(preds, bins=bins)
    bincenters = []
    fracstable = []
    errorbars = []
    for i in range(len(edges)-1):
        bincenters.append((edges[i]+edges[i+1])/2)
        mask = (preds >= edges[i]) & (preds < edges[i+1])
        nstable = testy[mask].sum()
        fracstable.append(nstable/hist[i]) # fraction of stable systems in bin with predictions in range
        errorbars.append(np.sqrt(1./nstable + 1./hist[i])*fracstable[-1]) # assume poisson counting errors for each fractional error, and add in quadrature for error on ratio.
        # multiply the fractional error by value
    return np.array(bincenters), np.array(fracstable), np.array(errorbars)
def unstable_error_fraction(trainingdatafolder, model, thresh, features=None, bins=10, filter=False, filtertimes=False):
    """For truly-unstable systems (log10 instability time < 8.99), bin by
    log instability time and return (bin_centers, error_fraction, error_bars),
    where an "error" is a prediction above *thresh* (i.e. called stable)."""
    trainX, trainy, testX, testy = train_test_split(trainingdatafolder, features, filter=filter, filtertimes=filtertimes)
    preds = model.predict_proba(testX)[:,1]
    # Second split call only to fetch the instability times aligned with preds.
    dummy, dummy, dummy, inst_times = train_test_split(trainingdatafolder, features, labelname='instability_time', filter=filter, filtertimes=filtertimes)
    log_inst_times = np.log10(inst_times)
    unstable = log_inst_times < 8.99
    preds = preds[unstable]
    log_inst_times = log_inst_times[unstable]
    hist, edges = np.histogram(log_inst_times, bins=bins)
    bincenters = []
    errorfracs = []
    errorbars = []
    for i in range(len(edges)-1):
        bincenters.append((edges[i]+edges[i+1])/2)
        mask = (log_inst_times >= edges[i]) & (log_inst_times < edges[i+1])
        Nerrors = (preds[mask] > thresh).sum()
        errorfracs.append(Nerrors/hist[i])
        errorbars.append(np.sqrt(1./Nerrors + 1./hist[i])*errorfracs[-1]) # see calibration plot comment
    return np.array(bincenters), np.array(errorfracs), np.array(errorbars)
| true |
f1dcb34a6042a920663d3521b681d9d9479eb3db | Python | prograsshopper/five-questions-for-a-week | /term10th/week_5/subsets.py | UTF-8 | 275 | 3.125 | 3 | [] | no_license | import itertools
class Solution:
def subsets(self, nums):
result = []
for i in range(0, len(nums)+1):
combi_iter = itertools.combinations(nums, i)
for combi in combi_iter:
result.append(combi)
return result
| true |
005e0e254119990ecc4ff94120a39f872747528d | Python | MelvinDunn/Algorithm_Implementations | /algorithms/algorithms/word2vec.py | UTF-8 | 4,664 | 3.109375 | 3 | [] | no_license | """
CBOW implementation is used here.
"""
import numpy as np
def get_data(filepath):
    """Return the entire text content of the file at *filepath*.

    Fix: the original opened the file and never closed it; a context manager
    now guarantees the handle is released.
    """
    with open(filepath, 'r') as f:
        return f.read()
def clean_text(corpus):
    """Lower-case the corpus and tokenise it on whitespace."""
    lowered = corpus.lower()
    return lowered.split()
def create_vocabulary(corpus):
    """Return the corpus's unique tokens as a sorted list (set order is arbitrary)."""
    return sorted(set(corpus))
def init_input_one_hot_layer(doc, word):
    """Return a (len(doc), 1) column holding 1.0 wherever *doc* equals *word*."""
    onehot = np.zeros((len(doc), 1))
    matches = np.asarray(doc) == word
    onehot[matches] = 1
    return onehot
def create_input_and_output_words(doc, window=1):
    """Build (inputs, outputs) context pairs: for each position in *doc*,
    every word within +/-*window* positions becomes an output for that word.

    Positions before the start or past the end of *doc* are skipped.
    Fix: the original only guarded index 0, so with window >= 2 negative
    offsets wrapped around to the end of the document via Python's negative
    indexing; the unused ``item`` local is also gone.
    """
    inputs = []
    outputs = []
    window_range = list(range(-window, window + 1))
    window_range.remove(0)
    for key, value in enumerate(doc):
        for j in window_range:
            if key + j < 0 or key + j >= len(doc):
                continue
            inputs.append(doc[key])
            outputs.append(doc[key + j])
    return inputs, outputs
def create_one_hots(doc, io_words, input=True):
    """Stack one-hot rows (one per word) for the input or output word list."""
    words = io_words[0] if input else io_words[1]
    onehots = np.zeros((len(words), len(doc)))
    for row, word in enumerate(words):
        onehots[row, :] += np.asarray(init_input_one_hot_layer(doc, word)).T.reshape(len(doc))
    return onehots
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x)."""
    return np.reciprocal(1.0 + np.exp(-x))
def cbow(vocab, word):
    # TODO: unimplemented stub — never called; the training itself happens in
    # shallow_network/shallow_network_iter below.
    pass
def linear_neuron(x):
    """Softplus activation log(1 + e^x).

    Uses np.log1p for better precision when e^x is tiny (log(1 + eps)).
    """
    return np.log1p(np.exp(x))
def init_hidden_layer(vocab, n_features=300):
    """Return a zero matrix with one row per vocabulary entry."""
    return np.full((len(vocab), n_features), 0.0)
def shallow_network(inputs, output, epoch, hiddenlayer_neurons):
    """Train a one-hidden-layer network by plain gradient descent and return
    the final sigmoid output. Weights are drawn from np.random.uniform, so
    results are non-deterministic across calls.

    NOTE(review): inputs/output appear to be 1-D one-hot vectors here (see
    the .T.reshape(inputs.shape[0], 1) below) — confirm against the caller.
    """
    #neural network portion is from https://www.analyticsvidhya.com/blog/2017/06/word-embeddings-count-word2veec/
    lr=0.1
    inputlayer_neurons = inputs.shape[0]
    output_neurons = (output.shape[0])
    #initialize neurons
    wh = np.random.uniform(size=(inputlayer_neurons,hiddenlayer_neurons))
    bh = np.random.uniform(size=(1,hiddenlayer_neurons))
    wout = np.random.uniform(size=(hiddenlayer_neurons,output_neurons))
    bout = np.random.uniform(size=(1,output_neurons))
    for i in range(epoch):
        # Forward pass.
        hidden_layer_input1 = np.dot(inputs, wh)+bh
        hidden_layer_activations = sigmoid(hidden_layer_input1)
        #dot product of your activations and your weights plus biases gets your
        #inputs for the output layer
        output_layer_input = np.dot(hidden_layer_activations , wout) + bout
        #sigmoid transform the output layer
        y_hat_output = sigmoid(output_layer_input)
        #compute the error
        error = output - y_hat_output
        #compute the slope
        slope_output_layer = derivatives_sigmoid(y_hat_output)
        slope_hidden_layer = derivatives_sigmoid(hidden_layer_activations)
        # Backpropagation: deltas, then weight/bias updates scaled by lr.
        delta_output = error * slope_output_layer * lr
        error_at_hidden_layer = np.dot(delta_output, wout.T)
        #compute the delta at the hidden layer.
        delta_hidden_layer = error_at_hidden_layer * slope_hidden_layer
        #update your weights and biases
        wout += np.dot(hidden_layer_activations.T, delta_output) * lr
        wh += np.dot(inputs.T.reshape(inputs.shape[0],1), delta_hidden_layer)*lr
        bh += np.sum(delta_hidden_layer, axis=0)*lr
        bout += np.sum(delta_output, axis=0)*lr
    return y_hat_output
def shallow_network_iter(inputs, output, epoch=2000, hiddenlayer_neurons=10):
    """Train one shallow network per row of *inputs* and stack the results.

    Fix: the loop previously read the module-level ``outputs`` variable
    instead of the ``output`` parameter (identical at the existing call site,
    but wrong for any other caller).
    """
    stacked = np.zeros(inputs.shape)
    for i in range(inputs.shape[0]):
        stacked[i, :] += shallow_network(inputs[i, :], output[i, :], epoch, hiddenlayer_neurons).reshape(inputs.shape[1])
    return stacked
def derivatives_sigmoid(x):
    """Derivative of the logistic function expressed in terms of its value: x*(1-x)."""
    return (1. - x) * x
def cosine_sim(a, b):
    """Cosine similarity a.b / (|a| |b|).

    Fix: the original divided by np.dot(a.dot(a), b.dot(b)), i.e. the product
    of the SQUARED norms, so anything but unit vectors was mis-scaled.
    """
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def cosine_sim_two_words(word1, word2, yhat, corpus=None):
    """Cosine similarity between the embedding columns of two words in *yhat*.

    Fix: the function previously ignored its *yhat* argument and read the
    module-level ``y_hat`` instead. *corpus* defaults to the module-level
    ``cleaned_corpus`` for backward compatibility with the existing caller.
    """
    vocab = cleaned_corpus if corpus is None else corpus
    return cosine_sim(yhat[:, vocab.index(word1)], yhat[:, vocab.index(word2)])
# Demo: build a tiny CBOW-style training set from the first 100 tokens of the
# lyrics file, train per-row shallow networks, then probe word similarity.
if __name__ == "__main__":
    cleaned_corpus = (clean_text(get_data('data/steely_dan_deacon_blues_lyrics.txt')))[:100]
    print(cleaned_corpus)
    io_words = (create_input_and_output_words(cleaned_corpus))
    # One-hot matrices: row i is the i-th (input, output) training pair.
    inputs = create_one_hots(cleaned_corpus, io_words)
    outputs = create_one_hots(cleaned_corpus, io_words, input=False)
    lookup = np.asarray([cleaned_corpus for i in range(inputs.shape[0])])
    y_hat = ((shallow_network_iter(inputs,outputs)))
    #word2vec becomes relevant as the size of your corpus grows. Using only 100 words yields poor results.
    print(cosine_sim_two_words("this", "this", y_hat))
| true |
4db7f50cd10def8cde2d9edf067c751e621fd72e | Python | gracetian6/mai21-learned-smartphone-isp | /tools/dng2png.py | UTF-8 | 1,036 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | ##############
# DNG to PNG #
##############
import numpy as np
import imageio
import rawpy
import sys
import os
# Batch-convert every DNG file in argv[1] into a 16-bit PNG written to argv[2].
# NOTE(review): paths are built by plain string concatenation, so both
# directory arguments must end with a path separator — confirm callers do.
if __name__ == "__main__":
    input_dir = sys.argv[1]
    if not os.path.isdir(input_dir):
        print("The folder doesn't exist!")
        sys.exit()
    input_dng = [f for f in os.listdir(input_dir) if os.path.isfile(input_dir + f)]
    input_dng.sort()
    output_dir = sys.argv[2]
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for dng_images in input_dng:
        print("Converting file " + dng_images)
        if not os.path.isfile(input_dir + dng_images):
            print("The file doesn't exist!")
            sys.exit()
        raw = rawpy.imread(input_dir + dng_images)
        raw_image = raw.raw_image
        # NOTE(review): raw.raw_image looks like a view into the RawPy
        # object's buffer; using it after `del raw` may be unsafe — confirm
        # against the rawpy documentation.
        del raw
        # convert format
        # NOTE(review): float32 then uint16 round-trips the same values;
        # presumably a leftover from an earlier scaling step — verify.
        raw_image = raw_image.astype(np.float32)
        png_image = raw_image.astype(np.uint16)
        new_name = dng_images.replace(".dng", ".png") # save to output_dir
        imageio.imwrite(output_dir + new_name, png_image)
| true |
fc298a6c96b342dff7a653986b16de6b4d351d52 | Python | aydogan4288/registration-login | /login_and_registiration/login.py | UTF-8 | 3,461 | 2.625 | 3 | [] | no_license | from flask import Flask, render_template, session, request, redirect, flash
from mysqlconnection import connectToMySQL
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
from flask_bcrypt import Bcrypt
app = Flask(__name__)
bcrypt = Bcrypt(app)
mysql = connectToMySQL('mydb')
app.secret_key = "ThisIsSecret!"
@app.route('/', methods = ['GET'])
def index():
    """Render the login page together with every registered user."""
    db = connectToMySQL('mydb')
    all_users = db.query_db('SELECT * FROM users')
    print('Fetched all users', all_users)
    return render_template('login.html', users = all_users)
@app.route('/register', methods = ['POST'])
def register():
    """Validate the registration form; flash field-specific errors and bounce
    back to '/', otherwise bcrypt-hash the password, insert the user and log
    them in by storing the first name in the session."""
    # First name: required and longer than 2 characters.
    if len(request.form['first_name']) < 1:
        flash("Name cannot be blank!", 'first_name')
    elif len(request.form['first_name']) <= 2:
        flash("Name must be 2+ characters", 'first_name')
    # Last name: same rules.
    if len(request.form['last_name']) < 1:
        flash("Name cannot be blank!", 'last_name')
    elif len(request.form['last_name']) <= 2:
        flash("Name must be 2+ characters", 'last_name')
    # Email: required and must match EMAIL_REGEX.
    if len(request.form['email']) < 1:
        flash("Email cannot be blank!", 'email')
    elif not EMAIL_REGEX.match(request.form['email']):
        flash("Invalid Email Address!", 'email')
    # Password: required, at least 8 characters, confirmation must match.
    if len(request.form['password']) < 1:
        flash("Password cannot be blank!", 'password')
    elif len(request.form['password']) < 8:
        flash("Password must be at least 8 characters!", 'password')
    if request.form['con_password'] != request.form['password']:
        flash('Password confirmation does not match Password', 'con_password')
    # Any flash recorded above means validation failed.
    if '_flashes' in session.keys():
        return redirect("/")
    else:
        pw_hash = bcrypt.generate_password_hash(request.form['password'])
        print(pw_hash)
        mysql = connectToMySQL("mydb")
        query = "INSERT INTO users (first_name, last_name, email, password, created_at, updated_at) VALUES (%(first_name)s, %(last_name)s, %(email)s, %(password_hash)s, NOW(), NOW());"
        data = {
            'first_name': request.form['first_name'],
            'last_name': request.form['last_name'],
            'email': request.form['email'],
            'password_hash': pw_hash
        }
        new_user_id = mysql.query_db(query, data)
        session["first_name"] = request.form['first_name']
        return redirect("/success")
@app.route('/login', methods = ['POST'])
def login():
    """Validate the login form against the users table; on success store the
    user's id and first name in the session and redirect to /success."""
    # NOTE(review): the raw submitted password is stored in the session here —
    # that is a security concern; confirm whether anything relies on it.
    session["email"] = request.form["emaillogin"]
    session["password"] = request.form["passwordlogin"]
    # NOTE(review): the flash categories below ('password'/'email') differ
    # from the ones used further down ('passwordlogin'/'emaillogin') — verify
    # which names the template expects.
    if len(request.form['passwordlogin']) < 1:
        flash("Password cannot be blank.", "password")
    if len(request.form['emaillogin']) < 1:
        flash("Please input your email!", "email")
    else:
        mysql = connectToMySQL("mydb")
        query = "SELECT * FROM users WHERE email = %(email)s"
        data = {
            "email": request.form['emaillogin']
        }
        user = mysql.query_db(query, data)
        if not user:
            flash("Email not registered. Please register", "emaillogin")
        else:
            # Compare the submitted password against the stored bcrypt hash.
            if not bcrypt.check_password_hash(user[0]['password'], request.form['passwordlogin']):
                flash("Incorrect Password", "passwordlogin")
    # Any flash recorded above means the login failed.
    if '_flashes' in session.keys():
        return redirect("/")
    else:
        session["id"] = user[0]["id"]
        session["first_name"] = user[0]["first_name"]
        return redirect('/success')
@app.route('/success')
def success():
    """Greet the logged-in user by the first name stored in the session."""
    greeting = "Thank you " + session["first_name"] + ". You are now logged in!"
    return greeting
if __name__ == "__main__":
app.run(debug=True) | true |
1f253980a9b8783494009d5d3e16ded85ae45e84 | Python | helloyan/learning_tf | /4-7 trainmonitored.py | UTF-8 | 753 | 2.625 | 3 | [] | no_license | """
Summary: tf.train.MonitoredTrainingSession
This session handle saves and restores checkpoint model files by itself.
Unlike the earlier approach, this example does not save checkpoints by loop
step count but by elapsed training time: the save_checkpoint_secs argument
sets how many seconds of training pass between checkpoint saves.
Author: Lebhoryi@gmail.com
Date: 2019/01/28
"""
import tensorflow as tf
tf.reset_default_graph()
global_steps = tf.train.get_or_create_global_step()
# Each sess.run(step) below increments the global step by one.
step = tf.assign_add(global_steps, 1)
# Checkpoints are written to log/checkpoints every 2 seconds of training.
with tf.train.MonitoredTrainingSession(checkpoint_dir="log/checkpoints", save_checkpoint_secs=2) as sess:
    print(sess.run([global_steps]))
    while not sess.should_stop():
        i = sess.run(step)
print(i) | true |
c0a6229787e0ed92b477c94d5b1b4f686cbaa501 | Python | LTstrange/pushbox | /search_F.py | UTF-8 | 9,771 | 2.921875 | 3 | [] | no_license | #!/user/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'LTstrange'
import copy
import time
"""
用穷举和剪枝的方法,找到推箱子的最佳走法,广度优先
对每一步的field都与先前的步骤的field比对,重复则剪枝
先前的field储存在T_field里面
U,R,D,L 分别为上右下左
"""
step = 0
T_field = []
def in_field():
    """Read the playing field row by row from stdin; typing 'end' finishes."""
    rows = []
    row_number = 1
    while True:
        raw = input('line%d:' % (row_number))
        if raw == 'end':
            return rows
        rows.append([int(ch) for ch in raw])
        row_number += 1
def in_target():
    """Read target coordinates from stdin, one per line; 'end' finishes."""
    targets = []
    entry_number = 1
    while True:
        raw = input('tar%d:' % (entry_number))
        if raw == 'end':
            return targets
        targets.append([int(ch) for ch in raw])
        entry_number += 1
def deleteFalseway(field, corners):
    """Prune obviously unsolvable board states.

    Returns False when a box (2) has boxes to its right, below, and
    diagonally below-right (a 2x2 cluster can never be separated), or when
    any box sits on a listed dead-end corner cell; True otherwise.

    Fixes: the corner test previously compared the coordinate pair itself
    against 2 (``corner == 2``, always False), so corner pruning never fired;
    the 2x2 test is now bounds-checked so boxes on the last row/column no
    longer raise IndexError.
    NOTE(review): assumes no *target* cell appears in *corners* — a box
    parked on such a corner would be wrongly pruned.
    """
    for ind_y, row in enumerate(field):
        for ind_x, cell in enumerate(row):
            if cell != 2:
                continue
            if ind_y + 1 < len(field) and ind_x + 1 < len(row):
                if field[ind_y + 1][ind_x] == 2 and field[ind_y][ind_x + 1] == 2 and field[ind_y + 1][ind_x + 1] == 2:
                    return False
    for corner in corners:
        if field[corner[0]][corner[1]] == 2:
            return False
    return True
def cut_branch(field, T_field, corners):
    """Keep this state only if it is unseen so far and not an obvious dead end."""
    if field not in T_field and deleteFalseway(field, corners):
        return True
    return False
def Fpoint(field):
    """Locate the player marker (value 3) and return its [row, col] pair."""
    coords = []
    for row in field:
        for cell in row:
            if cell == 3:
                coords.append(field.index(row))
                coords.append(row.index(3))
    return coords
def move(nfield, way, point):
    """Replay the move sequence *way* from player position *point* on a copy
    of *nfield*.

    Codes: 0=up, 1=right, 2=down, 3=left. Stepping onto floor (1) moves the
    player (3); stepping onto a box (2) pushes it if the cell behind is
    floor. Any blocked move returns (False, 0); otherwise (True, new_field).

    Rework: the original duplicated the same logic four times (once per
    direction) and rescanned the whole grid with Fpoint after every step;
    a delta table and direct position tracking replace both.
    """
    field = copy.deepcopy(nfield)
    y, x = point[0], point[1]
    deltas = ((-1, 0), (0, 1), (1, 0), (0, -1))  # up, right, down, left
    for code in way:
        dy, dx = deltas[code]
        ny, nx = y + dy, x + dx
        if field[ny][nx] == 1:
            # Plain step onto empty floor.
            field[ny][nx] = 3
            field[y][x] = 1
        elif field[ny][nx] == 2:
            # Push the box one cell further if that cell is free.
            if field[ny + dy][nx + dx] == 1:
                field[ny + dy][nx + dx] = 2
                field[ny][nx] = 3
                field[y][x] = 1
            else:
                return False, 0
        else:
            # Wall (0) or anything else: the move is illegal.
            return False, 0
        y, x = ny, nx
    return True, field
def check_goal(field, target):
    """Return True iff every target coordinate [y, x] currently holds a box (2)."""
    return all(field[y][x] == 2 for y, x in target)
def C_tree(field, point, tree, target, corners):
    """Search one ply deeper, breadth-first.

    *tree* holds every surviving move sequence of the current depth; each is
    extended by all four directions, replayed from the initial *field* via
    move(), pruned with cut_branch(), and the first sequence that satisfies
    check_goal() is returned. Otherwise the function recurses with the new
    frontier (no explicit failure case, so an unsolvable puzzle recurses
    until Python's recursion limit).
    """
    global T_field
    global step
    # Optional depth cut-off (left disabled by the author):
    # if step > 12:
    #     print('depth limit exceeded')
    #     FINALtime = time.time()
    #     alltime = FINALtime - starttime
    #     print("elapsed:", alltime)
    #     return [0]
    nfield = copy.deepcopy(field)
    new_tree = []
    step += 1
    print("searching step%d..." % (step))
    print('此时tree内元素数量', len(tree))
    ft_time = time.time()
    if len(tree) != 0:
        for way in tree:
            for i in range(4):
                nway = copy.deepcopy(way)
                nway.append(i)
                # Replay the whole extended sequence from the start field.
                result = move(nfield, nway, point)
                if result[0]:
                    if cut_branch(result[1], T_field, corners):
                        if check_goal(result[1], target):
                            return nway
                        T_field.append(copy.deepcopy(result[1]))
                        new_tree.append(copy.deepcopy(nway))
                    else:
                        continue
                else:
                    continue
    else:
        # First ply: seed the frontier with the four single-move sequences.
        for i in range(4):
            way = []
            way.append(i)
            result = move(nfield, way, point)
            if result[0]:
                if cut_branch(result[1], T_field, corners):
                    if check_goal(result[1], target):
                        return way
                    T_field.append(copy.deepcopy(result[1]))
                    new_tree.append(copy.deepcopy(way))
                else:
                    continue
            else:
                continue
    end_time = time.time()
    print("this step use %f s" % (end_time - ft_time))
    return C_tree(field, point, new_tree, target, corners)
def in_corners():
    """Read corner coordinates from stdin, one per line; 'end' finishes."""
    corner_list = []
    entry_number = 1
    while True:
        raw = input('cor%d:' % (entry_number))
        if raw == 'end':
            return corner_list
        corner_list.append([int(ch) for ch in raw])
        entry_number += 1
def find_corners(field, target):
    """Collect inner-corner candidates: for every wall cell (0) with a pair of
    adjacent walls forming an L, record the diagonal cell as a (y, x) tuple.

    *target* is accepted for interface compatibility but unused.

    Fixes: the original removed elements from ``corners`` while iterating it,
    silently skipping entries, and its neighbour lookups wrapped around the
    board via negative indexing; neighbours are now bounds-checked and
    negative candidates filtered with a comprehension.
    """
    corners = []
    height = len(field)
    for ind_y, row in enumerate(field):
        width = len(row)
        for ind_x, cell in enumerate(row):
            if cell != 0:
                continue
            below = ind_y + 1 < height and field[ind_y + 1][ind_x] == 0
            above = ind_y - 1 >= 0 and field[ind_y - 1][ind_x] == 0
            right = ind_x + 1 < width and field[ind_y][ind_x + 1] == 0
            left = ind_x - 1 >= 0 and field[ind_y][ind_x - 1] == 0
            if below and right:
                corners.append((ind_y + 1, ind_x + 1))
            if below and left:
                corners.append((ind_y + 1, ind_x - 1))
            if above and right:
                corners.append((ind_y - 1, ind_x + 1))
            if above and left:
                corners.append((ind_y - 1, ind_x - 1))
    return [c for c in corners if c[0] >= 0 and c[1] >= 0]
def answer(get):
    """Translate numeric move codes into U/R/D/L letters and print the plan."""
    code_to_letter = {0: 'U', 1: 'R', 2: 'D', 3: 'L'}
    t_answer = [code_to_letter[each] for each in get if each in code_to_letter]
    print("解决方案:", t_answer)
def main():
    """Solve the hard-coded level: check box/target counts match, locate the
    player, run the breadth-first search and print the winning move list."""
    global T_field
    # Alternative: read the field interactively.
    # field= in_field()
    # field=[[0, 0, 0, 0, 0, 0, 0, 0],
    #        [0, 0, 1, 1, 0, 0, 0, 0],
    #        [0, 0, 1, 1, 1, 1, 0, 0],
    #        [0, 0, 0, 1, 0, 1, 0, 0],
    #        [0, 1, 0, 2, 0, 1, 3, 0],
    #        [0, 1, 2, 1, 1, 0, 1, 0],
    #        [0, 1, 1, 1, 1, 2, 1, 0],
    #        [0, 0, 0, 0, 0, 0, 0, 0]]
    field = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
             [0, 3, 1, 1, 1, 1, 1, 1, 0],
             [0, 1, 2, 2, 1, 2, 2, 1, 0],
             [0, 2, 1, 1, 1, 1, 2, 1, 0],
             [0, 1, 2, 2, 1, 2, 2, 1, 0],  # legend: 0 = wall (immovable block), 3 = player position, 2 = box, 1 = empty floor
             [0, 1, 2, 1, 1, 1, 1, 1, 0],
             [0, 0, 0, 0, 0, 0, 0, 0, 0]]  # a very hard level
    # target=in_target()
    # target = [[4, 1], [5, 1], [6, 1]]
    target = [[1, 3], [2, 4], [4, 4], [5, 3], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [3, 6], [3, 7]]
    T_field.append(copy.deepcopy(field))
    # Count the boxes on the board; it must equal the number of targets.
    num = 0
    for Ly in field:
        num += Ly.count(2)
    if len(target) == num:
        point = Fpoint(field)
        # corners = find_corners(field,target)
        # corners = in_corners()
        corners = [[1, 1], [1, 7], [5, 1], [5, 7]]
        # corners=[[1,1],[1,3],[1,5],[1,6],[1,13],[3,16],[4,1],[4,3],[4,5],[4,6],[4,18],[5,13],[5,18],[6,1],[6,6],[7,13],[7,18],[8,1],[8,7],[8,12],[9,5],[9,18],[10,7],[10,10],[10,12],[10,17]]
        # print(corners)
        tree = [[]]
        starttime = time.time()
        get = C_tree(field, point, tree, target, corners)
        FINALtime = time.time()
        alltime = FINALtime - starttime
        print("耗时:", alltime)
        answer(get)
    else:
        print("啦啦啦,箱子和目标的数目不一致哦")
# Entry point: run the solver when this file is executed directly.
if __name__ == '__main__':
    main()
| true |
0d4805c9d8230a625d8222732a5cd3563046a349 | Python | Hiking-Apprentice/python_spider | /scrapy爬取2020考研调剂学生信息/Kaoyan_tiaoji/Kaoyan_tiaoji/spiders/kaoyan_tiaoji.py | UTF-8 | 1,115 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
from ..items import KaoyanTiaojiItem
class KaoyanTiaojiSpider(scrapy.Spider):
    """Scrapy spider scraping 2020 postgraduate transfer (调剂) listings from
    chinakaoyan.com: pages 1-410 of the student list are fetched and each
    page's score/major/title/date columns are emitted as one item of lists."""
    name = 'kaoyan_tiaoji'
    allowed_domains = ['chinakaoyan.com']
    start_urls = ['http://chinakaoyan.com/']
    def start_requests(self):
        # Enumerate every paginated student-list page explicitly.
        for i in range(1,411):
            url="http://www.chinakaoyan.com/tiaoji/studentlist/pagenum/{}.shtml".format(i)
            yield scrapy.Request(
                url=url,
                callback=self.parse
            )
    def parse(self, response):
        # Each field is extracted as a list covering the whole page, so the
        # item's values are parallel lists rather than per-row records.
        item=KaoyanTiaojiItem()
        total_score=response.xpath('//div[@class="info-item font14"]/span[@class="total"]/text()').extract()
        major=response.xpath('//div[@class="info-item font14"]/span[@class="name"]/text()').extract()
        info=response.xpath('//div[@class="info-item font14"]/span[@class="title"]/a/text()').extract()
        date=response.xpath('//div[@class="info-item font14"]/span[@class="time"]/text()').extract()
        item['total_score']=total_score
        item['major']=major
        item['info']=info
        item['date']=date
        yield item
| true |
6b1b30bda7fe39b5fa3c73e95588e32f4caaa6ed | Python | denysgerasymuk799/Logistic_system | /LogisticSystem/loc_item_vehicle.py | UTF-8 | 746 | 3.625 | 4 | [] | no_license | class Location(object):
"""class to create a Location"""
def __init__(self, city, postoffice):
"""
Description
"""
self.city, self.postoffice = city, postoffice
class Item(object):
"""class to create an Item"""
def __init__(self, name, price):
"""
Description
"""
self.name, self.price = name, price
def __str__(self):
"""
Return: a string with name and price for this item
"""
return "{} - {}".format(self.name, self.price)
class Vehicle(object):
"""class to create a Vehicle"""
def __init__(self, vehicleNo):
"""
Description
"""
self.vehicleNo, self.isAvailable = vehicleNo, True | true |
a65330340d11589c2c9af70ae79f1a81f0be0e9b | Python | Ressull250/code_practice | /part4/153.py | UTF-8 | 503 | 3.03125 | 3 | [] | no_license | class Solution(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums: return 0
if len(nums) == 1: return nums[0]
l,r = 0,len(nums)-1
while l<r:
mid = (l+r) / 2
if nums[r] > nums[mid]:
r = mid
elif nums[r] < nums[mid]:
l = mid+1
else:
r = r-1
return nums[l]
print Solution().findMin([-1,-1,1])
| true |
5e6ae5cd9f4ba48615bc0f25407f9f2dbb9d0cad | Python | paraslonic/Genome-Complexity-Browser | /gcb_server/app/gene_graph/source/old/genes_coordinates_finder.py | UTF-8 | 1,641 | 2.53125 | 3 | [] | no_license | import argparse
# Command-line interface: paths to the OrthoFinder output, the main-chain
# file, the output file, plus the name of the reference strain.
parser = argparse.ArgumentParser()
parser.add_argument('--orfiles', default='no', type=str, help='OrthoFinder output file')
parser.add_argument('--main_chain_file', default='no', type=str, help='File with main chain, generated by start.sh utilite')
parser.add_argument('-o', '--output_file', default='STDOUT', type=str, help='Output file path')
parser.add_argument('--name', default='no', type=str, help='name of base stamm')
args = parser.parse_args()
def parser_chain(Orthologous_file_path, Main_chain_file_path, output_file, name):
    """Append 'gene<TAB>start<TAB>end' lines for strain *name*, sorted by
    start position, for every gene listed in the main-chain file.

    The main-chain file lists one gene id per line until the first line that
    contains a tab. The orthologous file maps 'gene: strain|...|start|end ...'
    tokens; only tokens whose strain prefix equals *name* count.

    Fix: the original opened three files and never closed any of them; all
    I/O is now wrapped in context managers.
    """
    print('Gene coordinates file creating with ' + name + '...')
    coordinates = {}
    # Collect the main-chain gene ids; a tab marks the end of the list.
    with open(Main_chain_file_path, 'r') as main_chain_file:
        for line in main_chain_file:
            if line.find('\t') >= 0:
                break
            coordinates[line[:-1]] = []
    # Pull this strain's start/end coordinates for every main-chain gene.
    with open(Orthologous_file_path, 'r') as ortho_file:
        for line in ortho_file:
            gene = line.split(': ')[0]
            for token in line.split(': ')[1].split(' '):
                if token.split('|')[0] == name and gene in coordinates:
                    coordinates[gene].append(int(token.split('|')[-2]))
                    coordinates[gene].append(int(token.split('|')[-1]))
    ordered = sorted([coords[0], coords[1], gene] for gene, coords in coordinates.items())
    # 'a+' preserves the original append-mode behaviour across repeated runs.
    with open(output_file, 'a+') as out:
        for start, end, gene in ordered:
            out.write(gene + '\t' + str(start) + '\t' + str(end) + '\n')
    print('Done!')
if args.orfiles != 'no' and args.main_chain_file != 'no' and args.output_file != 'no' and args.name != 'no':
parser_chain(args.orfiles, args.main_chain_file, args.output_file, args.name) | true |
c9f1768e2f2dd47d637c2e577067eb6cd163e972 | Python | blenature/python_advanced-1 | /modules/mod6.2.functools/partial.py | UTF-8 | 171 | 3.421875 | 3 | [] | no_license | from functools import partial
def power_func(x, y, a=1, b=0):
    """Evaluate the power law a * x**y + b."""
    scaled = a * x ** y
    return scaled + b
# Pre-bind x=2 and a=4; calls then only need y (and optionally b).
new_func = partial(power_func, 2, a=4)
print(new_func(4, b=1))  # 4 * 2**4 + 1 = 65
print(new_func(1))  # 4 * 2**1 + 0 = 8
| true |
f551bbbc6c01d937a78df7dee976c1cc6533a9f5 | Python | winnee0solta/cn-scrapper | /programs/ReactPost/autoreact-kawai.py | UTF-8 | 11,835 | 2.625 | 3 | [] | no_license | '''
Author : Winnee Creztha
Desc : scrapper for auto reaction
CN Auto React random posts replies
uses account.json for login
'''
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import os
import shutil
import time
import random
import json
from requests import get
print('################################################################')
print('PROGRAM STARTED')
print('################################################################')
#get account data
print("#############################################################################################")
print("#########################################LOADING account.json############################")
print("#############################################################################################")
# Hard-coded CN forum credentials.
# NOTE(review): despite the "LOADING account.json" banner above, the account
# is hard-coded here — secrets committed in source should be moved to the
# account.json file (or environment variables).
account = {
    "email": "foode.monkey@gmail.com",
    "password": "9860988117",
    "username": "kawai potato"
}
loginurl = "https://cybernepal.com/login/"
# start driver
print('################################################################')
print('SYSTEM STARTED')
print('################################################################')
print('################################################################')
print('STARTING DRIVER')
print('################################################################')
driver = webdriver.Chrome(
executable_path='browserdrivers/chromedriver.exe')
print('################################################################')
print('DRIVER STARTED WITHOUT ERROR')
print('################################################################')
print('################################################################')
print('OPENING URL =', loginurl)
print('################################################################')
driver.get(loginurl)
# delay
time.sleep(int(random.uniform(1, 5)))
driver.execute_script("window.scrollTo(0, (document.body.scrollHeight)/5);")
time.sleep(int(random.uniform(1, 5)))
driver.execute_script("window.scrollTo(0, (document.body.scrollHeight)/4);")
time.sleep(int(random.uniform(1, 5)))
driver.execute_script("window.scrollTo(0, (document.body.scrollHeight)/3);")
time.sleep(int(random.uniform(1, 5)))
# get html source from dom
html = driver.execute_script("return document.documentElement.outerHTML")
# parse html
soup = BeautifulSoup(html, 'html.parser')
print('################################################################')
print('LOGGING IN')
print('################################################################')
username_text_area = driver.find_element_by_name('login')
username_text_area.send_keys(account['email'])
print('################################################################')
print('EMAIL', str(account['email']))
print('################################################################')
username_text_area = driver.find_element_by_name('password')
username_text_area.send_keys(account['password'])
print('################################################################')
print('PASSWORD', str(account['password']))
print('################################################################')
driver.find_element_by_css_selector(
'.button--icon--login').click()
print('################################################################')
print('OPENING WINNEE KO ACCOUNT')
print('################################################################')
# target url
# use gaming discussion for now
url = "https://cybernepal.com/forums/gaming-discussion.33/"
print('################################################################')
print('OPENING URL =', url)
print('################################################################')
driver.get(url)
# get html source from dom
html = driver.execute_script("return document.documentElement.outerHTML")
# parse html
soup = BeautifulSoup(html, 'html.parser')
# for pagination
paginationdiv = soup.find("ul", {"class": "pageNav-main"})
lastpageindex = 0
tempcount = 0
for li in paginationdiv.findAll('li'):
tempcount = tempcount + 1
# print(li.a.get_text())
if tempcount == 5:
lastpageindex = int(li.a.get_text())
pass
# last page index
while 1:
posturls = []
# get the each page posts urls
index1 = 0
while index1 < lastpageindex+1:
# while index1 < 1:
index1 = index1 + 1
pageurl = 'https://cybernepal.com/forums/gaming-discussion.33/page-' + \
str(index1)
print('################################################################')
print("#Opening url : ", pageurl,
"#######################################")
print('################################################################')
# open page
driver.get(pageurl)
html2 = driver.execute_script(
"return document.documentElement.outerHTML")
soup2 = BeautifulSoup(html2, 'html.parser')
# time.sleep(10)
# get all the posts urls
mainpostdivs = soup2.findAll(
"div", {"class": "structItem--thread"})
loopcount = 0
for singlediv in mainpostdivs:
loopcount = loopcount + 1
print(
"#############################################################################################")
print('loop no ', str(loopcount))
print(
"#############################################################################################")
print('################################################################')
print('SLEEP MODE ON FOR .300')
print('################################################################')
time.sleep(.300)
# print(singlediv)
# continue
# get url if replies != 0
# repliediv = singlediv.find(
# "div", {"class": "structItem-cell--meta"})
# repliescount = int(repliediv.find(
# "dd").get_text()) # replies counter
# if repliescount == 0 :
# continue
# # print(repliediv.find("dd").get_text())
# thread url
urldiv = singlediv.find(
"div", {"class": "structItem-title"})
urlas = urldiv.findAll('a')
# urla = urlas.next_sibling("a")
if len(urlas) == 2:
threadlink = "https://cybernepal.com"+urlas[1]['href']
# add to list
posturls.append(threadlink)
elif len(urlas) == 1:
# print(urlas[0]['href'])
threadlink = "https://cybernepal.com"+urlas[0]['href']
# add to list
posturls.append(threadlink)
#
# Use the post links to react in random post
# take random post each time
# reach one reply then repeat
#
while 1:
# loop 28 time then wait 3hr
moneycounter = 0
while moneycounter < 29:
moneycounter = moneycounter + 1
#
# REACT
#
# select random post
selectedposturl = random.choice(posturls)
emo_val = [1, 3, 4, 5]
emotoselect = random.choice(emo_val)
# go to the url
# open page
driver.get(selectedposturl)
driver.execute_script(
"window.scrollTo(0, (document.body.scrollHeight)/5);")
time.sleep(int(random.uniform(1, 5)))
driver.execute_script(
"window.scrollTo(0, (document.body.scrollHeight)/5);")
time.sleep(int(random.uniform(1, 5)))
driver.execute_script(
"window.scrollTo(0, (document.body.scrollHeight)/5);")
time.sleep(int(random.uniform(1, 5)))
html3 = driver.execute_script(
"return document.documentElement.outerHTML")
soup3 = BeautifulSoup(html3, 'html.parser')
mainreplydivs = soup3.findAll(
"article", {"class": "message--post"})
print(
"#############################################################################################")
print('length of mainreplydivs', len(mainreplydivs))
print(
"#############################################################################################")
indexcollection = []
counter122 = 0
while counter122 < len(mainreplydivs):
indexcollection.append(counter122)
counter122 = counter122 + 1
chooser12 = random.choice(indexcollection)
singlereplydiv = mainreplydivs[chooser12]
#check if user is not winnee
usernameh4 = singlereplydiv.find(
"h4", {"class": "message-name"})
print(
"#############################################################################################")
print("#########################################USERNAME CHECK#########################################")
print(
"#############################################################################################")
username = usernameh4.a.text
if username != account["username"]:
#get reply url
spanwithid = singlereplydiv.find(
"span", {"class": "u-anchorTarget"})
idtoreact = spanwithid['id']
temp_id = idtoreact.split('-')
postid = int(temp_id[1])
print('################################################################')
print('SLEEP MODE ON FOR Random SEC ')
print('################################################################')
time.sleep(int(random.uniform(10, 80)))
# react page
reactpageurl = 'https://cybernepal.com/posts/' + \
str(postid)+'/react?reaction_id='+str(emotoselect)
driver.get(reactpageurl)
time.sleep(int(random.uniform(1, 5)))
driver.execute_script(
"window.scrollTo(0, (document.body.scrollHeight)/5);")
time.sleep(int(random.uniform(1, 5)))
driver.execute_script(
"window.scrollTo(0, (document.body.scrollHeight)/5);")
time.sleep(int(random.uniform(1, 5)))
html4 = driver.execute_script(
"return document.documentElement.outerHTML")
soup4 = BeautifulSoup(html4, 'html.parser')
# use the drive to confirm
driver.find_element_by_css_selector(
'.button--icon--confirm').click()
driver.get('https://cybernepal.com')
#
# maybe some delay
#
print(
"#############################################################################################")
print(
"######################################### 29 sec Delay #########################################")
print(
"#############################################################################################")
time.sleep(int(random.uniform(10, 29)))
#if ends for username
# 15min delay
print("#############################################################################################")
print("#########################################random 100s delay#########################################")
print("#############################################################################################")
time.sleep(int(random.uniform(100, 329)))
# page url
| true |
6f47c238681423c6113c0fe98ca5e106359ae045 | Python | SecMatrix/Machine-Learning-and-Practice-code | /2.1.1.1 LogisticRegression.py | UTF-8 | 2,224 | 3.328125 | 3 | [
"MIT"
] | permissive | # breast-cancer-wisconsin 乳腺癌
import datetime
import pandas as pd
import numpy as np
# 分割 训练集、测试集
from sklearn.model_selection import train_test_split
# 标准化
from sklearn.preprocessing import StandardScaler
# 逻辑斯蒂回归、随机梯度…
from sklearn.linear_model import LogisticRegression
# 分类报告 classification_report
from sklearn.metrics import classification_report
colume_names = ['Sample code number','Clump Thickness','Uniformity of Cell Size','Uniformity of Cell Shape','Marginal Adhesion'
,'Single Epithelial Cell Size','Bare Nuclei','Bland Chromatin','Normal Nucleoli','Mitoses','Class']
df = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
,names=colume_names)
# Data preprocessing: replace '?' placeholders with NaN and drop rows
# containing missing values.
data = pd.DataFrame(df)
data = data.replace(to_replace='?',value=np.nan)
data = data.dropna(how='any')
# Randomly sample 25% of the data for testing; the remaining 75% builds the
# training set (fixed random_state for reproducibility).
X_train,X_test,y_train,y_test = train_test_split(data[colume_names[1:10]],data[colume_names[10]],test_size=0.25,random_state=33)
# Inspect the size and class distribution of the training samples
print(pd.value_counts(y_train))
# Inspect the size and class distribution of the test samples
print(pd.value_counts(y_test))
# Standardize the features to zero mean and unit variance (fit on train only)
ss = StandardScaler()
X_train = ss.fit_transform(X_train)
X_test = ss.transform(X_test)
# Start timing the model training
starttime = datetime.datetime.now()
# Initialize the LogisticRegression model
lr = LogisticRegression()
# Fit the model on the training data, then predict on the test set
# (predictions stored in lr_y_predict)
lr.fit(X_train,y_train)
lr_y_predict = lr.predict(X_test)
# Stop timing
endtime = datetime.datetime.now()
# Elapsed training time.
# NOTE(review): .microseconds is only the sub-second component of the
# timedelta, not the total duration — total_seconds() would be the full time.
print((endtime - starttime).microseconds)
# Use the model's built-in score() to get accuracy on the test set
print('Accuracy of LR Classifier:',lr.score(X_test,y_test))
# Use classification_report to obtain precision, recall and F1 per class
print(classification_report(y_test,lr_y_predict,target_names=['Benign(阴性2)','Malignant(阳性4)']))
dd561eb652b2ff1427738468a42752aeca025c52 | Python | PriyaPareek635/QR-Code | /window.py | UTF-8 | 364 | 2.53125 | 3 | [] | no_license | import webbrowser
# Generate a minimal landing page with a button that links to the local
# QR-code reader, then open it in the default browser.
message="""<!DOCTYPE html>
<html>
<head>
<title>Home </title>
</head>
<body>
<center>
<form method="get" action="F:/QR/qrcode-reader-master/index.html">
<button type="submit"><h1>QR Code</h1></button>
</form>
</center>
</body>
</html>"""
# 'with' guarantees the file handle is closed even if write() raises
# (the original used open()/close() and leaked the handle on error).
with open('home.html', 'w') as f:
    f.write(message)
webbrowser.open_new_tab('home.html')
| true |
6332b8e9341556b3c0ffea41a1ac53856fe874fc | Python | FreedomConsultingGroup/tnt-analytics | /python/analytic2.py | UTF-8 | 4,780 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python
############################################################################################
# Name: ToilAndTrouble Analytic
# Author: Scott A. Beall
# Date: 21-March-2019
# Org: Freedom Consulting Group
# Purpose:
# This program is part of the ToilAndTrouble Tech Challenge. Its demonstrates our ability
# to develop a command line/script based analytic.
#
# Inputs/Outputs:
# The program will prompt a user for a userid of a Customer
# It will return the following results
# Q4: What questions did the customer answer
# Q5: What questions did the user have comments on
# Q6: Did the customer download/upload/ or attach any files. If so where were the file stored?
#
#
# NOTES:
# This program requires Python 3! Use pyenv to set your python version
# You'll need to install the following Python modules using pip
# pip install elasticsearch
# pip install certifi
#
############################################################################################
from datetime import datetime
from elasticsearch import Elasticsearch
from ssl import create_default_context
import certifi
import sys
def setContext():
    """Build and return a default SSL context using certifi's CA bundle.

    The original version created the context and then discarded it with a
    bare ``return``; callers that ignored the (None) return value are
    unaffected, but the context is now actually usable.
    """
    context = create_default_context(cafile=certifi.where())
    return context
def getEs():
    """Create an Elasticsearch client pointed at the AWS-hosted cluster."""
    host = "search-toiltrouble2-mg4ycgvgucahib4lulughlocci.us-east-1.es.amazonaws.com"
    return Elasticsearch([host], scheme="https", port=443)
def getUser():
    """Prompt the operator for a customer userid (SID) and return it."""
    return input("Enter a Userid (SID)?")
############################################################################################
## The rest of the program will print out the results for Questions 4-6 in Appendix A
############################################################################################
def question4(es,userid):
    """Print every question *userid* answered and return the hit count.

    Exits the whole program (sys.exit) when the user has no results, since
    the follow-up questions 5 and 6 would be meaningless.
    NOTE(review): indexing ``hits.total`` as an int assumes a pre-7.x
    Elasticsearch response shape — confirm against the cluster version.
    """
    ############################################################################################
    ## Q4: What questions did the customer answer
    ############################################################################################
    q4 = es.search(index="qa", body={"query": {"match": {"userid":userid}}})
    ## Stop program if no results for entered user were given
    if(q4['hits']['total']<1):
        print("No results were found for %s" % userid)
        sys.exit()
    ## If we get this far, then there were results
    print("-----------------------------------------------------------------------")
    print("-- Questions and responses by user %s" % userid)
    print("-----------------------------------------------------------------------")
    for hit in q4['hits']['hits']:
        print("For Question: %(question)s, %(userid)s Responsed: %(response)s" % hit["_source"])
    return q4['hits']['total']
def question5(es,userid):
    """Print the questions on which *userid* left comments, if any."""
    ############################################################################################
    ### Q5: What questions did the user have comments on
    ############################################################################################
    ## Query ElasticSearch: documents for this user that have a 'comments' field
    q5 = es.search(index="qa", body={"query":{"bool":{ "must":[{"match":{"userid":userid}}, {"exists":{"field":"comments"}}]}}})
    ## If nothing was found then skip
    if(q5['hits']['total'] < 1):
        print("no Comments by %s were found" % userid)
    else:
        print("-----------------------------------------------------------------------")
        for hit5 in q5['hits']['hits']:
            print("-- Comments left on question: %(question)s" % hit5["_source"])
            print("====>%(comments)s" % hit5["_source"])
def question6(es,userid):
    """Print the questions for which *userid* attached/uploaded documents."""
    ###########################################################################################
    ## Q6: Did the customer download/upload/ or attach any files. If so where were the file stored?
    ############################################################################################
    ## Query ElasticSearch: documents for this user that have a 'document' field
    q6 = es.search(index="qa", body={"query":{"bool":{ "must":[{"match":{"userid":userid}}, {"exists":{"field":"document"}}]}}})
    ## If no documents were found then skip
    if(q6['hits']['total'] < 1):
        print("no Documents were downloaded/uploaded or attached by %s" % userid)
    else:
        print("-----------------------------------------------------------------------")
        for hit6 in q6['hits']['hits']:
            print("Documents were attached for question: %(question)s" % hit6["_source"])
            print("==>Documents: %(document)s" % hit6["_source"])
""" setContext()
es=getEs()
userid=getUser()
question4(es,userid)
question5(es,userid)
question6(es,userid) """
print("-----------------------------------------------------------------------")
| true |
d0af2a0129ecdf8dec83db0da6de80c446519901 | Python | juanchi1789/Machine_Learning | /Linear_logistic_Regression_and_unsupervised_learning/unsupervised learning and LR/aj_nuevo.py | UTF-8 | 4,744 | 2.921875 | 3 | [] | no_license | import pandas as pd
import numpy as np
# Load the acath dataset and linearly interpolate missing values.
# NOTE(review): absolute user-specific path — breaks on any other machine.
df = pd.read_csv(r'/Users/juanmedina1810/PycharmProjects/Machine_Learning/TP4/TP/acath.csv').interpolate()
datos_maximos = 20
datos = df.drop(['sex','tvdlm'], axis=1).dropna().sample(n=datos_maximos, random_state = 123)# Take fewer rows for clearer visualization
datos.reset_index(inplace=True,drop = True)
y_true = datos['sigdz']# Kept aside to evaluate the clustering later
#datos['clase'] = datos.index
datos = datos.drop(['sigdz'], axis=1)
datos = np.array(datos)
print(datos,"\n")
class Distance_computation_grid(object):
    '''
    Distance helpers for agglomerative clustering.

    A "sample" is either a flat point (list of numbers) or a nested
    cluster (a list containing points and/or further sub-lists) built up
    by successive merges.  Cluster-to-cluster distance is single linkage:
    the minimum pairwise point distance.
    '''

    def __init__(self):
        pass

    def compute_distance(self, samples):
        '''
        Return the matrix of pairwise distances between samples.
        Diagonal entries are set to the sentinel 10**4 so that
        ``Distance_mat.min()`` never selects a sample paired with itself.
        '''
        Distance_mat = np.zeros((len(samples), len(samples)))
        for i in range(Distance_mat.shape[0]):
            for j in range(Distance_mat.shape[0]):
                if i != j:
                    Distance_mat[i, j] = float(self.distance_calculate(samples[i], samples[j]))
                else:
                    Distance_mat[i, j] = 10 ** 4
        return Distance_mat

    def distance_calculate(self, sample1, sample2):
        '''
        Minimum distance between the elements of two samples.  Flat
        point-vs-point pairs use the Euclidean norm directly; when an
        element is itself a nested cluster the array subtraction fails
        (ragged shapes) and we fall back to ``intersampledist``.
        '''
        dist = []
        for i in range(len(sample1)):
            for j in range(len(sample2)):
                try:
                    dist.append(np.linalg.norm(np.array(sample1[i],dtype=object) - np.array(sample2[j],dtype=object)))
                except Exception:
                    dist.append(self.intersampledist(sample1[i], sample2[j]))
        return min(dist)

    def intersampledist(self, s1, s2):
        '''
        Minimum distance when at least one side is a cluster.  Flat inputs
        are wrapped so both sides are lists of elements; the larger side is
        iterated on the outside.
        Fix: the original wrote ``str(type(x) != '<class 'list'>')`` in the
        branch tests below — that is ``str`` of a boolean ("True"/"False",
        both truthy), so the decision degenerated to the length check alone.
        '''
        if str(type(s2[0])) != '<class \'list\'>':
            s2 = [s2]
        if str(type(s1[0])) != '<class \'list\'>':
            s1 = [s1]
        m = len(s1)
        n = len(s2)
        dist = []
        if n >= m:
            for i in range(n):
                for j in range(m):
                    if (len(s2[i]) >= len(s1[j])) and str(type(s2[i][0])) != '<class \'list\'>':
                        dist.append(self.interclusterdist(s2[i], s1[j]))
                    else:
                        dist.append(np.linalg.norm(np.array(s2[i]) - np.array(s1[j])))
        else:
            for i in range(m):
                for j in range(n):
                    if (len(s1[i]) >= len(s2[j])) and str(type(s1[i][0])) != '<class \'list\'>':
                        dist.append(self.interclusterdist(s1[i], s2[j]))
                    else:
                        dist.append(np.linalg.norm(np.array(s1[i]) - np.array(s2[j])))
        return min(dist)

    def interclusterdist(self, cl, sample):
        # Minimum distance from any element of cluster *cl* to *sample*.
        # Fix: the original compared sample[0] itself (not its type string)
        # to "<class 'list'>", so flat samples were wrapped unconditionally.
        if str(type(sample[0])) != '<class \'list\'>':
            sample = [sample]
        dist = []
        for i in range(len(cl)):
            for j in range(len(sample)):
                dist.append(np.linalg.norm(np.array(cl[i]) - np.array(sample[j])))
        return min(dist)
# Agglomerative (bottom-up) clustering: start with one singleton cluster per
# row and repeatedly merge the closest pair until a single cluster remains.
X = datos
progression = [[i] for i in range(X.shape[0])]
samples = [[list(X[i])] for i in range(X.shape[0])]
m = len(samples)
distcal = Distance_computation_grid()
while m > 1:
    print('Sample size before clustering :- ', m)
    Distance_mat = distcal.compute_distance(samples)
    # Row indices of the minimum entry; np.where returns rows in ascending
    # order, so index [0] <= index [1] (the matrix is symmetric).
    sample_ind_needed = np.where(Distance_mat == Distance_mat.min())[0]
    # Merge the second cluster into the first (popping [1] does not shift
    # index [0] because [0] comes earlier in the list).
    value_to_add = samples.pop(sample_ind_needed[1])
    samples[sample_ind_needed[0]].append(value_to_add)
    print('Cluster Node 1 :-', progression[sample_ind_needed[0]])
    print('Cluster Node 2 :-', progression[sample_ind_needed[1]])
    # Mirror the merge in the index-only 'progression' bookkeeping.
    progression[sample_ind_needed[0]].append(progression[sample_ind_needed[1]])
    progression[sample_ind_needed[0]] = [progression[sample_ind_needed[0]]]
    v = progression.pop(sample_ind_needed[1])
    m = len(samples)
    print('Progression(Current Sample) :-', progression)
    print('Cluster attained :-', progression[sample_ind_needed[0]])
    print('Sample size after clustering :-', m)
    print('\n')
643437f4b57f1b236c1ecd9f2fd63f71c77323ae | Python | rishi4758/machine-learning-programmes-and-projects | /programme/svm from scrarch.py | UTF-8 | 680 | 3.03125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn import svm
style.use('ggplot')
# Toy training data: dict keyed by class label -> array of 2-D points.
a={'1':np.array([[1,2],[2,3],[2,5]]),'0':np.array([[6,5],[7,3],[6,9]])}
class mongo:  # Fix: original read ``def class mongo:`` — a syntax error.
    """Skeleton of a linear SVM classifier (training not implemented yet)."""

    def __init__(self, visualization=True):
        # Fix: the original read self.visualization without ever assigning
        # it, raising AttributeError on construction.
        self.visualization = visualization
        self.colors = {1: 'r', 0: 'k'}
        if self.visualization:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1)

    def fit(self, data):
        # TODO: learn the separating hyperplane (self.w, self.b) from data.
        pass

    def predict(self, features):
        # sign(w . x + b) separates the data: +1 / -1 (0 exactly on the boundary).
        s = np.sign(np.dot(np.array(features), self.w) + self.b)
        return s
| true |
7ef7229ed40f04d9f3455e2f89e7d918c3848372 | Python | Hebertprata/ChatbotCultural | /chatbot.py | UTF-8 | 5,339 | 2.703125 | 3 | [] | no_license | import json as js
import numpy as np
import nltk
from nltk.stem.rslp import RSLPStemmer
import tensorflow as tf
import tflearn as tfl
import random
import os
import speech_recognition as sr
from gtts import gTTS
from playsound import playsound
from selenium import webdriver
driver =webdriver.Chrome('C:/Users/heber/OneDrive/Documentos/chromedriver.exe')
# Carregando Json
with open("intents.json", encoding='utf-8') as file:
data = js.load(file)
def inserirItens(tagInsercao, item):
    """Append *item* to the responses of every intent whose tag matches."""
    matching = (intent for intent in data["intents"] if intent["tag"] == tagInsercao)
    for intent in matching:
        intent["responses"].append(item)
target_url = 'https://www.agendartecultura.com.br/noticias/dar-volta-museus/'
driver.get(target_url)
lista_museus=driver.find_elements_by_css_selector('p strong')
for museus in lista_museus:
inserirItens("museu", museus.text)
target_url = 'https://www.awebic.com/livros-amazon/'
driver.get(target_url)
livros=driver.find_elements_by_tag_name('li')
for livro in livros:
inserirItens("livros", livro.text)
target_url = 'https://www.maioresemelhores.com/melhores-filmes-atuais/'
driver.get(target_url)
lista_filmes=driver.find_elements_by_css_selector('h3 em')
for filmes in lista_filmes:
inserirItens("filme", filmes.text)
target_url = 'https://maistocadas.mus.br/musicas-mais-tocadas/'
driver.get(target_url)
musicas=driver.find_elements_by_class_name('musicas')
for musica in musicas:
inserirItens("musica", musica.text)
target_url = 'https://www.euamotheatro.com.br/pecasfamosas'
driver.get(target_url)
pecas=driver.find_elements_by_tag_name('strong')
for peca in pecas:
inserirItens("teatro", peca.text)
#Preparando os dados
nltk.download('rslp')
nltk.download('punkt')
palavras = []
intencoes = []
sentencas = []
saidas = []
def cria_audio(text):
    """Synthesize *text* as Portuguese speech, play it, then delete the file."""
    tts = gTTS(text,lang = 'pt')
    # Save the synthesized audio to a temporary mp3 file
    caminho = 'audios/001.mp3'
    tts.save(caminho)
    #print("Estou aprendendo o que você disse...")
    # Play the audio, then remove the temporary file
    playsound(caminho)
    os.remove(caminho)
def ouvir_microfone():
    """Capture one utterance from the microphone and return it as text.

    Returns an empty string when Google's recognizer cannot understand the
    audio.  (The original returned the unbound name ``frase`` in that case,
    raising UnboundLocalError.)
    """
    # Enable the microphone to listen to the user.
    microfone = sr.Recognizer()
    with sr.Microphone() as source:
        # Ambient-noise calibration provided by speech_recognition.
        microfone.adjust_for_ambient_noise(source)
        # Record until the phrase ends.
        audio = microfone.listen(source)
    frase = ""
    try:
        # Send the audio to Google's recognizer (Brazilian Portuguese).
        frase = microfone.recognize_google(audio, language='pt-BR')
    except sr.UnknownValueError:
        # Speech was not understood: tell the user and return "".
        cria_audio("Não entendi")
    return frase
for intent in data["intents"]:
tag = intent['tag']
if tag not in intencoes:
intencoes.append(tag)
for pattern in intent["patterns"]:
wrds = nltk.word_tokenize(pattern, language='portuguese')
palavras.extend(wrds)
sentencas.append(wrds)
saidas.append(tag)
#Stemming
stemer = RSLPStemmer()
stemmed_words = [stemer.stem(w.lower()) for w in palavras]
stemmed_words = sorted(list(set(stemmed_words)))
#Bag of Words
training = []
output = []
outputEmpty = [0 for _ in range(len(intencoes))]
for x, frase in enumerate(sentencas):
bag = []
wds = [stemer.stem(k.lower()) for k in frase]
for w in stemmed_words:
if w in wds:
bag.append(1)
else:
bag.append(0)
outputRow = outputEmpty[:]
outputRow[intencoes.index(saidas[x])] = 1
training.append(bag)
output.append(outputRow)
#Rede Neural
training = np.array(training)
output = np.array(output)
tf.reset_default_graph()
net = tfl.input_data(shape=[None, len(training[0])])
net = tfl.fully_connected(net, 8)
net = tfl.fully_connected(net, len(output[0]), activation="softmax")
net = tfl.regression(net)
model = tfl.DNN(net)
#Treinamento
model.fit(training, output, n_epoch=300, batch_size=8, show_metric=True)
model.save("model.chatbot30G")
#Bot
def bag_of_words(s, words):
    """Binary bag-of-words vector: 1 where a stemmed token of *s* matches *words*."""
    tokens = {stemer.stem(tok.lower()) for tok in nltk.word_tokenize(s)}
    return np.array([1 if w in tokens else 0 for w in words])
def chat():
    """Main voice loop: listen, classify the intent, speak a matching response."""
    #print("Bem vindo ao Bot Cultural")
    cria_audio('Bem vindo ao Bot Cultural, pode falar estou ouvindo')
    Online = True
    while Online:
        inp = ouvir_microfone()
        bag_usuario = bag_of_words(inp, stemmed_words)
        results = model.predict([bag_usuario])
        results_index = np.argmax(results)
        tag = intencoes[results_index]
        maximo=results.max()
        # Confidence threshold: only answer when the model is reasonably sure.
        if maximo>0.15:
            for tg in data["intents"]:
                if tg['tag'] == tag:
                    responses = tg['responses']
            #print(random.choice(responses))
            cria_audio(random.choice(responses))
            # The "ate-mais" (goodbye) intent ends the session.
            if tag == "ate-mais":
                Online = False
        else:
            cria_audio('Não entendi, pode repetir?')
chat()
| true |
a3d1b29be7443e18f4b6f06d2c4155b33d89112d | Python | Spumiglio/progettoTirocinio | /evaluation.py | UTF-8 | 12,502 | 2.53125 | 3 | [] | no_license | import itertools
from multiprocessing import cpu_count
from warnings import catch_warnings, filterwarnings
from statistics import mean
from statistics import stdev
from math import sqrt
from joblib import Parallel, delayed
from sklearn.metrics import mean_squared_error
from dataPreparation import datasplitter
from forecasting import *
def forecast_error(forecast_data, test_data):
    """Return the per-period error series ``test - forecast``.

    The result is indexed exactly like ``test_data``.  As in the original,
    a KeyError propagates if the forecast is missing a test period.
    (Replaces the deprecated empty ``pd.Series()`` + element-by-element
    assignment loop with a single dict-comprehension construction.)
    """
    return pd.Series({i: test_data[i] - forecast_data[i] for i in test_data.index})
# Mean Absolute Error
def mae(forecast_data, test_data):
    """Mean Absolute Error of the forecast against the test series."""
    abs_errors = abs(forecast_error(forecast_data, test_data))
    return mean(abs_errors)
# Root Mean Square Error
def rmse(forecast_data, test_data):
    """Root Mean Square Error of the forecast against the test series."""
    squared_errors = forecast_error(forecast_data, test_data) ** 2
    return sqrt(mean(squared_errors))
# Mean Absolute Percentage Error
def mape(forecast_data, test_data):
    """Mean Absolute Percentage Error of the forecast against the test series."""
    relative = forecast_error(forecast_data, test_data) / test_data
    return mean(abs(100 * relative))
# Mean Absolute Scaled Error
def mase(forecast_data, test_data, season=None):
    """Mean Absolute Scaled Error (Hyndman & Koehler).

    Each forecast error is scaled by the mean absolute naive error on the
    test series: ``(1/(n-1)) * sum |y_t - y_{t-lag}|`` with lag = ``season``
    (seasonal naive) or 1 (one-step naive).

    Bug fix: the original computed the scale as ``1 / len(errors) - 1``,
    i.e. ``(1/n) - 1`` (negative for n > 1), instead of the intended
    ``1 / (len(errors) - 1)``.
    """
    errors = forecast_error(forecast_data, test_data)
    lag = season if season is not None else 1
    # Mean absolute lag-difference of the test series (requires n > 1).
    scale = (1 / (len(errors) - 1)) * diff_val(test_data, lag)
    q = errors / scale
    return mean(abs(q))
# fa y(t) - y(t-1)
def diff_val(series, lag=1):
summ = 0
for t in range(lag, len(series)):
summ += abs(series[t] - series[t - lag])
return summ
def evaluate_simple_forecasts(df_train, df_test, data_column_name, config, models, weight, forecast_driftict,
                              season=26):
    """Score every forecasting method on the given train/test split.

    Returns a dict keyed by method code ('N', 'SN', 'AVG', 'DR', 'HW',
    'SES', 'SRM', 'AGG', 'AGG-WGT'); each value is
    ``[MAE, RMSE, MASE, MAE + RMSE + MASE]``.

    Notes (behavior preserved from the original copy-pasted version):
    * ``season`` is accepted for interface compatibility but the seasonal
      naive method still uses the hard-coded period 26.
    * The in-place methods forecast ``len(df_train)`` weeks while the
      model-based ones forecast ``len(df_test)`` weeks.
    """
    test_series = df_test[data_column_name]
    horizon = len(df_test.index)

    def _score(forecast_df):
        # [MAE, RMSE, MASE] of one forecast frame against the test frame.
        forecast_series = forecast_df[data_column_name]
        return [mae(forecast_series, test_series),
                rmse(forecast_series, test_series),
                mase(forecast_series, test_series)]

    def _run_inplace(method, *args):
        # Run a method that mutates a copy of the training frame in place,
        # anchored at the last observed week.
        df = df_train.copy()
        last_week = df.index[df.index.size - 1]
        method(df, last_week, *args, week_to_forecast=len(df.index))
        return df

    # Evaluated in the same order as the original implementation.
    n_scores = _score(_run_inplace(naive))
    sn_scores = _score(_run_inplace(seasonal_naive_forecasting, 26, 1))
    avg_scores = _score(_run_inplace(average_forecasting))
    dr_scores = _score(_run_inplace(driftmethod))
    hw_scores = _score(seasonalExp_smoothing(df_train.copy(), horizon))
    ses_scores = _score(smpExpSmoth(df_train.copy(), horizon))
    agg_scores = _score(aggregate_models(models))
    agg_wgt_scores = _score(aggregate_weighted(weight, forecast_driftict, horizon))
    srm_scores = _score(sarima_forecast(df_train.copy(), config, horizon))

    scores = {'N': n_scores, 'SN': sn_scores, 'AVG': avg_scores, 'DR': dr_scores,
              'HW': hw_scores, 'SES': ses_scores, 'SRM': srm_scores,
              'AGG': agg_scores, 'AGG-WGT': agg_wgt_scores}
    # Append the combined score (sum of the three metrics) to each entry.
    return {code: vals + [sum(vals)] for code, vals in scores.items()}
# root mean squared error or rmse
def measure_rmse(actual, predicted):
    """Return the root mean squared error between two equal-length sequences.

    Computes sqrt(mean((a - p)^2)) directly; this avoids a heavyweight
    sklearn call for a one-line formula and behaves identically for lists
    and 1-D arrays.
    """
    n = len(actual)
    return sqrt(sum((a - p) ** 2 for a, p in zip(actual, predicted)) / n)
# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
    """Split a univariate series, keeping the last n_test points as the test set.

    n_test == 0 is handled explicitly: the original `data[:-n_test]` would
    evaluate to `data[:0]` and silently return an EMPTY training set.
    """
    if n_test <= 0:
        return data, data[len(data):]
    return data[:-n_test], data[-n_test:]
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
    """Walk-forward validation for univariate data: fit on history, forecast
    one step, reveal the true value, repeat; return the RMSE over the test set.
    """
    train, test = datasplitter(data, n_test)
    history = list(train)
    predictions = []
    for observation in test:
        # One-step forecast from everything seen so far, then add the truth.
        predictions.append(sarima_forecast_test(history, cfg))
        history.append(observation)
    return measure_rmse(test, predictions)
# score a model, return None on failure
def score_model(data, n_test, cfg):
    """Score one SARIMA config; return (config-key, RMSE), RMSE None on failure.

    Many configs in a grid search legitimately fail to fit, so failures are
    swallowed and reported as a None score rather than aborting the search.
    """
    result = None
    key = str(cfg)
    try:
        with catch_warnings():
            filterwarnings("ignore")
            result = walk_forward_validation(data, n_test, cfg)
    except Exception:
        # The original bare `except:` also trapped KeyboardInterrupt/SystemExit
        # (making the search impossible to abort) and assigned a dead local
        # `error = None`; `result` is what is actually returned.
        result = None
    # if result is not None:
    #     print(' > Model[%s] %.3f' % (key, result))
    return key, result
# grid search configs
def grid_search(data, cfg_list, n_test):
    """Score every config in parallel and return (key, rmse) pairs, best first."""
    executor = Parallel(n_jobs=cpu_count(), backend='multiprocessing')
    jobs = (delayed(score_model)(data, n_test, cfg) for cfg in cfg_list)
    scored = executor(jobs)
    # Configs that failed to fit carry a None score; drop them before ranking.
    scored = [pair for pair in scored if pair[1] != None]
    scored.sort(key=lambda pair: pair[1])
    return scored
# create a set of sarima configs to try
def sarima_configs(seasonal=[0]):
    """Build the full grid of SARIMA configs: [(p, d, q), (P, D, Q, m), trend].

    seasonal: list of seasonal periods m to try (default: non-seasonal only).
    (The mutable default is kept for interface compatibility; it is never
    mutated here.)
    """
    p_params = [0, 1, 2, 3, 4, 5]
    d_params = [0, 1]
    q_params = [0, 1, 2, 3, 4, 5]
    t_params = ['n', 'c', 't', 'ct']
    P_params = [0, 1, 2, 3, 4, 5]
    D_params = [0, 1]
    Q_params = [0, 1, 2, 3, 4, 5]
    m_params = seasonal
    # itertools.product replaces the original eight nested loops and yields
    # exactly the same configs in the same order (last argument varies fastest).
    return [[(p, d, q), (P, D, Q, m), t]
            for p, d, q, t, P, D, Q, m in itertools.product(
                p_params, d_params, q_params, t_params,
                P_params, D_params, Q_params, m_params)]
def best_aggregate_config(models, test):
    # Exhaustively try every combination of >= 2 forecasting models, aggregate
    # each combination, and score it by MAE against the test set on the
    # 'vendite' (sales) column.
    # Returns [best_combination_key, {combination: mae}].
    # NOTE(review): assumes `models` maps model name -> forecast DataFrame with
    # a 'vendite' column — verify against callers.
    error_dict = {}
    all_combinations = []
    key_list = models.keys()
    for r in range(len(key_list) + 1):
        combinations_object = itertools.combinations(key_list, r)
        combinations_list = list(combinations_object)
        all_combinations += combinations_list
    # Drop the empty combination and the single-model combinations.
    # NOTE(review): `8` = C(7,0) + C(7,1), i.e. this hard-codes exactly 7
    # models in `models`; with a different count the wrong entries are removed.
    del all_combinations[:8]
    for combination in all_combinations:
        comb = list(combination)
        df_list = [models[x] for x in comb]
        df = aggregate_models(df_list)
        error = mae(df['vendite'], test['vendite'])
        error_dict[combination] = error
    # keys/vals stay index-aligned, so the argmin of vals picks the best key.
    keys = list(error_dict.keys())
    vals = list(error_dict.values())
    return [keys[vals.index(min(vals))], error_dict]
def model_weighted(models, test):
    # Assign fixed ensemble weights to the models, best (lowest MAE on the
    # 'vendite' column) first. Weights 0.4 .. 0.01 sum to 1.0.
    # NOTE(review): hard-codes exactly 7 models — keys[6] raises IndexError
    # with fewer, and extra models would keep their raw MAE as "weight".
    error_dict = {}
    for model in models:
        error = mae(models[model]['vendite'], test['vendite'])
        error_dict[model] = error
    # Rebuild the dict ordered by ascending error (dicts preserve insertion
    # order), then overwrite each entry's value with its rank-based weight.
    alpha = {k: v for k, v in sorted(error_dict.items(), key=lambda item: item[1])}
    keys = list(alpha.keys())
    alpha[keys[0]] = 0.4
    alpha[keys[1]] = 0.25
    alpha[keys[2]] = 0.16
    alpha[keys[3]] = 0.1
    alpha[keys[4]] = 0.05
    alpha[keys[5]] = 0.03
    alpha[keys[6]] = 0.01
    return alpha
| true |
41553246c449e8ddc7916356823afd567003f782 | Python | christophernhill/bigdataonpi | /test-and-dev/simpleplot.py | UTF-8 | 1,187 | 3.21875 | 3 | [
"MIT"
] | permissive | # Load needed Python modules
import netCDF4
import matplotlib.pyplot as plt
import os

# Switch to the directory containing the data file(s).
# (The original used `cd '...'`, which is IPython magic and a SyntaxError in
# a plain Python script; os.chdir is the portable equivalent.)
os.chdir('/nfs/cnhlab003/cnh/mur-sst')

# Create a "handle" to access the netCDF file. tFile is an instance of the
# netCDF4.Dataset class (see http://unidata.github.io/netcdf4-python/#netCDF4.Dataset).
tFile = netCDF4.Dataset('20110913090000-JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1.nc')

# Printing the Dataset shows its contents; it contains a netCDF variable
# called 'analysed_sst' holding the sea-surface temperature we want to plot.
print(tFile)

# Extract sst from the file; it has 3 dimensions: time, lat, lon.
sst = tFile['analysed_sst']
print(sst.shape)

# Extract a subregion: time level 0 (the only level in this example) and
# subtract 273. to convert from Kelvin to Celsius.
sstReg = sst[0, 10800:15000, 1000:3500] - 273.

# Make a very basic plot.
plt.imshow(sstReg, origin='lower', cmap='prism');plt.colorbar();plt.show()
7d2168687cc09c496a1ef98da844828092cb6d8d | Python | bchellew15/DustProject | /BrandtFiles/make_BOSS_textfile.py | UTF-8 | 1,340 | 3.171875 | 3 | [] | no_license | #take sky_radec.dat and extract latitude and longitude
#output text file with just those
#1st coordinate column is 0 through 360, so it's longitude, then latitude.
#based on the Readme, this is same order as SFD code.
import numpy as np
from astropy.coordinates import SkyCoord
all_info = np.loadtxt("sky_radec.dat")
all_info = all_info[:,3:]
'''
print(np.min(all_info[:,0]))
print(np.max(all_info[:,0]))
print(np.min(all_info[:,1]))
print(np.max(all_info[:,1]))
'''
#no conversion:
#np.savetxt("BOSS_locations.txt", all_info)
#convert to galactic coordinates
c = SkyCoord(all_info[:,0], all_info[:,1], frame='fk5', unit="deg")
#print(c.fk5.ra[0])
#print(c.fk5.dec[0])
#print(c.galactic.l[0])
#print(c.galactic.l[0])
#make textfile:
longitudes = np.array(c.galactic.l.deg)
latitudes = np.array(c.galactic.b.deg)
longitudes = np.reshape(longitudes, (longitudes.shape[0], 1))
latitudes = np.reshape(latitudes, (latitudes.shape[0], 1))
all_info = np.concatenate((longitudes, latitudes), axis=1)
np.savetxt("BOSS_locations_galactic.txt", all_info)
'''
#convert the first few lines to various coordinate frames, for comparison
for i in range(10):
#print(c.fk5.ra[i].deg, " ", c.fk5.dec[i].deg)
print(c.fk4.ra[i].deg/15, " ", c.fk4.dec[i].deg/3600)
#print(c.galactic.l.deg[i], " ", c.galactic.b.deg[i])
'''
| true |
3682ff77f003b4c3457608f7c7648ca7bc9db23b | Python | sake224/new_demo | /main.py | UTF-8 | 430 | 4.1875 | 4 | [] | no_license | ## initializing string
from collections import Counter

# Count how often each character occurs, then print the ones that repeat.
text = "tokyo"

# Counter performs the per-character tally the original hand-rolled loop did,
# and preserves first-seen order just like the manual dict build.
duplicates = Counter(text)

# Print every character that appears more than once, space-separated.
for key, value in duplicates.items():
    if value > 1:
        print(key, end = " ")
print()
3d4f595971330885738e4cf4347c5ffae9623f1b | Python | IdiotCirno/MFTI | /Arrays/E.py | UTF-8 | 3,282 | 3.578125 | 4 | [] | no_license | N = int(input())
A = []
def summ(A):
    """Return the sum of the elements of A (used as the sort key for students).

    The builtin sum() replaces the original manual accumulation loop.
    """
    return sum(A)
# One empty score list per student id 0..N-1.
for i in range(N):
    #A.append([0])
    A.append([])
# Read "student_id value" pairs until a lone '#' terminates the input.
while True:
    x = input()
    if x == '#':
        break
    else:
        x = x.split()
        A[int(x[0])].append(int(x[1]))
        #A[int(x[0])][0] += int(x[1])
# Sort each student's own scores in descending order.
for i,z in enumerate(A):
    A[i].sort(key=lambda x: x, reverse=True)
# Order students by their total score, best first.
A.sort(key=summ, reverse=True)
# Print all scores on one line: best student's sorted scores, then the next, etc.
for a in A:
    if len(a):
        for i in a:
            print(i, end=' ')
'''
Есть результаты работы студентов в семестре. Студентов выводить в порядке суммы их баллов. Требутеся вывести отсортированные результаты работ для каждого студента.
Данные вводятся как: student_id value
student_id принимает значения от 0 до N. value от 1 до 10
Пример входных данных: 0 3 0 5 1 3 1 2
Тут представленны данные о двух студента: 0 и 1. Сумма балов студента 0 - 8. Студента 1 - 5. Значит, сначала должны быть напечатаны результаты 0 студента, затем 1. Таким образом сначала надо вывести отсортированные результаты студента 0, затем студента 1:
5 3 3 2
Напомним, что у list в питоне есть встроенный метод sort и есть функция sorted. У них есть параметр key, который определяет по каким значениям будет сортироваться объект. Например код ниже будет сортировать лист по длинне его элементов. Так же есть параметр reverse.
a = ['###', '@', '??']
a.sort(key=lambda x: len(x)) #a ['@', '??', '###']
a.sort(key=lambda x: len(x), reverse=True) # a ['###', '??', '@']
Что такое лямбда функция вы узнаете в дальнейшем (так же всегда есть сайт google). Для выполнения этого задания достаточно понять, на что надо заменить функцию len.
Формат входных данных
В первой строке N - количество студентов. Далее идет какое-то количество строк (не равное N) с результатами студентов в формате: student_id value. 0 <= student_id < N. Значения разделены пробелом. Ввод заканчивается #.
Формат выходных данных
Вывести отсортированные результаты студентов в одну строку. Сначала печатаются результаты лучшего по сумме баллов студента, потом второго и так далее. Результаты в одну строку
Примеры
Ввод Вывод
3 10 3 4 3 2
0 3
0 10
2 3
2 2
2 4
#
'''
| true |
4a6c26b1a4ad48d152e2d4a4c8c08fe3281c2b4f | Python | graevskiy/mit_6.0001 | /PSet2/hangman.py | UTF-8 | 12,473 | 4.3125 | 4 | [] | no_license | # Problem Set 2, hangman.py
# Name:
# Collaborators:
# Time spent:
# Hangman Game
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
# (so be sure to read the docstrings!)
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    """
    print("Loading word list from file...")
    # `with` guarantees the file handle is closed even if reading fails
    # (the original opened the file and never closed it).
    with open(WORDLIST_FILENAME, 'r') as inFile:
        # The file holds one line with all words separated by whitespace.
        wordlist = inFile.readline().split()
    print("  ", len(wordlist), "words loaded.")
    return wordlist
def choose_word(wordlist):
    """Pick and return one word uniformly at random from wordlist.

    wordlist (list): list of words (strings).
    """
    picked = random.choice(wordlist)
    return picked
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = load_words()
def is_word_guessed(secret_word, letters_guessed):
    '''
    secret_word: string, the word the user is guessing; assumes all letters are
      lowercase
    letters_guessed: list (of letters), which letters have been guessed so far;
      assumes that all letters are lowercase
    returns: boolean, True if all the letters of secret_word are in letters_guessed;
      False otherwise
    '''
    # A set makes each membership test O(1); all() short-circuits on the first
    # miss, exactly like the original early-return loop.
    guessed = set(letters_guessed)
    return all(ch in guessed for ch in secret_word)
def get_guessed_word(secret_word, letters_guessed):
    '''
    secret_word: string, the word the user is guessing
    letters_guessed: list (of letters), which letters have been guessed so far
    returns: string, comprised of letters, underscores (_), and spaces that represents
      which letters in secret_word have been guessed so far.
    '''
    # Revealed letters appear as-is; hidden ones as "_ " (underscore + space,
    # so consecutive underscores stay readable).
    return ''.join(ch if ch in letters_guessed else '_ ' for ch in secret_word)
def get_available_letters(letters_guessed):
    '''
    letters_guessed: list (of letters), which letters have been guessed so far
    returns: string (of letters), comprised of letters that represents which letters have not
      yet been guessed.
    '''
    # Set lookup + join replaces the original manual string concatenation,
    # preserving alphabetical order.
    guessed = set(letters_guessed)
    return ''.join(ch for ch in string.ascii_lowercase if ch not in guessed)
def is_letter_in_secret_word(letter, secret_word):
    """Classify a guess against the secret word.

    Returns (guesses_lost, feedback): wrong vowels cost 2 guesses, wrong
    consonants cost 1, and correct letters cost nothing.
    """
    if letter in secret_word:
        return 0, 'Good guess:'
    penalty = 2 if letter in 'aeiou' else 1
    return penalty, 'Oops! That letter is not in my word:'
def print_remaining_guesses_and_avail_letters(guesses, warnings, avail_letters):
    # Status banner shown before every prompt: remaining warnings and guesses,
    # plus the letters the player has not tried yet.
    print(f'You have {warnings} warnings left.')
    print(f'You have {guesses} guesses left.')
    print(f'Available letters: {avail_letters}')
def letter_already_used(_l, guessed_letters):
    """Return True if the letter _l has already been guessed."""
    # The membership test already yields the boolean; no if/else needed.
    return _l in guessed_letters
def guess_letter(guessed_letters):
    # Prompt for one guess and validate it.
    # Returns (letter, None) on a valid new guess, or (None, error_message)
    # for non-letters, multi-character input, or repeated letters.
    print('Guess a letter:')
    _l = input()
    if str.isalpha(_l) and len(_l) == 1:
        _l = str.lower(_l)
        if letter_already_used(_l, guessed_letters):
            return None, 'Oops! You\'ve already guessed that letter.'
        return _l, None
    return None, 'Oops! That is not a valid letter.'
def guess_letter_w_hint(guessed_letters):
    # Same validation as guess_letter(), but '*' is accepted as a request
    # for a hint and passed straight through as ('*', None).
    print('Guess a letter:')
    _l = input()
    if _l == '*':
        return _l, None
    if str.isalpha(_l) and len(_l) == 1:
        _l = str.lower(_l)
        if letter_already_used(_l, guessed_letters):
            return None, 'Oops! You\'ve already guessed that letter.'
        return _l, None
    return None, 'Oops! That is not a valid letter.'
def decrease_warnings(warnings):
    """Return the warning count reduced by one."""
    return warnings - 1
def decrease_guesses(guesses):
    """Return the guess count reduced by one."""
    return guesses - 1
def is_lost_and_message(guesses, secret_word):
    """Return True (printing the losing message) when no guesses remain."""
    if guesses != 0:
        return False
    print(f'You lose. The word was {secret_word}.')
    return True
def hangman(secret_word):
    '''
    secret_word: string, the secret word to guess.
    Starts up an interactive game of Hangman.
    * At the start of the game, let the user know how many
    letters the secret_word contains and how many guesses s/he starts with.
    * The user should start with 6 guesses
    * Before each round, you should display to the user how many guesses
    s/he has left and the letters that the user has not yet guessed.
    * Ask the user to supply one guess per round. Remember to make
    sure that the user puts in a letter!
    * The user should receive feedback immediately after each guess
    about whether their guess appears in the computer's word.
    * After each guess, you should display to the user the
    partially guessed word so far.
    Follows the other limitations detailed in the problem write-up.
    '''
    # Game state: 6 guesses, 3 warnings for invalid input, no letters tried.
    guesses = 6
    warnings = 3
    guessed_letters = []
    print('Welcome!')
    print(f'I\'m thinking of a word that is {len(secret_word)} letters long.')
    print('------')
    while guesses > 0:
        avail_letters = get_available_letters(guessed_letters)
        print_remaining_guesses_and_avail_letters(guesses, warnings, avail_letters)
        _l, print_guess_type = guess_letter(guessed_letters)
        # Invalid or repeated input: costs a warning; once warnings run out,
        # each further offense costs a guess and the warnings reset to 3.
        if _l is None:
            warnings = decrease_warnings(warnings)
            warnings_msg = f'You have {warnings} warnings left:'
            if warnings < 0:
                warnings_msg = f'You have no warnings left so you lose one guess:'
                guesses = decrease_guesses(guesses)
                if is_lost_and_message(guesses, secret_word):
                    return None
                warnings = 3
            print(print_guess_type, warnings_msg, get_guessed_word(secret_word, guessed_letters))
            print('------')
            continue
        if not _l in guessed_letters:
            guessed_letters.append(_l)
        # Win check before scoring: a completing letter costs nothing.
        if is_word_guessed(secret_word, guessed_letters):
            break
        # Wrong consonants cost 1 guess, wrong vowels 2, correct letters 0.
        lost_guesses, print_guess_type = is_letter_in_secret_word(_l, secret_word)
        guesses -= lost_guesses
        print(print_guess_type, get_guessed_word(secret_word, guessed_letters))
        print('------')
    if is_lost_and_message(guesses, secret_word):
        return None
    else:
        print('You won! Congrats!')
        print(f'Your score is {guesses*len(set(secret_word))}.')
    return None
# When you've completed your hangman function, scroll down to the bottom
# of the file and uncomment the first two lines to test
#(hint: you might want to pick your own
# secret_word while you're doing your own testing)
# -----------------------------------
def match_with_gaps(my_word, other_word):
    '''
    my_word: string with _ characters, current guess of secret word
    other_word: string, regular English word
    returns: boolean, True if all the actual letters of my_word match the
        corresponding letters of other_word, or the letter is the special symbol
        _ , and my_word and other_word are of the same length;
        False otherwise:
    '''
    # Strip the display spaces that follow each underscore.
    my_word = my_word.replace(" ", "")
    if len(my_word) != len(other_word):
        return False
    # In hangman, guessing a letter reveals ALL of its positions, so a hidden
    # position (_) can never hold an already-revealed letter. The original
    # only excluded letters seen at *earlier* gaps, so e.g.
    # match_with_gaps("ab_ ", "abb") wrongly returned True.
    revealed = {ch for ch in my_word if ch != "_"}
    for mine, other in zip(my_word, other_word):
        if mine == "_":
            if other in revealed:
                return False
        elif mine != other:
            return False
    return True
def show_possible_matches(my_word):
    '''
    my_word: string with _ characters, current guess of secret word
    returns: nothing, but should print out every word in wordlist that matches my_word
    Keep in mind that in hangman when a letter is guessed, all the positions
    at which that letter occurs in the secret word are revealed.
    Therefore, the hidden letter(_ ) cannot be one of the letters in the word
    that has already been revealed.
    '''
    # Collect every dictionary word consistent with the current pattern,
    # then print them all on one line (or a fallback message).
    matches = []
    for w in wordlist:
        if match_with_gaps(my_word, w):
            matches.append(w)
    if matches:
        print(*matches)
    else:
        print("No matches found")
def hangman_with_hints(secret_word):
    '''
    secret_word: string, the secret word to guess.
    Starts up an interactive game of Hangman.
    * At the start of the game, let the user know how many
    letters the secret_word contains and how many guesses s/he starts with.
    * The user should start with 6 guesses
    * Before each round, you should display to the user how many guesses
    s/he has left and the letters that the user has not yet guessed.
    * Ask the user to supply one guess per round. Make sure to check that the user guesses a letter
    * The user should receive feedback immediately after each guess
    about whether their guess appears in the computer's word.
    * After each guess, you should display to the user the
    partially guessed word so far.
    * If the guess is the symbol *, print out all words in wordlist that
    matches the current guessed word.
    Follows the other limitations detailed in the problem write-up.
    '''
    # Identical to hangman(), except guess_letter_w_hint() may return '*',
    # which triggers a free hint instead of consuming a guess.
    guesses = 6
    warnings = 3
    guessed_letters = []
    print('Welcome!')
    print(f'I\'m thinking of a word that is {len(secret_word)} letters long.')
    print('------')
    while guesses > 0:
        avail_letters = get_available_letters(guessed_letters)
        print_remaining_guesses_and_avail_letters(guesses, warnings, avail_letters)
        _l, print_guess_type = guess_letter_w_hint(guessed_letters)
        # '*' = hint request: show all dictionary words matching the pattern.
        if _l == '*':
            print('-------')
            print('You used a hint')
            show_possible_matches(get_guessed_word(secret_word, guessed_letters))
            print('-------')
            continue
        # Invalid input costs a warning; with no warnings left it costs a guess
        # and the warning counter resets.
        if _l is None:
            warnings = decrease_warnings(warnings)
            warnings_msg = f'You have {warnings} warnings left:'
            if warnings < 0:
                warnings_msg = f'You have no warnings left so you lose one guess:'
                guesses = decrease_guesses(guesses)
                if is_lost_and_message(guesses, secret_word):
                    return None
                warnings = 3
            print(print_guess_type, warnings_msg, get_guessed_word(secret_word, guessed_letters))
            print('------')
            continue
        if not _l in guessed_letters:
            guessed_letters.append(_l)
        if is_word_guessed(secret_word, guessed_letters):
            break
        # Wrong consonants cost 1 guess, wrong vowels 2, correct letters 0.
        lost_guesses, print_guess_type = is_letter_in_secret_word(_l, secret_word)
        guesses -= lost_guesses
        print(print_guess_type, get_guessed_word(secret_word, guessed_letters))
        print('------')
    if is_lost_and_message(guesses, secret_word):
        return None
    else:
        print('You won! Congrats!')
        print(f'Your score is {guesses*len(set(secret_word))}.')
    return None
# When you've completed your hangman_with_hint function, comment the two similar
# lines above that were used to run the hangman function, and then uncomment
# these two lines and run this file to test!
# Hint: You might want to pick your own secret_word while you're testing.
# Entry point: part 2 (plain hangman) is commented out; part 3 (with hints)
# runs by default.
if __name__ == "__main__":
    # pass
    # To test part 2, comment out the pass line above and
    # uncomment the following two lines.
    # secret_word = choose_word(wordlist)
    # hangman(secret_word)
###############
    # To test part 3 re-comment out the above lines and
    # uncomment the following two lines.
    secret_word = choose_word(wordlist)
    hangman_with_hints(secret_word)
| true |
8e1b6fba3862516592717d6cb8e861750794d6bc | Python | tectronics/scientists-analysis | /preprocessing/baseline_collection.py | UTF-8 | 1,773 | 2.546875 | 3 | [
"MIT"
] | permissive | '''
Created on May 28, 2016
@author: Tania
'''
import os
import sys
from wikitools import wiki, api
import wikipedia
import csv
def wiki_search(keyword):
    # Look up `keyword` on the English Wikipedia API (near-match search) and
    # return the full URL of the best-matching article.
    # NOTE(review): `.iteritems()` is Python 2 API — this function fails with
    # AttributeError on Python 3; use .items() there.
    # NOTE(review): if the page info response contains no 'fullurl', `url` is
    # never bound and the final return raises UnboundLocalError — the caller's
    # `if url is not None` guard never sees a None.
    title=""
    site = wiki.Wiki("https://en.wikipedia.org/w/api.php")
    # get the title of the article
    params = {'action':'query', 'list':'search', 'srsearch':keyword, 'srnamespace':0, 'srwhat':'nearmatch', 'srlimit':1, 'srprop':'titlesnippet', 'srenablerewrites':1}
    req = api.APIRequest(site, params)
    result = req.query()
    if result['query']['search']:
        for key, value in result['query']['search'][0].iteritems():
            if key=='title':
                title=value
    # get the URL of the article
    params = {'action':'query', 'prop':'info', 'titles':title, 'inprop':'url'}
    req = api.APIRequest(site, params)
    result = req.query()
    for pidkey in result['query']['pages']:
        for key, value in result['query']['pages'][pidkey].iteritems():
            if key=='fullurl':
                url=value
    return url
# Build data/baseline/baseline.csv: for each scientist in the input CSV,
# look up their Wikipedia URL and write [name, url, original column 3] rows.
base_dir = os.path.dirname(os.path.dirname(__file__))
data_dir = os.path.join(base_dir, 'data')
baseline_dir = os.path.join(data_dir, 'baseline')
csv_path =os.path.join(baseline_dir, 'prominent_scientists_(mearged_2001,2014,2015_TR).csv')
csvfile = open(csv_path , 'r')
reader = csv.reader(csvfile)
data=[]
for row in reader:
    # Columns 0 and 1 are first/last name; search with "First Last".
    keyword = row[0]+" "+row[1]
    url=wiki_search(keyword)
    if url is not None:
        csv_row = [keyword, url, row[3]]
        data.append(csv_row)
csvfile.close()
output_path = os.path.join(baseline_dir, 'baseline.csv')
# NOTE(review): csv files should be opened with newline='' to avoid blank
# rows on Windows — confirm before relying on the output format.
with open(output_path, 'w') as fp:
    a = csv.writer(fp, delimiter=',')
    a.writerows(data)
| true |
0b317997bff0c026849249b1bd0c7ab233215d14 | Python | AndrewMurd/Computational-Science | /Midterm/Answers/fillA.py | UTF-8 | 161 | 2.703125 | 3 | [] | no_license | import numpy as np
def fillA(n):
A = np.zeros((n,n))
for i in range(n):
for j in range(n):
A[i,j] = 1.0/(1.0+(i-j)**2)
return A
| true |
103b4ff0ba3c96e08aaad11ae89cd844204073a2 | Python | SKShah36/Design_Patterns | /Patterns/template.py | UTF-8 | 1,575 | 3.265625 | 3 | [] | no_license | # This code is adapted from https://www.geeksforgeeks.org/template-method-design-pattern/
import abc
from abc import abstractmethod
class OrderProcessTemplate(metaclass=abc.ABCMeta):
    """Template Method: process_order() fixes the order workflow; subclasses
    supply the selection, payment and delivery steps."""
    # NOTE(review): class-level annotation appears unused — process_order takes
    # the gift flag as a parameter instead; confirm before removing.
    isGift: bool
    @abstractmethod
    def do_select(self)->None:
        """Choose the item (subclass-specific)."""
        pass
    @abstractmethod
    def do_payment(self) -> None:
        """Collect payment (subclass-specific)."""
        pass
    # Do not override
    def gift_wrap(self) -> None:
        print("Gift wrap successful")
    @abstractmethod
    def do_delivery(self) -> None:
        """Deliver the item (subclass-specific)."""
        pass
    # Do not override
    def process_order(self, is_gift: bool) -> None:
        # The fixed skeleton: select -> pay -> (optional) wrap -> deliver.
        self.do_select()
        self.do_payment()
        if is_gift:
            self.gift_wrap()
        self.do_delivery()
class NetOrder(OrderProcessTemplate):
    """Concrete template: an order placed online and shipped by post."""
    def do_select(self) ->None:
        print("Item added to online shopping cart")
        print("Get gift wrap preference")
        print("Get delivery address.")
    def do_payment(self) -> None:
        print("Online Payment through Netbanking, card or Paytm")
    def do_delivery(self) -> None:
        print("Ship the item through post to delivery address")
class StoreOrder(OrderProcessTemplate):
    """Concrete template: an order made and collected in a physical store."""
    def do_select(self) -> None:
        print("Customer chooses the item from shelf.")
    def do_payment(self) -> None:
        print("Pays at counter through cash/POS")
    def do_delivery(self) -> None:
        print("Item deliverd to in delivery counter.")
def client():
    """Demo driver: run one online and one in-store gift order end to end."""
    online_order = NetOrder()
    online_order.process_order(True)
    print("")
    walk_in_order = StoreOrder()
    walk_in_order.process_order(True)
client()
| true |
a36c3b408abae6101e8f5f5aee7c9fc70ab3b535 | Python | mjhossain/note-app-python | /playground/arg-test.py | UTF-8 | 225 | 2.765625 | 3 | [] | no_license | import argparse
parser = argparse.ArgumentParser(description="Some function")
parser.add_argument("-a","--add", help="to add", type=int, metavar='')
args = parser.parse_args()
if __name__ == "__main__":
print(args.add) | true |
7a1524f77294f3285057f3e140b470ab4039ebbe | Python | isperetz/Test | /week1 - problem 1-2.py | UTF-8 | 793 | 4.40625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 5 11:49:48 2017
@author: isperetz
"""
#week 1 problem 1
#Write a program that counts up the number of vowels contained in the string s.
# Valid vowels are: 'a', 'e', 'i', 'o', and 'u'.
#For example, if s = 'azcbobobegghakl', your program should print:
# Number of vowels: 5
# --- Problem 1: count the vowels (a, e, i, o, u) in s ------------------------
s = 'azcbobobegghaklvgg'
# sum() over a generator replaces the manual counter; `ch` avoids shadowing
# the builtin chr() that the original used as its loop variable.
numvow = sum(1 for ch in s if ch in 'aeiou')
print('Number of vowels:',numvow)

# --- Problem 2: count occurrences of 'bob' in s (overlaps count) -------------
s = 'azcbobobegghakl'
ctr = sum(1 for i in range(len(s)) if s[i:i+3] == 'bob')
print('Number of times bob occurs is:',ctr)
| true |
4054343d0a364beffd8c627bd04ccfee5eaaf670 | Python | domperor/dotfiles | /xkcd.py | UTF-8 | 2,835 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv
try:
    from PIL import Image
except ImportError:
    # Only a missing PIL should trigger this fallback; the original bare
    # `except:` would also hide unrelated errors raised during the import.
    from sys import stderr
    stderr.write('[E] PIL not installed')
    exit(1)
from io import BytesIO
from urllib.request import Request, urlopen
from urllib.parse import urlencode
import json
import re
import shutil
from drawille import Canvas
def usage():
    """Print usage information (with the actual program name) and exit.

    The original passed no argument to the %s placeholder, so it printed the
    literal text "Usage: %s <url/id>".
    """
    print('Usage: %s <url/id>' % argv[0])
    exit()
if __name__ == '__main__':
    # NOTE(review): this logic looks inverted/incomplete — with NO argument it
    # fetches the front page, but WITH an argument it prints usage and exits;
    # argv[1] (the advertised <url/id>) is never used. Confirm intent.
    if len(argv) < 2:
        url = 'http://xkcd.com/'
    else:
        usage()
    # Fetch the comic page and pull the image URL out of the HTML.
    c = urlopen(url).read().decode('utf-8')
    img_url = 'https:' + ''.join(re.search('src="(\/\/imgs.xkcd.com\/comics\/.*\.)(jpg|png)"', c).groups())
    # Load the image and convert to 8-bit grayscale ('L') for thresholding.
    i = Image.open(BytesIO(urlopen(img_url).read())).convert('L')
    w, h = i.size
    # Fit the image to the terminal: each braille cell is 2x4 dots, hence the
    # *2 / *4 scaling of the terminal's character dimensions.
    tw, th = shutil.get_terminal_size((80, 25))
    tw -= 1
    th -= 10
    tw *= 2
    th *= 4
    if tw < w or th < h:
        ratio = min(tw / float(w), th / float(h))
        w = int(w * ratio)
        h = int(h * ratio)
        i = i.resize((w, h), Image.ANTIALIAS)
    can = Canvas()
    x = y = 0
    # tobytes() replaced tostring() in newer Pillow; fall back for old versions.
    try:
        i_converted = i.tobytes()
    except AttributeError:
        i_converted = i.tostring()
    # Set a braille dot for every pixel darker than mid-gray.
    for pix in i_converted:
        if pix < 128:
            can.set(x, y)
        x += 1
        if x >= w:
            y += 1
            x = 0
    # Metadata (number, title, alt text) for the latest comic.
    info_json = urlopen('https://xkcd.com/info.0.json').read().decode('utf-8')
    info = json.loads(info_json)
    """
    redirect_url = "https://www.explainxkcd.com/wiki/api.php?{}".format(urlencode({
        'action': 'query',
        'titles': info['num'],
        'redirects': 'true',
        'format': 'json',
    }))
    redirect_request = Request(redirect_url, headers={'User-Agent': 'xkcd CLI tool'})
    redirect_json = urlopen(redirect_request).read().decode('utf-8')
    redirect = json.loads(redirect_json)
    redirect_to = redirect['query']['redirects'][0]['to']
    transcription_url = "https://www.explainxkcd.com/wiki/api.php?{}".format(urlencode({
        'action': 'parse',
        'page': redirect_to,
        'prop': 'text',
        'section': 2,
        'format': 'json',
        'disableeditsection': 'true',
    }))
    transcription_request = Request(transcription_url, headers={'User-Agent': 'xkcd CLI tool'})
    transcription_json = urlopen(transcription_request).read().decode('utf-8')
    transcription = json.loads(transcription_json)
    """
    # Render: title, braille frame, alt text and reference links (ANSI colors).
    print(("\033[92m{0}: {1}\033[0m".format(info['num'], info['title'])))
    print((can.frame()))
    print((''))
    print(("\033[1;31mAlt text\033[0m: {0}".format(info['alt'])))
    print((''))
    print(("\033[1;31mOriginal\033[0m: \033[96mhttps://xkcd.com/{0}\033[0m".format(info['num'])))
    print(("\033[1;31mExplanation\033[0m: \033[96mhttps://www.explainxkcd.com/wiki/index.php/{0}\033[0m".format(info['num'])))
    print((''))
| true |
5d9d33ddd3847c854218171d00a2d56f23059a93 | Python | noaOrMlnx/sonic-utilities | /tests/synchronous_mode_test.py | UTF-8 | 1,383 | 2.578125 | 3 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | from click.testing import CliRunner
import config.main as config
class TestSynchronousMode(object):
    """CLI tests for `config synchronous_mode`: enable/disable succeed with the
    expected restart-required message; anything else is rejected."""
    @classmethod
    def setup_class(cls):
        print("SETUP")
    def __check_result(self, result_msg, mode):
        # Build the message the command is expected to print for `mode` and
        # check it is contained in the captured CLI output.
        if mode == "enable" or mode == "disable":
            expected_msg = """Wrote %s synchronous mode into CONFIG_DB, swss restart required to apply the configuration: \n
    Option 1. config save -y \n
              config reload -y \n
    Option 2. systemctl restart swss""" % mode
        else:
            expected_msg = "Error: Invalid argument %s, expect either enable or disable" % mode
        return expected_msg in result_msg
    def test_synchronous_mode(self):
        runner = CliRunner()
        # Valid modes: exit code 0 and the success banner.
        result = runner.invoke(config.config.commands["synchronous_mode"], ["enable"])
        print(result.output)
        assert result.exit_code == 0
        assert self.__check_result(result.output, "enable")
        result = runner.invoke(config.config.commands["synchronous_mode"], ["disable"])
        print(result.output)
        assert result.exit_code == 0
        assert self.__check_result(result.output, "disable")
        # Invalid mode: non-zero exit code and the error message.
        result = runner.invoke(config.config.commands["synchronous_mode"], ["invalid-input"])
        print(result.output)
        assert result.exit_code != 0
        assert self.__check_result(result.output, "invalid-input")
| true |
a8d0ff312b48123153aed2343dbcb9fcc70f07d7 | Python | ValeUrbina/ProteinCurationServices | /spcleaner/mafft_align.py | UTF-8 | 596 | 2.59375 | 3 | [] | no_license |
import sys
import os
# Use mafft to align a fasta alignment
# https://www.ebi.ac.uk/Tools/msa/mafft/
# https://mafft.cbrc.jp/alignment/software/
def mafft_align(fasta_path, aligned_fasta_path):
    """Align the sequences in fasta_path with `mafft --auto`, writing the
    aligned FASTA to aligned_fasta_path.

    Uses subprocess with an argument list instead of os.system so that paths
    containing spaces or shell metacharacters cannot alter the command; the
    shell redirection `>` is replaced by binding stdout to the output file.
    Like the original, a non-zero mafft exit status is not treated as fatal.
    """
    with open(aligned_fasta_path, 'w') as out_handle:
        subprocess.run(['mafft', '--auto', fasta_path], stdout=out_handle)
def main():
    # CLI entry point: argv[1] = input FASTA, argv[2] = output aligned FASTA.
    fasta_path = sys.argv[1]
    aligned_fasta_path = sys.argv[2]
    mafft_align(fasta_path, aligned_fasta_path)
    return
# Execution: mafft_align.py input_file_path output_file_path
# Example: mafft_align.py PF000/alignment PF000/trimmed_alignment
if __name__ == "__main__":
    main()
| true |
ddfe87ec4f2f7880150d189b7a0213825eb9e737 | Python | syn-plataforma/syn | /syn/model/build/common/encode_structured_data.py | UTF-8 | 3,172 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python3
import argparse
import os
import time
import pandas as pd
from pymongo import MongoClient
from syn.helpers.environment import load_environment_variables
from syn.helpers.logging import set_logger
from syn.helpers.mongodb import get_default_mongo_client
from syn.helpers.mongodb import load_dataframe_from_mongodb
from syn.helpers.system import check_same_python_module_already_running
load_environment_variables()
log = set_logger()
def get_input_params():
    """Parse the command-line options and return them as a dict."""
    parser = argparse.ArgumentParser(description='Encode structured data.')
    parser.add_argument('--corpus', default='bugzilla', type=str, help='Database name.')
    parser.add_argument('--collection_name', default='normalized_clear', type=str, help='Collection name.')
    parsed = parser.parse_args()
    return {'corpus': parsed.corpus, 'collection_name': parsed.collection_name}
# For each configured structured-data column, build a value -> integer code
# table (ordered by frequency) and store it in MongoDB as `<column>_codes`.
if __name__ == "__main__":
    # Stores the execution start time to calculate the time it takes for the module to execute.
    initial_time = time.time()
    # Check if there is a running process that contains the name of this module.
    check_same_python_module_already_running(os.path.split(__file__))
    log.info(f"Encoding structured data ...")
    # Load the parameters.
    input_params = get_input_params()
    assert input_params is not None, f"No params provided."
    # Query the database.
    # Column names come from the environment, comma-separated.
    structured_data_column_name = os.environ['STRUCTURED_DATA_COLUMN_NAMES'].split(',')
    # NOTE(review): ''.split(',') is [''], so this guard can never fire for an
    # empty variable — confirm the intended validation.
    if len(structured_data_column_name) == 0:
        raise ValueError('No structured data column names defined.')
    log.info(f"Structured data column name: {structured_data_column_name}")
    # Load normalized_clear collection.
    # Project only the structured columns (and drop _id).
    projection = {'_id': 0}
    for column in structured_data_column_name:
        projection[column] = 1
    log.info(f"projection:{projection}")
    df_structured_data = load_dataframe_from_mongodb(
        database_name=input_params['corpus'],
        collection_name=input_params['collection_name'],
        projection=projection
    )
    # Mongo client.
    mongodb_client: MongoClient = get_default_mongo_client()
    db = mongodb_client[input_params['corpus']]
    # Group_by structured data column.
    for column in structured_data_column_name:
        df = pd.DataFrame(columns=[column])
        # value_counts() orders values by frequency; the positional index of
        # each value becomes its integer code.
        column_value_counts = df_structured_data[column].value_counts()
        log.info(f"Number of distinct values in column '{column}': {column_value_counts.shape[0]}")
        df[column] = column_value_counts.keys().to_list()
        # Assigning numerical values and storing in another column
        df[f"{column}_code"] = df[column].index
        # Recreate the codes collection from scratch on every run.
        col = db[f"{column}_codes"]
        if col.name in db.list_collection_names():
            db.drop_collection(col.name)
        log.info(f"Inserting documents ...")
        inserted_documents = col.insert_many(df.to_dict("records"))
        log.info(f"Inserted documents: {len(inserted_documents.inserted_ids)}")
    final_time = time.time()
    log.info(f"Encoding structured data total execution time = {((final_time - initial_time) / 60)} minutes")
    log.info(f"MODULE EXECUTED.")
| true |
9d36b4bbd1350b4b2eda4891d78aedc82708d6d3 | Python | mythreyiramesh/learning-mpi | /1dtemp.py | UTF-8 | 662 | 2.734375 | 3 | [] | no_license | import numpy as np
timesteps = 10;
alpha = 1;
delX = 1;
delT = 1;
const = alpha/delX**2;
domain_size = 10;
xgrid = np.linspace(1,10,domain_size);
old_temp = np.sin(xgrid);
new_temp = np.zeros(domain_size);
# since we are using only the previous and next step
lines = ["["]
for j in range(timesteps):
print(old_temp)
lines = lines + [str(old_temp)+";\n"]
for i in range(1,domain_size-1):
# print("calculating",i,"from",i-1,"and",i+1)
new_temp[i] = old_temp[i] + const * (old_temp[i-1]-2*old_temp[i]+old_temp[i+1]);
old_temp = new_temp;
lines = lines + ["];"]
file = open("outs.txt","w");
file.writelines(lines)
file.close()
| true |
3eb8ce837ff0dfcbd928e31c9d8936d7b8526ce3 | Python | DanielOjo/Lists | /Classroom exercises/Development/Task1.py | UTF-8 | 1,218 | 4.21875 | 4 | [] | no_license | #DanielOgunlana (40730)
#Lists Development Exercise Task 1
#05/01/15
def random_number():
    """Return a random index valid for the 10-element country/capital lists.

    BUG FIX: the original used random.randint(1, 11); randint is inclusive on
    both ends, so it could return 10 or 11 (IndexError on a 10-element list)
    and could never pick index 0.
    """
    import random
    random_num = random.randint(0, 9)
    return random_num
def countrys():
    """Return the fixed list of quiz countries (parallel to capitals())."""
    return [
        "England", "US", "France", "Spain", "Belgium",
        "Japan", "China", "Italy", "Germany", "Canada",
    ]
def capitals():
    """Return the capital cities aligned index-for-index with countrys().

    BUG FIX: "Madrid" and "Paris" were swapped relative to France/Spain, and
    China was paired with "Hongkong" instead of its capital, "Beijing" — the
    quiz previously rejected every correct answer for those countries.
    """
    return [
        "London", "WashingtonDC", "Paris", "Madrid", "Brussels",
        "Tokyo", "Beijing", "Rome", "Berlin", "Ottawa",
    ]
def process_country(country_list, random_num):
    """Return the country stored at position *random_num*."""
    return country_list[random_num]
def process_capital(capital_list, random_num):
    """Return the capital stored at position *random_num*."""
    return capital_list[random_num]
def ask_user(country, capital):
    """Prompt for the capital of *country* and report right/wrong on stdout."""
    user_answer = input("Name the capital of {0}: ".format(country))
    verdict = "That is correct" if user_answer == capital else "That is wrong"
    print(verdict)
def main_program():
    """Run a single round of the capital-city quiz."""
    index = random_number()
    countries = countrys()
    caps = capitals()
    ask_user(process_country(countries, index), process_capital(caps, index))
# main program
# Entry point: immediately runs one quiz round (prompts via stdin).
main_program()
| true |
49915d62fdef84ddd3841ced77012a94adfeef26 | Python | flores-jacob/RoboND-Rover-Project | /code/path_generation_helpers.py | UTF-8 | 47,623 | 3.203125 | 3 | [] | no_license | import numpy as np
from collections import namedtuple
from pathfinding.core.diagonal_movement import DiagonalMovement
from pathfinding.core.grid import Grid
from pathfinding.finder.a_star import AStarFinder
def to_polar_coords_with_origin(origin_x, origin_y, x_pixels, y_pixels):
    """Return (distances, angles) of the given pixel(s) relative to an origin.

    Works on scalars or numpy arrays; angles come from np.arctan2 and are in
    radians, in the range [-pi, pi].
    """
    dx = x_pixels - float(origin_x)
    dy = y_pixels - float(origin_y)
    return np.sqrt((dy ** 2) + (dx ** 2)), np.arctan2(dy, dx)
def compute_distances(origin_x, origin_y, x_points, y_points):
    """Euclidean distance(s) from (origin_x, origin_y) to the given point(s).

    Accepts scalars or numpy arrays for x_points/y_points.
    """
    dx = x_points - origin_x
    dy = y_points - origin_y
    return np.sqrt((dy ** 2) + (dx ** 2))
# adapted from https://stackoverflow.com/a/7869457
def compute_misalignment(destination_angle, yaw):
    """Signed smallest rotation (degrees) taking *yaw* onto *destination_angle*.

    :param destination_angle: value in degrees
    :param yaw: value in degrees
    :return: misalignment in degrees, in the half-open range [-180, 180)
    """
    return (destination_angle - yaw + 180) % 360 - 180
def coordinates_reached(current_coordinates, target_coordinates, precision='tight'):
    """Return True when *target_coordinates* lies in the rounded neighbourhood
    of *current_coordinates*.

    'tight' tests the target against the floor/ceil of the current position;
    'loose' tests it against a +/-1 window around the rounded position.
    """
    cur_x, cur_y = current_coordinates
    tgt_x, tgt_y = target_coordinates
    if precision == 'tight':
        x_candidates = [np.floor(cur_x), np.ceil(cur_x)]
        y_candidates = [np.floor(cur_y), np.ceil(cur_y)]
    elif precision == 'loose':
        x_candidates = range(round(cur_x) - 1, round(cur_x) + 2)
        y_candidates = range(round(cur_y) - 1, round(cur_y) + 2)
    else:
        raise Exception("incorrect precision setting. should be either 'loose' or 'tight'")
    if tgt_x in x_candidates and tgt_y in y_candidates:
        print("destinaton reached ")
        return True
    return False
def get_surrounding_values(x_pixel, y_pixel, map_data, radius=1):
    """
    Identify and return a pixel's value along with the values of its
    surrounding pixels.

    :param x_pixel: x coordinate of the central pixel
    :param y_pixel: y coordinate of the central pixel
    :param map_data: 2-dimensional map that holds the pixel values
    :param radius: how many pixels out from the central pixel to include
    :return: 2-dimensional array with (x_pixel, y_pixel) at the middle; the
        window is clipped at the map edges, so it may be smaller near a border
    """
    assert np.ndim(map_data) == 2
    # BUG FIX: clamp the lower bounds at 0.  A negative slice start makes
    # numpy count from the *end* of the array, returning an empty or wrong
    # window for pixels within `radius` of the top/left edge.
    y_start = max(y_pixel - radius, 0)
    x_start = max(x_pixel - radius, 0)
    surrounding_pixels = map_data[y_start:(y_pixel + radius + 1), x_start:(x_pixel + radius + 1)]
    return surrounding_pixels
def choose_closest_flag(origin_x, origin_y, map_data, minimum_distance=0, flag=0, x_lower_bound=None,
                        x_upper_bound=None, y_lower_bound=None, y_upper_bound=None):
    """
    Return the map point carrying `flag` that is closest to the origin while
    being at least `minimum_distance` away.

    Typically used to pick the nearest unexplored (flag 0) cell of the
    rover's memory map as the next destination.

    :param map_data: 2-D array of flag values
    :param minimum_distance: candidates closer than this are excluded
    :param flag: integer value marking candidate cells in map_data
    :param x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound:
        optional search-window bounds; default to the whole map
    :return: ((x, y) int tuple, distance, angle) of the chosen point, or
        (None, None, None) when no candidate is found
    TODO allow the rover to choose as a destination any of the (navigable) areas it has already explored
    """
    assert map_data.ndim == 2, " map does not have 2 dimensions "
    if x_lower_bound is None:
        x_lower_bound = 0
    if x_upper_bound is None:
        x_upper_bound = map_data.shape[1]
    if y_lower_bound is None:
        y_lower_bound = 0
    if y_upper_bound is None:
        y_upper_bound = map_data.shape[0]
    # collect every flagged cell inside the search window (np.where on a view
    # returns window-relative indices, hence the bound offsets)
    flag_point_indices = np.where(map_data[y_lower_bound:y_upper_bound, x_lower_bound:x_upper_bound] == flag)
    x_points = flag_point_indices[1] + x_lower_bound
    y_points = flag_point_indices[0] + y_lower_bound
    distances, angles = to_polar_coords_with_origin(origin_x, origin_y, x_points, y_points)
    # NOTE(review): np.any(distances) is False both when there are no
    # candidates AND when every candidate sits exactly at the origin
    # (all distances 0) — confirm the latter is intended to return None.
    if np.any(distances):
        # Get the argmin values given a condition
        # https://seanlaw.github.io/2015/09/10/numpy-argmin-with-a-condition/
        # NOTE(review): if no distance satisfies minimum_distance the masked
        # array is empty and np.argmin raises ValueError — verify callers.
        mask = (distances >= minimum_distance)
        subset_idx = np.argmin(distances[mask])
        parent_idx = np.arange(distances.shape[0])[mask][subset_idx]
        # distance_min_idx = np.argmin(distances)
        distance_min_idx = parent_idx
        min_distance = distances[distance_min_idx]
        accompanying_angle = angles[distance_min_idx]
        x_point = x_points[distance_min_idx]
        y_point = y_points[distance_min_idx]
        chosen_destination_coords = (int(x_point), int(y_point))
        chosen_destination_distance = min_distance
        chosen_destination_angle = accompanying_angle
        x_diff = x_point - float(origin_x)
        y_diff = y_point - float(origin_y)
        # runtime sanity checks: the returned distance/angle must agree with
        # the Pythagorean relation and arctan2 of the coordinate deltas
        c_squared = min_distance ** 2
        a_squared = (x_diff ** 2)
        b_squared = (y_diff ** 2)
        assert np.isclose(c_squared, a_squared + b_squared, rtol=1e-05, atol=1e-08, equal_nan=False)
        assert np.isclose((accompanying_angle), np.arctan2(float(y_diff), x_diff))
        return chosen_destination_coords, chosen_destination_distance, chosen_destination_angle
    else:
        return None, None, None
def choose_farthest_flag(origin_x, origin_y, map_data, maximum_distance=None, flag=0, x_lower_bound=None,
                         x_upper_bound=None, y_lower_bound=None, y_upper_bound=None):
    """
    Return the map point carrying `flag` that is farthest from the origin,
    optionally capped at `maximum_distance`.

    :param map_data: 2-D array of flag values
    :param maximum_distance: if given (and non-zero), only consider points at
        most this far from the origin
    :param flag: integer value marking candidate cells in map_data
    :param x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound:
        optional search-window bounds; default to the whole map
    :return: ((x, y) int tuple, distance, angle) of the chosen point, or
        (None, None, None) when no candidate qualifies
    """
    assert map_data.ndim == 2, " map does not have 2 dimensions "
    if x_lower_bound is None:
        x_lower_bound = 0
    if x_upper_bound is None:
        x_upper_bound = map_data.shape[1]
    if y_lower_bound is None:
        y_lower_bound = 0
    if y_upper_bound is None:
        y_upper_bound = map_data.shape[0]
    # collect every flagged cell inside the search window
    flag_point_indices = np.where(map_data[y_lower_bound:y_upper_bound, x_lower_bound:x_upper_bound] == flag)
    x_points = flag_point_indices[1] + x_lower_bound
    y_points = flag_point_indices[0] + y_lower_bound
    distances, angles = to_polar_coords_with_origin(origin_x, origin_y, x_points, y_points)
    # if there were no points found, return None
    if not distances.size:
        return None, None, None
    if maximum_distance:
        # https://seanlaw.github.io/2015/09/10/numpy-argmin-with-a-condition/
        mask = (distances <= maximum_distance)
        # BUG FIX: np.argmax on an empty selection raises ValueError; treat
        # "no point within maximum_distance" as "nothing found" instead.
        if not np.any(mask):
            return None, None, None
        subset_idx = np.argmax(distances[mask])
        parent_idx = np.arange(distances.shape[0])[mask][subset_idx]
    else:
        parent_idx = np.argmax(distances)
    distance_max_idx = parent_idx
    max_distance = distances[distance_max_idx]
    accompanying_angle = angles[distance_max_idx]
    x_point = x_points[distance_max_idx]
    y_point = y_points[distance_max_idx]
    chosen_destination_coords = (int(x_point), int(y_point))
    return chosen_destination_coords, max_distance, accompanying_angle
def get_range_to_iterate_over(origin_x, origin_y, destination_x, destination_y, angle, granularity):
    """
    Build the x and y sample ranges used to walk the line from origin to
    destination, ordered according to the direction of travel.

    :param angle: direction of travel in radians (as produced by np.arctan2,
        i.e. in [-pi, pi])
    :param granularity: step size of the returned ranges
    :return: (range_to_iterate_over_x, range_to_iterate_over_y) numpy ranges
    """
    # the x range always spans [min, max); reverse it for the headings the
    # original second branch covered
    range_start_x = min(origin_x, destination_x)
    range_end_x = max(origin_x, destination_x)
    if (0 <= abs(angle) <= (np.pi / 2)) or (((3 * np.pi) / 4) < abs(angle) < (np.pi * 2)):
        # these headings iterate x from left to right
        range_to_iterate_over_x = np.arange(range_start_x, range_end_x, granularity)
    else:
        # remaining headings (pi/2 < |angle| <= 3*pi/4): right to left.
        # BUG FIX: this used to be an `elif` with a non-exhaustive condition
        # that could leave the variable unbound.
        range_to_iterate_over_x = np.arange(range_start_x, range_end_x, granularity)[::-1]
    # do the same for y values
    range_start_y = min(origin_y, destination_y)
    range_end_y = max(origin_y, destination_y)
    if (0 <= angle <= np.pi) or (-np.pi <= angle <= -np.pi * 2):
        # upward headings: iterate y bottom to top
        range_to_iterate_over_y = np.arange(range_start_y, range_end_y, granularity)
    else:
        # downward headings: iterate y top to bottom.
        # BUG FIX: angle == -pi matched neither original branch and raised
        # UnboundLocalError; the else now covers it (the y range is empty in
        # that case anyway, since origin_y == destination_y in practice).
        range_to_iterate_over_y = np.arange(range_start_y, range_end_y, granularity)[::-1]
    return range_to_iterate_over_x, range_to_iterate_over_y
def obstacle_crossed_by_line(origin_x, origin_y, destination_x, destination_y, map_data, flag_list, granularity=1,
                             line_width=0, return_all=False):
    """
    Walk the straight line from (origin_x, origin_y) to (destination_x,
    destination_y) and report which cells flagged in `flag_list` it crosses.

    The line is sampled twice — once per x step (solving y = slope*x + b) and
    once per y step (solving x = (y - b) / slope) — so flagged cells are
    detected on both shallow and steep lines.  Only x values divisible by
    `granularity` are sampled, so cells between samples can be missed.

    :param map_data: 2-dimensional array flagging the nature of each cell
    :param flag_list: list of integers that flag obstacles (or areas to avoid)
    :param granularity: sampling step along each axis
    :param line_width: TODO not implemented; would trace a line of this width
    :param return_all: if True, all crossed obstacle coords are returned;
        otherwise just the first (closest) one
    :return: list of (x, y) tuples of crossed cells; when return_all is False,
        a single-element list, or False if nothing is crossed
    """
    assert (np.ndim(map_data) == 2)
    # "draw" the line by getting its different elements
    x_diff = destination_x - float(origin_x)  # convert one of the numbers into float so that we can have more
    y_diff = destination_y - float(origin_y)  # accurate computations, with no rounding off
    if x_diff == 0:
        # vertical line: slope undefined (the x sampling loop will be empty)
        slope = None
        y_intercept = None
    else:
        slope = y_diff / x_diff
        y_intercept = origin_y - (slope * origin_x)
    distance, angle = to_polar_coords_with_origin(origin_x, origin_y, destination_x, destination_y)
    range_to_iterate_over_x, range_to_iterate_over_y = get_range_to_iterate_over(origin_x, origin_y, destination_x,
                                                                                destination_y, angle, granularity)
    # if x y coords are given, check each x, y coordinate pairs from x_points y_points if they are on the line
    if return_all is True:  # run the function until all flagged coordinates that cross the line are returned
        crossed_flagged_coords_x = []
        crossed_flagged_coords_y = []
        # check if any of the x values between the origin and destination have y_values that are obstacles
        for x in range_to_iterate_over_x:
            y = (slope * x) + y_intercept
            # round up and down because numpy only accepts integers when accessing array values
            # speaking of which, it may not be possible to have a granularity that is less than one
            # NOTE(review): y_up is clamped to shape[0] (not shape[0] - 1), so a
            # line reaching the top row can index one past the end — confirm
            # that map borders are never sampled.
            y_up = min(np.ceil(y), map_data.shape[0])
            y_down = max(np.floor(y), 0)
            for flag in flag_list:
                # NOTE(review): appends int(y) (truncated) rather than the
                # y_up/y_down that actually matched — verify intent.
                if (map_data[int(y_up), int(x)] == flag):
                    crossed_flagged_coords_x.append((int(x), int(y)))
                elif (map_data[int(y_down), int(x)] == flag):
                    crossed_flagged_coords_x.append((int(x), int(y)))
        # do the same thing for y
        for y in range_to_iterate_over_y:
            # if the line is vertical, assign the x_value
            if slope is None:
                x = origin_x
            # if there's a slope, compute the x values
            else:
                x = (y - y_intercept) / slope
            x_left = max(np.floor(x), 0)
            x_right = min(np.ceil(x), map_data.shape[1])
            for flag in flag_list:
                if map_data[int(y), int(x_left)] == flag:
                    crossed_flagged_coords_y.append((int(x_left), int(y)))
                if map_data[int(y), int(x_right)] == flag:
                    crossed_flagged_coords_y.append((int(x_right), int(y)))
        # combine the crossed x and y coords with each other, dropping duplicates
        # https://stackoverflow.com/a/1319353
        crossed_flagged_coords = crossed_flagged_coords_x + list(
            set(crossed_flagged_coords_y) - set(crossed_flagged_coords_x))
        # no need to proceed with the rest of the function
        return crossed_flagged_coords
    else:  # just return the first x or y obstacle that is encountered
        # for each flag in the list, check if they are found on the line
        first_obstacle_x = None
        first_obstacle_y = None
        for x in range_to_iterate_over_x:
            # if we already have a first obstacle, do not prceed with the loop
            if first_obstacle_x is not None:
                break
            y = (slope * x) + y_intercept
            # round up and down because numpy only accepts integers when accessing array values
            # speaking of which, it may not be possible to have a granularity that is less than one
            y_up = min(np.ceil(y), map_data.shape[0])
            y_down = max(np.floor(y), 0)
            for flag in flag_list:
                if (map_data[int(y_up), int(x)] == flag):
                    first_obstacle_x = (x, int(y_up))
                    break
                elif (map_data[int(y_down), int(x)] == flag):
                    first_obstacle_x = (x, int(y_down))
                    break
        # do the same thing for y
        for y in range_to_iterate_over_y:
            # if first obstacle y is not none, break the loop
            if first_obstacle_y is not None:
                break
            # if the line is vertical, assign the x_value
            if slope is None:
                x = origin_x
            # if there's a slope, compute the x values
            else:
                x = (y - y_intercept) / slope
            x_left = max(np.floor(x), 0)
            x_right = min(np.ceil(x), map_data.shape[1])
            for flag in flag_list:
                if (map_data[int(y), int(x_left)] == flag):
                    first_obstacle_y = (int(x_left), int(y))
                    break
                elif (map_data[int(y), int(x_right)].astype(int) == flag):
                    first_obstacle_y = (int(x_right), int(y))
                    break
        if first_obstacle_x and first_obstacle_y:
            # compute which obstacle is closest
            first_x_distance = compute_distances(origin_x, origin_y, first_obstacle_x[0], first_obstacle_x[1])
            first_y_distance = compute_distances(origin_x, origin_y, first_obstacle_y[0], first_obstacle_y[1])
            # return the closest obstacle as a list
            if first_x_distance >= first_y_distance:
                return [first_obstacle_y]
            elif first_y_distance > first_x_distance:
                return [first_obstacle_x]
        elif first_obstacle_x:
            return [first_obstacle_x]
        elif first_obstacle_y:
            return [first_obstacle_y]
        # nothing flagged was crossed
        return False
def sidestep_obstacle(origin_x, origin_y, destination_x, destination_y, map_data, navigable_flag, obstacle_flag):
    """
    Plot a two-leg path (origin -> midpoint -> destination) whose legs are
    both free of obstacles according to map_data.

    Candidate midpoints are every cell flagged `navigable_flag`, tried in
    order of increasing total path length; each leg is ray-checked against
    `obstacle_flag` with obstacle_crossed_by_line().

    :param map_data: 2-dimensional array flagged with the nature of each cell
    :param navigable_flag: int flagging navigable pixels in map_data
    :param obstacle_flag: int flagging obstacle pixels in map_data
    :return: a Path_guide namedtuple with fields
        ("x", "y", "midpoint_distance", "midpoint_angle",
         "destination_distance", "destination_angle") —
        x/y are the midpoint coordinates, midpoint_distance/angle are polar
        coordinates from the origin to the midpoint, and
        destination_distance/angle from the midpoint to the destination.
        Returns False when no clear two-leg path exists.
    """
    # compute the distance between origin and all navigable points, and all navigable points to destinaion point
    navigable_points = np.where(map_data == navigable_flag)
    distance_origin_to_midpoint = compute_distances(origin_x, origin_y, navigable_points[1], navigable_points[0])
    distance_midpoint_to_destination = compute_distances(navigable_points[1], navigable_points[0], destination_x,
                                                         destination_y)
    combined_distance = distance_origin_to_midpoint + distance_midpoint_to_destination
    # find the shortest distance, but remember its original index
    original_indices = np.argsort(combined_distance)
    # try candidates best-first; the first fully clear one wins
    for original_index in original_indices:
        midpoint_x = np.array(navigable_points)[1, original_index]
        midpoint_y = np.array(navigable_points)[0, original_index]
        # check if either of the two paths are blocked
        obstacle_crossed_part_1 = obstacle_crossed_by_line(origin_x, origin_y, midpoint_x, midpoint_y, map_data,
                                                           [obstacle_flag])
        if obstacle_crossed_part_1 is not False:
            # if the path was blocked
            continue
        obstacle_crossed_part_2 = obstacle_crossed_by_line(midpoint_x, midpoint_y, destination_x, destination_y,
                                                           map_data, [obstacle_flag])
        if obstacle_crossed_part_2 is not False:
            continue
        # if not blocked, compute the polar coordinates to be sent to the rover
        if (obstacle_crossed_part_1 is False) and (obstacle_crossed_part_2 is False):
            Path_guide = namedtuple("Path_guide",
                                    ["x", "y", "midpoint_distance", "midpoint_angle", "destination_distance",
                                     "destination_angle"])
            midpoint_distance, midpoint_angle = to_polar_coords_with_origin(origin_x, origin_y, midpoint_x, midpoint_y)
            destination_distance, destination_angle = to_polar_coords_with_origin(midpoint_x, midpoint_y, destination_x,
                                                                                  destination_y)
            path_guide = Path_guide(midpoint_x, midpoint_y, midpoint_distance, midpoint_angle, destination_distance,
                                    destination_angle)
            # if both are clear, then return the path guide
            return path_guide
    # if there are no clear paths, then return False
    return False
def get_optimal_midpoint(origin_x, origin_y, destination_x, destination_y, map_data, navigable_flag, obstacle_flag):
    """Return the navigable (x, y) cell minimising the total straight-line
    path length origin -> midpoint -> destination.

    :param map_data: 2-D array of flag values
    :param navigable_flag: int flagging navigable pixels in map_data
    :param obstacle_flag: unused here; kept for signature compatibility
    :return: (x, y) tuple, or None when the map has no navigable cells
    """
    navigable_points = np.where(map_data == navigable_flag)
    print("navigable len ", len(navigable_points))
    # BUG FIX: bail out early when there are no candidates; np.argmin on an
    # empty array raises ValueError.
    if navigable_points[0].size == 0:
        return None
    distance_origin_to_midpoint = compute_distances(origin_x, origin_y, navigable_points[1], navigable_points[0])
    print("len distance origin to midpoint ", len(distance_origin_to_midpoint))
    distance_midpoint_to_destination = compute_distances(navigable_points[1], navigable_points[0], destination_x,
                                                         destination_y)
    print("len distance midpoint to destination", len(distance_midpoint_to_destination))
    combined_distance = distance_origin_to_midpoint + distance_midpoint_to_destination
    print("combined distance ", len(combined_distance))
    # BUG FIX: the result used to be discarded whenever the best index was 0
    # (`if index_with_shortest_distance:` is falsy for 0); now the winning
    # point is always returned.
    index_with_shortest_distance = np.argmin(combined_distance)
    midpoint_x = np.array(navigable_points)[1, index_with_shortest_distance]
    midpoint_y = np.array(navigable_points)[0, index_with_shortest_distance]
    return (midpoint_x, midpoint_y)
def find_waypoint(origin_x, origin_y, destination_x, destination_y, map_data, navigable_flag, obstacle_flag):
    """
    Choose a navigable midpoint ("waypoint") between origin and destination.

    Candidates are every cell flagged `navigable_flag`, ranked by the total
    straight-line length origin -> midpoint -> destination; each candidate's
    legs are ray-checked against `obstacle_flag` with
    obstacle_crossed_by_line().

    :return: a Path_guide namedtuple ("midpoint_x", "midpoint_y",
        "midpoint_distance", "midpoint_angle", "destination_distance",
        "destination_angle").  If both legs are clear all fields are filled;
        if only the origin->midpoint leg is clear the destination fields are
        None; returns None when no candidate has even a clear first leg.
    NOTE(review): the loop visits every candidate even after a complete path
    is found — correct but potentially slow on large maps.
    """
    assert np.ndim(map_data) == 2
    # compute the distance between origin and all navigable points, and all navigable points to destination point
    navigable_points = np.where(map_data == navigable_flag)
    distance_origin_to_midpoint = compute_distances(origin_x, origin_y, navigable_points[1], navigable_points[0])
    distance_midpoint_to_destination = compute_distances(navigable_points[1], navigable_points[0], destination_x,
                                                         destination_y)
    combined_distance = distance_origin_to_midpoint + distance_midpoint_to_destination
    # find the shortest distance, but remember its original index
    original_indices = np.argsort(combined_distance)
    complete_path = []
    midpoint_path_only = []
    Path_guide = namedtuple("Path_guide",
                            ["midpoint_x", "midpoint_y", "midpoint_distance", "midpoint_angle",
                             "destination_distance",
                             "destination_angle"])
    index = 0
    for original_index in original_indices:
        index += 1
        midpoint_x = np.array(navigable_points)[1, original_index]
        midpoint_y = np.array(navigable_points)[0, original_index]
        # check if either of the two paths are blocked
        obstacle_crossed_part_1 = obstacle_crossed_by_line(origin_x, origin_y, midpoint_x, midpoint_y, map_data,
                                                           [obstacle_flag])
        obstacle_crossed_part_2 = obstacle_crossed_by_line(midpoint_x, midpoint_y, destination_x, destination_y,
                                                           map_data, [obstacle_flag])
        if (obstacle_crossed_part_1 is False) and (obstacle_crossed_part_2 is False):
            # both legs clear: full path guide
            midpoint_distance, midpoint_angle = to_polar_coords_with_origin(origin_x, origin_y, midpoint_x, midpoint_y)
            destination_distance, destination_angle = to_polar_coords_with_origin(midpoint_x, midpoint_y, destination_x,
                                                                                  destination_y)
            path_guide = Path_guide(midpoint_x, midpoint_y, midpoint_distance, midpoint_angle, destination_distance,
                                    destination_angle)
            complete_path.append(path_guide)
        elif obstacle_crossed_part_1 is False:
            # only the first leg is clear: partial guide (destination fields None)
            midpoint_distance, midpoint_angle = to_polar_coords_with_origin(origin_x, origin_y, midpoint_x, midpoint_y)
            path_guide = Path_guide(midpoint_x, midpoint_y, midpoint_distance, midpoint_angle, None,
                                    None)
            midpoint_path_only.append(path_guide)
    # candidates were visited best-first, so index 0 is the shortest option
    if complete_path:
        # TODO sort the paths instead of just returning the first one
        path_guide = complete_path[0]
    elif midpoint_path_only:
        path_guide = midpoint_path_only[0]
    else:
        path_guide = None
    return path_guide
def choose_closest_unobstructed_point(origin_x, origin_y, map_data, flag_target=0, flag_obstruction=5,
                                      minimum_distance=0, x_lower_bound=None, x_upper_bound=None, y_lower_bound=None,
                                      y_upper_bound=None):
    """Find the closest `flag_target` cell reachable by an unobstructed
    straight line from the origin.

    Candidates at least `minimum_distance` from the origin are tried nearest
    first; each is ray-checked against `flag_obstruction` with
    obstacle_crossed_by_line().

    :param map_data: 2-D array of flag values
    :return: (x, y) tuple of the chosen cell, or None when no candidate has a
        clear line of sight
    """
    assert map_data.ndim == 2, " map does not have 2 dimensions "
    if x_lower_bound is None:
        x_lower_bound = 0
    if x_upper_bound is None:
        x_upper_bound = map_data.shape[1]
    if y_lower_bound is None:
        y_lower_bound = 0
    if y_upper_bound is None:
        y_upper_bound = map_data.shape[0]
    # all candidate cells inside the search window
    flag_point_indices = np.where(map_data[y_lower_bound:y_upper_bound, x_lower_bound:x_upper_bound] == flag_target)
    x_points = flag_point_indices[1] + x_lower_bound
    y_points = flag_point_indices[0] + y_lower_bound
    # compute the distances to them
    distances = compute_distances(origin_x, origin_y, x_points, y_points)
    # indices of candidates >= minimum_distance, sorted nearest first
    # https://seanlaw.github.io/2015/09/10/numpy-argmin-with-a-condition/
    mask = (distances >= minimum_distance)
    subset_idx = np.argsort(distances[mask])
    parent_idx = np.arange(distances.shape[0])[mask][subset_idx]
    closest_unobstructed_point_index = None
    # once sorted by distance, take the first candidate whose line is clear
    for index in parent_idx:
        obstruction_present = obstacle_crossed_by_line(origin_x, origin_y, x_points[index], y_points[index], map_data,
                                                       [flag_obstruction], return_all=False)
        if obstruction_present is False:
            closest_unobstructed_point_index = index
            break
    # BUG FIX: test for presence with `is not None`; the previous truthiness
    # test discarded a perfectly valid result whenever the winning index was 0.
    if closest_unobstructed_point_index is not None:
        closest_unobstructed_point = (x_points[closest_unobstructed_point_index],
                                      y_points[closest_unobstructed_point_index])
    else:
        closest_unobstructed_point = None
    return closest_unobstructed_point
def get_closest_accessible_navigable_point_to_destination(origin_x, origin_y, destination_x, destination_y, map_data,
                                                          navigable_flag=7, obstacle_flag=5, minimum_distance=0):
    """Among all navigable cells visible (unobstructed) from the origin,
    return the one closest to the destination.

    :param map_data: 2-D array of flag values
    :param minimum_distance: unused; kept for signature compatibility
    :return: (x, y) tuple, or None when no navigable cell has a clear
        straight line from the origin
    """
    assert np.ndim(map_data) == 2, "map data does not have 2 dimensions"
    navigable_points = np.where(map_data[:, :] == navigable_flag)
    x_points = navigable_points[1]
    y_points = navigable_points[0]
    # compute the distances of the navigable_points to the destination_point
    distances = compute_distances(destination_x, destination_y, x_points, y_points)
    # candidates ordered by closeness to the destination
    indices = np.argsort(distances)
    closest_unobstructed_point_index = None
    current_closest_distance = None
    # keep the unobstructed candidate that is closest to the destination
    for index in indices:
        obstruction_present = obstacle_crossed_by_line(origin_x, origin_y, x_points[index], y_points[index], map_data,
                                                       [obstacle_flag], return_all=False)
        if obstruction_present is False:
            if current_closest_distance is None or current_closest_distance > distances[index]:
                current_closest_distance = distances[index]
                closest_unobstructed_point_index = index
    # BUG FIX: `is not None` instead of truthiness -- index 0 is a valid
    # result and was previously dropped, returning None incorrectly.
    if closest_unobstructed_point_index is not None:
        closest_unobstructed_point = (x_points[closest_unobstructed_point_index],
                                      y_points[closest_unobstructed_point_index])
    else:
        closest_unobstructed_point = None
    return closest_unobstructed_point
def get_nav_points_besides_unexplored_area(map_data, x_lower_bound=None, x_upper_bound=None, y_lower_bound=None,
                                           y_upper_bound=None):
    """
    Vectorised frontier search: return the navigable cells (flag 7) inside the
    window that have at least one 4-connected unexplored neighbour (flag 0).

    :param map_data: 2-D array of flag values
    :param x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound:
        optional search-window bounds; default to the whole map
    :return: (N, 2) numpy array of [x, y] rows
    NOTE(review): the neighbour lookups below do not clamp the +1 offsets, so
    a navigable cell on the last row/column raises IndexError, and the -1
    offsets wrap to the opposite edge via negative indexing — confirm that
    navigable cells never touch the map border.
    """
    assert map_data.ndim == 2, " map does not have 2 dimensions "
    nav_flag = 7
    unexplored_flag = 0
    if x_lower_bound is None:
        x_lower_bound = 0
    if x_upper_bound is None:
        x_upper_bound = map_data.shape[1]
    if y_lower_bound is None:
        y_lower_bound = 0
    if y_upper_bound is None:
        y_upper_bound = map_data.shape[0]
    # get all the nav points in the desired area
    flag_point_indices = np.where(map_data[y_lower_bound:y_upper_bound, x_lower_bound:x_upper_bound] == nav_flag)
    x_points = flag_point_indices[1] + x_lower_bound
    y_points = flag_point_indices[0] + y_lower_bound
    x_coordinate = x_points
    y_coordinate = y_points
    # boolean OR over the four neighbour directions, evaluated for every nav
    # cell at once
    nav_areas_beside_unexplored_point_indices = np.where(
        (map_data[y_coordinate + 1, x_coordinate] == unexplored_flag) |
        (map_data[y_coordinate - 1, x_coordinate] == unexplored_flag) |
        (map_data[y_coordinate, x_coordinate - 1] == unexplored_flag) |
        (map_data[y_coordinate, x_coordinate + 1] == unexplored_flag)
    )
    unexplored_x = x_points[nav_areas_beside_unexplored_point_indices]
    unexplored_y = y_points[nav_areas_beside_unexplored_point_indices]
    result = np.column_stack((unexplored_x, unexplored_y))
    return result
def get_unexplored_points_besides_navigable_areas(map_data, x_lower_bound=None, x_upper_bound=None, y_lower_bound=None,
                                                  y_upper_bound=None, return_value="nav"):
    """For every unexplored cell (flag 0) in the window, check its four
    neighbours and record the first navigable (flag 7) one found.

    :param map_data: 2-D array of flag values
    :param x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound:
        optional search-window bounds; default to the whole map
    :param return_value: "nav" -> list of navigable neighbour coords,
        "unexplored" -> list of unexplored coords touching navigable cells,
        anything else -> (nav_points, unexplored_points)
    :return: list(s) of (x, y) tuples; empty when nothing qualifies
    """
    assert map_data.ndim == 2, " map does not have 2 dimensions "
    unexplored_flag = 0
    nav_flag = 7
    if x_lower_bound is None:
        x_lower_bound = 0
    if x_upper_bound is None:
        x_upper_bound = map_data.shape[1]
    if y_lower_bound is None:
        y_lower_bound = 0
    if y_upper_bound is None:
        y_upper_bound = map_data.shape[0]
    # get all the unexplored points in the desired area
    flag_point_indices = np.where(map_data[y_lower_bound:y_upper_bound, x_lower_bound:x_upper_bound] == unexplored_flag)
    x_points = flag_point_indices[1] + x_lower_bound
    y_points = flag_point_indices[0] + y_lower_bound
    unexplored_points = []
    nav_points = []
    # check each unexplored pixel's four neighbours, clamped to the window
    for index in range(x_points.size):
        x_coordinate = x_points[index]
        y_coordinate = y_points[index]
        # BUG FIX: clamp at *_upper_bound - 1.  The upper bounds are exclusive
        # (array shape values), so min(y + 1, y_upper_bound) could index one
        # past the last row/column and raise IndexError at the map edge.
        top_x = x_coordinate
        top_y = min(y_coordinate + 1, y_upper_bound - 1)
        top = map_data[top_y, top_x]
        bottom_x = x_coordinate
        bottom_y = max(y_coordinate - 1, y_lower_bound)
        bottom = map_data[bottom_y, bottom_x]
        left_x = max(x_coordinate - 1, x_lower_bound)
        left_y = y_coordinate
        left = map_data[left_y, left_x]
        right_x = min(x_coordinate + 1, x_upper_bound - 1)
        right_y = y_coordinate
        right = map_data[right_y, right_x]
        if top == nav_flag:
            unexplored_points.append((x_coordinate, y_coordinate))
            nav_points.append((top_x, top_y))
        elif bottom == nav_flag:
            unexplored_points.append((x_coordinate, y_coordinate))
            nav_points.append((bottom_x, bottom_y))
        elif left == nav_flag:
            unexplored_points.append((x_coordinate, y_coordinate))
            nav_points.append((left_x, left_y))
        elif right == nav_flag:
            unexplored_points.append((x_coordinate, y_coordinate))
            nav_points.append((right_x, right_y))
    # if there are no unexplored points beside nav points, return []
    if return_value == "nav":
        return nav_points
    elif return_value == "unexplored":
        return unexplored_points
    else:
        return nav_points, unexplored_points
def determine_quadrant(origin_x, origin_y, map_data):
    """Return the quadrant number (1-4) that (origin_x, origin_y) falls in.

    The map is split at half of each axis; quadrant 1 is the upper-right
    (x and y both at or above the midpoints), numbering proceeds
    counter-clockwise.

    :param origin_x: x coordinate to classify
    :param origin_y: y coordinate to classify
    :param map_data: 2-D array whose shape defines the map extents
    :return: quadrant number 1, 2, 3 or 4
    """
    mid_x = map_data.shape[1] / 2
    mid_y = map_data.shape[0] / 2
    in_right_half = origin_x >= mid_x
    in_upper_half = origin_y >= mid_y
    if in_right_half and in_upper_half:
        return 1
    if not in_right_half and in_upper_half:
        return 2
    if not in_right_half and not in_upper_half:
        return 3
    if in_right_half and not in_upper_half:
        return 4
    raise Exception("unable to determine quadrant of coordinates")
def get_coordinate_lower_and_upper_bounds(quadrant_number, map_data):
    """Return the (x_lower, x_upper, y_lower, y_upper) bounds of a quadrant.

    :param quadrant_number: quadrant index 1-4 (see determine_quadrant)
    :param map_data: 2-D array whose shape defines the map extents
    :return: tuple (x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound)
    :raises Exception: when quadrant_number is not 1-4
    """
    mid_x = int(map_data.shape[1] / 2)
    mid_y = int(map_data.shape[0] / 2)
    end_x = map_data.shape[1]
    end_y = map_data.shape[0]
    # Each quadrant maps to one fixed set of half-plane bounds.
    bounds_by_quadrant = {
        1: (mid_x, end_x, mid_y, end_y),
        2: (0, mid_x, mid_y, end_y),
        3: (0, mid_x, 0, mid_y),
        4: (mid_x, end_x, 0, mid_y),
    }
    if quadrant_number not in bounds_by_quadrant:
        raise Exception("inappropriate quadrant input")
    return bounds_by_quadrant[quadrant_number]
def get_new_target(rover):
    """Pick the rover's next target coordinate for the current explore mode.

    Modes cycle explore -> sweep -> return_home; once the rover is back at
    its start position, the target quadrant is advanced and the cycle
    restarts (recursively requesting a target from the new quadrant).

    :param rover: rover state object (pos, start_pos, target_quadrant,
        explore_mode, memory_map); mutated in place
    :return: (x, y) tuple of the new target
    """
    print("computing new target")
    # code to get new target point
    # if new target generated then continue
    # if new target fails to generate, assign start point as target. return
    # to the middle of the map such that we can start exploring other quadrants
    # if the start position has never been assigned, assign it
    if not rover.start_pos:
        rover.start_pos = (int(rover.pos[0]), int(rover.pos[1]))
    # initialize the first target quadrant to be the current quadrant the rover is in
    if not rover.target_quadrant:
        rover.target_quadrant = determine_quadrant(rover.pos[0], rover.pos[1],
                                                   rover.memory_map[:, :, 3])
        # rover.target_quadrant = 3
    # get the x and y bounds for the current target quadrant
    rover_x_lower, rover_x_upper, rover_y_lower, rover_y_upper = get_coordinate_lower_and_upper_bounds(
        rover.target_quadrant, rover.memory_map[:, :, 3])
    # get all the unexplored points that are next to a nav point
    # unexplored_points_beside_nav_points = get_unexplored_points_besides_navigable_areas(rover.memory_map[:, :, 3],
    # x_lower_bound=rover_x_lower,
    # x_upper_bound=rover_x_upper,
    # y_lower_bound=rover_y_lower,
    # y_upper_bound=rover_y_upper, return_value="nav")
    #
    # rover_xpos = round(int(rover.pos[0]))
    # rover_ypos = round(int(rover.pos[1]))
    # # if available, get the closest one to the rover
    # if unexplored_points_beside_nav_points:
    # print("found unexplored points beside nav points ", unexplored_points_beside_nav_points)
    # array_format = np.asarray(unexplored_points_beside_nav_points)
    # x_points = array_format[:, 1]
    # y_points = array_format[:, 0]
    # distances_from_rover = compute_distances(rover_xpos, rover_ypos, x_points, y_points)
    # closest_point_index = np.argmin(distances_from_rover)
    # new_target_x = x_points[closest_point_index]
    # new_target_y = y_points[closest_point_index]
    # # if none, choose the nearest accessible unexplored point, and travel to it
    # else:
    # # print("unexplored points beside nav points not found, looking for unobstructed unexplored points intead")
    # # closest_unobstructed_point = choose_closest_unobstructed_point(rover_xpos, rover_ypos,
    # # rover.memory_map[:, :, 3], flag_target=0,
    # # flag_obstruction=5, minimum_distance=0,
    # # x_lower_bound=rover_x_lower,
    # # x_upper_bound=rover_x_upper,
    # # y_lower_bound=rover_y_lower,
    # # y_upper_bound=rover_y_upper)
    # # if closest_unobstructed_point:
    # # print("found closest unobstructed point ", closest_unobstructed_point)
    # # new_target_x = closest_unobstructed_point[0]
    # # new_target_y = closest_unobstructed_point[1]
    # # else:
    # print("no unobstructed points found")
    # Fallback target: the rover's current (rounded) position.
    new_target_x = round(rover.pos[0])
    new_target_y = round(rover.pos[1])
    if rover.explore_mode == 'explore':
        # Head for the navigable point (flag 7) farthest from the start.
        new_coords = choose_farthest_flag(rover.start_pos[0], rover.start_pos[1],
                                          rover.memory_map[:, :, 3],
                                          flag=7, x_lower_bound=rover_x_lower,
                                          x_upper_bound=rover_x_upper,
                                          y_lower_bound=rover_y_lower,
                                          y_upper_bound=rover_y_upper)[0]
        if new_coords:
            new_target_x = new_coords[0]
            new_target_y = new_coords[1]
        print("new targets ", new_target_x, new_target_y)
        if coordinates_reached(rover.pos, (new_target_x, new_target_y), precision="loose"):
            # if (new_target_x, new_target_y) == rover.pos[0], rover.pos[1]:
            rover.explore_mode = 'sweep'
    elif rover.explore_mode == 'sweep':
        new_coords = get_nav_points_besides_unexplored_area(rover.memory_map[:, :, 3],
                                                            x_lower_bound=rover_x_lower,
                                                            x_upper_bound=rover_x_upper,
                                                            y_lower_bound=rover_y_lower,
                                                            y_upper_bound=rover_y_upper)
        if np.any(new_coords):
            print("these are the new coords ", new_coords)
            # get the first element, or better yet, get the element closest to the rover
            # NOTE(review): the helper above returns a plain list of (x, y)
            # tuples, but the code below indexes it like a 2-D ndarray
            # (new_coords[:, 0]) -- confirm the helper's actual return type.
            distances = compute_distances(rover.pos[0], rover.pos[1], new_coords[:, 0], new_coords[:, 1])
            min_index = np.argmin(distances)
            # new_coords = new_coords[0]
            new_target_x = int(new_coords[min_index, 0])
            new_target_y = int(new_coords[min_index, 1])
            # rover.explore_mode = 'explore'
        else:
            rover.explore_mode = 'return_home'
    elif rover.explore_mode == "return_home":
        print("no coordinates found NONE, returning to starting point ")
        # get the closest accessible nav point to the start_position
        new_coords = choose_closest_flag(rover.start_pos[0], rover.start_pos[1], rover.memory_map[:, :, 3])[0]
        if new_coords:
            new_target_x = new_coords[0]
            new_target_y = new_coords[1]
        else:
            new_target_x = rover.start_pos[0]
            new_target_y = rover.start_pos[1]
        # once we've reached the starting position:
        current_coords = (round(rover.pos[0]), round(rover.pos[1]))
        if coordinates_reached(current_coords, rover.start_pos, precision="loose"):
            # switch quadrants to search for targets
            if rover.target_quadrant == 1:
                rover.target_quadrant = 2
            elif rover.target_quadrant == 2:
                rover.target_quadrant = 3
            elif rover.target_quadrant == 3:
                rover.target_quadrant = 4
            elif rover.target_quadrant == 4:
                rover.target_quadrant = 1
            else:
                print("invalid quadrant")
            # once quadrants have been switched, rever to explore mode
            rover.explore_mode = "explore"
            # get a new target from the new quadrant
            new_coords = get_new_target(rover)
            new_target_x = new_coords[0]
            new_target_y = new_coords[1]
    return tuple((new_target_x, new_target_y))
def generate_path_points(rover):
    """Build the waypoint list from the rover's position to rover.target.

    The list is ordered with the final target first (waypoints are
    presumably popped from the end -- TODO confirm against the consumer).

    :param rover: rover state object (pos, target, memory_map)
    :return: list of (x, y) waypoints (possibly empty)
    """
    # if we don't have any, then we get new ones
    # 1. recheck if we can travel to destination in a straight line
    # if there are no obstacles blocking the way to the target, then assign target as destination point
    flag_list = [5]
    path = []
    obstacles = obstacle_crossed_by_line(rover.pos[0], rover.pos[1],
                                         rover.target[0],
                                         rover.target[1],
                                         rover.memory_map[:, :, 3], flag_list)
    if not obstacles:
        # destination_point = rover.target
        path = [rover.target]
    # 2. if there are obstacles, then let's check if we can sidestep these obstacles:
    else:
        path_guide = sidestep_obstacle(rover.pos[0], rover.pos[1],
                                       rover.target[0],
                                       rover.target[1],
                                       rover.memory_map[:, :, 3],
                                       7, 5)
        # if we are successful in finding a path that can sidestep, we assign the nearer point as the
        # destination, and we queue the Rover.target point in Rover.path for later use upon reaching
        # Rover.destination
        if path_guide:
            # destination_point = (path_guide.x, path_guide.y)
            path = [rover.target, (path_guide.x, path_guide.y)]
        # 3. if we were unable to sidestep, then we plot a path using A *
        else:
            print("attempting A *")
            # Cells flagged 5 (obstacle) or 0 (unexplored) are walkable=False
            # in the pathfinding grid.
            obstaclevalues = [5, 0]
            matrix = np.in1d(rover.memory_map[:, :, 3].ravel(), obstaclevalues).reshape(
                rover.memory_map[:, :, 3].shape).tolist()
            grid = Grid(matrix=matrix)
            start = grid.node(round(rover.pos[0]), round(rover.pos[1]))
            end = grid.node(rover.target[0], rover.target[1])
            print("computing A star")
            finder = AStarFinder(diagonal_movement=DiagonalMovement.always)
            path, runs = finder.find_path(start, end, grid)
            print("computation finished with runs: ", runs)
            # Reverse so the path is stored target-first like the other branches.
            path = list(reversed(path))
    return path
| true |
4b7cc70fd427c3fb2ffe3e9466b9dbdd308d4a89 | Python | mfkiwl/self_drive_rtk | /src/test.py | UTF-8 | 285 | 2.96875 | 3 | [] | no_license | import os
import sys
# Print the different ways a script can discover its own location.
path_reports = (
    ('os.getcwd():', os.getcwd()),
    ('dirname(sys.path[0]):', os.path.dirname(sys.path[0])),
    ('dirname(abspath(sys.argv[0])):', os.path.dirname(os.path.abspath(sys.argv[0]))),
    ('dirname(realpath(__file__)):', os.path.dirname(os.path.realpath(__file__))),
)
for label, value in path_reports:
    print(label, value)
| true |
8d2bfbc6e211aa36608efb8fdcebe739e83d54b1 | Python | jpatel3/nlp-playground | /summary.py | UTF-8 | 7,981 | 3.296875 | 3 | [] | no_license | # coding=UTF-8
from __future__ import division
import re
# Created by Shlomi Babluki
# April, 2013
class SummaryTool(object):
#Naive method for spliting a text into sentences
def split_content_to_sentences(self, content):
content = content.replace("\n",". ")
return content.split(". ")
#Naive method for splitting a text inot paragraphs
def split_content_to_paragraphs(self, content):
return content.split("\n\n")
#calculate the intersection between 2 sentences
def sentences_intersection(self, sentence1, sentence2):
#split the sentence into words/tokens
s1 = set(sentence1.split(" "))
s2 = set(sentence2.split(" "))
#In case of no intersection return 0
if(len(s1) +len(s2) == 0):
return 0
#normalize the result by average number of words - This is the theme
return len(s1.intersection(s2)) / ((len(s1) + len(s2)) / 2 )
#Remove all non-alphabetic chars from the sentence
#We'll use formatted senteces as a key in our senteces dictionary
def format_sentence(self, sentence):
sentence = re.sub(r'\W+', '', sentence)
return sentence
#convert the content inot dictionary <K, V>
#k = The format senteces
#v = The rank of the sentence
def get_sentence_rank(self, content):
#split content into senteces
sentence = self.split_content_to_sentences(content)
#calculate the intersection of every two sentence
n = len(sentence)
values = [[0 for x in xrange(n)] for x in xrange(n)]
for i in range(0, n):
for j in range(0, n):
values[i][j] = self.sentences_intersection(sentence[i],sentence[j])
#Build the sentence dictionary
#The score of a sentence is the sum of all its intersection
sentences_dict = {}
for i in range(0, n):
score = 0
for j in range(0, n):
if i == j:
continue;
score += values[i][j]
sentences_dict[self.format_sentence(sentence[i])] = score
return sentences_dict
#Return the best sentence in a paragraph
def get_best_sentence(self, paragraph, sentences_dict):
#split the paragraph into two lines.
sentences = self.split_content_to_sentences(paragraph)
#ignore short paragraph
if len(sentences) < 2:
return ""
#Get the best sentence according to the sentence dict
best_sentence = ""
max_value = 0
for s in sentences:
strip_s = self.format_sentence(s)
if strip_s:
if sentences_dict[strip_s] > max_value:
max_value = sentences_dict[strip_s]
best_sentence = s
return best_sentence
#Build the summary
def get_summary(self, title, content, sentences_dict):
#split the content into paragraphs
paragraphs = self.split_content_to_paragraphs(content)
#add the title
summary = []
summary.append(title.strip())
summary.append("")
#Add the best sentene frome ach paragraph
for p in paragraphs:
sentence = self.get_best_sentence(p, sentences_dict).strip()
if sentence:
summary.append(sentence)
return ("\n").join(summary)
#Maain method, rust run "python summary.py"
def main():
#Demo
#Content from some url -
title = "Swayy is a beautiful new dashboard for discovering and curating online content"
content = """
Lior Degani, the Co-Founder and head of Marketing of Swayy, pinged me last week when I was in California to tell me about his startup and give me beta access. I heard his pitch and was skeptical. I was also tired, cranky and missing my kids – so my frame of mind wasn’t the most positive.
I went into Swayy to check it out, and when it asked for access to my Twitter and permission to tweet from my account, all I could think was, “If this thing spams my Twitter account I am going to bitch-slap him all over the Internet.” Fortunately that thought stayed in my head, and not out of my mouth.
One week later, I’m totally addicted to Swayy and glad I said nothing about the spam (it doesn’t send out spam tweets but I liked the line too much to not use it for this article). I pinged Lior on Facebook with a request for a beta access code for TNW readers. I also asked how soon can I write about it. It’s that good. Seriously. I use every content curation service online. It really is That Good.
What is Swayy? It’s like Percolate and LinkedIn recommended articles, mixed with trending keywords for the topics you find interesting, combined with an analytics dashboard that shows the trends of what you do and how people react to it. I like it for the simplicity and accuracy of the content curation. Everything I’m actually interested in reading is in one place – I don’t have to skip from another major tech blog over to Harvard Business Review then hop over to another major tech or business blog. It’s all in there. And it has saved me So Much Time
After I decided that I trusted the service, I added my Facebook and LinkedIn accounts. The content just got That Much Better. I can share from the service itself, but I generally prefer reading the actual post first – so I end up sharing it from the main link, using Swayy more as a service for discovery.
I’m also finding myself checking out trending keywords more often (more often than never, which is how often I do it on Twitter.com).
The analytics side isn’t as interesting for me right now, but that could be due to the fact that I’ve barely been online since I came back from the US last weekend. The graphs also haven’t given me any particularly special insights as I can’t see which post got the actual feedback on the graph side (however there are numbers on the Timeline side.) This is a Beta though, and new features are being added and improved daily. I’m sure this is on the list. As they say, if you aren’t launching with something you’re embarrassed by, you’ve waited too long to launch.
It was the suggested content that impressed me the most. The articles really are spot on – which is why I pinged Lior again to ask a few questions:
How do you choose the articles listed on the site? Is there an algorithm involved? And is there any IP?
Yes, we’re in the process of filing a patent for it. But basically the system works with a Natural Language Processing Engine. Actually, there are several parts for the content matching, but besides analyzing what topics the articles are talking about, we have machine learning algorithms that match you to the relevant suggested stuff. For example, if you shared an article about Zuck that got a good reaction from your followers, we might offer you another one about Kevin Systrom (just a simple example).
Who came up with the idea for Swayy, and why? And what’s your business model?
Our business model is a subscription model for extra social accounts (extra Facebook / Twitter, etc) and team collaboration.
The idea was born from our day-to-day need to be active on social media, look for the best content to share with our followers, grow them, and measure what content works best.
Who is on the team?
Ohad Frankfurt is the CEO, Shlomi Babluki is the CTO and Oz Katz does Product and Engineering, and I [Lior Degani] do Marketing. The four of us are the founders. Oz and I were in 8200 [an elite Israeli army unit] together. Emily Engelson does Community Management and Graphic Design.
If you use Percolate or read LinkedIn’s recommended posts I think you’ll love Swayy.
➤ Want to try Swayy out without having to wait? Go to this secret URL and enter the promotion code thenextweb . The first 300 people to use the code will get access.
"""
st = SummaryTool()
#Build the sentences dictionary
sentences_dict = st.get_sentence_rank(content)
#Build the summary with the sentences daily
summary = st.get_summary(title, content, sentences_dict)
#print the summary
print summary
#Print the ration between summary lenght and the original lenght
print ""
print "Original length %s"%(len(title) + len(content))
print "Summary length %s"%len(summary)
print "Summary Ration: %s"%(100 - (100 * ( len(summary) / ( len(title) + len(content)))))
if __name__ == '__main__':
main()
| true |
692b7e2ff7958eaa98085e02cc5d13f9e3b35f18 | Python | yishuen/python-strings-indepth-lab | /string_functions.py | UTF-8 | 2,265 | 3.734375 | 4 | [] | no_license | def say_hello(name):
n = str(name)
return "Hi my name is {}".format(n)
# takes in a name and returns the string "Hi my name is " plus the name
# use whichever form of interpolation is most appropriate
def replace_given_substring(str_to_replace, str_to_insert, string):
    """Replace every occurrence of a substring within a string.

    :param str_to_replace: substring to be replaced
    :param str_to_insert: replacement substring
    :param string: text to operate on
    :return: the new string with all replacements applied
    """
    updated = string.replace(str_to_replace, str_to_insert)
    return updated
def remove_duplicate_punctuation(string):
    """Collapse runs of a repeated punctuation mark down to a single one.

    e.g. "Hi!!!!!!" -> "Hi!"
    e.g. "Hello..... My name is Terrance!! How are you???" ->
         "Hello. My name is Terrance! How are you?"

    Fix: the original indexed string[-1] unconditionally and raised
    IndexError on an empty string.

    :param string: text to clean up
    :return: text with duplicated punctuation collapsed
    """
    from string import punctuation
    if not string:
        return ""
    punc = set(punctuation)
    filtered = ""
    for k in range(len(string) - 1):
        ch = string[k]
        # Keep the char unless it is a punctuation mark duplicated by the
        # very next character (only the last of a run survives).
        if ch not in punc or ch != string[k + 1]:
            filtered += ch
    # The final character is always kept (it ends any trailing run).
    return filtered + string[-1]
def atdotcom(x):
    """Return True when *x* contains both an '@' symbol and '.com'.

    Fix: the original condition `"@" and ".com" in x` parsed as
    `bool("@") and (".com" in x)`, so the '@' was never actually checked.
    """
    return "@" in x and ".com" in x
def nospchx(x):
    """Return the characters of *x* as a list with special characters removed.

    NOTE(review): the literals '\`' and '\"' are kept exactly as in the
    original -- '\`' is actually the two characters backslash+backtick, so a
    bare backtick is NOT filtered; confirm whether that was intended.
    """
    special_chx = ['*','~','#','$','%','&','(',')','\`','\"',':',';','/','>','<', '!']
    return [chx for chx in x if chx not in special_chx]
def validate_email_format(email):
    """Return True when *email* contains '@' plus '.com' and no special chars.

    Relies on the sibling helpers atdotcom() and nospchx().
    """
    has_at_and_dotcom = atdotcom(email) == True
    has_no_special_chars = nospchx(email) == list(email)
    return has_at_and_dotcom and has_no_special_chars
def anonymize_credit_card_number(credit_card_number):
    """Mask every digit except the last four with 'X', keeping separators.

    e.g. "1234 5678 9012 3456" -> "XXXX XXXX XXXX 3456"

    Fix: for inputs shorter than four characters the original slice
    arithmetic duplicated characters (e.g. "123" -> "XX123"); such inputs
    are now returned unchanged (nothing to mask).

    :param credit_card_number: card number string (may contain spaces etc.)
    :return: the masked string
    """
    ccno = credit_card_number
    if len(ccno) <= 4:
        return ccno
    digits = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
    # Mask only digit characters; separators (spaces, dashes) pass through.
    masked = "".join("X" if ch in digits else ch for ch in ccno[:-4])
    return masked + ccno[-4:]
4b14f2b5a4205b6308ec56427cd0d4395b6b2dad | Python | ktan2020/tooling | /misc/luhn_check.py | UTF-8 | 1,360 | 3.234375 | 3 | [
"MIT"
] | permissive |
import sys
import re
import unittest
def luhn_check1(cc_no):
    """Alternative Luhn checksum validation of *cc_no*.

    Spaces and tabs are stripped before checking.

    :return: True when the checksum is valid
    """
    digit_list = [int(ch) for ch in re.sub("[ \t]", "", cc_no)]
    total = 0
    double_next = False
    # Walk the digits right-to-left; every second digit is doubled and its
    # decimal digits summed (sum(divmod(d*2, 10)) == d*2 or d*2 - 9).
    for digit in reversed(digit_list):
        if double_next:
            total += sum(divmod(digit * 2, 10))
        else:
            total += digit
        double_next = not double_next
    return total % 10 == 0
def luhn_check(cc_no):
    """Return True when *cc_no* passes the Luhn checksum.

    Spaces and tabs in *cc_no* are ignored.

    Fix: the original used bare `map(...)[::-1]`, which raises TypeError on
    Python 3 where map() returns a lazy iterator; a list comprehension
    behaves identically on both Python 2 and 3.
    """
    digits = [int(ch) for ch in re.sub("[ \t]", "", cc_no)][::-1]
    s = 0
    for i, d in enumerate(digits):
        if i % 2 == 1:
            # Double every second digit (counting from the right); subtract
            # 9 when the doubled value has two digits.
            d = d * 2 - 9 if d * 2 > 9 else d * 2
        s += d
    return s % 10 == 0
def main():
    """CLI entry point: print the Luhn validity of the first argument.

    Fix: `print expr` is a SyntaxError under Python 3; the function-call
    form behaves identically on Python 2 for a single argument.
    """
    print(luhn_check(sys.argv[1]))


if __name__ == "__main__":
    main()
class _(unittest.TestCase):
    """Checks luhn_check against known valid and invalid card numbers."""

    def test_valid_numbers(self):
        """Every number here carries a correct Luhn checksum."""
        valid = [
            "4929985773906955",
            "49927398716",
            "1234567812345670",
            "345887019563059",
            "4539149173996601",
            "378385108533448",
            "4111 1111 1111 1111",
        ]
        assert all(luhn_check(number) for number in valid)

    def test_invalid_numbers(self):
        """Every number here fails the Luhn checksum."""
        invalid = [
            "499273987164",
            "49927398717",
            "1234567812345678",
            "4111 1111 1111 1121",
            "4221 1111 1111 1111",
        ]
        assert all(not luhn_check(number) for number in invalid)
e6da859ca549e11eb75ab8e74ffcd3a99f45b07d | Python | team5419/fingerprint-scanner | /main.py | UTF-8 | 2,157 | 2.640625 | 3 | [] | no_license | import requests
import getpass
import pyfingerprint
def login(session, email, password):
res = session.post(
"https://timesheet.team5419.org/sessions",
data={
"email" : email,
"password" : password
}
)
print(res.status_code)
print("logged in!")
return res.cookies.get("_timesheet_session")
def logout(session):
res = session.get(
"https://timesheet.team5419.org/log_out"
)
print(res.status_code)
print("logged out!")
def loguser(session, key):
print(session.cookies["_timesheet_session"])
res = session.post(
"https://timesheet.team5419.org/timelogs",
data={
"authenticity_token": session.cookies,
"owner_userid": key,
"multi": "Submit"
}
)
print(res.text)
return res
# Authenticate against the timesheet service, then loop forever matching
# fingerprints against the sensor's stored templates and logging time for
# the matched user.
with requests.Session() as session:
    try:
        sensor = pyfingerprint.PyFingerprint(
            '/dev/ttyUSB0',
            57600,
            0xFFFFFFFF,
            0x00000000
        )
        if sensor.verifyPassword() == False:
            raise ValueError('The given fingerprint sensor password is wrong!')
    except Exception as e:
        print('The fingerprint sensor could not be initialized!')
        print('Exception message: ' + str(e))
        exit(1)

    login(
        session=session,
        email=input("Enter email: "),
        password=getpass.getpass()
    )

    print('Currently used templates: ' + str(sensor.getTemplateCount()) +'/'+ str(sensor.getStorageCapacity()))

    try:
        while True:
            # Block until a finger is placed on the reader.
            while sensor.readImage() == False:
                pass
            sensor.convertImage(0x01)
            result = sensor.searchTemplate()
            # BUG FIX: removed a stray bare `pyfingerprint()` call here --
            # it invoked the *module* object, raising TypeError on every
            # successful scan.
            userID = result[0]
            accuracy = result[1]
            if userID == -1:
                print("No match found!")
            else:
                # Typo fix in the user-facing message: "Loged" -> "Logged".
                print(f"Logged user with id {userID}, accuracy {accuracy}.")
                loguser(session, userID)
    except KeyboardInterrupt:
        print("123")
    finally:
        logout(session)  # logout doesn't work (original author's note)
15f7e72c7be8b12447d203fc8f986044e95f47d5 | Python | konker/isoveli | /meerkat/meerkat/filters/dummy.py | UTF-8 | 344 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#
# meerkat.meerkat.filters.dummy
#
# Copyright 2012 Konrad Markus
#
# Author: Konrad Markus <konker@gmail.com>
#
from meerkat.filters import BaseFilter
class Uppercase(BaseFilter):
    """Filter that maps any input to its uppercase string representation."""

    def filter(self, data):
        # Coerce to str first so non-string payloads are handled too.
        text = str(data)
        return text.upper()
class Lowercase(BaseFilter):
    """Filter that maps any input to its lowercase string representation."""

    def filter(self, data):
        # Coerce to str first so non-string payloads are handled too.
        text = str(data)
        return text.lower()
| true |
e9afa76b4543c7b89183317c72421abf68b95b2e | Python | Yiyang-C/Data-Mining | /hw3/task2.py | UTF-8 | 7,498 | 2.578125 | 3 | [] | no_license | import sys
import json
import networkx as nx
import itertools
import copy
import collections
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
def readInput(fp):
    """Read a JSON-lines file and return the parsed objects as a list.

    Fix: the original iterated over `open(fp, ...)` without ever closing
    the file; `with` guarantees the handle is released.

    :param fp: path to a UTF-8 file with one JSON document per line
    :return: list of deserialized objects, in file order
    """
    with open(fp, 'r', encoding='utf-8') as fh:
        return [json.loads(line) for line in fh]
if __name__ == '__main__':
    # Pipeline: build a retweet graph from a tweet dump, find communities
    # with a Girvan-Newman-style edge-removal loop (task A), then classify
    # the remaining users into the two largest communities with naive
    # Bayes over their tweet text (tasks B and C).
    # input_file = './Gamergate.json'
    # input_file = './toy_test/mini_mid_gamergate.json'
    # taskA_output_file = './testoutA.txt'
    # taskB_output_file = './testoutB.txt'
    # taskC_output_file = './testoutC.txt'
    input_file = sys.argv[1]
    taskA_output_file = sys.argv[2]
    taskB_output_file = sys.argv[3]
    taskC_output_file = sys.argv[4]
    tweets = readInput(input_file)
    # Build the retweet graph: an undirected edge links retweeter and
    # original author, weighted by the number of retweets between them.
    G = nx.Graph()
    for tweet in tweets:
        if 'retweeted_status' not in tweet:
            G.add_node(tweet['user']['screen_name'])
        else:
            if G.has_edge(tweet['user']['screen_name'], tweet['retweeted_status']['user']['screen_name']):
                G[tweet['user']['screen_name']][tweet['retweeted_status']['user']['screen_name']]['weight'] += 1
            else:
                G.add_edge(tweet['user']['screen_name'], tweet['retweeted_status']['user']['screen_name'], weight = 1)
    edge_betweenness = nx.edge_betweenness_centrality(G,normalized=False)
    edge_betweenness_sort = sorted(edge_betweenness.items(), key=lambda x:x[1], reverse=True)
    # i = 0
    # Girvan-Newman: repeatedly remove the highest-betweenness edges and
    # remember the partition with the best modularity.
    original_G = copy.deepcopy(G)
    max_modularity = -float('inf')
    m = original_G.size(weight='weight')
    while G.number_of_edges() > 0:
        # print(i, G.number_of_edges(), )
        # i += 1
        edge_betweenness_list = nx.edge_betweenness_centrality(G,normalized=False, weight='weight')
        edge_betweenness_sorted_list = sorted(edge_betweenness_list.items(), key=lambda x:x[1], reverse=True)
        max_edge_betweenness = edge_betweenness_sorted_list[0][1]
        # Remove every edge tied for the maximum betweenness.
        for edge_betweenness in edge_betweenness_sorted_list:
            if edge_betweenness[1] == max_edge_betweenness:
                G.remove_edge(edge_betweenness[0][0], edge_betweenness[0][1])
            else:
                break
        # Modularity of the current partition, computed against the
        # untouched original graph's degrees and total edge weight m.
        tmp_modularity = 0
        for partition in nx.connected_components(G):
            if len(partition) == 1:
                continue
            else:
                for node1 in partition:
                    for node2 in partition:
                        if node1 == node2:
                            continue
                        if not G.has_edge(node1, node2):
                            tmp_modularity += (-original_G.degree(weight='weight')[node1] * original_G.degree(weight='weight')[node2] / (m * 2))
                        else:
                            tmp_modularity += (G[node1][node2]['weight'] - original_G.degree(weight='weight')[node1] * original_G.degree(weight='weight')[node2] / (m * 2))
        cur_modularity = tmp_modularity / (m * 2)
        # print(len([c for c in nx.connected_components(G)]), cur_modularity)
        if cur_modularity > max_modularity:
            max_modularity = cur_modularity
            # print('find larger modularity !!!!!!!!!!!!!!!!!')
            optimal_G = copy.deepcopy(G)
    # i = 1
    # Task A output: the communities of the best-modularity partition,
    # sorted and smallest community first.
    community_list = []
    for partition in nx.connected_components(optimal_G):
        community = list(partition)
        community.sort()
        community_list.append(community)
        # print(i)
        # print('==========')
        # print(partition)
        # print('==========')
        # i+=1
    community_list.sort()
    community_list.sort(key=lambda x: len(x))
    txtA = community_list
    file = open(taskA_output_file, 'w', encoding='utf-8')
    file.write('Best Modularity is: ' + str(max_modularity) + '\n')
    for user_list in txtA:
        i = 0
        for user in user_list:
            if i == 0:
                file.write('\'' +user + '\'')
            else:
                file.write(',\'' +user + '\'')
            i += 1
        file.write('\n')
    file.close()
    # Tasks B/C: the two largest communities become the training classes;
    # every other user is classified by their concatenated tweet text.
    community_A, community_B = community_list[-1], community_list[-2]
    community_A_set = set(community_A)
    community_B_set = set(community_B)
    user_tweets = collections.defaultdict(str)
    for tweet in tweets:
        user_tweets[tweet['user']['screen_name']] += ( ' ' + tweet['text'])
        if 'retweeted_status' in tweet:
            user_tweets[tweet['retweeted_status']['user']['screen_name']] += ( ' ' + tweet['retweeted_status']['text'])
    train_data = []
    train_label = []
    for user in community_A:
        train_data.append(user_tweets[user])
        train_label.append(1)
    for user in community_B:
        train_data.append(user_tweets[user])
        train_label.append(2)
    # Task B: TF-IDF features + multinomial naive Bayes.
    vectorizer = TfidfVectorizer()
    train_data_tfidf = vectorizer.fit_transform(train_data)
    train_label_nparray = np.asarray(train_label)
    clf = MultinomialNB().fit(train_data_tfidf, train_label_nparray)
    test_data = []
    test_data_user = []
    for user in user_tweets.keys():
        if (user not in community_A_set) and (user not in community_B_set):
            test_data.append(user_tweets[user])
            test_data_user.append(user)
    test_data_tfidf = vectorizer.transform(test_data)
    predicted = clf.predict(test_data_tfidf)
    community_A_res = [] + community_A
    community_B_res = [] + community_B
    # cnt_A = cnt_B = 0
    for user, predict_res in zip(test_data_user, predicted):
        if predict_res == 1:
            community_A_res.append(user)
            # cnt_A += 1
        else:
            community_B_res.append(user)
            # cnt_B += 1
    community_A_res.sort()
    community_B_res.sort()
    file = open(taskB_output_file, 'w', encoding='utf-8')
    i = 0
    for user in community_A_res:
        if i == 0:
            file.write('\'' +user + '\'')
        else:
            file.write(',\'' +user + '\'')
        i += 1
    file.write('\n')
    i = 0
    for user in community_B_res:
        if i == 0:
            file.write('\'' +user + '\'')
        else:
            file.write(',\'' +user + '\'')
        i += 1
    file.close()
    # Task C: same classifier but with raw term counts instead of TF-IDF.
    count_vect = CountVectorizer()
    train_data_CV = count_vect.fit_transform(train_data)
    clf_CV = MultinomialNB().fit(train_data_CV, train_label_nparray)
    test_data_CV = count_vect.transform(test_data)
    predicted_CV = clf_CV.predict(test_data_CV)
    community_A_res_CV = [] + community_A
    community_B_res_CV = [] + community_B
    for user, predict_res in zip(test_data_user, predicted_CV):
        if predict_res == 1:
            community_A_res_CV.append(user)
        else:
            community_B_res_CV.append(user)
    community_A_res_CV.sort()
    community_B_res_CV.sort()
    file = open(taskC_output_file, 'w', encoding='utf-8')
    i = 0
    for user in community_A_res_CV:
        if i == 0:
            file.write('\'' +user + '\'')
        else:
            file.write(',\'' +user + '\'')
        i += 1
    file.write('\n')
    i = 0
    for user in community_B_res_CV:
        if i == 0:
            file.write('\'' +user + '\'')
        else:
            file.write(',\'' +user + '\'')
        i += 1
    file.close()
| true |
4eb0654ab8880963d3d5bdcd91dd3ae6f294b8e1 | Python | jtyr/ansible-yaml_list_inventory | /tests/conditions.py | UTF-8 | 12,222 | 2.671875 | 3 | [
"MIT"
] | permissive | import os
import unittest
import yaml
from ansible import constants as C
from yaml_list import InventoryModule
class MyInventoryModule(InventoryModule):
    """InventoryModule variant that pins options to known test values."""

    def get_option(self, key):
        """Always return '_' (overrides the 'optional_key_prefix' option)."""
        return '_'
class MyTestCase(unittest.TestCase):
    """Shared helpers for exercising MyInventoryModule condition evaluation."""

    def _getenvbool(self, name, default):
        """Read env var *name* as a boolean.

        '1'/'yes'/'YES' count as True, any other set value as False, and
        *default* is returned when the variable is unset.
        """
        val = os.getenv(name)
        if val is None:
            ret = default
        elif val in ('1', 'yes', 'YES'):
            ret = True
        else:
            ret = False
        return ret

    def _parse_yaml_file(self, data_file):
        """Parse *data_file* through the inventory plugin's YAML reader.

        :raises Exception: with a descriptive message on YAML errors
        """
        im = MyInventoryModule()
        try:
            data = yaml.safe_load(im._read_yaml_file(data_file))
        except yaml.YAMLError as e:
            raise Exception(
                "Unable parse inventory '%s': %s" % (data_file, e))
        return data

    def _test(self, host, accept=None, ignore=None, grouping=None, expected=True):
        """Assert that *host* is accepted/rejected as *expected*.

        :param host: host dict fed to the condition evaluator
        :param accept: accept-condition list (default: no conditions)
        :param ignore: ignore-condition list (default: no conditions)
        :param grouping: {group: conditions} evaluated for debug logging only
        :param expected: True when the host should be added
        """
        # None-sentinels replace the original mutable default arguments
        # ([] / {}), which are shared between calls in Python.
        accept = [] if accept is None else accept
        ignore = [] if ignore is None else ignore
        grouping = {} if grouping is None else grouping
        C.DEFAULT_DEBUG = self._getenvbool('DEBUG', False)
        C.COLOR_DEBUG = 'normal'
        im = MyInventoryModule()
        if (
                im._eval_conditions(host, accept) and
                not im._eval_conditions(host, ignore, False)):
            im.display.debug('!!! Adding !!!')
            result = True
            for group, conditions in grouping.items():
                # BUG FIX: was self._eval_conditions -- the TestCase has no
                # such method; the evaluator lives on the inventory module.
                if im._eval_conditions(host, conditions):
                    im.display.debug("!!! Adding into group %s" % group)
        else:
            im.display.debug('!!! NOT adding !!!')
            result = False
        im.display.debug('-----------')
        if expected:
            self.assertTrue(result)
        else:
            self.assertFalse(result)
class Test(MyTestCase):
def test_empty(self):
self._test(None, expected=True)
    def test_equal(self):
        """Exact string matches, plus '!' negation, in accept and ignore."""
        host = {
            'name': 'test',
        }
        # Each case pairs accept/ignore condition lists with the expected
        # outcome of _test for the host above.
        conditions = [
            {
                'accept': [
                    {
                        'name': 'test',
                    }
                ],
                'ignore': [],
                'expected': True,
            }, {
                'accept': [],
                'ignore': [
                    {
                        'name': 'test',
                    }
                ],
                'expected': False,
            }, {
                'accept': [
                    {
                        'name': '!test',
                    }
                ],
                'ignore': [],
                'expected': False,
            }, {
                'accept': [],
                'ignore': [
                    {
                        'name': '!test',
                    }
                ],
                'expected': True,
            },
        ]
        for i, c in enumerate(conditions):
            with self.subTest(i=i):
                self._test(
                    host,
                    accept=c['accept'],
                    ignore=c['ignore'],
                    expected=c['expected'])
    def test_regexp(self):
        """Regexp matches ('~' prefix), plus '!~' negation, in accept and ignore."""
        host = {
            'name': 'test',
        }
        conditions = [
            {
                'accept': [
                    {
                        'name': '~^test',
                    }
                ],
                'ignore': [],
                'expected': True,
            }, {
                'accept': [],
                'ignore': [
                    {
                        'name': '~^test',
                    }
                ],
                'expected': False,
            }, {
                'accept': [
                    {
                        'name': '!~^test',
                    }
                ],
                'ignore': [],
                'expected': False,
            }, {
                'accept': [],
                'ignore': [
                    {
                        'name': '!~^test',
                    }
                ],
                'expected': True,
            },
        ]
        for i, c in enumerate(conditions):
            with self.subTest(i=i):
                self._test(
                    host,
                    accept=c['accept'],
                    ignore=c['ignore'],
                    expected=c['expected'])
    def test_ignore_ip_state(self):
        """Hosts are ignored when powered off or missing an IP address."""
        data = [
            {
                'name': 'test',
                'state': 'poweredOn',
                'ip': '1.2.3.4',
                'expected': True,
            }, {
                'name': 'test',
                'state': 'poweredOff',
                'ip': '1.2.3.4',
                'expected': False,
            }, {
                'name': 'test',
                'state': 'poweredOn',
                'ip': None,
                'expected': False,
            }, {
                'name': 'test',
                'state': 'poweredOff',
                'ip': None,
                'expected': False,
            },
        ]
        # Ignore any host without an IP, and any powered-off host.
        ignore = [
            {
                'ip': None,
            }, {
                'state': 'poweredOff'
            }
        ]
        for i, host in enumerate(data):
            with self.subTest(i=i, ip=host['ip'], state=host['state']):
                self._test(host, ignore=ignore, expected=host['expected'])
    def test_ignore_ip_state_guest_group(self):
        """Ignore Windows guests unless the optional ansible group is 'bbb'."""
        data = [
            {
                'name': 'test',
                'ip': '1.2.3.4',
                'state': 'poweredOn',
                'vcenter': {
                    'guest_id': 'windows8Server64Guest',
                },
                'ansible': {
                    'group': 'aaa',
                },
                'expected': False,
            }, {
                'name': 'test',
                'ip': '1.2.3.4',
                'state': 'poweredOn',
                'vcenter': {
                    'guest_id': 'windows8Server64Guest',
                },
                'ansible': {
                    'group': 'bbb',
                },
                'expected': True,
            }, {
                'name': 'test',
                'ip': '1.2.3.4',
                'state': 'poweredOn',
                'vcenter': {
                    'guest_id': 'windows8Server64Guest',
                },
                'ansible': {
                    'group': ['000', 'aaa', 'zzz'],
                },
                'expected': False,
            }, {
                'name': 'test',
                'ip': '1.2.3.4',
                'state': 'poweredOn',
                'vcenter': {
                    'guest_id': 'windows8Server64Guest',
                },
                'ansible': {
                    'group': ['000', 'bbb', 'zzz'],
                },
                'expected': True,
            }, {
                'name': 'test',
                'ip': '1.2.3.4',
                'state': 'poweredOn',
                'vcenter': {
                    'guest_id': 'windows8Server64Guest',
                },
                'expected': False,
            },
        ]
        # The '_' prefix marks 'ansible.group' as an optional key
        # (see MyInventoryModule.get_option above).
        ignore = [
            {
                'ip': None,
            }, {
                'vcenter.guest_id': 'windows8Server64Guest',
                '_ansible.group': '!bbb'
            }, {
                'state': 'poweredOff'
            }
        ]
        for i, host in enumerate(data):
            ag = None
            if 'ansible' in host and 'group' in host['ansible']:
                ag = host['ansible']['group']
            with self.subTest(
                    i=i,
                    ip=host['ip'],
                    state=host['state'],
                    guest_id=host['vcenter']['guest_id'],
                    ansible_group=ag):
                self._test(host, ignore=ignore, expected=host['expected'])
def test_complex_key(self):
tests = [
{
'host': {
'name': 'test',
'vcenter': {
'guest_id': 'windows8Server64Guest',
},
},
'accept': [
{
'vcenter.guest_id': 'windows8Server64Guest',
}
],
'expected': True,
}, {
'host': {
'name': 'test',
'guest_id': 'windows8Server64Guest',
},
'accept': [
{
'vcenter.guest_id': 'windows8Server64Guest',
}
],
'expected': False,
}, {
'host': {
'name': 'test',
'vcenter': {
'nics': [
{
'name': 'eth0',
'mac': '11:22:33:44:55:66',
}, {
'name': 'eth1',
'mac': '22:33:44:55:66:11',
}
],
},
},
'accept': [
{
'vcenter.nics[0].name': 'eth0',
}
],
'expected': True,
}, {
'host': {
'name': 'test',
'vcenter': {
'nics': [
{
'name': 'eth0',
'mac': '11:22:33:44:55:66',
}, {
'name': 'eth1',
'mac': '22:33:44:55:66:11',
}
],
},
},
'accept': [
{
'vcenter.nics[4].name': 'eth4',
}
],
'expected': False,
}, {
'host': {
'name': 'test',
'vcenter': {
'nics': [
{
'name': 'eth0',
'mac': '11:22:33:44:55:66',
}, {
'name': 'eth1',
'mac': '22:33:44:55:66:11',
}
],
},
},
'accept': [
{
'vcenter.nics[0].name': [
'eth0',
'!eth1',
]
}
],
'expected': True,
},
]
for i, t in enumerate(tests):
with self.subTest(i=i):
self._test(
host=t['host'],
accept=t['accept'],
expected=t['expected'])
def test_real(self):
if not self._getenvbool('DEBUG', False):
self.skipTest("No DEBUG defined.")
host_filter = os.getenv('HOST')
data_file = os.getenv('DATA')
source_file = os.getenv('SOURCE')
expected = int(os.getenv('EXPECTED', 1))
if host_filter is None:
raise Exception("No HOST defined!")
if data_file is None:
raise Exception("No DATA defined!")
if source_file is None:
raise Exception("No SOURCE defined!")
data = self._parse_yaml_file(data_file)
source = self._parse_yaml_file(source_file)
host = None
accept = []
ignore = []
if 'accept' in source:
accept = source['accept']
if 'ignore' in source:
ignore = source['ignore']
for h in data:
if 'name' in h and h['name'] == host_filter:
host = h
break
if host is None:
raise Exception("No '%s' found in the data file!" % host_filter)
self._test(host, accept=accept, ignore=ignore, expected=expected)
# Allow running this test module directly; unittest discovers and runs the
# TestCase classes defined above.
if __name__ == '__main__':
    unittest.main()
| true |
b7585722096c0571c24da4fcb3ef78b06039d9b3 | Python | dwangproof/1337c0d3 | /2_Add_Two_Numbers/solution.py | UTF-8 | 2,626 | 4.09375 | 4 | [] | no_license | """
Problem: 2. Add Two Numbers
Url: https://leetcode.com/problems/add-two-numbers/description/
Author: David Wang
Date: 12/26/2017
You are given two non-empty linked lists representing two non-negative integers.
The digits are stored in reverse order and each of their nodes contain a
single digit. Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero,
except the number 0 itself.
Example
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
Explanation: 342 + 465 = 807.
"""
# Definition for singly-linked list.
# # class ListNode(object):
# # def __init__(self, x):
# # self.val = x
# # self.next = None
class ListNode(object):
    """A singly-linked list node holding one digit."""

    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    """Adds two non-negative integers stored as reversed-digit lists."""

    def __init__(self):
        # Head of the result list; set by the first __add_node call.
        self.start_node = None

    def __add_node(self, value, prev_node):
        """Append a node holding ``value % 10`` after ``prev_node``.

        When ``prev_node`` is None the new node becomes the result head.
        Returns the newly created node.
        """
        node = ListNode(value % 10)
        if prev_node is None:
            self.start_node = node
        else:
            prev_node.next = node
        return node

    def addTwoNumbers(self, l1, l2):
        """Return a linked list holding the sum of ``l1`` and ``l2``.

        Digits are stored least-significant first, so the lists are walked
        in lockstep while a carry is propagated.

        Args:
            l1: Head of the first linked list (may be None).
            l2: Head of the second linked list (may be None).

        Returns:
            Head of a new linked list containing the sum, or None when
            both inputs are empty.
        """
        carry = 0
        prev_node = None
        # Keep going while either list has digits left or a carry remains;
        # this single loop replaces the three tail loops of the original.
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            # Integer division: the original used '/', which yields a float
            # under Python 3 and corrupts the carry (e.g. 7 / 10 == 0.7 is
            # truthy, injecting spurious +1s into later digits).
            carry = total // 10
            prev_node = self.__add_node(total, prev_node)
        return self.start_node
| true |
1efcdbd3416ed516c41ee3f225c709ee64051b18 | Python | VeritasOS/krankshaft | /krankshaft/auth.py | UTF-8 | 3,829 | 2.671875 | 3 | [
"MIT"
] | permissive | # TODO fire signal on auth failure?
from . import authn, authz
class Auth(object):
    '''
    Binds a request and centralizes its Authentication and Authorization
    interfaces.

    Testing the object in a boolean context checks that the request is
    authorized (depending on the authorization scheme, it may or may not
    also be authenticated):

        auth = Auth(request)
        if auth:
            obj = ...
            if auth.is_authorized_object(obj):
                # authorized to operate on the object
            else:
                # not authorized for this object
        else:
            # not authorized (but possibly authenticated)

    Optionally, 'authn' may be a list/tuple of Authn instances; the first
    one able to handle the request is selected and bound permanently at
    construction time.
    '''
    AuthnedInterface = authn.AuthnedInterface
    authn = authn.AuthnDjangoBasic()
    authz = authz.AuthzReadonly()

    def __init__(self, request):
        self.authned = None
        self.request = request
        if isinstance(self.authn, (list, tuple)):
            # Bind the first authn that can handle this request (or None).
            self.authn = next(
                (candidate for candidate in self.authn
                 if candidate.can_handle(self.request)),
                None
            )

    def __nonzero__(self):
        return self.is_authorized_request()

    def authenticate(self):
        '''authenticate() -> self.authned

        Process the request and test whether it is authenticated.
        '''
        if not self.authn:
            return None

        credentials = self.authn.authenticate(self.request)
        if not credentials or not self.authn.is_valid(credentials):
            return None

        self.authned = self.AuthnedInterface(credentials)
        return self.authned

    def challenge(self, response):
        '''challenge(response) -> response

        Update an in-flight response (e.g. add HTTP Authenticate headers).
        '''
        if not self.authn:
            return response
        return self.authn.challenge(self.request, response)

    @property
    def id(self):
        '''
        A unique identifier for the request; falls back to the request's
        REMOTE_ADDR when unauthenticated.
        '''
        if not self.authned:
            return 'anon-%s' % self.request.META.get('REMOTE_ADDR', 'noaddr')
        return '%s-%s' % (
            self.authned.name,
            self.authned.id
        )

    def is_authenticated(self):
        '''is_authenticated() -> bool

        Whether the bound request is authenticated.

        note: depends on .authenticate() having been called, otherwise
        always False
        '''
        return bool(self.authned)

    def is_authorized_object(self, obj):
        '''is_authorized_object(obj) -> bool

        Whether the requester may operate on the given object.
        '''
        return self.authz.is_authorized_object(self.request, self.authned, obj)

    def is_authorized_request(self):
        '''is_authorized_request() -> bool

        Whether the request may be processed further.  No guarantee is
        made that .is_authorized_object() will ever be called.
        '''
        return self.authz.is_authorized_request(self.request, self.authned)

    def limit(self, query):
        '''limit(query) -> limited_query

        Apply any applicable authorization limits and return a new query
        restricted to what the requester may see.
        '''
        return self.authz.limit(self.request, self.authned, query)

    @property
    def user(self):
        return self.authned.user if self.authned else None
| true |
953702068f69dda5c0cfa950705df721033ff2a9 | Python | LewisAn/python_repository | /send_email_test.py | UTF-8 | 457 | 2.515625 | 3 | [] | no_license | from poplib import POP3_SSL as pssl
# Quick POP3-over-SSL smoke test: log in, print mailbox stats, dump one
# message.  (Despite the file name it reads mail; nothing is sent.)
# NOTE(review): account and app password are hard-coded below — move them
# to environment variables before sharing this script.
mailbox = pssl("pop.qq.com")
mailbox.user("913248383@qq.com")
mailbox.pass_("gcimhvyjknaxbccg")

# Mailbox summary: message count and total size in bytes.
message_count, mailbox_size = mailbox.stat()
print("There are {} messages in total".format(message_count))
print("There are {} bytes in total".format(mailbox_size))
print("Client list", mailbox.list())

# Fetch message #2; retr() returns (response, list-of-raw-lines, octets).
second_message = mailbox.retr(2)
print("This is an email:")
for raw_line in second_message[1]:
    print("_______{}".format(raw_line))
print("\r\n", mailbox.quit())
58c994fc7a332fcc3e8337c33cedc12ad70f933d | Python | thoughteer/edera | /edera/lockers/directory.py | UTF-8 | 1,904 | 2.703125 | 3 | [
"MIT"
] | permissive | import contextlib
import errno
import logging
import os
import os.path
import sqlite3
import edera.helpers
from edera.exceptions import LockAcquisitionError
from edera.locker import Locker
class DirectoryLocker(Locker):
    """
    A directory-level locker.

    A directory-level lock works as an inter-process mutex.
    It creates a temporary SQLite3 database and locks it using "BEGIN EXCLUSIVE".
    It is a good practice to use a temporary directory for them (like /tmp).

    Once the owning process dies, the lock is automatically released.
    Don't forget to clean the directory from time to time!

    Attributes:
        path (String) - the directory path (absolute)
    """

    def __init__(self, path):
        """
        Args:
            path (String) - a base path for lock files

        The base path will be created if doesn't exist.
        """
        self.path = os.path.abspath(path)

    def __repr__(self):
        return "<%s: path %r>" % (self.__class__.__name__, self.path)

    @contextlib.contextmanager
    def lock(self, key, callback=None):
        # Create the lock directory if needed, tolerating a concurrent mkdir.
        try:
            os.makedirs(self.path)
        except OSError as error:
            if error.errno == errno.EEXIST and os.path.isdir(self.path):
                pass
            else:
                raise
        lock_file_path = os.path.join(self.path, edera.helpers.sha1(key))
        logging.getLogger(__name__).debug("Lock file: %s", lock_file_path)
        connection = None
        try:
            connection = sqlite3.connect(lock_file_path, timeout=0.2)
            connection.execute("BEGIN EXCLUSIVE").fetchone()
        except sqlite3.OperationalError:
            # Close the half-open connection before reporting failure; the
            # original leaked it when "BEGIN EXCLUSIVE" timed out on a
            # database already locked by another process.
            if connection is not None:
                connection.close()
            raise LockAcquisitionError(key)
        try:
            yield
        finally:
            # Remove the file first (while the exclusive lock is still
            # held), then release the lock by closing the connection.
            try:
                os.remove(lock_file_path)
            except OSError:
                pass
            connection.close()
| true |
c5c4c31026aba9b2d17341074f7f0227f2d164bd | Python | BrayanSolanoF/EjerciciosPython | /quiz.py | UTF-8 | 376 | 3 | 3 | [] | no_license |
def tres_cinco(N):
    """Return (a, b) with 3*a + 5*b == N for an integer N >= 8, else "Error".

    Every integer N >= 8 is expressible as a non-negative combination of
    3s and 5s (Chicken McNugget / Frobenius theorem), so valid input
    always yields a pair.  The original implementation used a wrong sign
    (5 * -b) and unreachable branches, so it never found a solution.
    """
    if isinstance(N, int) and N >= 8:
        return tres_cinco_aux(N, 0, 0, 0)
    return "Error"


def tres_cinco_aux(N, a, b, resultado):
    """Search helper: smallest b >= 0 with (N - 5*b) a non-negative
    multiple of 3; returns the pair (a, b).

    The extra parameters 'a' and 'resultado' are kept only for signature
    compatibility with the original; 'b' is the starting search value.
    """
    while 5 * b <= N:
        resultado = N - 5 * b
        if resultado % 3 == 0:
            return resultado // 3, b
        b += 1
    return "Error"
| true |
d0a539b7c4818abc90e25d5138073ff94c025f16 | Python | kr-MATAGI/coursera | /3-NLP_with_Sequence_Models/Week2/Assignment/Deep_N-grams/train_model.py | UTF-8 | 2,453 | 2.8125 | 3 | [] | no_license | from trax.supervised import training
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: train_model
def train_model(model, data_generator, batch_size=32, max_length=64, lines=lines, eval_lines=eval_lines, n_steps=1, output_dir='model/'):
    """Function that trains the model

    Args:
        model (trax.layers.combinators.Serial): GRU model.
        data_generator (function): Data generator function.
        batch_size (int, optional): Number of lines per batch. Defaults to 32.
        max_length (int, optional): Maximum length allowed for a line to be processed. Defaults to 64.
        lines (list, optional): List of lines to use for training. Defaults to lines.
        eval_lines (list, optional): List of lines to use for evaluation. Defaults to eval_lines.
        n_steps (int, optional): Number of steps to train. Defaults to 1.
        output_dir (str, optional): Relative path of directory to save model. Defaults to "model/".

    Returns:
        trax.supervised.training.Loop: Training loop for the model.
    """
    ### START CODE HERE (Replace instances of 'None' with your code) ###
    bare_train_generator = data_generator(batch_size, max_length, lines)
    infinite_train_generator = itertools.cycle(bare_train_generator)

    bare_eval_generator = data_generator(batch_size, max_length, eval_lines)
    infinite_eval_generator = itertools.cycle(bare_eval_generator)

    train_task = training.TrainTask(
        # Fixed: the bare (finite) generator was passed here before, so
        # training would exhaust the data after one pass instead of
        # cycling over it indefinitely as TrainTask expects.
        labeled_data=infinite_train_generator,
        loss_layer=tl.CrossEntropyLoss(),
        optimizer=trax.optimizers.Adam(learning_rate=0.0005)
    )

    eval_task = training.EvalTask(
        labeled_data=infinite_eval_generator,  # infinite eval data generator
        metrics=[tl.CrossEntropyLoss(), tl.Accuracy()],
        n_eval_batches=3  # For better evaluation accuracy in reasonable time
    )

    training_loop = training.Loop(model,
                                  train_task,
                                  eval_task=eval_task,
                                  output_dir=output_dir)

    training_loop.run(n_steps=n_steps)
    ### END CODE HERE ###

    # We return this because it contains a handle to the model, which has the weights etc.
    return training_loop
| true |
68f6e88a305b7c9135441f9d695b5e00bf8f201c | Python | jmaroeder/adventofcode2018 | /adventofcode2018/day08.py | UTF-8 | 1,605 | 3.265625 | 3 | [] | no_license | import contextlib
import re
from pathlib import Path
from typing import Set, MutableSet, Iterable, MutableMapping, MutableSequence, Sequence, Tuple
class Node:
def __init__(self):
self.children: MutableSequence[Node] = []
self.metadata: MutableSequence[int] = []
def parse_nodes(numbers: Sequence[int]) -> Tuple[Node, int]:
node = Node()
number_of_children = numbers[0]
number_of_metadata = numbers[1]
offset = 2
for _ in range(number_of_children):
child, chopped = parse_nodes(numbers[offset:])
node.children.append(child)
offset += chopped
for i in range(offset, offset + number_of_metadata):
node.metadata.append(numbers[i])
return node, offset + number_of_metadata
def sum_metadata(root: Node) -> int:
return sum(root.metadata) + sum(sum_metadata(child) for child in root.children)
def value_of_node(root: Node) -> int:
if not root.children:
return sum(root.metadata)
ret = 0
for index in root.metadata:
if index == 0:
# skip 0
continue
with contextlib.suppress(IndexError):
# skip missing nodes
ret += value_of_node(root.children[index - 1])
return ret
def part1(x: str) -> int:
root, _ = parse_nodes([int(y) for y in x.split()])
return sum_metadata(root)
def part2(x: str) -> int:
root, _ = parse_nodes([int(y) for y in x.split()])
return value_of_node(root)
if __name__ == '__main__':
puzzle_input = Path('day08.txt').read_text()
print(part1(puzzle_input))
print(part2(puzzle_input))
| true |
ba4e97cc4c2232af6cefc44bcc6afc011c001b32 | Python | pblackman/NN_calibration | /scripts/resnet_birds_cars/load_data_cars.py | UTF-8 | 4,260 | 3.09375 | 3 | [
"MIT"
] | permissive | # Loading in Stanford Cars Dataset data
import scipy.io
import numpy as np
from os import listdir
from os.path import isfile, join
from PIL import Image
# Paths to files, change if necessary
TEST_LABELS_PATH = '../../data/data_cars/cars_test_annos_labels.mat'
TRAIN_LABELS_PATH = '../../data/data_cars/cars_train_annos.mat'
TRAIN_DATA_PATH = "../../data/data_cars/cars_train/" # Folder full of images
TEST_DATA_PATH = "../../data/data_cars/cars_test/"
def load_img(path, new_size = 256):
"""
Loads in an image, and converts it so its sorter side will match to new side
params:
path: (string) location to the image
new_size: (int) the size of the image's shorter side
returns:
img_mat (nd.array) image matrix with shape of (width, height, channels)
"""
im = Image.open(path)
if im.size[0] < im.size[1]:
size_perc = new_size/im.size[0]
else:
size_perc = new_size/im.size[1]
size = (int(round(im.size[0]*size_perc, 0)), int(round(im.size[1]*size_perc, 0))) # New size of the image
im = im.resize(size, Image.ANTIALIAS)
rgb_im = im.convert('RGB') # Some images are in Grayscale
return np.array(rgb_im, dtype="float32")
def center_crop(img_mat, size = (224, 224)):
"""
Center Crops an image with certain size, image must be bigger than crop size (add check for that)
params:
img_mat: (3D-matrix) image matrix of shape (width, height, channels)
size: (tuple) the size of crops (width, height)
returns:
img_mat: that has been center cropped to size of center crop
"""
w,h,c = img_mat.shape
start_h = h//2-(size[1]//2) # Size[1] - h of cropped image
start_w = w//2-(size[0]//2) # Size[0] - w of cropepd image
return img_mat[start_w:start_w+size[0],start_h:start_h+size[1], :]
def load_data_cars(size = 256, size_crop = (224, 224)):
"""
Main function needed to load in cars (needs rather large amount of memory)
Params:
size - image converted so its shorter side is with given size
size_crop - test images center cropped into "size_crop"
Returns:
((x_train, y_train), (x_test, y_test)), train and test images with class labels.
"""
# Path to data - change according your paths
test_labels = scipy.io.loadmat(TEST_LABELS_PATH) # Labels saved as matlab mat-s
train_labels = scipy.io.loadmat(TRAIN_LABELS_PATH)
# Get labels from Matlab matrix
test_labels = np.array(test_labels.get('annotations'))
train_labels = np.array(train_labels.get('annotations'))
# ### Get test and train labels
# Length of test and train sets
len_test = len(test_labels[0])
len_train = len(train_labels[0])
y_test = np.empty(len_test, dtype="int16")
y_train = np.empty(len_train, dtype="int16")
# Test labels
for i in range(len_test):
y_test[i] = test_labels[0][i][4][0][0] # Get labels out of annotations
# Train labels
for i in range(len_train):
y_train[i] = train_labels[0][i][4][0][0]
# Labels start from 1, but we want it to be 0, so we could use 1-hot vector
y_test -= 1 # min label zero, max 195
y_train -= 1
### Load in images as numpy array
path = TRAIN_DATA_PATH
train_imgs = [f for f in listdir(path) if isfile(join(path, f))]
path2 = TEST_DATA_PATH
test_imgs = [f for f in listdir(path2) if isfile(join(path2, f))]
# Fill in x_train array with train data
x_train = np.empty((len_train, size, size, 3), dtype="float32")
for i, img_path in enumerate(train_imgs):
img_mat = load_img(TRAIN_DATA_PATH + img_path, new_size = size) # First load and rescale image
x_train[i] = center_crop(img_mat, size = (size, size)) # Second center crop the scaled image
# Fill in x_test array with test data
x_test = np.empty((len_test, size_crop[0], size_crop[1], 3), dtype="float32")
for i, img_path in enumerate(test_imgs):
img_mat = load_img(TEST_DATA_PATH + img_path, new_size = size) # First scale to 256-by-x
x_test[i] = center_crop(img_mat, size = size_crop) # Crop center of the image
return ((x_train, y_train), (x_test, y_test))
| true |
68d8344058c409b30c3f3c2e2d07452588e1d725 | Python | abinashp437/OCR-summariser | /text_summarisation.py | UTF-8 | 1,461 | 2.9375 | 3 | [] | no_license | import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
import sys
# import nltk
# nltk.download('punkt')
# text pre-processing
def preprocessing(data):
text = sent_tokenize(data)
df = pd.DataFrame(text, columns=['sent'])
stop = stopwords.words('english')
df['sent'] = df['sent'].apply(lambda x: ' '.join([word for word in x.split() if word not in (stop)]))
return text, df
# summarisation function
def summary(text, df) :
vectorizer = TfidfVectorizer(max_df = 0.5, min_df = 2, smooth_idf = True)
vectorizer.fit(df['sent'])
vector = vectorizer.transform(df['sent'])
tfidf = vector.toarray()
tfidf = tfidf.T
svd_model = TruncatedSVD(n_components = -(-len(text)//2),algorithm='randomized', n_iter=10, random_state=22)
svd_model.fit(tfidf)
svc = svd_model.components_
svc = svc.tolist()
summary = ''
p_pos = 0
for _ in range(len(svc)):
m = max(svc[_])
# print(m)
pos = svc[_].index(m)
# print(pos)
# print(text[pos])
if pos > p_pos:
summary = summary + ' ' + text[pos]
else:
summary = text[pos] + ' ' + summary
p_pos = pos
return summary
data = sys.argv[1]
data = data.replace('\n', ' ')
text,df = preprocessing(data)
summary = summary(text, df)
print(summary) | true |
0022d38a74fbd1fbe9045775d6851df27fa625cc | Python | minq92/Tutorial.code | /Python/처음 시작하는 파이썬(Introducing Python)/Chap_06_07.py | UTF-8 | 2,882 | 3.203125 | 3 | [] | no_license | # Chap 6
from collections import namedtuple
#Chap 7
import unicodedata as ud
def unicode_test(value):
name = ud.name(value)
value2 = ud.lookup(name)
print('value="%s", name="%s", value2 = "%s"' % (value, name, value2))
unicode_test('A')
unicode_test('$')
unicode_test('\u00a2')
unicode_test('\u20ac')
unicode_test('\u2603')
place = 'Café'
place
ud.name('\u00e9')
ud.lookup('LATIN SMALL LETTER E WITH ACUTE') # E WITH ACUTE, LATIN SMALL LETTER
place = 'caf\u00e9'
place
place = 'caf\N{LATIN SMALL LETTER E WITH ACUTE}'
place
u_umlaut = '\N{LATIN SMALL LETTER U WITH DIAERESIS}'
u_umlaut
drink = 'Gew' + u_umlaut + 'rztraminer'
print('Now I can finally have my', drink, 'in a', place)
len('$')
len('\U0001f47b')
'\U0001f47b'
snowman = '\u2603'
len(snowman)
ds = snowman.encode('utf-8')
len(ds)
ds
snowman.encode('ascii', 'ignore')
snowman.encode('ascii', 'replace')
snowman.encode('ascii', 'backslashreplace')
snowman.encode('ascii', 'xmlcharrefreplace')
# ascii utf-8 latin-1 cp-1252 unicode-escape(\uxxxx or \Uxxxxxxxx)
place = 'caf\u00e9'
place
type(place)
place_bytes = place.encode('utf-8')
place_bytes
type(place_bytes)
place2 = place_bytes.decode('utf-8')
place2
import re
result = re.match('You', 'Young Frankenstein')
result
youpattern = re.compile('You')
result = youpattern.match('Young Frankenstein')
help(re)
# search() findall() split() sub()
source = 'Young Frankenstein'
m = re.match('You', source)
if m:
print(m.group())
m = re.match('Frank', source)
if m:
print(m.group())
bekgoos = '알게머람?'
m = re.search('Frank',source)
if m:
print(m.group())
m = re.match('.*Frank',source)
if m:
print(m.group())
m = re.findall('n', source)
m
print('Found', len(m), 'matches')
m = re.findall('n.', source)
m
m = re.findall('n.?', source)
m
m = re.split('n', source)
m
m = re.sub('n', '?', source)
m
'''
\d 숫자
\D 비숫자
\w 알파벳 문자, '_'
\W 비알파벳 문자
\s 공백 문자
\S 비공백 문자
\b 단어 경계(\w와 \W 또는 \W와 \w 사이의 경계)
\B 비단어 경계
'''
import string
printable = string.printable
len(printable)
printable[0:50]
printable[50:]
re.findall('\d', printable)
re.findall('\w', printable)
re.findall('\s', printable)
x = 'abc' + '-/*' + '\u00ea' + '\u0115'
re.findall('\w', x)
source = '''I wish I may, I wish I might
Have a dish of fish tonight.'''
re.findall('wish', source)
re.findall('wish|fish', source)
re.findall('^wish', source)
re.findall('^I wish', source)
re.findall('.*fish tonight.$', source)
re.findall('fish tonight\.$', source)
re.findall('[wf]ish', source)
re.findall('[wfs]+', source)
re.findall('ght\W', source)
re.findall('I (?=wish)', source)
re.findall('(?<=I) wish', source)
re.findall(r'\bfish', source)
m = re.search(r'(. dish\b).*(\bfish)', source)
m.group()
m.groups()
m = re.search(r'(?P<DISH>. dish\b).*(?P<FISH>\bfish)', source)
m.group()
m.groups()
m.group('DISH')
m.group('FISH')
| true |
a6eee1df9e7b04ec30db1b28cfbb113e7b8a6999 | Python | sevskii111/ravn-norm-pokaz | /ravn.py | UTF-8 | 753 | 2.640625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from shared import *
def ptheor(a, b, m):
return np.full(m, 1 / m)
def x(a, b, r):
return np.random.uniform(a, b, r)
def M(a, b):
return (a + b) / 2
def D(a, b):
return (b - a) ** 2 / 12
c = int(input('m:'))
a = float(input('a:'))
b = float(input('b:'))
df = pd.DataFrame(columns=['N', 'M', 'm', '|M - m|', 'D', 'g', '|D - g|', 'δ'])
while True:
N = int(input("N:"))
xs = x(a, b, N)
print(xs[:q])
mean, std, mc, gc = M(a, b), D(a, b), m(xs), g(xs)
pempc = pemp(xs, a, b, c)
ptheorc = ptheor(a, b, c)
xi = Xi(pempc, ptheorc)
df.loc[len(df)] = [N, mean, mc, np.abs(mean - mc), std, gc, np.abs(std - gc), xi]
print(df) | true |
7f71d9d0096ce72921957ffe180ae9c66d41bab2 | Python | GermanNoob/pybotvac | /pybotvac/account.py | UTF-8 | 3,966 | 2.625 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | """Account access and data handling for beehive endpoint."""
import logging
import os
import shutil
import requests
from .exceptions import NeatoRobotException
from .robot import Robot
from .session import Session
_LOGGER = logging.getLogger(__name__)
class Account:
"""
Class with data and methods for interacting with a pybotvac cloud session.
:param email: Email for pybotvac account
:param password: Password for pybotvac account
"""
def __init__(self, session: Session):
"""Initialize the account data."""
self._robots = set()
self.robot_serials = {}
self._maps = {}
self._persistent_maps = {}
self._session = session
@property
def robots(self):
"""
Return set of robots for logged in account.
:return:
"""
if not self._robots:
self.refresh_robots()
return self._robots
@property
def maps(self):
"""
Return set of userdata for logged in account.
:return:
"""
self.refresh_maps()
return self._maps
def refresh_maps(self):
"""
Get information about maps of the robots.
:return:
"""
for robot in self.robots:
resp2 = self._session.get("users/me/robots/{}/maps".format(robot.serial))
self._maps.update({robot.serial: resp2.json()})
def refresh_robots(self):
"""
Get information about robots connected to account.
:return:
"""
resp = self._session.get("users/me/robots")
for robot in resp.json():
try:
robot_object = Robot(
name=robot["name"],
vendor=self._session.vendor,
serial=robot["serial"],
secret=robot["secret_key"],
traits=robot["traits"],
endpoint=robot["nucleo_url"],
)
self._robots.add(robot_object)
except NeatoRobotException:
_LOGGER.warning("Your robot %s is offline.", robot["name"])
continue
self.refresh_persistent_maps()
for robot in self._robots:
robot.has_persistent_maps = robot.serial in self._persistent_maps
@staticmethod
def get_map_image(url, dest_path=None, file_name=None):
"""
Return a requested map from a robot.
:return:
"""
try:
image = requests.get(url, stream=True, timeout=10)
if dest_path:
image_url = url.rsplit("/", 2)[1] + "-" + url.rsplit("/", 1)[1]
if file_name:
image_filename = file_name
else:
image_filename = image_url.split("?")[0]
dest = os.path.join(dest_path, image_filename)
image.raise_for_status()
with open(dest, "wb") as data:
image.raw.decode_content = True
shutil.copyfileobj(image.raw, data)
except (
requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
) as ex:
raise NeatoRobotException("Unable to get robot map") from ex
return image.raw
@property
def persistent_maps(self):
"""
Return set of persistent maps for logged in account.
:return:
"""
self.refresh_persistent_maps()
return self._persistent_maps
def refresh_persistent_maps(self):
"""
Get information about persistent maps of the robots.
:return:
"""
for robot in self._robots:
resp2 = self._session.get(
"users/me/robots/{}/persistent_maps".format(robot.serial)
)
self._persistent_maps.update({robot.serial: resp2.json()})
| true |
d90748655a99237396b7cc8b297c8f4522c7ee02 | Python | aishwaryaprasher/isha | /june28.py | UTF-8 | 914 | 2.5625 | 3 | [] | no_license | from tkinter import *
from tkinter.filedialog import askopenfile
def cmd1():
lb1.configure(text="xyz")
def cmd2():
a=askopenfile()
root = Tk()
menu=Menu(root)
root.config(menu=menu) #root ka menu bar new menu assign kia h
filemenu=Menu(menu)
menu.add_cascade(label='file' , menu=filemenu)
filemenu.add_command(label='New' , command=cmd1)
filemenu.add_command(label='open',command=cmd2)
filemenu.add_separator()
filemenu.add_command(label='exit', command=root.quit)
lb1=Label(root,text="hello")
lb1.pack()
mainloop()
from tkinter import*
main=Tk()
ourMessage='this is our Message '
messageVar=Message(main,text=ourMessage)
messageVar.config(bg='blue')
main.mainloop()
messageVar.pack()
from tkinter import*
master=Tk()
w=Scale(master,from_=0,to=42)
w.pack()
w=Scalew=Scale(master,from_=0,to=200,orient=HORIZONTAL)
w.pack()
mainloop()
from tkinter import*
master=Tk()
w=Scale(master,from_=0,to=42)
| true |
acb3f64eee45c6c21b2c847d4e01ad0b919e6e14 | Python | bvermeulen/Django | /update_currencies.py | UTF-8 | 2,555 | 2.78125 | 3 | [
"MIT"
] | permissive | ''' update currency is meant as a cron job to update the currencies in the database
used in howdimain for table stock_currency. It is not using the Django ORM
but directly with sql.
'''
from decouple import config
import requests
import psycopg2
from howdimain.utils.plogger import Logger
logformat = '%(asctime)s:%(levelname)s:%(message)s'
Logger.set_logger(config('LOG_FILE'), logformat, 'INFO')
logger = Logger.getlogger()
class UpdateCurrencies:
host = 'localhost'
db_port = config('DB_PORT')
db_user = config('DB_USER')
db_user_pw = config('DB_PASSWORD')
database = config('DB_NAME')
access_key = config('access_key_currency')
# forex_url = 'http://api.currencylayer.com/live'
forex_url = 'http://apilayer.net/api/live'
@classmethod
def update_currencies(cls):
logger.info(f'update currencies using {cls.forex_url}')
connect_string = f'host=\'{cls.host}\' port=\'{cls.db_port}\' '\
f'dbname=\'{cls.database}\' user=\'{cls.db_user}\' '\
f'password=\'{cls.db_user_pw}\''
connection = psycopg2.connect(connect_string)
cursor = connection.cursor()
params = {'access_key': cls.access_key}
forex_dict = {}
try:
res = requests.get(cls.forex_url, params=params)
if res:
forex_dict = res.json().get('quotes', {})
else:
logger.info(f'connection error: {cls.forex_url} {params}')
return
except requests.exceptions.ConnectionError:
logger.info(f'connection error: {cls.forex_url} {params}')
return
if forex_dict:
sql_string = ('SELECT currency, usd_exchange_rate FROM stock_currency '
' ORDER BY currency;')
cursor.execute(sql_string)
for currency in cursor.fetchall():
usd_exchange_rate = forex_dict.get('USD' + currency[0], '')
if usd_exchange_rate:
sql_string = (f'UPDATE stock_currency SET '
f'usd_exchange_rate=\'{usd_exchange_rate}\' WHERE '
f'currency=\'{currency[0]}\';')
cursor.execute(sql_string)
else:
logger.info(f'unable to get currency data for {cls.forex_url} {params}')
connection.commit()
cursor.close()
connection.close()
if __name__ == '__main__':
uc = UpdateCurrencies()
uc.update_currencies()
| true |
9621f72d70c49a3f8e0afed4cf6e71f894c6ee69 | Python | anyatran/school | /CG/SciPy/file_append_1.py | UTF-8 | 1,160 | 3.625 | 4 | [] | no_license | """
Program name: file_append_1.py
Objective:Write multiple lines to a file.
Keywords: file write, append, create, open
============================================================================79
Explanation: NOTE: Once there is data in a file you can add new data onto
the end ("a"=append) using FILE = open(filename,"a")
Author: Mike Ohlson de Fine
"""
# file_append_1.py
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Open an existing file and add (append) data to it.
filename_1 = "/constr/test_write_1.dat"
filly = open(filename_1,"a") # Open a file in append mode
filly.write("\n")
filly.write("This is number four and he has reached the door")
for i in range(0,5):
filename_2 = "/constr/test_write_2.dat"
filly = open(filename_2,"a") # Create a file in append mode
filly.write("This is number five and the cat is still alive")
filename_3 = "/constr/test_write_2.dat"
filly = open(filename_3,"w") # Create a file in write mode
# The command below overwrites previous data - "w" is really "overwrite"
filly.write("This is number six and they cannot find the fix")
| true |
295e1387f8a170de09d22c959c5437d3477759e6 | Python | sfade070/keras_min | /custom_layers/pooling.py | UTF-8 | 3,119 | 3.109375 | 3 | [] | no_license | import numpy as np
from numpy.lib.stride_tricks import as_strided
def pool2d(a, kernel_size, stride, padding, pool_mode='max'):
"""
2D Pooling
Parameters:
a: input 4D array
a.shape = (D,H,W,C)
kernel_size: int, the size of the window
stride: int, the stride of the window
padding: int, implicit zero paddings on both sides of the input
pool_mode: string, 'max' or 'avg'
"""
# Padding
a = np.pad(a, padding, mode='constant')
# Window view of a
output_shape = (
a.shape[0],
(a.shape[1] - kernel_size) // stride + 1,
(a.shape[2] - kernel_size) // stride + 1,
a.shape[3]
)
kernel_size = (kernel_size, kernel_size)
a_w = as_strided(a,
shape=output_shape + kernel_size,
strides=(a.strides[0], stride * a.strides[1], stride * a.strides[2], a.strides[3]) + a.strides[1:3]
)
a_w = a_w.reshape(-1, *kernel_size)
# Return the result of pooling
if pool_mode == 'max':
return a_w.max(axis=(1, 2)).reshape(output_shape)
elif pool_mode == 'avg':
return a_w.mean(axis=(1, 2)).reshape(output_shape)
######################################################################
# test : predictions & run_time
######################################################################
if __name__ == "__main__":
    # Benchmark: compare this pool2d against Keras MaxPooling2D on random
    # data, both for numeric agreement and for average run time.
    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # to ignore warning errors
    import time
    import tensorflow.keras.backend as keras
    import tensorflow.keras.layers as layers
    A = np.random.randn(3, 200, 200, 32)
    avg_time = [0, 0]
    outs = [[], []]
    x = keras.constant(A)
    # NOTE(review): Keras uses padding="same" while pool2d below is called
    # with padding=0 — for even input sizes the outputs happen to line up,
    # but the small "difference of predictions" is expected float noise.
    pool2d_keras = layers.MaxPooling2D(pool_size=(2, 2), strides=None, padding="same", input_shape=x.shape)
    n_simulations = 10
    for _ in range(n_simulations):
        # Keras
        t1 = time.time()
        o1 = pool2d_keras(x)
        avg_time[0] += (time.time() - t1)/n_simulations
        o1 = o1.numpy()
        outs[0].append(o1)
        # our methods
        t1 = time.time()
        o2 = pool2d(A, kernel_size=2, stride=2, padding=0, pool_mode='max')
        avg_time[1] += (time.time() - t1)/n_simulations
        outs[1].append(o2)
    print("difference of predictions ", [(o1 - o2).sum() for o1, o2 in zip(*outs)])
    print("the average run time of keras: ", avg_time[0], "the average run time of our implementation: ", avg_time[1])
    print('Ratio speed: (our_implementation/keras)', avg_time[1] / avg_time[0])
    #################################################
    # result
    #################################################
    # difference of predictions [2.0189942075574525e-05, 2.0189942075574525e-05, 2.0189942075574525e-05, 2.0189942075574525e-05, 2.0189942075574525e-05, 2.0189942075574525e-05, 2.0189942075574525e-05, 2.0189942075574525e-05, 2.0189942075574525e-05, 2.0189942075574525e-05]
    # the average run time of keras:  0.0027794837951660156 the average run time of our implementation:  0.055673837661743164
    # Ratio speed: (our_implementation/keras) 20.030279636301252
| true |
b2aeaae5140ff994403520117626a09427e9890a | Python | corne12345/Project-_Euler | /005.py | UTF-8 | 209 | 3.234375 | 3 | [] | no_license | result = 2520
prime = 2
# `result` starts at 2520 (set above). Scan divisors 2..19: whenever one
# fails, jump to the next multiple of 2520 and restart the scan. The first
# `result` that survives to prime == 20 is the smallest number evenly
# divisible by every integer from 1 to 20.
while prime != 20:
    if result % prime == 0:
        prime += 1
    else:
        result += 2520
        prime = 2
print(result)
f8507bdd97f930da79515300974c43ead4d2ae45 | Python | ykravtsow/qa | /DZ3/src/test_square.py | UTF-8 | 470 | 3.15625 | 3 | [] | no_license | import pytest
import sys
sys.path.append(".")
from square import Square
from figure import Figure
# Fixtures shared by all tests: a 40-unit square and a generic figure.
S = Square('my square', 40)
F = Figure('test figure')
def test_square_area():
    # 40 * 40
    assert S.area == 1600
def test_square_perimeter():
    # 4 * 40
    assert S.perimeter == 160
def test_square_angles():
    assert S.angles == 4
def test_square_name():
    assert S.name == 'my square'
def test_square_figure_addition():
    # add_square presumably sums the two areas (1600 + 2000) — verify
    # against the Square implementation.
    F.area = 2000
    assert S.add_square(F) == 3600
| true |
79f6213293a7c1ee87c2f4dbec87fbf0754b168a | Python | ab41j1t4000/software_stuff | /Python/practice/Webscraper/flipkart_scrapper.py | UTF-8 | 758 | 2.609375 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import pandas as pd
# Scrape laptop name/price/rating from a Flipkart listing page and dump
# the result to flipkart.csv.
# NOTE(review): the CSS class names ('_31qSD5', '_3wU53n', ...) are
# obfuscated and change whenever Flipkart redeploys; also, if any card is
# missing a rating, `rating` is None and `.text` raises AttributeError.
products = []
prices = []
ratings = []
content = requests.get("https://www.flipkart.com/laptops/~buybac+k-guarantee-on-laptops-/pr?sid=6bo%2Cb5g&uniq")
response = content.content
# content = driver.page_source
soup = BeautifulSoup(response,"html.parser")
for a in soup.findAll('a',href=True,attrs={'class':'_31qSD5'}):
    name = a.find('div',attrs={'class':'_3wU53n'})
    price = a.find('div',attrs={'class':'_1vC4OE _2rQ-NK'})
    rating = a.find('div',attrs={'class':'hGSR34'})
    products.append(name.text)
    prices.append(price.text)
    ratings.append(rating.text)
df = pd.DataFrame({'Product Name':products, 'Price':prices,'Ratings':ratings})
df.to_csv('flipkart.csv')
print(df)
| true |
5ec149f6a82ea4532ac0bdc6ffee1ce9a771a2f6 | Python | ruhanjot/DistributedReplays | /backend/utils/global_functions.py | UTF-8 | 960 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | import logging
from flask import Flask, g
from backend.utils.checks import get_checks
from backend.database.objects import Player
logger = logging.getLogger(__name__)
def create_jinja_globals(app: Flask, global_object):
    """Register permission flags and template helpers on the Jinja env.

    Exposes isAdmin/isAlpha/isBeta globals (from get_checks), the `pop`
    helper as a global, and `debug` as a filter.
    """
    is_admin, is_alpha, is_beta = get_checks(global_object)
    app.jinja_env.globals.update(isAdmin=is_admin)
    app.jinja_env.globals.update(isAlpha=is_alpha)
    app.jinja_env.globals.update(isBeta=is_beta)
    app.jinja_env.globals.update(pop=pop)
    app.jinja_env.filters.update(debug=debug)
def pop(list):
    """Jinja template helper: remove and return the last element of *list*.

    Mutates the list in place (parameter name kept for template callers,
    even though it shadows the builtin).
    """
    # list.pop() with no argument already removes the final element.
    return list.pop()
def debug(text):
    """Jinja filter: log *text* at WARNING level and render nothing."""
    logger.warning(str(text))
    # Return an empty string so the filter leaves no trace in the template.
    return ''
def get_current_user_id(player_id=None) -> str:
    """Return *player_id* if given, otherwise the logged-in user's platform id.

    NOTE(review): despite the `-> str` annotation, this implicitly returns
    None when no user is logged in (the exception is only logged) — callers
    must handle a None result.
    """
    if player_id is not None:
        return player_id
    try:
        return UserManager.get_current_user().platformid
    except Exception as e:
        logger.error(e)
class UserManager:
    """Thin accessor for the request-scoped authenticated user."""
    @staticmethod
    def get_current_user() -> Player:
        # Flask's application-context global `g` is populated elsewhere
        # (presumably by an auth middleware) — raises if `user` is unset.
        return g.user
| true |
4cf19bf55561ea9598cd740a3b984f1a2af34bd6 | Python | AI-Jiny/Python-Practice | /Problem/03_for문/01_구구단.py | UTF-8 | 95 | 3.6875 | 4 | [] | no_license | a = input()
# Print the multiplication table for the number read above.
# Convert the input once, before the loop, instead of re-converting on
# every iteration (int(int(x)) was a harmless but wasteful no-op).
a = int(a)
for i in range(1, 10):
    print("{} * {} = {}".format(a, i, i * a))
7891e9e63f44147ea65185c8aa2a7162c4003997 | Python | daniel-reich/ubiquitous-fiesta | /CzrTZKEdfHTvhRphg_11.py | UTF-8 | 1,082 | 2.78125 | 3 | [] | no_license |
def gcd(d,n):
    """Greatest common divisor of d and n via Euclid's algorithm.

    n must be non-zero; argument order does not matter for the result.
    """
    rem = d % n
    while rem != 0:
        d, n = n, rem
        rem = d % n
    return n
def reducefrac(frac_p):
    """Reduce a fraction string "num/den" to lowest terms.

    Repeatedly divides numerator and denominator by their gcd until they
    are coprime, then rebuilds the "num/den" string.
    """
    num_s, den_s = frac_p.split('/')
    while gcd(int(den_s), int(num_s)) != 1:
        num, den = int(num_s), int(den_s)
        g = gcd(den, num)
        num_s = str(num // g)
        den_s = str(den // g)
    return num_s + '/' + den_s
def mixed_number(frac):
    """Convert a fraction string like "-7/2" into mixed-number form ("-3 1/2").

    Handles an optional leading minus, splits out the whole part, reduces
    the remaining proper fraction via reducefrac, and collapses zero
    whole/fraction parts. Returns '0' for a zero-valued input.
    """
    # Peel off a leading minus sign; it is re-attached to the final result.
    if frac[0]=='-':
        frac=frac[1:]
        sign='-'
    else:
        sign=''
    numerator=int(frac[0:frac.index('/')])
    denominator=int(frac[frac.index('/')+1:])
    # Whole part keeps a trailing space as separator; stripped later if the
    # fractional part vanishes.
    whole_p=str(numerator//denominator)+' '
    frac_p=str(numerator%denominator)+ '/' +str(denominator)
    if whole_p=='0 ':
        whole_p=''
    # A zero remainder means there is no fractional part; also drop the
    # separator space from the whole part.
    if frac_p[0:frac_p.index('/')]=='0':
        frac_p=''
        whole_p=whole_p[0:len(whole_p)-1]
    # Only reduce when the fraction survived and is not already in lowest terms.
    if frac_p!='':
        if gcd(int(frac_p[0:frac_p.index('/')]),int(frac_p[frac_p.index('/')+1:]))!=1:
            frac_p=reducefrac(frac_p)
    res=sign+whole_p+frac_p
    # Both parts empty => the input was exactly zero.
    if res=='':
        res='0'
    return res
| true |
f8d020c9f10a756123f3159fcefe98f2de1faadf | Python | willcrichton/psypl-experiments | /psypl/experiments/variable_span.py | UTF-8 | 2,597 | 2.5625 | 3 | [] | no_license | import pandas as pd
from scipy.stats import wasserstein_distance
from ..base import Experiment
from ..utils import all_names, rand_const, sample, shuffle
class VariableSpanExperiment(Experiment):
    """Memory-span experiment: recall (variable, value) pairs after a delay."""
    # Trial sizes (number of variables shown) cycled through the experiment.
    all_n_var = [3, 4, 5, 6]

    def exp_name(self, N_var, N_trials):
        """Identifier used to key stored experiment results."""
        return f"varmem_{N_var}_{N_trials}"

    def generate_experiment(self, N_trials=40):
        """Build a shuffled list of trials, evenly split across all_n_var."""
        trial_n_var = [N for N in self.all_n_var for _ in range(N_trials // len(self.all_n_var))]
        return {
            "trials": [self.generate_trial(N_var) for N_var in shuffle(trial_n_var)],
            "between_trials_time": 2000,
        }

    def generate_trial(self, N_var):
        """One trial: N_var random name/constant pairs, 1.5s display per pair."""
        names = sample(all_names, k=N_var)
        return {
            "variables": [
                {"variable": names[i], "value": rand_const()} for i in range(N_var)
            ],
            "presentation_time": N_var * 1500,
        }

    def eval_response(self, N_var, experiment, results):
        """Score each trial: correct, wrong-value, and unrecalled-name counts."""
        df = []
        for i, (trial, result) in enumerate(zip(experiment["trials"], results)):
            correct = 0
            badvalue = 0
            badname = 0
            for j, var in enumerate(trial["variables"]):
                for var2 in result["response"]:
                    if var["variable"] == var2["variable"]:
                        if var["value"] == int(var2["value"]):
                            correct += 1
                        else:
                            badvalue += 1
                        break
                else:
                    # for/else: no response mentioned this variable name.
                    badname += 1
            df.append(
                {
                    "N_var": N_var,
                    "correct": correct,
                    "badvalue": badvalue,
                    "badname": badname,
                }
            )
        return pd.DataFrame(df)

    def simulate_trial(self, trial, model):
        """Run one trial through a working-memory model (store then load)."""
        wm = model()
        for v in trial["variables"]:
            wm.store(v["variable"], v["value"])
        response = []
        for v in trial["variables"]:
            value = wm.load(v["variable"])
            if value is not None:
                response.append({"variable": v["variable"], "value": value})
        return response

    def simulation_loss(self, gt, sim):
        """Sum of Wasserstein distances between per-N_var 'correct' distributions."""
        def dists(df):
            return [
                df[df.N_var == N_var].correct.tolist()
                for N_var in sorted(df.N_var.unique())
            ]
        return sum(
            [
                wasserstein_distance(gt_dist, sim_dist)
                for gt_dist, sim_dist in zip(dists(gt), dists(sim))
            ]
        )
| true |
499c8b503b5e6452228f64be3183510dd3aa27e7 | Python | itm-dsc-idc-2020-1/idc-practica-6-raspberry-pi-sincronizacion-de-tiempo-EstherPH | /hora.py | UTF-8 | 745 | 3 | 3 | [] | no_license |
import datetime
from time import ctime
import os
import ntplib
servidor_de_tiempo = "pool.ntp.org"
print("\nObteniendo la hora del servidor NTP:")
cliente_ntp = ntplib.NTPClient()
respuesta = cliente_ntp.request(servidor_de_tiempo)
print(respuesta.tx_time)
hora_actual = datetime.datetime.strptime(ctime(respuesta.tx_time), "%a %b %d %H:%M:%S %Y")
print("Respuesta de " + servidor_de_tiempo + ": " + str(hora_actual) + "\n")
separador = " "
sep = str(hora_actual).split(separador)
fecha = sep[0]
hora = sep[1]
fechass = fecha.split("-")
anio = fechass[0]
mes = fechass[1]
dia = fechass[2]
horasrr = hora.split(":")
fhora = horasrr[0]
fmin = horasrr[1]
hola = 'date -u ' + mes + dia + fhora + fmin + anio
print(hola)
os.system(hola) | true |
eafdd060812422cf7edd7e8e39df88c6d27c3fc1 | Python | kunweiTAN/techgym_ai | /Wk2S.py | UTF-8 | 1,526 | 3.4375 | 3 | [] | no_license | #AI-TECHGYM-3-11-A-1
#回帰問題と分類問題
#インポート
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
# Data frame: load the pre-built dummy-encoded housing transaction data.
data_added_dummies = pd.read_csv("./data_added_dummies.csv")
# Keep only transactions priced below 60,000,000 yen.
data_added_dummies = data_added_dummies[data_added_dummies["取引価格(総額)"] < 60000000]
# Target and explanatory variables (multiple regression).
x = data_added_dummies.drop("取引価格(総額)", axis=1)
y = data_added_dummies["取引価格(総額)"]
# Split into training and test sets.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
# Lasso model: fit and predict; tol=0.1 is set to suppress convergence
# warnings. NOTE(review): `normalize=True` was deprecated/removed in recent
# scikit-learn versions — confirm the pinned version before upgrading.
lr_multi2 = Lasso(alpha=1, normalize=True, tol=0.1)
lr_multi2.fit(X_train, y_train)
y_pred = lr_multi2.predict(X_test)
# Mean absolute error.
print('MAE(Lasso)',mean_absolute_error(y_pred, y_test))
# Coefficient of determination (R^2).
print('決定係数(Lasso)',r2_score(y_test, y_pred))
# Ridge model: fit and predict.
lr_multi2 = Ridge(alpha=0.1, normalize=True)
lr_multi2.fit(X_train, y_train)
y_pred = lr_multi2.predict(X_test)
# Mean absolute error.
print('MAE(Ridge)',mean_absolute_error(y_pred, y_test))
# Coefficient of determination (R^2).
print('決定係数(Ridge)',r2_score(y_test, y_pred))
34115542afcfa95d83679d4ecd1135994b469fdf | Python | xyeras/AnimalFarm | /Tests/Black Box/sikuliTests/viewDataBenchmark.sikuli/viewDataBenchmark.py | UTF-8 | 743 | 2.859375 | 3 | [] | no_license | #we are seeing how much it takes to load data from database to meet our NFR1
#ONE MUST ALREADY BE LOGGED IN AND IN THE DASHBOARD
import unittest
#you should already be logged into dashboard so this will cause you to go to that screen
click("1525678946189.png")
#measure the time it takes for the database to load
class TestDataBaseTime(unittest.TestCase):
def test_data_retrieval(self):
click("1525678847697.png")
assert("dataTable.png")
#run test 5 times to get the average
for n in range(5):
suite = unittest.TestLoader().loadTestsFromTestCase(TestDataBaseTime)
unittest.TextTestRunner(verbosity=2).run(suite)
#we click here so we can go back to the dashboard
click("1525681354992.png")
| true |
d1908e0a6d6acb5501aa84b6867899002ef90101 | Python | Hamza-Bik/python | /pythonScripting.py | UTF-8 | 4,148 | 3.453125 | 3 | [] | no_license | import os
text = "this is not a reversed text"
# text = "said"
def reverse(x):
    """Return the string *x* reversed.

    Replaces the character-at-a-time slice/concatenation loop (quadratic in
    the worst case) with Python's negative-step slice.
    """
    return x[::-1]
# print("the reversed text is: "+reverse(text))
# print('said'[len('said')-2:len('said')-1])
# no_list = [10,20,30,40]
def average(x):
    """Return the arithmetic mean of the numbers in *x*.

    Uses the built-in sum() instead of a manual accumulator. An empty
    sequence raises ZeroDivisionError, matching the original behaviour.
    """
    return sum(x) / len(x)
no_list = [1,2,3,4,12,5]
# print(f"average is {average(no_list)}")
def maximum(x):
    """Return the highest number in the list *x*.

    Fixes the manual scan that initialised its running maximum to 0, which
    wrongly returned 0 for all-negative lists. The original's empty-list
    result (0) is preserved via max()'s `default` argument.
    """
    return max(x, default=0)
# print(maximum(no_list))
# Task 4:
no_list = [22,22,2,1,11,11,2,2,3,3,3,4,5,5,5,55,55,66]
# no_list =[1,1,1,2,2,3,3,3,3,4,5,5,6]
def unique_list(l):
    """Return the sorted unique values of *l* as a new list.

    Fixes three defects of the manual implementation: it sorted the
    module-level `no_list` global instead of its argument, it returned []
    for a single-element list, and it relied on an always-true condition to
    re-append the last element. sorted(set(...)) gives the same
    sorted-unique result without side effects.
    """
    return sorted(set(l))
# no_list.sort()
# print(no_list)
# print(unique_list(no_list))
# print(len(no_list))
list=[9,2,1,60,17,24]
def maxNbr2(x):
    """Return the second-largest value seen while scanning *x*.

    Both trackers start at 0, so the result is 0 for empty, single-element,
    or all-non-positive inputs (same contract as the original scan).
    """
    top = 0
    runner_up = 0
    for value in x:
        if value > top:
            # New maximum: the old maximum becomes the runner-up.
            top, runner_up = value, top
        elif value > runner_up:
            runner_up = value
    return runner_up
print(maxNbr2(list))
# list = os.listdir("Desktop/png2")
print(list[3])
#"united-kingdom-flag-country-nation-union-empire-33115.png"
# TO:
#"United Kingdom.png" (space included)
# for i in range(len(list[0])):
# s=list[0]
# print(s.split('-')[i])
# for count,filename in enumerate(os.listdir("Desktop/png")):
# print(count,filename)
def main():
    """Rename every file in Desktop/png to countryN.png (N = index * 10)."""
    for count, filename in enumerate(os.listdir("Desktop/png")):
        # NOTE(review): this first assignment is immediately overwritten by
        # the path-prefixed version two lines below.
        newName ="country" + str(count*10) + ".png"
        src ='Desktop/png/'+ filename
        newName ='Desktop/png/'+ newName
        # rename() function will
        # rename all the files
        os.rename(src, newName)
# main()
s=list[3]
# print(s.find("-flag"))
# # s=s[0:s.find("-flag")]
# s=(s[0:s.find("-flag")] + s[s.find("."):]).replace('-',' ')
print(s)
# print(s.replace('-',' '))
def main():
    """Strip the '-flag...' suffix from flag PNG names and de-dash them.

    NOTE(review): this redefines the `main` defined above — only this
    version is callable afterwards.
    """
    for count, s in enumerate(os.listdir("Desktop/png/")):
        # "united-kingdom-flag-....png" -> "united kingdom.png"
        newName = (s[0:s.find("-flag")] + s[s.find("."):]).replace('-',' ')
        oldFileName ='Desktop/png/'+ s
        newName ='Desktop/png/'+ newName
        os.rename(oldFileName, newName)
    print("Done :)")
# main()
#folderPath exemple like :"Desktop/images" and you must use ""
def main(folderPath):
    """Same flag-renaming as above, parameterised by folder path.

    folderPath example: "Desktop/images" (no trailing slash).
    NOTE(review): third redefinition of `main` in this module.
    """
    for count, s in enumerate(os.listdir(folderPath)):
        newName = (s[0:s.find("-flag")] + s[s.find("."):]).replace('-',' ')
        oldFileName = folderPath + '/' + s
        newName = folderPath + '/' + newName
        os.rename(oldFileName, newName)
    print("Done :|")
#main("Desktop/png/")
def expand(x):
    """Concatenate the strings in *x* and repeat the result three times.

    Rewritten with ''.join for consistency with expandV2 (same behaviour,
    no manual accumulation loop).
    """
    return ''.join(x) * 3
print(expand(['string1', 'string2']))
def expandV2(x):
    """Join the strings in *x* and triple the joined result."""
    joined = ''.join(x)
    return joined + joined + joined
# >>> a=1
# >>> b=2
# >>> c=3
# >>> a,b,c=b,c,a
# >>> print(a,b,c)
# 2 3 1
import re
ass="[Crunchyroll] Naruto Shippuden - 18.ass"
mkv="[AnimeRG] Naruto Shippuden - 018 [720p] [x265] [pseudo].mkv"
def naruto2():
    """Rename subtitle files in Desktop/# to match the AnimeRG video naming.

    Extracts the episode number from the first 36 characters of each
    filename and rebuilds the name in the [AnimeRG] pattern (.ass suffix).
    """
    #for episode > 99
    for count, vid in enumerate(os.listdir("Desktop/#")):
        newName = "[AnimeRG] Naruto Shippuden - " + "".join(re.findall(r'\d+',vid[:36])) + " [720p] [x265] [pseudo].ass"
        oldFileName ='Desktop/#/'+vid
        newName ='Desktop/#/'+ newName
        os.rename(oldFileName, newName)
        print(vid[:36])
# Runs immediately on import — renames files as a side effect.
naruto2()
def naruto1():
    """Same renaming as naruto2 but slices 35 chars (episode numbers < 100)."""
    #for episode < 100
    for count, vid in enumerate(os.listdir("Desktop/#")):
        newName = "[AnimeRG] Naruto Shippuden - " + "".join(re.findall(r'\d+',vid[:35])) + " [720p] [x265] [pseudo].ass"
        oldFileName ='Desktop/#/'+vid
        newName ='Desktop/#/'+ newName
        os.rename(oldFileName, newName)
        print(vid[:35])
ed02e2904c63794810bc62578179f94bf03193bd | Python | kres0167/Programmering | /MicroPython koder/temp med led kode.py | UTF-8 | 489 | 3.34375 | 3 | [] | no_license | # importere Pin, ADC and PWM klasserne
from machine import Pin, ADC, PWM
# Importere sleep klassen fra time modulet
from time import sleep
# PWM LED on pin 4 at 5 kHz; brightness driven by the temperature reading.
led = PWM(Pin(4), 5000)
# Create the ADC object on pin 36 for the analog temperature sensor.
temp = ADC(Pin(36))
# 10-bit reads (0-1023) with 11 dB attenuation (full 0-3.3V input range).
temp.width(ADC.WIDTH_10BIT)
temp.atten(ADC.ATTN_11DB)
while True:
    temp_val = temp.read()
    # Store the analog pin value in a variable, then
    # set the PWM duty cycle directly from it (both are 0-1023 on ESP32).
    led.duty(temp_val)
    print("Duty: ", led.duty())
    sleep(0.1)
| true |
e859982ba92d9e38525c786e72fde1a804fedec2 | Python | teaglebuilt/bocadillo | /bocadillo/error_handlers.py | UTF-8 | 1,713 | 3.34375 | 3 | [
"MIT"
] | permissive | from .request import Request
from .response import Response
from .errors import HTTPError
# Built-in HTTP error handlers.
async def error_to_html(req: Request, res: Response, exc: HTTPError):
    """Render an `HTTPError` as an HTML response.

    The body is an `<h1>` carrying the error's `title`, followed — when the
    error provides a `detail` — by a `<p>` on the next line.

    # Example

    ```html
    <h1>403 Forbidden</h1>
    <p>You do not have the permissions to perform this operation.</p>
    ```
    """
    res.status_code = exc.status_code
    parts = [f"<h1>{exc.title}</h1>"]
    if exc.detail:
        parts.append(f"<p>{exc.detail}</p>")
    res.html = "\n".join(parts)
async def error_to_json(req: Request, res: Response, exc: HTTPError):
    """Render an `HTTPError` as a JSON response.

    The payload is produced by the error's own `as_json()` and contains
    `error` (the title), `status` (the status code) and, when set, `detail`.

    # Example

    ```json
    {
        "error": "403 Forbidden",
        "status": 403,
        "detail": "You do not have the permissions to perform this operation."
    }
    ```
    """
    res.json = exc.as_json()
    res.status_code = exc.status_code
async def error_to_text(req: Request, res: Response, exc: HTTPError):
    """Render an `HTTPError` as a plain-text response.

    The body is the error's `title` line, followed by a `detail` line when
    one is provided.

    # Example

    ```
    403 Forbidden
    You do not have the permissions to perform this operation.
    ```
    """
    res.status_code = exc.status_code
    lines = [exc.title]
    if exc.detail:
        lines.append(exc.detail)
    res.text = "\n".join(lines)
| true |
8932bc5c84239dc73d910d00dcc6fa895075e5e5 | Python | SuyangChen/MPSE | /MPSE/mview/old/mds.py | UTF-8 | 17,556 | 2.734375 | 3 | [] | no_license | ### MDS implementation ###
import numbers, math, random
import matplotlib.pyplot as plt
import numpy as np
import misc, distances, gd
class MDS(object):
    """\
    Class with methods to solve MDS problems.
    """
    def __init__(self, D, dim=2, verbose=0, title='', labels=None):
        """\
        Initializes MDS object.
        Parameters:
        D : (N by N) numpy array
        Distance or dissimilarity matrix.
        dim : int > 0
        Embedding dimension.
        verbose : int >= 0
        Print status of methods in MDS object if verbose > 0.
        title : string
        Title assigned to MDS object.
        labels : list or array
        Labels attached to points corresponding to D.
        """
        if verbose > 0:
            print('+ mds.MDS('+title+'):')
        self.verbose = verbose; self.title = title; self.labels = labels
        assert isinstance(D,np.ndarray); shape=D.shape
        assert len(shape)==2; assert shape[0]==shape[1]
        distances.clean(D,verbose=verbose)
        self.D = D; self.N = shape[0]
        # Root-mean-square of off-diagonal distances; used to normalize stress.
        self.D_rms = np.sqrt(np.sum(D**2)/(self.N*(self.N-1)))
        assert isinstance(dim,int); assert dim > 0
        self.dim = dim
        # Closures binding D so optimizers only need the embedding X.
        self.cost_function = lambda X: stress(self.D,X)
        self.gradient_function = lambda X: stress_gradient(self.D,X)
        self.partial_function = lambda X,n: stress_partial(self.D,X,n)
        self.batch_function = lambda X_batch,indices: \
            stress_batch(self.D,X_batch,indices)
        # F returns (stress, gradient): exact when no batching is requested,
        # otherwise a stochastic estimate over freshly shuffled batches.
        def F(X,batches=None,batch_number=None,batch_size=None):
            if batches is None and batch_number is None and batch_size is None:
                return F_full(self.D,X)
            elif batches is not None:
                return F_batch(self.D,X,batches)
            else:
                if isinstance(batch_number,int):
                    batch_size = math.ceil(self.N/batch_number)
                elif isinstance(batch_size,int):
                    batch_number = math.ceil(self.N/batch_size)
                else:
                    sys.exit('wrong batch_size/batch_number in MDS.F()')
                indices = list(range(self.N)); random.shuffle(indices)
                batches = [list(indices[j*batch_size:(j+1)*batch_size]) for \
                           j in range(batch_number)]
                return F_batch(self.D,X,batches)
        self.F = F
        self.H = {}
        if verbose > 0:
            print(f'    number of points : {self.N}')
            print(f'    rms of D : {self.D_rms:0.2e}')
            print(f'    embedding dimension : {self.dim}')
        if labels is None:
            labels = list(range(self.N))
        self.labels = labels

    def initialize(self, X0=None, title='',**kwargs):
        """\
        Set initial embedding.
        Parameters:
        X0 : numpy array or None
        Initial embedding. If set to None, the initial embedding is produced
        randomly using misc.initial_embedding().
        """
        if self.verbose > 0:
            print('- MDS.initialize('+title+'):')
        if X0 is None:
            X0 = misc.initial_embedding(self.N,dim=self.dim,
                                        radius=self.D_rms,**kwargs)
            if self.verbose > 0:
                print('    method : random')
        else:
            assert isinstance(X0,np.ndarray)
            assert X0.shape == (self.N,self.dim)
            if self.verbose > 0:
                print('    method : initialization given')
        self.X = X0
        self.update()
        # Keep a copy of the starting point so forget() can rewind to it.
        self.X0 = self.X.copy()
        if self.verbose > 0:
            print(f'    initial stress : {self.cost:0.2e}[{self.ncost:0.2e}]')

    def update(self,H=None):
        """Recompute cost/ncost at self.X; optionally merge history dict H."""
        self.cost = self.cost_function(self.X)
        self.ncost = np.sqrt(self.cost/(self.N*(self.N-1)/2))/self.D_rms
        if H is not None:
            # Concatenate new optimization history onto any existing one.
            if bool(self.H) is True:
                H['cost'] = np.concatenate((self.H['cost'],H['cost']))
                H['steps'] = np.concatenate((self.H['steps'],H['steps']))
                H['iterations'] = self.H['iterations']+H['iterations']
            self.H = H

    def forget(self):
        """Reset the embedding to its initial value and clear history."""
        self.X = self.X0; self.H = {}
        self.update()

    def optimize(self, agd=True, batch_size=None, batch_number=None, lr=0.01,
                 **kwargs):
        """\
        Optimize stress function using gradient-based methods. If batch size or
        number are given, optimization begins with stochastic gradient descent.
        If agd is set to True, optimization ends with adaptive gradient descent.
        """
        if self.verbose > 0:
            print('- MDS.optimize():')
        if batch_number is not None or batch_size is not None:
            F = lambda X: self.F(X,batch_number=batch_number,
                                 batch_size=batch_size)
            if self.verbose > 0:
                print('    method : stochastic gradient descent')
                if batch_number is None:
                    print(f'    batch size : {batch_size}')
                else:
                    print(f'    batch number : {batch_number}')
            self.X, H = gd.mgd(self.X,F,lr=lr,**kwargs)
            self.update(H=H)
        if agd is True:
            F = lambda X: self.F(X)
            if self.verbose > 0:
                print('    method : exact gradient & adaptive gradient descent')
            self.X, H = gd.agd(self.X,F,**kwargs,**self.H)
            self.update(H=H)
        if self.verbose > 0:
            print(f'    final stress : {self.cost:0.2e}[{self.ncost:0.2e}]')

    def figureX(self,title='mds embedding',labels=None,colors=None,edges=None,
                plot=True, ax=None):
        """Scatter-plot the embedding; optionally draw edges where indicated."""
        if labels is None:
            labels = self.labels
        if self.dim >= 2:
            if ax is None:
                fig, ax = plt.subplots()
            else:
                plot = False
            if edges is not None:
                # A scalar threshold t means: connect pairs with D[i,j] < t.
                if isinstance(edges,numbers.Number):
                    edges = edges-self.D
                for i in range(self.N):
                    for j in range(i+1,self.N):
                        if edges[i,j] > 0:
                            ax.plot([self.X[i,0],self.X[j,0]],
                                    [self.X[i,1],self.X[j,1]],'-',
                                    linewidth=0.25,color='blue')#,l='b')
            ax.scatter(self.X[:,0],self.X[:,1],s=25,c=colors)
            ax.title.set_text(title+f' - stress = {self.cost:0.2e}[{self.ncost:0.2e}]')
            if plot is True:
                plt.draw()
                plt.pause(0.1)

    def figureH(self,title='Computation history for X',plot=True):
        """Plot cost and step-size history on a log scale; return the figure."""
        assert hasattr(self,'H')
        fig = plt.figure()
        plt.semilogy(self.H['cost'], label='cost')
        plt.semilogy(self.H['steps'], label='step size')
        plt.xlabel('iterations')
        plt.legend()
        plt.title(title)
        if plot is True:
            plt.draw()
            plt.pause(0.2)
        return fig

    def figure(self,title='mds computation & embedding',labels=None,
               plot=True):
        """Side-by-side history plot and embedding scatter; return the figure."""
        #if labels is None:
        #    labels = self.labels
        if self.dim >= 2:
            fig,axs = plt.subplots(1,2)
            plt.suptitle(title+f' - stress = {self.cost:0.2e}'+
                         f'[{self.ncost:0.2e}]')
            axs[0].semilogy(self.H['cost'], label='cost')
            axs[0].semilogy(self.H['steps'], label='step size')
            axs[0].legend()
            axs[1].scatter(self.X[:,0],self.X[:,1],c=labels)
            if plot is True:
                plt.draw()
                plt.pause(0.1)
        return fig

    def graph(self,edge_bound=1.01,plot=True,ax=None,title=None):
        """Draw the embedding as a networkx graph, connecting pairs with
        D[i,j] <= edge_bound."""
        import networkx as nx
        G = nx.Graph()
        positions = {}
        for n in range(self.N):
            label = self.labels[n]
            G.add_node(label)
            positions[label] = self.X[n]
        for i in range(self.N):
            for j in range(i+1,self.N):
                if self.D[i,j] <= edge_bound:
                    G.add_edge(self.labels[i],self.labels[j])
        if ax is None:
            fig = plt.figure()
            nx.draw_networkx(G, pos=positions)
            nx.draw_networkx_edges(G, pos=positions)
            plt.title(title)
            plt.axis('off')
            if plot is True:
                plt.show(block=False)
            return fig
        else:
            nx.draw_networkx(G, pos=positions, ax=ax)
            nx.draw_networkx_edges(G, pos=positions, ax=ax)
###
def stress(D,X):
    """\
    Returns the (raw) MDS stress of embedding X against target distances D:
    the sum over i<j of (D[i,j] - ||X[i]-X[j]||)^2.
    Parameters:
    D : target distance matrix (n x n)
    X : node positions, organized by row (n x p)
    """
    N = len(D)
    total = 0
    for i in range(N):
        for j in range(i+1,N):
            gap = D[i,j] - np.linalg.norm(X[i]-X[j])
            total += gap**2
    return total
def stress_gradient(D,Y):
    """\
    Returns the gradient matrix of the MDS stress at node positions Y.
    Builds a weighted Laplacian-like matrix A from the relative distance
    errors and returns A @ Y.
    Parameters:
    Y : positions, organized by row (n x p)
    D : target distance matrix (n x n)
    """
    N = len(D)
    A = np.zeros((N,N))
    for i in range(N):
        for j in range(i+1,N):
            d_ij = np.linalg.norm(Y[i]-Y[j])
            w = 2*(d_ij-D[i,j])/d_ij
            A[i,i] += w
            A[j,j] += w
            A[i,j] -= w
            A[j,i] -= w
    return A @ Y
def F_full(D,X,batches=None):
    """\
    Returns the exact MDS stress and gradient for matrix D at embedding X.
    (`batches` is accepted for interface compatibility but ignored.)
    Parameters:
    D : numpy array
    Distance/dissimilarity matrix.
    X : numpy array
    Positions/embedding.
    Returns:
    stress : float
    MDS stress at X.
    grad : numpy array
    MDS gradient at X.
    """
    N = len(D)
    total = 0
    grad = np.zeros(X.shape)
    for i in range(N):
        for j in range(i+1,N):
            gap_vec = X[i]-X[j]
            dist = np.linalg.norm(gap_vec)
            err = dist - D[i,j]
            total += err**2
            # Each pair contributes equal and opposite gradient terms.
            contrib = (2*err/dist)*gap_vec
            grad[i] += contrib
            grad[j] -= contrib
    return total, grad
def F_batch(D,X,batches):
    """\
    Returs MDS approximate stress and gradient for matrix D at embedding X, by
    dividing the data into batches and only including terms in each batch.
    Parameters:
    D : numpy array
    Distance/dissimilarity matrix.
    X : numpy array
    Positions/embedding.
    batches: list
    List containing lists with indices in each batch.
    Returns:
    stress : float
    MDS stress at X (or approximation given by batch).
    grad : numpy array
    MDS gradient at X (or approximation given by batch).
    """
    N = len(D)
    stress = 0; dX = np.zeros(X.shape)
    for batch in batches:
        batch_size = len(batch)
        for i in range(batch_size):
            for j in range(i+1,batch_size):
                I = batch[i]; J = batch[j]
                XdiffIJ = X[I]-X[J]
                dIJ = np.linalg.norm(XdiffIJ)
                diffIJ = dIJ-D[I,J]
                # The (N-1)/(batch_size-1) factor rescales the within-batch
                # sum so its expectation matches the full-pair objective.
                stress += diffIJ**2*(N-1)/(batch_size-1)
                dXIJ = 2*diffIJ/dIJ*XdiffIJ*(N-1)/(batch_size-1)
                dX[I] += dXIJ
                dX[J] -= dXIJ
    return stress, dX
def stress_partial(D,X,i):
    """\
    Returns the partial gradient of the MDS stress function with respect to
    node i, evaluated at embedding X.
    """
    N, dim = X.shape
    grad_i = np.zeros(dim)
    for j in range(N):
        if j == i:
            # A node has no distance term against itself.
            continue
        d_ij = np.linalg.norm(X[i]-X[j])
        grad_i += 2*((d_ij-D[i,j])/d_ij)*(X[i]-X[j])
    return grad_i
def stress_batch(D,Y_batch,indices):
    """\
    Returns an approximation of the stress-gradient rows for the nodes in
    `indices`, using only the pairwise terms available within the batch
    positions Y_batch.
    """
    batch_size, dim = Y_batch.shape
    grad = np.zeros((batch_size, dim))
    for a in range(batch_size):
        for b in range(a+1, batch_size):
            diff = Y_batch[a]-Y_batch[b]
            dist = np.linalg.norm(diff)
            term = 2*((dist-D[indices[a],indices[b]])/dist)*diff
            grad[a] += term
            grad[b] -= term
    return grad
### TESTS ###
def example_disk(N=100,dim=2,**kwargs):
    """Basic demo: embed a random disk point cloud and plot the results.
    Extra kwargs are forwarded to MDS.optimize()."""
    print('\n***mds.example_disk()***')
    Y = misc.disk(N,dim); labels = misc.labels(Y)
    plt.figure()
    plt.scatter(Y[:,0],Y[:,1],c=labels)
    plt.title('original data')
    plt.draw()
    plt.pause(0.1)
    D = distances.compute(Y)
    title = 'basic disk example'
    mds = MDS(D,dim=dim,verbose=1,title=title,labels=labels)
    mds.initialize()
    mds.figureX(title='initial embedding')
    mds.optimize(**kwargs)
    mds.figureX(title='final embedding',labels=labels,edges=.2)
    mds.figure(title='final embedding',labels=labels)
    plt.show()
def example_approx(N=30,dim=2,batch_number=5):
    """Disk demo using batched (approximate) gradients.
    NOTE(review): `mds.approximate` is not defined on the MDS class in this
    file — this example appears stale and would raise AttributeError."""
    print('\n***mds.example_disk_batch()***\n')
    Y = misc.disk(N,dim); labels = misc.labels(Y)
    plt.figure()
    plt.scatter(Y[:,0],Y[:,1],c=labels)
    plt.title('Original data')
    plt.draw()
    plt.pause(0.1)
    D = distances.compute(Y)
    title = 'basic disk example using approximate gradient'
    mds = MDS(D,dim=dim,verbose=1,title=title,labels=labels)
    mds.initialize()
    mds.approximate(verbose=2,max_iters=200,
                    lr=0.1,batch_number=batch_number,algorithm='gd')
    mds.figureX(title='Final embedding')
    mds.figureH()
    plt.show()
def disk_compare(N=100,dim=2): ###
    """Compare full-gradient, approximate-gradient, and combined optimization
    on the same disk dataset.
    NOTE(review): `mds.approximate` is not defined on the MDS class in this
    file, and optimize() would forward the unknown `algorithm=`/`label=`
    kwargs to gd — verify against gd's signatures before running."""
    print('\n***mds.disk_compare()***')
    X = misc.disk(N,2); labels = misc.labels(X)
    plt.figure()
    plt.scatter(X[:,0],X[:,1],c=labels)
    plt.title('original data')
    plt.draw()
    plt.pause(0.1)
    D = distances.compute(X)
    mds = MDS(D,dim=dim,verbose=1,title='disk experiments',labels=labels)
    mds.initialize()
    mds.figureX(title='initial embedding')
    title = 'full gradient & agd'
    mds.optimize(algorithm='agd',verbose=2,label=title)
    mds.figureX(title=title)
    mds.figureH(title=title)
    mds.forget()
    title = 'approx gradient & gd'
    mds.approximate(algorithm='gd',verbose=2,label=title)
    mds.figureX(title=title)
    mds.figureH(title=title)
    mds.forget()
    title = 'combine'
    mds.approximate(algorithm='gd',verbose=2,label=title)
    mds.optimize(verbose=2,label=title,max_iters=10)
    mds.figureX(title=title)
    mds.figureH(title=title)
    plt.show()
def example_disk_noisy(N=100,dim=2):
    """Plot normalized MDS stress as a function of noise added to D."""
    print('\n***mds.example_disk_noisy()***\n')
    noise_levels = [0.001,0.005,0.01,0.03,0.07,0.1,0.15,0.2,0.7,1.0]
    stress = []
    Y = misc.disk(N,dim)
    D = distances.compute(Y)
    for noise in noise_levels:
        D_noisy = distances.add_noise(D,noise)
        mds = MDS(D_noisy,dim,verbose=1,title=f'noise : {noise:0.2f}')
        mds.initialize()
        mds.optimize(algorithm='agd',max_iters=300,verbose=1)
        stress.append(mds.ncost)
    fig = plt.figure()
    plt.loglog(noise_levels,stress,'.-')
    plt.xlabel('noise level')
    plt.ylabel('stress')
    plt.title('Normalized MDS stress for various noise levels')
    plt.show()
def example_disk_dimensions(N=100):
    """Plot normalized MDS stress of an N-point disk embedded for
    dimensions 1..10.

    Fixes two call-site bugs that made this example raise immediately:
    MDS.__init__ takes `title=` (not `label=`), and the initializer method
    is `initialize()` (`initialize_Y` does not exist on MDS).
    """
    print('\n***mds.example_disk_dimensions()***\n')
    dims = range(1,11)
    stress = []
    for dim in dims:
        Y = misc.disk(N,dim)
        D = distances.compute(Y)
        mds = MDS(D,dim,verbose=1,title=f'dimension : {dim}')
        mds.initialize()
        mds.optimize(algorithm='agd',max_iters=300)
        stress.append(mds.ncost)
    fig = plt.figure()
    plt.semilogy(dims,stress)
    plt.xlabel('dimension')
    plt.ylabel('stress')
    plt.title('Normalized MDS stress for various dimensions')
    plt.show()
### EMBEDDABILITY TESTS ###
def embeddability_dims(ax=None):
    """Plot 2D-embedding stress for data of increasing intrinsic dimension.
    NOTE(review): XX has only 20 columns but `dims` goes up to 47, so
    XX[:,0:dim] silently stops growing beyond dim=20 — confirm intent."""
    print('\n**mds.embeddability_dims()')
    N=50
    ncost = []
    dims = list(range(2,50,5))
    XX = misc.disk(N,20)
    #XX = misc.box(N,20)
    for dim in dims:
        X = XX[:,0:dim]
        D = distances.compute(X)
        mds = MDS(D,dim=2,verbose=1)
        mds.initialize()
        mds.optimize()
        ncost.append(mds.ncost)
    if ax is None:
        fig, ax = plt.subplots(1)
        plot = True
    else:
        plot = False
    ax.plot(dims,ncost)
    if plot is True:
        plt.show()
def embeddability_noise(ax=None):
    """Plot 4D-embedding stress for increasingly noisy distance matrices.

    Fix: `[0]+10**np.arange(-4,0,0.5)` performed elementwise numpy addition
    (broadcasting the single-element list), which silently dropped the
    intended noiseless baseline instead of prepending it. The explicit
    list(...) concatenation restores the noise=0 case.
    """
    print('\n**mds.embeddability_noise()')
    N=50
    ncost = []
    # Noise levels: 0 plus a logarithmic sweep from 1e-4 to ~0.3.
    noise_list = [0]+list(10**np.arange(-4,0,0.5))
    X = misc.disk(N,4)
    DD = distances.compute(X)
    for noise in noise_list:
        D = DD*(1+np.random.randn(N,N)*noise)
        mds = MDS(D,dim=4,verbose=1)
        mds.initialize()
        mds.optimize()
        ncost.append(mds.ncost)
    if ax is None:
        fig, ax = plt.subplots(1)
        plot = True
    else:
        plot = False
    ax.semilogx(noise_list,ncost)
    if plot is True:
        plt.show()
if __name__=='__main__':
    # Default demo; the commented lines are alternative experiments.
    example_disk()
    #example_disk(agd=False,batch_number=10,max_iters=200)
    #example_disk(batch_number=10)
    #example_approx(N=100)
    #disk_compare(N=100)
    #example_disk_noisy(50)
    #example_disk_dimensions(50)
    #embeddability_dims()
    #embeddability_noise()
| true |