blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8576d9174b9a439462ff5fa98b4e91de5121c918
|
Python
|
keezysilencer/XSS_Scan-
|
/scan.py
|
UTF-8
| 876
| 3.234375
| 3
|
[] |
no_license
|
import requests
import re
from bs4 import BeautifulSoup
from urllib.parse import urlparse
from urllib.parse import urljoin
class Scanner:
    """Recursive same-site link crawler.

    Starting from ``target_url``, follows every ``href`` whose absolute form
    still contains the target URL, collecting unique links in
    ``self.target_links`` and printing each one as it is discovered.
    """

    def __init__(self, url):
        # Root URL of the scan; also used as the "stay on this site" filter.
        self.target_url = url
        # Unique links discovered so far (also serves as the visited set).
        self.target_links = []

    def extract_links_from(self, url):
        """Return every href="..." value found in the page at *url*.

        The body is decoded as ISO-8859-1 so arbitrary bytes never raise;
        links are pulled with a regex rather than a full HTML parse.
        """
        response = requests.get(url)
        return re.findall('(?:href=")(.*?)"', response.content.decode('ISO-8859-1'))

    def crawl(self, url=None):
        """Depth-first crawl from *url* (defaults to the target URL).

        Fix vs original: ``url == None`` replaced with the identity test
        ``url is None`` (PEP 8). NOTE(review): recursion depth grows with the
        site's link depth — very deep sites could hit the recursion limit.
        """
        if url is None:
            url = self.target_url
        for link in self.extract_links_from(url):
            link = urljoin(url, link)  # resolve relative hrefs
            if self.target_url in link and link not in self.target_links:
                self.target_links.append(link)
                print(link)
                self.crawl(link)
# Example usage: crawl the target site and print every discovered internal link.
# NOTE(review): this runs at import time and performs live network I/O.
target_url = "http://google.com/"
vuln_scanner = Scanner(target_url)
vuln_scanner.crawl()
| true
|
4cf5b00667bfb1909ed281cca86f419973981d00
|
Python
|
osmaoguzhan/akinci
|
/akinci.py
|
UTF-8
| 2,356
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
akinci - Automatic Network and Port Scanner
Copyright (C) 2019 Oguzhan Osma
"""
from model import networkScanner
import argparse
from controller import dbOp as db
__author__ = "Oguzhan Osma"
__version__ = "v1.0"
def argParse():
    """Parse the command line and dispatch to a scan or a DB lookup.

    -p/--port : run a network/port scan for the given port spec.
    -d/--db   : interactive DB lookup; "1"=MAC, "2"=IP, "3"=Host IP, "4"=dump all.

    Fixes vs original: ``!= None`` replaced with ``is not None``; the three
    near-identical prompt/lookup branches are folded into one table-driven
    branch (all user-visible strings are unchanged).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', "--port", required=False,
                        help="Specific Port Ex: <-p 80>\nPort Range Ex: <-p 22-45>\nFor All Ports Ex <-p -1>\n")
    parser.add_argument('-d', "--db", required=False,
                        help="1: MAC\n 2:IP Address\n 3: Host IP\n4: All")
    args = parser.parse_args()
    if args.port is not None:
        networkScanner.networkScan(args.port)
    elif args.db is not None:
        # option -> (prompt label, DB column, message when input is empty)
        lookups = {
            "1": ("MAC", "mac", "No written MAC!!"),
            "2": ("IP Address", "ip", "Pls type an IP!!"),
            "3": ("Host IP", "hostip", "No written Host IP!!"),
        }
        if args.db == "4":
            db.getalldata()
        elif args.db in lookups:
            label, column, empty_msg = lookups[args.db]
            value = input("Please type the {} you'd like to search on DB:".format(label))
            if len(value) > 0:
                db.selectstar(str(value), column)
            else:
                print(empty_msg)
        else:
            print("Wrong option!!")
    else:
        print("No arguments!!")
def createBanner():
    """Print the red ANSI ASCII-art banner and program credits.

    '\033[91m' switches the terminal to red; '\033[0m' resets it.
    The art strings are kept byte-for-byte — do not reformat them.
    """
    print('\033[91m'+"=================================================")
    print('\033[91m'+" ______ _ __ _____ ______ ______ _____\n"
    '\033[91m'+"| | | | | | / / | | | | \ \ | | | | \n"
    '\033[91m'+"| |__| | | |-< < | | | | | | | | | | \n"
    '\033[91m'+"|_| |_| |_| \_\ _|_|_ |_| |_| |_|____ _|_|_\n")
    print('\033[91m'+"================================================="+'\033[0m')
    print("akinci - Automatic Network and Port Scanner\n"
    "Copyright (C) 2019 Oguzhan Osma")
    print('\033[91m' + "=================================================" + '\033[0m')
# Entry point: show the banner, then parse arguments and run.
if __name__ == '__main__':
    createBanner()
    argParse()
| true
|
e550d418c8c256b29dee9630cacf450f32fd183b
|
Python
|
heikalb/CMPT825
|
/project/app/model_selector.py
|
UTF-8
| 1,522
| 2.671875
| 3
|
[] |
no_license
|
import pickle
import nltk
import spacy
from nltk.corpus import stopwords
from nltk import RegexpTokenizer
# Module-level setup: loads the spaCy model, NLTK stopwords, and the five
# pickled reference corpora at import time (slow; network needed for the
# nltk.download call on first run).
print('Loading language model...')
nlp = spacy.load('en_core_web_sm')
print('Loading stopwords...')
nltk.download('stopwords')
# SECURITY NOTE(review): pickle.load executes arbitrary code — only load
# these files from a trusted source.
corpus_cnn = pickle.load( open( "../text_similarity/corpus_cnn.pkl", "rb" ) )
corpus_daily = pickle.load( open( "../text_similarity/corpus_dailymail.pkl", "rb" ) )
corpus_sports = pickle.load( open( "../text_similarity/corpus_sports.pkl", "rb" ) )
corpus_politics = pickle.load( open( "../text_similarity/corpus_politics.pkl", "rb" ) )
corpus_science = pickle.load( open( "../text_similarity/corpus_science.pkl", "rb" ) )
# \w+ tokenizer: words only, punctuation dropped.
tokenizer = RegexpTokenizer(r'\w+')
stopword_set = set(stopwords.words('english'))
def nlp_clean(data):
    """Lowercase *data*, tokenize on word characters, drop English stopwords.

    Returns a list of unique tokens (order is set-dependent, as before).
    Fix vs original: the ``new_data`` accumulator was dead code — it was
    appended to but never returned — and has been removed.
    """
    tokens = tokenizer.tokenize(data.lower())
    return list(set(tokens).difference(stopword_set))
def get_similarity(text1, text2):
    """Return the spaCy similarity between the cleaned forms of two texts."""
    doc_a = nlp(' '.join(nlp_clean(text1)))
    doc_b = nlp(' '.join(nlp_clean(text2)))
    return doc_a.similarity(doc_b)
def get_best_model(text):
    """Return ``(model_name, similarity)`` for the corpus most similar to *text*.

    Returns ``('', 0)`` when no corpus scores above zero.
    Fix vs original: iterate ``.items()`` instead of ``.keys()`` plus a
    second dict lookup per iteration.
    """
    best_similarity = 0
    best_model = ''
    for name, corpus in models_to_evaluate.items():
        sim = get_similarity(text, corpus)
        if sim > best_similarity:
            best_similarity = sim
            best_model = name
    return best_model, best_similarity
# Reference corpora keyed by model name; consulted by get_best_model at call
# time (defining it after the function is legal — only lookup order matters).
models_to_evaluate = {
    'CNN_News': corpus_cnn,
    'DailyMail': corpus_daily,
    'Sports': corpus_sports,
    'Politics': corpus_politics,
    'Science': corpus_science,
}
| true
|
682e23c4148aae83edd342a7b243cf809efe721e
|
Python
|
ncss-2015-group-4/trivia
|
/difficulty_old.py
|
UTF-8
| 2,077
| 3.46875
| 3
|
[] |
no_license
|
# Question difficulty levels.
EASY = 1
MEDIUM = 2
HARD = 3
# Player skill levels (numerically parallel to the difficulties).
BEGINNER = 1
INTERMEDIATE = 2
EXPERT = 3


def difficulty(people_answers):
    """Classify a question as "Easy"/"Medium"/"Hard" from who answered it.

    people_answers: list of (skill_level, answered_correctly) pairs.
    Returns None for an empty list.

    Fix vs original: an average of exactly 3 fell between the checks
    ``2 <= average < 3`` and ``3 < average`` and silently returned None;
    it is now classified "Hard". An empty list no longer raises
    ZeroDivisionError.
    """
    if not people_answers:
        return None
    # Points per answer: correct answers from weaker players imply an easier
    # question; wrong answers imply a harder one. Unknown levels add 0,
    # matching the original if/elif chains.
    correct_points = {BEGINNER: EASY, INTERMEDIATE: MEDIUM, EXPERT: HARD}
    wrong_points = {BEGINNER: MEDIUM, INTERMEDIATE: HARD, EXPERT: HARD}
    total = 0  # renamed from ``sum`` — was shadowing the builtin
    for person, correct in people_answers:
        total += (correct_points if correct else wrong_points).get(person, 0)
    average = total / len(people_answers)
    if average < 2:
        return "Easy"
    if average < 3:
        return "Medium"
    return "Hard"
def skill_level(questions_answers):
    """Grade a user as 'Beginner'/'Intermediate'/'Expert'.

    questions_answers: list of (question_difficulty, answered_correctly)
    pairs. Correct answers score 1/2/3 points for EASY/MEDIUM/HARD; wrong
    answers score nothing. The grade is the mean score over all answers.
    """
    points = 0
    for level, correct in questions_answers:
        if not correct:
            continue  # wrong answers contribute nothing
        if level == EASY:
            points += 1
        elif level == MEDIUM:
            points += 2
        elif level == HARD:
            points += 3
    final = points / len(questions_answers)
    if final < 1.25:
        return 'Beginner'
    if final < 2.25:
        return 'Intermediate'
    return 'Expert'
# Smoke-test prints exercising both classifiers with hand-picked samples.
if __name__ == '__main__':
    print(difficulty([(INTERMEDIATE, True)]))
    print(difficulty([(INTERMEDIATE, True), (BEGINNER, True)]))
    print(difficulty([(BEGINNER, True), (INTERMEDIATE, True), (EXPERT, True)]))
    print(skill_level([(EASY, True)]))
    print(skill_level([(EASY, True), (MEDIUM, True), (HARD, False)]))
    print(skill_level([(EASY, True), (EASY, True), (EASY, True), (EASY, True)]))
    print(skill_level([(HARD, True), (EASY, False), (EASY, True), (MEDIUM, False), (HARD, False), (MEDIUM, True), (MEDIUM, True)]))
    print(skill_level([(EASY, False), (MEDIUM, True), (MEDIUM, True), (MEDIUM, True)]))
    print(skill_level([(HARD, True), (HARD, True), (HARD, True)]))
    print(skill_level([(HARD, True), (HARD, True), (HARD, True), (EASY, False), (EASY, False)]))
| true
|
133865aa43984dcaf9b447482f85ecb6e24b1442
|
Python
|
Ella2604/Python
|
/hash_maps.py
|
UTF-8
| 899
| 3.375
| 3
|
[] |
no_license
|
# Demo hash map shared (and mutated) by every function below.
# Keys are Romanian: nume=name, varsta=age, nota=grade. All values are strings.
personal_info = {
    "nume": "Marius",
    "varsta": "23",
    "nota": "10"
}
def print_map():
    """Print a header, the whole dict, then a spacer line."""
    for line in ("Map ", personal_info, " "):
        print(line)
def access_values():
    """Demonstrate dict.values(): print the values view, then a spacer."""
    print("#4 accessing values")
    values_view = personal_info.values()
    print(values_view)
    print(" ")
def print_value():
    """Demonstrate key lookup: print the stored age ("varsta")."""
    print("#2 print a value")
    age = personal_info["varsta"]
    print("Age: " + age)
def access_key():
    """Demonstrate dict.keys(): print the keys view."""
    print("#3 accessing keys")
    keys_view = personal_info.keys()
    print(keys_view)
def add_values():
    """Demonstrate insertion: add key "tara" and show values before/after."""
    print("#5 adding values")
    print("normal entry")
    print(personal_info)
    print("adding value ")
    personal_info["tara"] = "Ro"  # mutates the shared module-level dict
    print(personal_info.values())
def remove_value():
    """Demonstrate removal: pop key "varsta" and show the dict before/after."""
    print("# remove values")
    print(personal_info)
    personal_info.pop("varsta")  # KeyError if already removed
    print(personal_info)
# Run the demos in order. Note: order matters — remove_value pops "varsta",
# so print_value would fail if called after it.
if __name__ == "__main__":
    print_map()
    print_value()
    access_key()
    access_values()
    add_values()
    remove_value()
| true
|
2a96b60626aac235911b9f2be4e99cd71521422f
|
Python
|
daniel-reich/turbo-robot
|
/7AQgJookgCdbom2Zd_8.py
|
UTF-8
| 1,283
| 4.40625
| 4
|
[] |
no_license
|
"""
Create a function that takes a string of words and moves the first letter of
each word to the end of it, then adds 'ay' to the end of the word. This is
called "Pig Latin" and it gets more complicated than the rules in this
particular challenge. I've intentionally kept things simple, otherwise this
would turn into an extremely tedious challenge.
* Move the first letter of each word to the end of the word.
* Add "ay" to the end of the word.
* Words starting with a vowel (A, E, I, O, U) simply have "WAY" appended to the end.
### Examples
pig_latin("Cats are great pets.")
➞ "Atscay areway reatgay etspay."
pig_latin("Tom got a small piece of pie.")
➞ "Omtay otgay away mallsay iecepay ofway iepay."
pig_latin("He told us a very exciting tale.")
➞ "Ehay oldtay usway away eryvay excitingway aletay."
### Notes
Be sure to preserve proper capitalization and punctuation.
"""
def pig_latin(txt):
    """Translate *txt* into simplified Pig Latin, word by word.

    Rules: a word starting with a vowel gets "way" appended; otherwise its
    first letter moves to the end followed by "ay". A single trailing
    non-letter character (punctuation) is preserved, and a word that was
    capitalized stays capitalized.

    Fix vs original: a token that is pure punctuation (e.g. "-") previously
    raised IndexError once its only character was stripped; such tokens are
    now passed through unchanged.
    """
    words = txt.split()
    for i, word in enumerate(words):
        was_capitalized = word[0].isupper()
        word = word.lower()
        punct = word[-1] if not word[-1].isalpha() else ''
        if punct:
            word = word[:-1]
        if not word:
            # Token was punctuation only — nothing to translate.
            words[i] = punct
            continue
        if word[0] in 'aeiou':
            word += 'way'
        else:
            word = word[1:] + word[0] + 'ay'
        words[i] = (word + punct).capitalize() if was_capitalized else word + punct
    return ' '.join(words)
| true
|
50ef003a40d02556df977fd21fa8075bc6f5cfce
|
Python
|
Vijay-Yosi/biostack
|
/apps/scout/tests/commands/update/test_update_individual_cmd.py
|
UTF-8
| 2,440
| 2.578125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""Tests for update individual command"""
from click.testing import CliRunner
from scout.commands.update.individual import individual as ind_cmd
# from scout.server.extensions import store
def test_update_individual_no_args():
    """The update-individual command must fail when invoked with no options."""
    # GIVEN a bare CLI runner
    cli = CliRunner()
    # WHEN invoking the command without any arguments
    outcome = cli.invoke(ind_cmd)
    # THEN click exits with code 2 (usage error: missing mandatory arguments)
    assert outcome.exit_code == 2
def test_update_individual(empty_mock_app):
    """Updating an individual of a case not in the database exits cleanly."""
    # GIVEN a CLI runner bound to an app with an empty store
    cli = empty_mock_app.test_cli_runner()
    # WHEN updating an individual whose case does not exist
    outcome = cli.invoke(ind_cmd, ["--case-id", "acase", "--ind-id", "anind"])
    # THEN the command still exits without problems
    assert outcome.exit_code == 0
def test_update_individual_existing(empty_mock_app, real_adapter):
    """Updating an existing individual with no new fields exits cleanly."""
    # GIVEN a CLI runner and a database holding one case with one individual
    cli = empty_mock_app.test_cli_runner()
    case_name = "acase"
    person = "anind"
    real_adapter.case_collection.insert_one(
        {"case_id": case_name, "individuals": [{"individual_id": person}]}
    )
    # WHEN updating that individual without supplying any field to change
    outcome = cli.invoke(ind_cmd, ["--case-id", case_name, "--ind-id", person])
    # THEN the no-op update exits without problems
    assert outcome.exit_code == 0
def test_update_alignment_path(mock_app, real_populated_database, bam_path):
    """--alignment-path stores the resolved bam path on the individual."""
    # GIVEN a CLI runner and a populated database
    cli = mock_app.test_cli_runner()
    existing = real_populated_database.case_collection.find_one()
    person = existing["individuals"][0]["individual_id"]
    # WHEN updating that individual's alignment path
    outcome = cli.invoke(
        ind_cmd,
        ["--case-id", existing["_id"], "--ind-id", person, "--alignment-path", str(bam_path)],
    )
    # THEN the command exits cleanly
    assert outcome.exit_code == 0
    # AND the resolved path is persisted on the individual
    refreshed = real_populated_database.case_collection.find_one()
    assert refreshed["individuals"][0]["bam_file"] == str(bam_path.resolve())
| true
|
ab009bf528023f9e43ae45d9f033de4c66884143
|
Python
|
henrymorgen/Just-Code
|
/src/1227.airplane-seat-assignment-probability/airplane-seat-assignment-probability.py
|
UTF-8
| 106
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
class Solution:
    """LeetCode 1227: airplane seat assignment probability."""

    def nthPersonGetsNthSeat(self, n: int) -> float:
        """Probability that passenger n gets seat n: 1.0 for n == 1, else 0.5."""
        if n == 1:
            return 1.0
        return 0.5
| true
|
b8e4999fc3872d0a46083bf4ed9c4f3727931aa6
|
Python
|
axaymountry326/Pacman_portal_CPSC386
|
/powerpills.py
|
UTF-8
| 559
| 2.953125
| 3
|
[] |
no_license
|
# Aaron Xaymountry
# CPSC 386-01
# MW 5:30-6:45pm
# Pacman game with portals
import pygame
from pygame.sprite import Sprite
class Powerpills(Sprite):
    """A 7x7 power-pill sprite drawn onto the game screen."""

    def __init__(self, screen):
        super(Powerpills, self).__init__()
        self.screen = screen
        # Pill is square, so width and height are interchangeable below.
        self.width = 7
        self.height = 7
        surface = pygame.image.load('images/powerpill.png')
        surface = pygame.transform.scale(surface, (self.height, self.width))
        self.image = surface
        self.rect = surface.get_rect()

    def blitpowerpills(self):
        """Draw the pill at its current rect on the owning screen."""
        self.screen.blit(self.image, self.rect)
| true
|
8a941be75e308ef8504646d0a5a81d4e4de92976
|
Python
|
makeitlab/software_tools
|
/PythonScripts/misc/tele.py
|
WINDOWS-1251
| 553
| 2.578125
| 3
|
[] |
no_license
|
import telebot

# Minimal Telegram echo-style bot.
# NOTE(review): the source file is WINDOWS-1251 encoded and its Cyrillic
# message/keyword literals appear to have been stripped during re-encoding —
# the '' and ', ...' strings below are almost certainly not the original
# text. Confirm against the upstream repository before changing them.
bot = telebot.TeleBot('xyz')  # api key placeholder — never commit real keys

@bot.message_handler(commands=['start'])
def start_message(message):
    # Greeting sent in reply to the /start command.
    bot.send_message(message.chat.id, ', /start')

@bot.message_handler(content_types=['text'])
def send_text(message):
    # Canned replies keyed on the (case-folded) incoming text.
    if message.text.lower() == '':
        bot.send_message(message.chat.id, ', ')
    elif message.text.lower() == '':
        bot.send_message(message.chat.id, ', ')

# Blocks forever, long-polling Telegram for updates.
bot.polling()
| true
|
0f7a74167136a099eef0b0646910f17cbd7a66bb
|
Python
|
cosminvlaicu/PythonSyntax
|
/sets/basic.py
|
UTF-8
| 303
| 4
| 4
|
[] |
no_license
|
# Basic set demos: literals, membership, mixed types, list -> set.
s1 = {"a", "b", "c"}
print(s1)
print(len(s1))  # number of unique elements
# set is implemented as a hashtable => O(1) find, insert, delete
if "a" in s1:
    print("a is in s1")
# sets can contain different data types
# NOTE(review): in Python, 1 == True and they hash identically, so this
# literal keeps only one of the pair — s2 ends up with 3 elements, not 4.
s2 = {"a", 1, True, 40}
print(s2)
# list to set: deduplicates; the original list is left unchanged
lst = [1, 1, 2, 3, 4, 5]
s3 = set(lst)
print(lst)
print(s3)
| true
|
639fb90f08ddcacbce0fe99b9a44f1574d110b65
|
Python
|
AlzubaidiTurki/inappropiate-kid-videos-detector
|
/metrics.py
|
UTF-8
| 5,462
| 2.921875
| 3
|
[] |
no_license
|
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
'''
This function is used for computing ROC and plot it with AUC (can do more than this too, but we used it for ROC only),
but it requires dataset for TP and TN. so you probably cannot use this function.
from pyAudioAnalysis import audioTrainTest as aT
import sklearn.metrics
import plotly.graph_objs as go
def plotROC (): # if code has bugs, try to provide full path for every folder.
cwd = os.path.abspath(os.getcwd())
_, _, _, _, _, fpr_scream, tpr_scream = aT.evaluate_model_for_folders([cwd+"\\testing\\screams", cwd+"\\testing\\non_scream"],cwd+"\\models\\svm_screams_nonscream", "svm_rbf","screams")
_, _, _, _, _, fpr_explosion, tpr_explosion = aT.evaluate_model_for_folders([cwd+ "\\testing\\explosion", cwd+"testing\\non_explosion"],cwd+"\\models\\svm_explosion_nonexplosion", "svm_rbf","explosion")
_, _, _, _, _, fpr_violent, tpr_violent = aT.evaluate_model_for_folders([cwd+"\\testing\\violent", cwd+"\\testing\\nonviolent"],cwd+"\\models\\svm_violent_nonviolent", "svm_rbf","violent")
figs = go.Figure()
figs.add_trace(go.Scatter(x=fpr_scream, y=tpr_scream, showlegend=True, name = "Screams (AUC = {:.2f})".format(sklearn.metrics.auc(fpr_scream, tpr_scream))))
figs.add_trace(go.Scatter(x=fpr_explosion, y=tpr_explosion, showlegend=True , name = "Explosions (AUC = {:.2f})".format(sklearn.metrics.auc(fpr_explosion, tpr_explosion))))
figs.add_trace(go.Scatter(x=fpr_violent, y=tpr_violent, showlegend=True, name = "Violent (AUC = {:.2f})".format(sklearn.metrics.auc(fpr_violent, tpr_violent))))
figs.update_xaxes(title_text="false positive rate")
figs.update_yaxes(title_text="true positive rate")
figs.update_layout(title = 'ROC Curve for screams, explotions, and Violent')
figs.show()
'''
def plot_figure(cm, name):
    """Show a 2x2 confusion matrix *cm* as an annotated heatmap titled *name*."""
    print(cm)
    frame = pd.DataFrame(cm, range(2), range(2))
    frame.columns = ['Actual Yes', 'Actual No']
    frame.index = ['Pred Yes', 'Pred No']
    sn.set(font_scale=1.4)  # for label size
    sn.heatmap(frame, annot=True, annot_kws={"size": 16}, fmt='g')  # font size
    plt.title(name)
    plt.show()
def plot_bar(labels, legend_labels, data):
    """Show a grouped bar chart: one group per label, one bar per data series."""
    positions = np.arange(len(labels))  # the label locations
    gap = 0.5
    bar_width = (1 - gap) / 4
    colors = ["magenta", 'green', "purple", "orange"]
    fig, axis = plt.subplots()
    for offset, series in enumerate(data):
        axis.bar(positions + bar_width * offset, series,
                 width=bar_width, color=colors[offset])
    axis.set_xticks(positions)
    axis.set_xticklabels(labels)
    axis.set_ylabel('Percentages')
    axis.legend(legend_labels)
    plt.show()
def print_cal(cm):
    """Print accuracy, precision, recall and F1 for confusion matrix *cm*."""
    metrics = (
        ("Accuracy", calculate_accuracy),
        ("Precision", calculate_precision),
        ("Recall", calculate_recall),
        ("F-1 Score", calculate_f1),
    )
    for label, metric_fn in metrics:
        print("%s: %.4f" % (label, metric_fn(cm)))
def calculate_f1(cm):
    """Harmonic mean of precision and recall for confusion matrix *cm*.

    Fix vs original: returns 0.0 when precision + recall == 0 instead of
    raising ZeroDivisionError.
    """
    precision = calculate_precision(cm)
    recall = calculate_recall(cm)
    if precision + recall == 0:
        return 0.0
    return (2 * precision * recall) / (precision + recall)
def calculate_recall(cm):
    """TP / (TP + FN) for *cm* laid out as [[TP, FP], [FN, TN]]."""
    true_pos = cm[0][0]
    false_neg = cm[1][0]
    return true_pos / (true_pos + false_neg)
def calculate_accuracy(cm):
    """(TP + TN) / total for *cm* laid out as [[TP, FP], [FN, TN]].

    Fix vs original: the running total no longer shadows the builtin
    ``sum``; the builtin is used directly.
    """
    total = sum(row[0] + row[1] for row in cm)
    return (cm[0][0] + cm[1][1]) / total
def calculate_precision(cm):
    """TP / (TP + FP) for *cm* laid out as [[TP, FP], [FN, TN]]."""
    true_pos, false_pos = cm[0]
    return true_pos / (true_pos + false_pos)
if __name__ == "__main__":
    total_cm = []
    # NOTE(review): both model_names and models_cm are assigned twice — the
    # first (image-model) assignments are dead code kept for reference and
    # immediately overwritten by the audio-model values below.
    model_names = ["Cross", "Buddha", "Star", "Pyramid", "Xmas", "Jack", "Mason", "Knife", "Pistol", "Hammer",
                   "Scissor"]
    model_names = ["screams/non_screams","explosion/non_explosion"]#,"violent/non_violent"] for audio
    # Per-class confusion matrices, laid out as [[TP, FP], [FN, TN]].
    models_cm = [
        [[9, 4],
         [3, 11]], # Cross
        [[26, 3],
         [2, 12]], # Buddha
        [[22, 2],
         [2, 13]], # Star
        [[33, 0],
         [9, 15]], # Pyramid
        [[21, 1],
         [10, 14]], # Xmas
        [[28, 2],
         [3, 13]], # Jack
        [[12, 0],
         [8, 15]], # Mason
        [[15, 1],
         [13, 14]], # Knife
        [[20, 1],
         [3, 14]], # Pistol
        [[25, 4],
         [5, 11]], # Hammer
        [[25, 1],
         [5, 14]] # Scissor
    ]
    models_cm = [ [[14,5],[44,101]], [[6,11],[9,138]]]#, [[25,11],[69,59]]] for audio.
    # Micro-average: pool raw counts across all classes into one matrix.
    TP = 0
    TN = 0
    FP = 0
    FN = 0
    for i in range(len(models_cm)):
        TP += models_cm[i][0][0]
        TN += models_cm[i][1][1]
        FP += models_cm[i][0][1]
        FN += models_cm[i][1][0]
    total_cm = [[TP, FP], [FN, TN]]
    # Uncomment this to plot each class separated.
    for i in range(len(models_cm)):
        plot_figure(models_cm[i], model_names[i])
    plot_figure(total_cm, "All models")
    # Per-class metric report, then the pooled report.
    for i in range(len(models_cm)):
        print("===============" + model_names[i] + "===============")
        print_cal(models_cm[i])
    print("===============" + "All Classes" + "===============")
    print_cal(total_cm)
    # Collect per-class percentages (truncated to ints) for the bar chart.
    data = [
        [], # Accuracy
        [], # Precision
        [], # Recall
        [] # F1-Score
    ]
    for cm in models_cm:
        acc = calculate_accuracy(cm)
        precision = calculate_precision(cm)
        recall = calculate_recall(cm)
        f1 = calculate_f1(cm)
        data[0].append(int(acc*100))
        data[1].append(int(precision*100))
        data[2].append(int(recall*100))
        data[3].append(int(f1*100))
        print(f"{cm} acc, pre, rec, f1: \n {data}")
    #plot_bar(model_names, ["Accuracy", "Precision", "Recall", "F1-Score"], data)
| true
|
69944bd66d99ffa8ceda2c0fec1e63ea1b518cd8
|
Python
|
matty-allison/Databases--EMOP
|
/log_in.py
|
UTF-8
| 3,011
| 2.78125
| 3
|
[] |
no_license
|
# Window for logging in a current student
from tkinter import *
import mysql.connector
from tkinter import messagebox
import datetime
# Main application window for the student log-in form.
box = Tk()
box.title("Log in!")
box.config(bg="green")
box.geometry("450x300")
class log:
    """Tkinter log-in form that records student sign-in time in MySQL.

    SECURITY NOTE(review): database credentials are hard-coded in logIn()
    below — move them to configuration/environment before shipping.
    """

    def __init__(self, master):
        # Name/surname prompt and entry field.
        self.name = Label(master, text="Enter your name and surname: ")
        self.name.place(x=130, y=20)
        self.name.config(bg="green")
        self.name_entry = Entry(master)
        self.name_entry.place(x=145, y=50)
        self.name_entry.config(bg="#9ccb3b")
        # 13-digit ID prompt and entry field.
        self.id_number = Label(master, text="Enter your ID number: ")
        self.id_number.place(x=155, y=100)
        self.id_number.config(bg="green")
        self.id_entry = Entry(master)
        self.id_entry.place(x=145, y=150)
        self.id_entry.config(bg="#9ccb3b")
        # Action buttons; "Log out" stays disabled until a successful log-in.
        self.logbtn = Button(master, text="log in", command=self.logIn)
        self.logbtn.place(x=190, y=200)
        self.logbtn.config(bg="#9ccb3b", borderwidth="10")
        self.backbtn = Button(master, text="Back", command=self.Back)
        self.backbtn.place(x=10, y=250)
        self.backbtn.config(bg="#9ccb3b", borderwidth="5")
        self.logout = Button(master, text="Log out", command=self.LOGout)
        self.logout.place(x=350, y=250)
        self.logout.config(bg="green", borderwidth="5", state="disabled")

    def Back(self):
        """Close this window and open the start screen (import runs it)."""
        box.destroy()
        import First

    def LOGout(self):
        """Close this window and open the log-out screen (import runs it)."""
        box.destroy()
        import logoutstudents

    # function used to search for the persons log in info, this function also keeps track of date and time of log in
    def logIn(self):
        """Validate the form, then stamp sign_in time for the ID in MySQL.

        Uses a parameterized UPDATE (no SQL injection via the ID field).
        """
        try:
            if self.name_entry.get() == "":
                messagebox.showerror('ERROR', "Invalid, Please enter the required information.")
            elif self.id_entry.get() == "":
                messagebox.showerror('ERROR', "Invalid, Please enter the required information.")
            elif len(self.id_entry.get()) != 13:
                messagebox.showerror('ERROR', "Invalid, Please enter a valid ID number.")
            else:
                now = datetime.datetime.now()
                signin_time = now.strftime("%y-%m-%d %H:%M:%S")
                db = mysql.connector.connect(
                    host='127.0.0.1',
                    user='lifechoices',
                    password='@Lifechoices1234',
                    auth_plugin='mysql_native_password',
                    database='sign_up_and_log_in'
                )
                my_cursor = db.cursor()
                code = "UPDATE mytable_students SET sign_in=%s WHERE id_number=%s"
                values = (signin_time, self.id_entry.get())
                my_cursor.execute(code, values)
                db.commit()
                self.logout.config(state="normal")
                messagebox.showinfo('WELCOME', "Welcome back student")
        except ValueError:
            # NOTE(review): comparing a string to the type ``int`` is always
            # True, so this guard is meaningless; nothing in the try block
            # obviously raises ValueError either. Likely dead/buggy code.
            if self.id_entry.get() != int:
                messagebox.showerror('ERROR', "Invalid ID")
# Build the form and enter the Tk event loop (blocks until window closes).
x = log(box)
box.mainloop()
| true
|
fd07f429d827a326a181c9679f4b029cc1419873
|
Python
|
joepalermo/tensorflow-ffn
|
/mnist_loader.py
|
UTF-8
| 1,727
| 3.4375
| 3
|
[] |
no_license
|
"""
Original by Michael Nielsen.
Modified by Joseph Palermo.
A library to load the MNIST image data.
"""
import cPickle
import gzip
import numpy as np
from DataSet import DataSet
def load_data():
    """Return the MNIST data as a (training, validation, test) tuple.

    Python 2 code (cPickle). The pickle itself stores the three splits as
    one tuple, so it can be returned directly.
    """
    with gzip.open('data/mnist_data/mnist.pkl.gz', 'rb') as f:
        return cPickle.load(f)
def load_data_wrapper():
    """Convert MNIST data into DataSet objects with one-hot labels.

    Returns (training_data, validation_data, test_data).
    """
    # Each raw split is a (features, integer_labels) pair.
    raw_splits = load_data()
    wrapped = [
        DataSet(features, convert_labels_to_one_hot(labels))
        for features, labels in raw_splits
    ]
    training_data, validation_data, test_data = wrapped
    return training_data, validation_data, test_data
# return a one-hot 1-d np array corresponding to the integer argument
def vectorized_result(j):
    """Return a length-10 float vector that is 1.0 at index *j*, 0 elsewhere."""
    one_hot = np.zeros(10)
    one_hot[j] = 1.0
    return one_hot
# convert a 1-d numpy array into a 2-d array with one-hot entries
def convert_labels_to_one_hot(labels, max_label=10):
    """Return a (len(labels), max_label) float array of one-hot rows.

    Fixes vs original: ``max_label`` is now honoured — the old code always
    delegated to a fixed length-10 helper, which would fail for any other
    ``max_label``. The per-row Python loop is replaced by a single vectorized
    fancy-indexing assignment, and list inputs are accepted via asarray.
    """
    labels = np.asarray(labels)
    one_hot_labels = np.zeros((labels.shape[0], max_label))
    one_hot_labels[np.arange(labels.shape[0]), labels] = 1.0
    return one_hot_labels
| true
|
465ca56f4534cc6fc610f2585494ff6b450616d2
|
Python
|
ivaltukhova/medanalitica1
|
/mednet.py
|
UTF-8
| 981
| 2.65625
| 3
|
[] |
no_license
|
from neo4j import GraphDatabase
import logging
from neo4j.exceptions import ServiceUnavailable
class Mednet:
    """Minimal Neo4j access layer for the medical-organisation (MO) graph."""

    def __init__(self, uri, user, password):
        # One driver per instance; sessions are opened per query.
        self.driver = GraphDatabase.driver(uri, auth=(user, password))

    def close(self):
        """Release the driver and its connection pool."""
        self.driver.close()

    def find_all(self):
        """Return {name: title-cased name} for every MO node.

        Bug fix: this previously called ``self._find_all``, a method that
        does not exist — the transaction function is ``_find_all_mo``.
        """
        with self.driver.session() as session:
            result = session.read_transaction(self._find_all_mo)
            return result

    @staticmethod
    def _find_all_mo(tx):
        query = (
            '''MATCH (n:MO)
            RETURN n.`Наименование` AS name
            ORDER BY n.`Наименование`'''
        )
        result = tx.run(query)
        try:
            # Bug fix: the Cypher aliases the property ``AS name``, so each
            # record is keyed "name" — row["Наименование"] would raise.
            return {row["name"]: row["name"].title() for row in result}
        except ServiceUnavailable as exception:
            logging.error("{query} raised an error: \n {exception}".format(query=query, exception=exception))
            raise
| true
|
608099e27582924a4e28162120b68632ed7ed3be
|
Python
|
iltison/homework
|
/TSP/tsp.py
|
UTF-8
| 13,917
| 3.703125
| 4
|
[] |
no_license
|
"""
Для использования алгоритма с произволными значениями расстояний.
Требуется передать в класс tsp переменную matrix (матрицу расстояний).
По диагонали могут находиться любые значения, алгоритм изменит на бесконечность .
Пример:
matrix = [[999,0,1],
[0,999,1],
[1,1,999]]
test = tsp()
test.start(matrix)
Для проверки алгоритма присутсвует тестируемая оболочка.
За основу взят материал из источника: https://habr.com/ru/post/246437/
Для проверки маршрута использовал онлайн калькулятор: http://habr.x1site.ru
"""
import copy
class tsp():
    """Branch-and-bound travelling-salesman solver (Little's algorithm).

    Call ``start(matrix)`` with a square distance matrix; the main diagonal
    may hold anything (it is replaced with infinity). Prints the total cost
    and the sorted route. Comments translated from the original Russian.
    """

    def __init__(self):
        """Initialize instance state."""
        self.matrix = []        # working distance matrix (diagonal = inf)
        self.full_way = []      # edges collected while walking the search tree
        self.way_price = 0      # cost of the best route found

    def change_diagonal(self, matrix):
        """Return a copy of *matrix* with the main diagonal set to infinity."""
        n = len(matrix)
        m = len(matrix[0])
        return [[float('inf') if i == j else matrix[i][j] for j in range(m)] for i in range(n)]

    def find_min(self, list_number):
        """Return the minimum of *list_number*, skipping '_' placeholders.

        Returns 0 when every entry is '_' or infinity (fully reduced line).
        """
        min_el = float('inf')
        for i in range(len(list_number)):
            if list_number[i] == '_': continue
            if list_number[i] < min_el:
                min_el = list_number[i]
        if min_el == float('inf'): min_el = 0
        return min_el

    def reduction(self, matrix_list, element):
        """Subtract *element* from every finite, non-'_' entry (in place)."""
        for i in range(len(matrix_list)):
            if matrix_list[i] == '_': continue
            if matrix_list[i] == float('inf'): continue
            matrix_list[i] -= element
        return matrix_list

    def find_low_price_way(self, matrix):
        """Reduce rows and columns; return (sum of constants, reduced matrix).

        The sum of the row/column reduction constants is the lower bound on
        the cost of any route through this subproblem.
        """
        subtract_Sum = 0
        # Row reduction constants.
        for row in range(len(matrix)):
            min_element = self.find_min(matrix[row])
            subtract_Sum += min_element
            matrix[row] = self.reduction(matrix[row], min_element)
        matrix_2 = []
        # Column reduction constants.
        for i in range(len(matrix)):
            column = [x[i] for x in matrix]
            min_element = self.find_min(column)
            subtract_Sum += min_element
            matrix_2.append(self.reduction(column, min_element))
        # Transpose back (matrix_2 holds columns as rows).
        matrix = [list(i) for i in zip(*matrix_2)]
        return subtract_Sum, matrix

    def find_sum_zero_ways(self, matrix):
        """Return {cell: penalty} for every zero cell.

        The penalty for not using a zero edge is the sum of the next-smallest
        entries in its row and column.
        """
        # Locate the zero entries.
        zero_list = []
        for i in range(len(matrix)):
            for j in range(len(matrix[0])):
                if matrix[i][j] == 0:
                    zero_list.append((i, j))
        ways = {}
        # Compute each zero's penalty with the zero temporarily masked.
        for zero in zero_list:
            orig = matrix[zero[0]][zero[1]]
            matrix[zero[0]][zero[1]] = float('inf')
            row = matrix[zero[0]]
            column = [row[zero[1]] for row in matrix]
            ways[zero] = self.find_min(row) + self.find_min(column)
            matrix[zero[0]][zero[1]] = orig
        return ways

    def find_max_way(self, matrix):
        """Return the zero cell with the largest penalty, or None if no zeros."""
        zero_list = self.find_sum_zero_ways(matrix)
        if len(zero_list) == 0:
            max_way = None
        else:
            max_way = max(zero_list, key=zero_list.get)
        return max_way

    def delete_tree(self, matrix, max_way):
        """Mark the used route's row and column with '_' (edge committed)."""
        for j in range(len(matrix[max_way[0]])):
            matrix[max_way[0]][j] = '_'
        for j in range(len(self.matrix[max_way[1]])):
            matrix[j][max_way[1]] = '_'
        return matrix

    def Reverse(self, tuples):
        """Return the tuple reversed (used to forbid the back edge)."""
        new_tup = tuples[::-1]
        return new_tup

    def change_zero(self, matrix, way):
        """Set the visited cell *way* to infinity (unless already '_')."""
        if matrix[way[0]][way[1]] == '_':
            return matrix
        else:
            matrix[way[0]][way[1]] = float('inf')
            return matrix

    def find_min_tree(self, matrix):
        """Return the index of the open tree node with the smallest bound.

        Each node is [bound, matrix, open?, parent_idx, edge, include?].
        """
        min_el = float('inf')
        idx = None
        for i in matrix:
            if i[2] == True:
                if i[0] < min_el:
                    min_el = i[0]
                    idx = matrix.index(i)
        return idx

    def find_ways(self, matrix, idx = -2):
        """Walk the finished tree upwards collecting included edges.

        idx defaults to -2: the last "include the edge" node appended.
        """
        way = matrix[idx][4]
        if (way == None):
            pass  # reached the root: recursion exit
        else:
            if matrix[idx][5]:
                self.full_way.append(way)
            idx = matrix[idx][3]
            self.find_ways(matrix, idx)

    def sort_way(self, ways):
        """Chain the collected edges into route order and return them."""
        sorted_ways = []
        second_element = ways[0][1]
        for i in range(len(ways)):
            for j in ways:
                if second_element == j[0]:
                    sorted_ways.append(j)
                    second_element = j[1]
                    break
        return sorted_ways

    def check_inf(self, matrix):
        """Prevent premature subtours: if some row (or column) has no infinite
        entry left, mark the closing cell infinite.

        Assumes the sorted main diagonal carries the infinities; column order
        is irrelevant.
        """
        not_inf = []
        # Rows with neither an infinity nor full '_' masking.
        for i in range(len(matrix)):
            count = 0
            count_null = 0
            for j in range(len(matrix[0])):
                if matrix[i][j] == '_': count_null += 1
                if matrix[i][j] == float('inf'):
                    count += 1
            if count == 0 and count_null != len(matrix):
                not_inf.append(i)
        # Same scan over columns.
        for i in range(len(matrix)):
            count = 0
            count_null = 0
            for j in range(len(matrix[0])):
                if matrix[j][i] == '_': count_null += 1
                if matrix[j][i] == float('inf'):
                    count += 1
            if count == 0 and count_null != len(matrix):
                not_inf.append(i)
        if len(not_inf) != 0:
            matrix = self.change_zero(matrix, tuple(not_inf))
        return matrix

    def find_last_way(self, matrix):
        """Append the final (infinity-marked) edge of the route to full_way."""
        for i in range(len(matrix)):
            for j in range(len(matrix)):
                if matrix[i][j] == float('inf'):
                    self.full_way.append(tuple([i, j]))
                    break

    def start(self, matrix):
        """Run branch-and-bound on *matrix*; print total cost and the route."""
        # Replace the main diagonal with infinities.
        self.matrix = self.change_diagonal(matrix)
        # Search-tree node list: [bound, matrix, open?, parent, edge, include?].
        all_matrix = []
        # Copy for computing the first reduction and the root lower bound.
        matrix_find_low = copy.deepcopy(self.matrix)
        # Root lower bound.
        coef, matrix_find_low = self.find_low_price_way(matrix_find_low)
        # Record the root node.
        all_matrix.append([coef, matrix_find_low, True, 0, None, False])
        # Grow the tree until no branching edge remains.
        while True:
            # Index of the open node with the smallest bound.
            idx = self.find_min_tree(all_matrix)
            # Its lower-bound coefficient.
            coef_bot = all_matrix[idx][0]
            # Copies for the "with edge" and "without edge" children.
            matrix_copy1 = copy.deepcopy(all_matrix[idx][1])
            matrix_copy2 = copy.deepcopy(all_matrix[idx][1])
            # Edge with the maximum exclusion penalty.
            max_way = self.find_max_way(all_matrix[idx][1])
            if max_way == None:
                break
            # Child that EXCLUDES the edge.
            matrix_copy1 = self.change_zero(matrix_copy1, max_way)
            coef1, matrix_copy1 = self.find_low_price_way(matrix_copy1)
            # Child that INCLUDES the edge (forbid the reverse, drop row/col).
            matrix_copy2 = self.change_zero(matrix_copy2, self.Reverse(max_way))
            matrix_copy2 = self.delete_tree(matrix_copy2, max_way)
            matrix_copy2 = self.check_inf(matrix_copy2)
            coef2, matrix_copy2 = self.find_low_price_way(matrix_copy2)
            # Append both children to the node list.
            all_matrix.append([coef_bot + coef2, matrix_copy2, True, idx, max_way, True])
            all_matrix.append([coef_bot + coef1, matrix_copy1, True, idx, max_way, False])
            # Mark the expanded node as closed.
            all_matrix[idx][2] = False
        # Walk the tree backwards to recover the route edges.
        self.find_ways(all_matrix)
        self.find_last_way(all_matrix[-2][1])
        sorted_way = self.sort_way(self.full_way)
        # Total route cost (bound of the last include-node).
        self.way_price = all_matrix[-2][0]
        print('Сумма пути = {}, путь = {}'.format(self.way_price, sorted_way))
| true
|
2b44aa756788910d126cf57e5eab5f21b61260bb
|
Python
|
geonsoo/HCDE310
|
/Homeworks/hw8/postapp/backup.py
|
UTF-8
| 6,245
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import jinja2
import urllib, urllib2, webbrowser, json
import os
import logging
# Jinja2 environment rooted at this file's directory; templates are
# auto-escaped (HTML-safe by default) via the autoescape extension.
JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
                                       extensions=['jinja2.ext.autoescape'],
                                       autoescape=True)
def pretty(obj):
    """Return *obj* serialized as an indented, key-sorted JSON string."""
    return json.dumps(obj, indent=2, sort_keys=True)
def safeGet(url):
try:
return urllib2.urlopen(url)
except urllib2.HTTPError, e:
print "The server couldn't fulfill the request."
print "Error code: ", e.code
except urllib2.URLError, e:
print "We failed to reach a server"
print "Reason: ", e.reason
return None
def flickrREST(baseurl = 'https://api.flickr.com/services/rest/',
method = 'flickr.photos.search',
api_key = '939bd2892135d413752e51d7e320b081',
format = 'json',
params={},
printurl = False
):
params['method'] = method
params['api_key'] = api_key
params['format'] = format
params['sort'] = 'interestingness-desc'
if format == "json": params["nojsoncallback"]=True
url = baseurl + "?" + urllib.urlencode(params)
if printurl:
print url
else:
return safeGet(url)
def getPhotoIDs(tags="Seattle", n=100):
    """Search Flickr for *tags* and return up to *n* photo id strings, or None."""
    resp = flickrREST(params={"tags": tags, "per_page": n})
    if resp is not None:
        photosdict = json.loads(resp.read())['photos']
        if photosdict is not None:
            # 'in' replaces dict.has_key(), which is deprecated and was
            # removed in Python 3.
            if 'photo' in photosdict and len(photosdict['photo']) > 0:
                return [photo['id'] for photo in photosdict['photo']]
    return None
def getPhotoInfo(photoID):
    """Fetch the full Flickr metadata dict for one photo id, or None on failure."""
    resp = flickrREST(method="flickr.photos.getInfo", params={"photo_id": photoID})
    if resp is None:
        return None
    return json.loads(resp.read())['photo']
class Photo():
    """Lightweight wrapper around a Flickr photo-info dict (flickr.photos.getInfo)."""

    def __init__(self, pd):
        owner = pd['owner']
        self.title = pd['title']['_content'].encode('utf-8')
        self.author = owner['username'].encode('utf-8')
        self.userid = owner['nsid']
        self.tags = [tag["_content"] for tag in pd['tags']['tag']]
        self.numViews = int(pd['views'])
        self.commentcount = int(pd['comments']['_content'])
        self.url = pd['urls']['url'][0]['_content']
        self.thumbnailURL = self.makePhotoURL(pd)

    def makePhotoURL(self, pd, size="q"):
        ## Build a static-image URL, following the documentation at
        ## https://www.flickr.com/services/api/misc.urls.html
        pieces = (pd['farm'], pd['server'], pd['id'], pd['secret'], size)
        return "https://farm%s.staticflickr.com/%s/%s_%s_%s.jpg" % pieces

    def __str__(self):
        stats = (self.title, self.author, len(self.tags), self.numViews, self.commentcount)
        return "~~~ %s ~~~\nauthor: %s\nnumber of tags: %d\nviews: %d\ncomments: %d" % stats
class MainHandler(webapp2.RequestHandler):
    """Default handler: renders the greeting-input form."""

    def get(self):
        # print statements don't show up well in the dev server; use logging
        logging.info("In MainHandler")
        context = {}
        page = JINJA_ENVIRONMENT.get_template('greetinput.html')
        self.response.write(page.render(context))
'''
class HelloHandler(webapp2.RequestHandler):
def get(self):
vals = {}
vals['page_title']="Hello page"
logging.info(type(self))
req = self.request
logging.info(type(req))
vals['url']= req.url
## for url paths that look like /hello.html?n=4&name=you
n = int(req.get('n', 1))
name = req.get('name', 'world')
vals['greeting']="Hello " + name
vals['counter_list']= range(n)
template = JINJA_ENVIRONMENT.get_template('hello.html')
self.response.write(template.render(vals))
class GreetHandler(webapp2.RequestHandler):
def get(self):
vals = {}
vals['page_title']="Greeting form"
template = JINJA_ENVIRONMENT.get_template('greetform.html')
self.response.write(template.render(vals))
def greet_person(name, t):
if t == "birthday":
return "Happy Birthday this month, %s!" % (name)
else:
return "Hello %s" % (name)
'''
class GreetResponseHandler(webapp2.RequestHandler):
    """Handles the POST from the greeting form and renders the response page.

    The original class body contained the statement ``a = flic``, which
    referenced an undefined name; class bodies execute at import time, so the
    whole module crashed with NameError before serving a single request.
    That line is removed here.
    """

    def post(self):
        vals = {}
        vals['page_title'] = "Greeting Page Response"
        vals['photolist'] = []
        name = self.request.get('username')
        go = self.request.get('gobtn')
        logging.info(name)
        logging.info(go)
        if name:
            # if form filled in, greet them using this data
            template = JINJA_ENVIRONMENT.get_template('response.html')
            self.response.write(template.render(vals))
            logging.info('name= ' + name)
        # else-branch (re-showing the form with a correction prompt) was
        # commented out upstream and is intentionally not restored here
# for all URLs except alt.html, use MainHandler
# WSGI routing table: POSTs to /gresponse go to GreetResponseHandler; every
# other path falls through to MainHandler (routes are matched in order).
application = webapp2.WSGIApplication([ \
#    ('/greetings', GreetHandler),
    ('/gresponse', GreetResponseHandler),
#    ('/hello.html', HelloHandler),
    ('/.*', MainHandler)
    ],
    debug=True)  # debug=True: show stack traces in the browser
| true
|
e21332d4bae409ea25c3dbbf02db39ad015d661e
|
Python
|
AhmedAliGhanem/PythonForNetowrk-Cisco
|
/15 Error Handling exceptions2.py
|
UTF-8
| 493
| 3.015625
| 3
|
[] |
no_license
|
def main():
    """Print each line of xlines.txt, reporting a missing file instead of crashing."""
    try:
        lines = readfile('xlines.txt')
    except FileNotFoundError as e:
        print("cannot read file:", e)
    else:
        for line in lines:
            print(line.strip())
def readfile(filename):
    """Return all lines of *filename* (newlines included), closing the file.

    Raises FileNotFoundError when the file does not exist.  The original
    opened the file and never closed it (leaked until garbage collection);
    the with-block guarantees closure even if reading fails.
    """
    with open(filename) as fh:
        return fh.readlines()
if __name__ == "__main__": main()  # run only when executed directly, not on import
#without rise exceptions
#def main():
# for line in readfile('xlines.txt'): print(line.strip())
#def readfile(filename):
# fh = open(filename)
# return fh.readlines()
#if __name__ == "__main__": main()
| true
|
77073678b5f36938d5af9b205d2b09be772251f5
|
Python
|
ramsayleung/mp-just-for-fun
|
/lyrics.py
|
UTF-8
| 3,045
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from lxml import html
import weixin_logger
class LyricsSearcher(object):
    """Looks up a song on xiami.com from a lyrics/keyword query.

    Workflow: download the search-result page, parse the first hit's song
    name / artist / lyrics URL, then download and parse the lyrics page.
    Fix vs. the original: both ``except IndexError, e`` clauses used the
    legacy Python 2 comma syntax (invalid in Python 3); they now use the
    PEP 3110 ``as`` form, which works on Python 2.6+ as well.
    """

    def __init__(self):
        logger = weixin_logger.WeixinLogger(__name__)
        self.logger = logger.get_logger()
        self.search_url = 'http://www.xiami.com/search?key='
        # Browser-like headers so the site does not reject the scraper.
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;",
            "Accept-Encoding": "gzip",
            "Accept-Language": "zh-CN,zh;q=0.8",
            "Referer": "http://www.example.com/",
            "User-Agent":
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 \
            (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36"
        }

    def html_downloader(self, url):
        """Fetch *url* and return the response body as text."""
        content = requests.get(url, headers=self.headers).text
        return content

    def song_parser(self, content):
        """Parse a search-result page; return (song_name, singer_name, lyrics_url).

        All three are None when the page has no usable hit.
        """
        text = html.fromstring(content)
        tag_index = 1
        try:
            song_name = text.xpath(
                "//div[@id='wrapper']/div[2]/div[1]/div/div[2]/div/div[1]/table\
                /tbody[1]/tr/td[2]/a[1]")[0].text
            if song_name is None:
                # The title sometimes sits in the second <a>; retry there.
                song_name = text.xpath(
                    "//div[@id='wrapper']/div[2]/div[1]/div/div[2]/div/div[1]\
                    /table/tbody[1]/tr/td[2]/a[2]")[0].text
                tag_index = 2
            else:
                pass
            singer_name = text.xpath(
                "//div[@class='result_main']/table/tbody[1]/tr/td[3]/a")[
                    0].text
            lyrics_url = text.xpath(
                "//div[@class='result_main']/table/tbody[1]/tr/td[2]/a[{}]\
                /@href".format(tag_index))[0]
        except IndexError as e:
            self.logger.warn("找不到该歌词对应的歌曲")
            self.logger.error(e)
            song_name = None
            singer_name = None
            lyrics_url = None
        return song_name, singer_name, lyrics_url

    def lyrics_parser(self, content):
        """Extract the lyrics text from a song page; None when absent."""
        try:
            text = html.fromstring(content)
            lyrics = text.xpath("//div[@id='lrc']/div[1]")[0].text_content()
        except IndexError as e:
            self.logger.warn("找不到该歌词对应的歌曲")
            self.logger.error(e)
            lyrics = None
        return lyrics

    def search_main(self, key):
        """Run a full search for *key* and return a UTF-8 encoded reply string."""
        url = self.search_url + key
        content = self.html_downloader(url)
        song_name, singer_name, lyrics_url = self.song_parser(content)
        if song_name is None and singer_name is None:
            reply = u"OMG,你好有品味耶,Sam没办法找到对应的歌曲,要不你再确定一下你的歌词?".encode('utf-8')
        else:
            lyrics = self.lyrics_parser(self.html_downloader(lyrics_url))
            reply = u"艺人:{0} 歌名:{1}\n完整歌词:{2}".format(
                singer_name.strip(), song_name, lyrics).encode('utf-8')
        return reply
| true
|
634926732636e14be5268961fd96ffbbf005c94f
|
Python
|
bo-yang/robotics_exercises
|
/diff_drive.py
|
UTF-8
| 1,165
| 3.359375
| 3
|
[] |
no_license
|
import numpy as np
def diffdrive(x, y, theta, v_l, v_r, t, l):
    """Forward kinematics for a differential drive robot.

    (x, y, theta) is the current pose; v_l and v_r are the left/right wheel
    speeds; t is the driving time; l is the distance between the wheels.
    Returns the new pose (x_n, y_n, theta_n).
    """
    if v_l == v_r:
        # Equal wheel speeds: straight-line motion, heading unchanged.
        distance = v_l * t
        return x + distance * np.cos(theta), y + distance * np.sin(theta), theta
    # Otherwise the robot rotates about the Instantaneous Center of Curvature.
    R = l / 2 * (v_l + v_r) / (v_r - v_l)
    ICC_x = x - R * np.sin(theta)
    ICC_y = y + R * np.cos(theta)
    omega = (v_r - v_l) / l
    angle = omega * t
    c, s = np.cos(angle), np.sin(angle)
    # Rotate the (pose - ICC) offset by angle, then translate back; the third
    # row leaves theta untouched so adding `angle` afterwards updates heading.
    rotation = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
    offset = np.array([x - ICC_x, y - ICC_y, theta])
    x_n, y_n, theta_n = np.matmul(rotation, offset) + np.array([ICC_x, ICC_y, angle])
    return x_n, y_n, theta_n
# Exercise: chain three motion segments and track the resulting pose.
L = 0.5  # wheel separation
x = 1.5
y = 2.0
theta = np.pi/2  # initially facing +y
(x_1, y_1, theta_1) = diffdrive(x, y, theta, 0.3, 0.3, 3, L)        # straight ahead
(x_2, y_2, theta_2) = diffdrive(x_1, y_1, theta_1, 0.1, -0.1, 1, L)  # turn in place
(x_3, y_3, theta_3) = diffdrive(x_2, y_2, theta_2, 0.2, 0, 2, L)     # arc (right wheel stopped)
| true
|
80b0ce279718045bda81edbaabe6f23f7cbafbb8
|
Python
|
nemborkar/coding-challenges-to-gitgud
|
/Python/min-max.py
|
UTF-8
| 542
| 3.53125
| 4
|
[] |
no_license
|
# https://www.hackerrank.com/challenges/np-min-and-max/problem
# Read an n x m integer grid from stdin, take the min of each row,
# then print the max of those row minima.
import numpy

n, m = (int(token) for token in input().split())
grid = numpy.zeros((n, m))  # float matrix; missing trailing values stay 0
for row in range(n):
    for col, token in enumerate(input().split()):
        grid[row, col] = int(token)
row_minima = numpy.min(grid, axis=1)
print(int(numpy.max(row_minima)))
| true
|
b111d74ff5582a475e28c2ceea181f8684cd1351
|
Python
|
Wenzurk-Ma/Python-Crash-Course
|
/Chapter 07/counting.py
|
UTF-8
| 438
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Title : TODO
# Objective : TODO
# Created by: Wenzurk
# Created on: 2018/2/17
# current_number = 0
# while current_number <= 5:
# print(current_number)
# current_number += 1
# while current_number < 10:
# current_number += 1
# if current_number % 2 == 0:
# continue
#
# print(current_number)
# Count from 1 to 5 inclusive; after the loop x holds 6.
x = 1
while x <= 5:
    print(x)
    x += 1
#这个循环将没完没了!
# while x <= 5:
# print(x)
| true
|
2a6a51d9392fe8a2940d33ee2b77f55eecc6e9d8
|
Python
|
obskyr/poketrainer-trainer
|
/pokehelp.py
|
UTF-8
| 14,933
| 3.265625
| 3
|
[] |
no_license
|
# -*- coding: cp1252 -*-
###############################################
## ##
## TABLE OF CONTENTS: ##
## 0 - Type assets ##
## 0.0 - List of types ##
## 0.1 - Dictionary of type advantages ##
## 0.2 - Dictionary of type defenses ##
## 1 - Useful fuctions ##
## 1.0 - List + dictionary returns ##
## 1.1 - Print functions ##
## 1.2 - Return functions ##
## ##
###############################################
##0 TYPE ASSETS
##0.0 LIST OF TYPES
# typel: the 18 Pokemon types, canonically capitalized; typellc: lowercase copies.
typel = ['Normal', 'Fire', 'Water', 'Electric', 'Grass', 'Ice', 'Fighting', 'Poison', 'Ground', 'Flying', 'Psychic', 'Bug', 'Rock', 'Ghost', 'Dragon', 'Dark', 'Steel', 'Fairy']
typellc = [x.lower() for x in typel]
##0.1 DICTIONARY OF TYPE ADVANTAGES
# typedict maps an ATTACKING type to {damage multiplier: [defending types]}.
# Slices of typel (e.g. typel[0:12]) are shorthand for contiguous runs of types.
typedict = {
    'Normal': { ## NorTD
        2.0: [],
        1.0: typel[0:12] + typel[14:16] + ['Fairy'],
        0.5: ['Rock', 'Steel'],
        0.0: ['Ghost']
    },
    'Fire': { ## FirTD
        2.0: ['Grass', 'Ice', 'Bug', 'Steel'],
        1.0: ['Normal', 'Electric'] + typel[6:11] + ['Ghost', 'Dark', 'Fairy'],
        0.5: ['Fire', 'Water', 'Rock', 'Dragon'],
        0.0: []
    },
    'Water': { ## WatTD
        2.0: ['Fire', 'Ground', 'Rock'],
        1.0: ['Normal', 'Electric', 'Ice', 'Fighting', 'Poison', 'Flying', 'Psychic', 'Bug', 'Ghost', 'Dark', 'Steel', 'Fairy'],
        0.5: ['Water', 'Grass', 'Dragon'],
        0.0: []
    },
    'Electric': { ## EleTD
        2.0: ['Water', 'Flying'],
        1.0: ['Normal', 'Fire', 'Ice', 'Fighting', 'Poison', 'Psychic','Bug', 'Rock', 'Ghost', 'Dark', 'Steel', 'Fairy'],
        0.5: ['Electric', 'Grass', 'Dragon'],
        0.0: ['Ground']
    },
    'Grass': { ## GraTD
        2.0: ['Water', 'Ground','Rock'],
        1.0: ['Normal', 'Electric', 'Ice', 'Fighting', 'Psychic', 'Ghost', 'Dark', 'Fairy'],
        0.5: ['Fire', 'Grass', 'Poison', 'Flying', 'Bug', 'Dragon', 'Steel'],
        0.0: []
    },
    'Ice': { ## IceTD
        2.0: ['Grass', 'Ground', 'Flying', 'Dragon'],
        1.0: ['Normal', 'Electric', 'Fighting', 'Poison', 'Psychic', 'Bug', 'Rock', 'Ghost', 'Dark', 'Fairy'],
        0.5: ['Fire', 'Water', 'Ice', 'Steel'],
        0.0: []
    },
    'Fighting': { ## FigTD
        2.0: ['Normal', 'Ice', 'Rock', 'Dark', 'Steel'],
        1.0: ['Fire', 'Water', 'Electric', 'Grass', 'Fighting', 'Ground', 'Dragon'],
        0.5: ['Poison', 'Flying', 'Psychic', 'Bug', 'Fairy'],
        0.0: ['Ghost'],
    },
    'Poison': { ## PoiTD
        2.0: ['Grass', 'Fairy'],
        1.0: ['Normal', 'Fire', 'Water', 'Electric', 'Ice', 'Fighting', 'Flying', 'Psychic', 'Bug', 'Dragon', 'Dark'],
        0.5: ['Poison', 'Ground', 'Rock', 'Ghost'],
        0.0: ['Steel'],
    },
    'Ground': { ## GroTD
        2.0: ['Fire', 'Electric', 'Poison', 'Rock', 'Steel'],
        1.0: ['Normal', 'Water', 'Ice', 'Fighting', 'Ground', 'Psychic', 'Ghost', 'Dragon', 'Dark'],
        0.5: ['Grass', 'Bug'],
        0.0: ['Flying'],
    },
    'Flying': { ## FlyTD
        2.0: ['Grass', 'Fighting'],
        1.0: ['Normal', 'Fire', 'Water', 'Ice', 'Poison', 'Ground', 'Flying', 'Psychic', 'Ghost', 'Dragon', 'Dark', 'Fairy'],
        0.5: ['Electric', 'Rock', 'Steel'],
        0.0: [],
    },
    'Psychic': { ## PsyTD
        2.0: ['Fighting', 'Poison'],
        1.0: typel[0:6] + ['Ground', 'Flying', 'Bug', 'Rock', 'Ghost', 'Dragon','Fairy'],
        0.5: ['Psychic', 'Steel'],
        0.0: ['Dark'],
    },
    'Bug': { ## BugTD
        2.0: ['Grass', 'Psychic', 'Dark'],
        1.0: ['Normal', 'Water', 'Electric', 'Ice', 'Ground', 'Bug', 'Rock', 'Dragon'],
        0.5: ['Fire', 'Fighting', 'Poison', 'Flying', 'Ghost', 'Steel', 'Fairy'],
        0.0: [],
    },
    'Rock': { ## RocTD
        2.0: ['Fire', 'Ice', 'Flying', 'Bug'],
        1.0: ['Normal', 'Water', 'Electric', 'Grass', 'Poison', 'Psychic', 'Rock', 'Ghost', 'Dragon', 'Dark', 'Fairy'],
        0.5: ['Fighting', 'Ground', 'Steel'],
        0.0: [],
    },
    'Ghost': { ## GhoTD
        2.0: ['Psychic', 'Ghost'],
        1.0: typel[1:10] + ['Bug', 'Rock', 'Dragon', 'Steel', 'Fairy'],
        0.5: ['Dark'],
        0.0: ['Normal'],
    },
    'Dragon': { ## DraTD
        2.0: ['Dragon'],
        1.0: typel[0:14] + ['Dark'],
        0.5: ['Steel'],
        0.0: ['Fairy'],
    },
    'Dark': { ## DarTD
        2.0: ['Psychic', 'Ghost'],
        1.0: typel[0:6] + ['Poison', 'Ground', 'Flying', 'Bug', 'Rock', 'Dragon', 'Steel'],
        0.5: ['Fighting', 'Dark', 'Fairy'],
        0.0: [],
    },
    'Steel': { ## SteTD
        2.0: ['Ice', 'Rock', 'Fairy'],
        1.0: ['Normal', 'Grass'] + typel [6:12] + ['Ghost', 'Dragon', 'Dark'],
        0.5: ['Fire', 'Water', 'Electric', 'Steel'],
        0.0: [],
    },
    'Fairy': { ## FaiTD
        2.0: ['Fighting', 'Dragon', 'Dark'],
        1.0: ['Normal', 'Water', 'Electric', 'Grass', 'Ice', 'Ground', 'Flying', 'Psychic', 'Bug', 'Rock', 'Ghost', 'Fairy'],
        0.5: ['Fire', 'Poison', 'Steel'],
        0.0: [],
    },
}
##0.2 DICTIONARY OF TYPE DEFENSES
# typedefdict inverts typedict: DEFENDING type -> {multiplier: [attacking types]}.
typedefdict = {
    defender: {
        mult: [attacker for attacker, chart in typedict.items() if defender in chart[mult]]
        for mult in (2.0, 1.0, 0.5, 0.0)
    }
    for defender in typel
}
##1 USEFUL FUNCTIONS
##1.0 LIST + DICTIONARY RETURNS
## Oh my god, Samuel. Did you seriously forget you wrote these functions? These would've been useful AGES ago.
def typeadv(typec):
    """Returns 4 lists about the type typec."""
    return typed(typec, typedict)

def typedef(typec):
    """Returns 4 lists about the type typec."""
    return typed(typec, typedefdict)

def typed(typec, td=typedict):
    """Returns 4 lists about the type typec, from dictionary td."""
    entry = td[typec.title()]
    return entry[2.0], entry[1.0], entry[0.5], entry[0.0]

## The D functions mostly save some typing.
def typeadvD(typec):
    """Returns a dict about the type typec."""
    return typedD(typec, typedict)

def typedefD(typec):
    """Returns a dict about the type typec."""
    return typedD(typec, typedefdict)

def typedD(typec, td=typedict):
    """Returns a dict about the type typec, from dictionary td."""
    return td[typec.title()]
##1.1 PRINT FUNCTIONS
## These following functions are used to PRINT SUPER EFFECTIVE, PRINT EFFECTIVE,
## PRINT NOT VERY EFFECTIVE, PRINT NOT EFFECTIVE, PRINT DEFENSE SUPER EFFECTIVE,
## PRINT DEFENSE EFFECTIVE, PRINT DEFENSE NOT VERY EFFECTIVE, PRINT DEFENSE
## NOT EFFECTIVE, respectively.
def printse(typec, prefix="", suffix=""):
"""Prints super effective, with prefix and suffix."""
e = typeadv(typec)[0]
for n in e:
print str(prefix) + n + str(suffix)
def printe(typec, prefix="", suffix=""):
"""Prints effective, with prefix and suffix."""
e = typeadv(typec)[1]
for n in e:
print str(prefix) + n + str(suffix)
def printnve(typec, prefix="", suffix=""):
"""Prints not very effective, with prefix and suffix."""
e = typeadv(typec)[2]
for n in e:
print str(prefix) + n + str(suffix)
def printne(typec, prefix="", suffix=""):
"""Prints not effective, with prefix and suffix."""
e = typeadv(typec)[3]
for n in e:
print str(prefix) + n + str(suffix)
def printdse(typec, prefix="", suffix=""):
"""Prints defensive super effective, with prefix and suffix."""
e = typedef(typec)[0]
for n in e:
print str(prefix) + n + str(suffix)
def printde(typec, prefix="", suffix=""):
"""Prints defensive effective, with prefix and suffix."""
e = typedef(typec)[1]
for n in e:
print str(prefix) + n + str(suffix)
def printdnve(typec, prefix="", suffix=""):
"""Prints defensive not very effective, with prefix and suffix."""
e = typedef(typec)[2]
for n in e:
print str(prefix) + n + str(suffix)
def printdne(typec, prefix="", suffix=""):
"""Prints defensive not effective, with prefix and suffix."""
e = typedef(typec)[3]
for n in e:
print str(prefix) + n + str(suffix)
## HERE ENDS BORING PRINT FUNCTIONS
## FOLLOWING: SLIGHTLY MORE FUN PRINT FUNCTIONS
## Actually, the following functions prints the advantages and defenses of type 'typec', respectively, in a slightly non-friendly format.
def printadvs(typec):
"""Prints effects of type typec in a new-line list format."""
print "The type", typec, "is super effective (2.0x damage) against the following types:"
print
printse(typec, " ")
print
print typec, "is normally effective (1.0x damage) against:"
print
printe(typec, " ")
print
print typec, "is not very effective (0.5x damage) against:"
print
printnve(typec, " ")
print
print typec, "is not effective (0.0x damage) against:"
print
printne(typec, " ")
print
def printdefs(typec):
"""Prints effects against type typec in a new-line list format."""
print "The following types are super effective (2.0x damage) against" + typec + ":"
print
printdse(typec, " ")
print
print "The following are normally effective (1.0x damage) against" + typec + ":"
print
printde(typec, " ")
print
print "The following are not very effective (0.5x damage) against" + typec + ":"
print
printdnve(typec, " ")
print
print "The following are not effective (0.0x damage) against" + typec + ":"
print
printdne(typec, " ")
print
## printptable prints a table of advantages or defenses for a certain type.
def printptable(typec, t="adv", at=typedict, ad=typedefdict):
    """Prints a table of advantages or defenses (t) for type typec, using dicts at and ad.

    Columns are the four multipliers (2.0/1.0/0.5/0.0); type names are
    truncated to 7 characters to fit one tab stop.  NOTE: the local name
    `typedict` below shadows the module-level table on purpose.
    """
    typec = typec[0].upper() + typec[1:].lower() ## This line capitalizes the type correctly for future use with the dictionaries.
    # Pick which table to print; any unknown t falls back to advantages
    # (the first `if` is redundant with the final `else`, kept as written).
    if t == "adv":
        typedict = at
    if t == "def":
        typedict = ad
    else:
        typedict = at
    if typedict == at:
        tabletype = "advantages"
    else:
        tabletype = "defenses" ## Previous lines set the type of table to be printed - advantages or defenses.
    print "2.0x\t1.0x\t0.5x\t0.0x\t" + "<<" + typec.upper(), tabletype + ">>" ## Labels the columns.
    print "\t\t\t\t\t|"
    # Column-active flags: 1 while the column still has names left to print.
    if len(typedict[typec][2.0]) > 0:
        se = 1
    else:
        se = 0
    if len(typedict[typec][1.0]) > 0:
        e = 1
    else:
        e = 0
    if len(typedict[typec][0.5]) > 0:
        nve = 1
    else:
        nve = 0
    if len(typedict[typec][0.0]) > 0:
        ne = 1
    else:
        ne = 0 ## Previous lines set boring, uneffective ways to tell if a column is empty.
    # al counts exhausted columns this row; cur1..cur4 are per-column cursors.
    al = 0
    cur1 = 0
    cur2 = 0
    cur3 = 0
    cur4 = 0
    while al < 4: # al is a variable that stores how many columns are empty - and since there are only 3 columns, 4 is where it stops printing.
        al = 0
        if se == 1:
            try:
                print typedict[typec][2.0][cur1][0:7] + "\t",
                cur1 += 1
            except IndexError:
                se = 0
                print "\t",
                al += 1
        else:
            print "\t",
            al += 1
        if e == 1:
            try:
                print typedict[typec][1.0][cur2][0:7] + "\t",
                cur2 += 1
            except IndexError:
                e = 0
                print "\t",
                al += 1
        else:
            print "\t",
            al += 1
        if nve == 1:
            try:
                print typedict[typec][0.5][cur3][0:7] + "\t",
                cur3 += 1
            except IndexError:
                nve = 0
                print "\t",
                al += 1
        else:
            print "\t",
            al += 1
        if ne == 1:
            try:
                print typedict[typec][0.0][cur4][0:7] + "\t\t|",
                cur4 += 1
            except IndexError:
                ne = 0
                print "\t\t|",
                al += 1
        else:
            al += 1
            # All four columns exhausted: stop before printing a stray row end.
            if al == 4:
                break
            print "\t\t|",
        print ## The long part before this just prints every type in the table in a "SE\tE\t\NVE\tNE\t" format.
##1.2 RETURN FUNCTIONS
def typegen(typel=typel):
    """Generates a random type, drawn uniformly from the list *typel*."""
    # The module never imports random anywhere, so the original bare call to
    # choice() raised NameError; import locally to keep the fix contained.
    from random import choice
    return choice(typel)
def attackEffectivity(atype, type1, type2=None, STAB=None, typedict=typedict, typedefdict=typedefdict):
    """Returns the attack effectivity of atype against type1 and optionally type2, with optional STAB and dictionary choice.

    Fixes vs. the original:
      * type2 is only lowered/compared when it is actually a string; the old
        code crashed with AttributeError whenever type2 was left at its
        default of None.
      * the typedefdict parameter is now honored; the old code ignored it and
        always read the module-level table via typedefD().
    """
    effect1 = None  ## Setting variables for whole
    effect2 = None  ## dummy fun the family.
    if type2 is not None and type1.lower() == type2.lower():  ## Collapse duplicate typing.
        type2 = None
    ## If you don't want to have pokehelp.py prompt the user about STAB:
    ## Make sure your program always supplies a multiplier as STAB argument
    if STAB == None:  ## The following indent takes care of the STAB multiplier.
        STAB = raw_input("Does the attack have STAB? (Y/N) >").lower().strip()
        while True:
            if STAB == "y" or STAB == "yes":
                STAB = 1.5
                break
            elif STAB == "n" or STAB == "no":
                STAB = 1.0
                break
            else:
                STAB = raw_input("Please input Y or N. >").lower().strip()
    # NOTE(review): if atype itself is invalid no multiplier ever matches and
    # this loop never terminates -- same as the original; left unchanged.
    while effect1 == None:  ## Find the multiplier type1 takes from atype.
        try:
            defenses = typedefdict[type1.title()]
            for effect in defenses:
                for comparisontype in defenses[effect]:
                    if comparisontype.lower() == atype.lower():
                        effect1 = effect
                        break
                if effect1 != None:
                    break
        except KeyError:
            type1 = raw_input("Please input a valid first type. >")
    if type2 != None and type2 != "":  ## Same lookup for type2, if it is valid.
        try:
            defenses = typedefdict[type2.title()]
            for effect in defenses:
                for comparisontype in defenses[effect]:
                    if comparisontype.lower() == atype.lower():
                        effect2 = effect
                        break
                if effect2 != None:
                    break
        except KeyError:
            pass
    if effect2 == None:  ## For single-typed Pokemon
        return effect1 * STAB
    return effect1 * effect2 * STAB  ## For double-typed Pokemon
| true
|
8e36e398f8f14fd66108eeb649fe51b96f7b5b4a
|
Python
|
sunbaby01/nerual-network
|
/nerualnetwork.py
|
UTF-8
| 3,820
| 2.671875
| 3
|
[] |
no_license
|
import torch
import random
import numpy as np
# Global debug switch: set q to 1 to enable verbose tracing in the layers.
q = 0

def resultgenerator(x):
    """Target function: elementwise 2*i^2 - 3*i + 3 over a batch of rows."""
    return [[2 * i * i - 3 * i + 3 for i in row] for row in x]
class fullconnection:
    """Fully connected (dense) layer over plain Python lists.

    w has shape [outputnum][inputnum]; b has length outputnum.
    forward() caches its input and output; backward() returns dL/dx and then
    applies a plain-SGD update (gradients summed over the batch).
    """
    def __init__(self,inputnum,outputnum):
        # Weights start uniform in (-1, 1); biases at zero; fixed learning rate.
        self.w=[[(random.random()-0.5)*2 for i in range(inputnum)] for i in range(outputnum)]
        self.b=[0 for i in range(outputnum)]
        self.lr=0.0002
    def forward(self,x):
        # q is a module-level debug flag; when 1, dump the tensors.
        if q==1:print("x",x)
        if q==1:print("w",self.w)
        if q==1:print('b',self.b)
        self.x=x
        output=[]
        # output[sample][out] = sum_k w[out][k] * x[sample][k] + b[out]
        for eachx in x:
            outtemp=[]
            for i,m in zip(self.w,self.b):
                temp=0
                for j,k in zip(i,eachx):
                    temp=temp+j*k
                temp=temp+m
                outtemp.append(temp)
            output.append(outtemp)
        self.output=output
        if q==1:print("output",output,'\n')
        return output
    def backward(self,dy):
        # IMPORTANT: dx must be computed from the PRE-update weights, so the
        # weight/bias updates have to stay below this loop.
        dx=[]
        for eachdy,eachx in zip(dy,self.x):
            dxtemp=[]
            for i in range(len(eachx)):
                temp=0
                for j,k in zip([m[i] for m in self.w],eachdy):
                    temp=temp+j*k
                dxtemp.append(temp)
            dx.append(dxtemp)
        # SGD step: dL/dw[n][k] = sum over batch of dy[n] * x[k] (summed, not averaged).
        for n in range(len(self.w)):
            for k in range(len(self.w[0])):
                self.w[n][k]=self.w[n][k]-self.lr* sum([eachdy[n]*eachx[k] for eachdy,eachx in zip(dy,self.x)] )
        for i in range(len(self.b)):
            self.b[i]=self.b[i]-self.lr*sum([m[i] for m in dy])
        return dx
    def __call__(self,x):
        return self.forward(x)
    def __str__(self):
        return "fullconnection input:"+str(len(self.w[0]))+"output:"+str(len(self.w))
        #return "w:"+str(self.w)+"b:"+str(self.b)
#return "w:"+str(self.w)+"b:"+str(self.b)
class relu:
    """Leaky-ReLU activation (negative slope 0.1), despite the plain name."""

    def __init__(self):
        pass

    def forward(self, x):
        # Remember the input so backward() can gate the gradient.
        self.x = x
        return [[value if value > -0 else 0.1 * value for value in row] for row in x]

    def __call__(self, x):
        return self.forward(x)

    def backward(self, dy):
        # Pass gradient through where the input was positive; scale by 0.1 otherwise.
        return [[grad if value > -0 else 0.1 * grad for value, grad in zip(row_x, row_dy)]
                for row_dy, row_x in zip(dy, self.x)]

    def __str__(self):
        return "relu"
#函数模拟器
class model:
    """Sequential MLP: fullconnection layers with a relu after each hidden layer.

    model(n_in, h1, ..., n_out) builds fullconnection(n_in, h1), relu(), ...,
    fullconnection(h_last, n_out) with no activation on the output layer.
    """
    def __init__(self, *num):
        self.layer = []
        lastnum = num[0]
        for position, width in enumerate(num[1:], start=1):
            self.layer.append(fullconnection(lastnum, width))
            # Activation after every layer except the final (output) one.
            # The original tested "width != num[-1]" (a VALUE comparison),
            # which wrongly skipped the activation on any hidden layer whose
            # width happens to equal the output width; compare positions.
            if position != len(num) - 1:
                self.layer.append(relu())
            lastnum = width

    def forward(self, x):
        result = x
        for layer in self.layer:
            result = layer.forward(result)
        return result

    def backward(self, loss):
        # Propagate the gradient back through the layers in reverse order.
        dy = loss
        for layer in reversed(self.layer):
            dy = layer.backward(dy)

    def __call__(self, x):
        return self.forward(x)
# Train a 1 -> 200 -> 20 -> 2 -> 1 network to fit resultgenerator on random
# inputs drawn from (-0.5, 0.5); report the fit error every 5 steps.
m=model(1, 200,20,2,1)
for i in range(100000):
    if q==1:print('----------------------')
    batch=160
    x= [[(random.random()-0.5)]for i in range(batch)]#(random.random()-0.5) *4
    #x=[[0.1],[-0.3],[0.3] ,[-0.3],[0.3]]
    y=m.forward(x)
    t=resultgenerator(x)
    #print(t)
    # loss is the raw residual (y - t): the gradient of 0.5*(y-t)^2 w.r.t. y,
    # fed straight into backprop.
    loss=[]
    for eachy,eacht in zip(y,t):
        loss.append([i-j for i ,j in zip(eachy,eacht)])
    #print("all:",x,y[0],t,loss)
    m.backward(loss)
    if i%5==0:
        # Probe three fixed points: print target-minus-prediction for each,
        # plus the mean residual over the current batch.
        z1=m.forward([[0.1]])
        q1=resultgenerator([[0.1]])
        z2=m.forward([[0.3]])
        q2=resultgenerator([[0.3]])
        z3=m.forward([[-0.3]])
        q3=resultgenerator([[-0.3]])
        print(q1[0][0]-z1[0][0], q2[0][0]-z2[0][0],q3[0][0]-z3[0][0],sum([ i[0]for i in loss])/batch)
        # for i in m.layer:
        #     print(i)
| true
|
4e18554e2dbd56aa38e13aa0d2b1741a3b828c0a
|
Python
|
joeking11829/tornwamp
|
/tests/test_topic.py
|
UTF-8
| 6,323
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
import unittest
from mock import patch
from tornwamp.topic import Topic, TopicsManager
from tornwamp.session import ClientConnection
class MockSubscriber(object):
    # Stand-in for a connection: only the .dict attribute is consulted when
    # a Topic serializes its subscribers (see TopicTestCase.test_dict).
    dict = {"author": "plato"}
class TopicTestCase(unittest.TestCase):
    """Unit tests for tornwamp.topic.Topic."""

    def test_constructor(self):
        # A new topic keeps its name and starts with no connections at all.
        topic = Topic("the.monarchy")
        self.assertEqual(topic.name, "the.monarchy")
        self.assertEqual(topic.subscribers, {})
        self.assertEqual(topic.publishers, {})

    def test_dict(self):
        # Topic.dict serializes each subscriber through its .dict attribute.
        topic = Topic("the.republic")
        subscriber = MockSubscriber()
        topic.subscribers[123] = subscriber
        expected_dict = {
            'name': 'the.republic',
            'publishers': {},
            'subscribers': {
                123: {"author": "plato"}
            }
        }
        self.assertEqual(topic.dict, expected_dict)

    def test_connections(self):
        # .connections merges subscribers and publishers into one mapping.
        topic = Topic("start.trek")
        topic.subscribers = {1: 2}
        topic.publishers = {3: 4}
        expected = {1: 2, 3: 4}
        self.assertEqual(topic.connections, expected)
class TopicsManagerTestCase(unittest.TestCase):
    """Unit tests for tornwamp.topic.TopicsManager (subscriber/publisher bookkeeping)."""
    # Show full diffs when the large expected dicts below mismatch.
    maxDiff = None

    def test_add_subscriber(self):
        # Adding a subscriber registers it under its subscription id and
        # records the topic on the connection itself.
        manager = TopicsManager()
        connection = ClientConnection(None, name="Dracula")
        manager.add_subscriber("romania", connection, 432)
        connection = manager["romania"].subscribers.pop(432)
        self.assertEqual(connection.name, "Dracula")
        self.assertTrue("romania" in connection.topics["subscriber"])

    def test_remove_subscriber(self):
        # Removal clears both the manager's map and the connection's topics.
        manager = TopicsManager()
        connection = ClientConnection(None, name="Dracula")
        manager.add_subscriber("romania", connection, 95)
        self.assertEqual(len(manager["romania"].subscribers), 1)
        self.assertTrue("romania" in connection.topics["subscriber"])
        manager.remove_subscriber("romania", 95)
        self.assertEqual(len(manager["romania"].subscribers), 0)
        self.assertFalse("romania" in connection.topics["subscriber"])

    def test_remove_subscriber_inexistent_connection(self):
        # Removing from an unknown topic is a silent no-op returning None.
        manager = TopicsManager()
        answer = manager.remove_subscriber("inexistent", None)
        self.assertIsNone(answer)

    def test_add_publisher(self):
        manager = TopicsManager()
        connection = ClientConnection(None, name="Frankenstein")
        manager.add_publisher("gernsheim", connection, 123)
        connection = manager["gernsheim"].publishers.pop(123)
        self.assertEqual(connection.name, "Frankenstein")
        self.assertTrue("gernsheim" in connection.topics["publisher"])

    def test_remove_publisher(self):
        manager = TopicsManager()
        connection = ClientConnection(None, name="Frankenstein")
        manager.add_publisher("gernsheim", connection, 123)
        self.assertEqual(len(manager["gernsheim"].publishers), 1)
        self.assertTrue("gernsheim" in connection.topics["publisher"])
        manager.remove_publisher("gernsheim", 123)
        self.assertEqual(len(manager["gernsheim"].publishers), 0)
        self.assertFalse("gernsheim" in connection.topics["publisher"])

    def test_remove_publisher_inexistent_connection(self):
        manager = TopicsManager()
        answer = manager.remove_publisher("inexistent", None)
        self.assertIsNone(answer)

    def test_remove_connection(self):
        # remove_connection detaches one connection from every topic it is in,
        # whether it joined as publisher or subscriber.
        manager = TopicsManager()
        connection = ClientConnection(None, name="Drakenstein")
        manager.add_publisher("gernsheim", connection)
        self.assertEqual(len(manager["gernsheim"].publishers), 1)
        self.assertTrue("gernsheim" in connection.topics["publisher"])
        manager.add_subscriber("romania", connection)
        self.assertEqual(len(manager["romania"].subscribers), 1)
        self.assertTrue("romania" in connection.topics["subscriber"])
        manager.remove_connection(connection)
        self.assertEqual(len(manager["romania"].subscribers), 0)
        self.assertEqual(len(manager["gernsheim"].publishers), 0)

    # Pin the generated ids: session ids become 1, 2 and topic subscription
    # ids become 3, 4, so the expected dict below is fully deterministic.
    @patch("tornwamp.session.create_global_id", side_effect=[1, 2])
    @patch("tornwamp.topic.create_global_id", side_effect=[3, 4])
    def test_dict(self, mock_id, mock_id_2):
        manager = TopicsManager()
        mr_hyde = ClientConnection(None, name="Mr Hyde")
        mr_hyde.last_update = None
        dr_jekyll = ClientConnection(None, name="Dr Jekyll")
        dr_jekyll.last_update = None
        manager.add_subscriber("scotland", mr_hyde)
        manager.add_publisher("scotland", dr_jekyll)
        expected_dict = {
            'scotland': {
                'name': 'scotland',
                'publishers': {
                    4: {
                        'id': 2,
                        'last_update': None,
                        'name': 'Dr Jekyll',
                        'topics': {
                            'subscriber': {},
                            'publisher': {
                                'scotland': 4
                            }
                        },
                        'zombie': False,
                        'zombification_datetime': None
                    }
                },
                'subscribers': {
                    3: {
                        'id': 1,
                        'last_update': None,
                        'name': 'Mr Hyde',
                        'topics': {
                            'subscriber': {
                                'scotland': 3
                            },
                            'publisher': {}
                        },
                        'zombie': False,
                        'zombification_datetime': None
                    }
                }
            }
        }
        self.assertEqual(manager.dict, expected_dict)

    def test_get_connection(self):
        # get_connection finds a connection by topic + subscription id, on
        # either side (subscriber or publisher).
        manager = TopicsManager()
        frodo = ClientConnection(None, name="Frodo")
        sam = ClientConnection(None, name="Sam")
        manager.add_subscriber("lord.of.the.rings", frodo, subscription_id=1)
        manager.add_publisher("lord.of.the.rings", sam, subscription_id=2)
        hopefully_frodo = manager.get_connection("lord.of.the.rings", 1)
        hopefully_sam = manager.get_connection("lord.of.the.rings", 2)
        self.assertEqual(frodo, hopefully_frodo)
        self.assertEqual(sam, hopefully_sam)
| true
|
6943cb88730019116525d0e541b0638045471392
|
Python
|
Jimut123/code-backup
|
/python/coursera_python/deeplearning_ai_Andrew_Ng/1_NN_DL/work/Week 3/Planar data classification with one hidden layer/planar_utils.py
|
UTF-8
| 2,253
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
def plot_decision_boundary(model, X, y):
    """Shade the predictions of *model* over a dense grid and overlay the data.

    X is 2 x m (features in rows); model maps an (n, 2) array of points to
    labels; y colors the scattered samples.
    """
    # Bounding box of the data, padded by 1 on every side.
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
    step = 0.01
    # Dense grid covering the box, points spaced `step` apart.
    xx, yy = np.meshgrid(np.arange(x_min, x_max, step), np.arange(y_min, y_max, step))
    # Evaluate the classifier on every grid point, then restore the grid shape.
    predictions = model(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    # Filled contour for the decision regions plus the labeled samples.
    plt.contourf(xx, yy, predictions, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
def sigmoid(x):
    """Compute the logistic sigmoid of x.

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(x)
    """
    return 1.0 / (1.0 + np.exp(-x))
def load_planar_dataset():
    """Build the deterministic 2-class "flower" toy set.

    Returns X of shape (2, 400) (one column per example) and Y of shape
    (1, 400) with uint8 labels (0 = red, 1 = blue).
    """
    np.random.seed(1)  # fixed seed -> identical data on every call
    m = 400           # total number of examples
    N = int(m / 2)    # examples per class
    D = 2             # feature dimensionality
    X = np.zeros((m, D))
    Y = np.zeros((m, 1), dtype='uint8')
    a = 4             # maximum petal radius
    for label in range(2):
        ix = range(N * label, N * (label + 1))
        # Noisy angle sweep, then a 4-lobed radius -> the flower petals.
        t = np.linspace(label * 3.12, (label + 1) * 3.12, N) + np.random.randn(N) * 0.2
        r = a * np.sin(4 * t) + np.random.randn(N) * 0.2
        X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        Y[ix] = label
    return X.T, Y.T
def load_extra_datasets():
    """Build five extra 2-D toy datasets via scikit-learn generators.

    Returns a 5-tuple: noisy_circles, noisy_moons, blobs, gaussian_quantiles,
    no_structure — each an (X, y)-style pair except no_structure, which is a
    pair of random arrays with no class structure.
    """
    N = 200  # samples per dataset
    noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
    noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
    blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
    gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)
    no_structure = np.random.rand(N, 2), np.random.rand(N, 2)
    return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure
| true
|
1db35165674d265cef6c1e252bd26022c243e782
|
Python
|
sbrj/App-Lumiar
|
/menu.py
|
UTF-8
| 2,510
| 2.921875
| 3
|
[] |
no_license
|
from kivy.uix.screenmanager import Screen
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from kivy.uix.image import Image
from kivy.uix.behaviors.button import ButtonBehavior
from kivy.uix.label import Label
from kivy.properties import ListProperty
from kivy.graphics import Color, Ellipse, Rectangle
from kivy.core.window import Window
class Menu(Screen):
    """Main menu screen; intercepts the window close request and asks the
    user to confirm before the app exits."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def on_pre_enter(self):
        # Route the window's close request through the confirmation popup.
        Window.bind(on_request_close=self.confirmacao)

    def confirmacao(self, *args, **kwargs):
        '''
        Build and show the exit-confirmation popup.
        Returning True tells Kivy to cancel the default close behaviour.
        '''
        box = BoxLayout(orientation='vertical', padding=10, spacing=10)
        botoes = BoxLayout(padding=10, spacing=10)
        popup = self.PopUp(box, botoes)
        box.add_widget(popup[0])
        box.add_widget(popup[1])
        popup[2].open()
        return True

    def PopUp(self, box, botoes, *args):
        """Create the popup widgets.

        Returns a tuple (warning image, button row, Popup instance).
        """
        # BUG FIX: App was referenced but never imported at module level,
        # so building the popup raised NameError; import it locally here.
        from kivy.app import App
        pop = Popup(title='Deseja sair?', content=box, size_hint=(None,None),size=(300,180))
        sim = Botao(text='Sim', on_release=App.get_running_app().stop)
        nao = Botao(text='Não', on_release=pop.dismiss)
        botoes.add_widget(sim)
        botoes.add_widget(nao)
        atencao = Image(source='atencao.png')
        return atencao, botoes, pop
class Botao(ButtonBehavior, Label):
    '''
    Rounded (pill-shaped) button used by the menu screen: a Label with
    ButtonBehavior whose background is drawn manually on canvas.before.
    '''
    # cor = current background colour; cor2 = pressed-state colour.
    cor = ListProperty([0.1,0.4,0.5,1])
    cor2 = ListProperty([0.1,0.2,0.3,1])
    def __init__(self, **kwargs):
        super(Botao,self).__init__(**kwargs)
        self.atualizar()
    # Redraw whenever position, size or colour changes.
    def on_pos(self,*args):
        self.atualizar()
    def on_size(self,*args):
        self.atualizar()
    def on_press(self, *args):
        # Swap normal/pressed colours while the button is held down.
        self.cor,self.cor2 = self.cor2,self.cor
    def on_cor(self,*args):
        self.atualizar()
    def on_release(self,*args):
        self.cor = self.cor2
    def atualizar(self,*args):
        """Redraw the pill background: two end circles plus a centre rectangle."""
        self.canvas.before.clear()
        with self.canvas.before:
            Color(rgba=(self.cor))
            Ellipse(size=(self.height,self.height),
                pos=self.pos)
            Ellipse(size=(self.height,self.height),
                pos=(self.x+self.width-self.height,self.y))
            Rectangle(size=(self.width-self.height,self.height),
                pos=(self.x+self.height/2.0,self.y))
| true
|
3b3402ac875884dc5df9c78293f0c88d4f69a2cf
|
Python
|
huangshaoqi/programming_python
|
/XDL/python/Part_1/test_py/查找模块.py
|
UTF-8
| 360
| 3.03125
| 3
|
[] |
no_license
|
import re
import pprint
# Scrape module names out of saved documentation HTML: collect the text of
# every <code class="xref">NAME</code> occurrence in source.txt and print it.
code = []
with open('source.txt') as f:
    content = f.read()
# NOTE(review): '.*' is greedy — if one line contains several <code> tags the
# whole run matches as one string; the inner '>(.*)<' then keeps only m2[0].
patten_str = '<code class="xref">.*</code>'
m1 = re.findall(patten_str, content)
# print(m1)
patten_str1 = '>(.*)<'
for var in m1:
    m2 = re.findall(patten_str1, var)
    code.append(m2[0])
pprint.pprint(code)
print("Total: %s" % len(code))
| true
|
be2228b990b70d7330a298535387758c62db8317
|
Python
|
Yichuans/wdpa-qa
|
/point.py
|
UTF-8
| 1,223
| 2.640625
| 3
|
[] |
no_license
|
# Load packages and modules
import sys, arcpy
from wdpa.qa import arcgis_table_to_df, find_wdpa_rows, pt_checks, INPUT_FIELDS_PT
from wdpa.export import output_errors_to_excel
# Load input: argv[1] = WDPA point feature class/table, argv[2] = Excel output path
input_pt = sys.argv[1]
output_path = sys.argv[2]
# Let us welcome our guest of honour
arcpy.AddMessage('\nAll hail the WDPA\n')
# Convert Point table to pandas DataFrame
arcpy.AddMessage('Converting to pandas DataFrame')
pt_df = arcgis_table_to_df(input_pt, INPUT_FIELDS_PT)
# result maps check name -> DataFrame of offending rows
result = dict()
# Run the checks
arcpy.AddMessage('--- Running QA checks on Points ---')
for pt_check in pt_checks: # pt_checks is a dictionary with checks' descriptive names and function names
    arcpy.AddMessage('Running:' + pt_check['name'])
    # checks are not currently optimised, thus return all pids regardless
    wdpa_pid = pt_check['func'](pt_df, True)
    # For each check, obtain the rows that contain errors
    if wdpa_pid.size > 0:
        result[pt_check['name']] = find_wdpa_rows(pt_df, wdpa_pid)
# Write output to file
arcpy.AddMessage('Writing output to Excel')
output_errors_to_excel(result, output_path, pt_checks, 'point')
arcpy.AddMessage('\nThe QA checks on POINTS have finished. \n\nWritten by Stijn den Haan and Yichuan Shi\nAugust 2019')
| true
|
4a19b7e6fe576c3744b75d4401e8df32187a213c
|
Python
|
daviddamilola/django-api-starter
|
/app/community/models.py
|
UTF-8
| 900
| 2.59375
| 3
|
[] |
no_license
|
from django.db import models
import uuid
# Create your models here.
class Puppy(models.Model):
    """A puppy record with a UUID primary key and auto-managed timestamps."""
    id = models.UUIDField(primary_key=True, editable=False, default=uuid.uuid4)
    name = models.CharField(max_length=255)
    age = models.IntegerField()
    breed = models.CharField(max_length=255)
    color = models.CharField(max_length=200)
    # created_at is set once on insert; updated_at refreshes on every save.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def get_breed(self):
        """Return a human-readable sentence naming this puppy's breed."""
        return f"{self.name} belongs to {self.breed} breed"
    def __str__(self):
        return self.name
    def __repr__(self):
        return f"{self.name} is added"
class Room(models.Model):
    """A named room, ordered alphabetically by default."""
    id = models.UUIDField(primary_key=True, editable=False, default=uuid.uuid4)
    name = models.CharField(max_length=255)
    def __str__(self):
        return self.name
    class Meta:
        ordering = ('name', )
| true
|
ea20db527ff2a662b5af973088143af3d1fcdcd3
|
Python
|
saultorre1995/autodiff
|
/autodiff.py
|
UTF-8
| 19,527
| 3.265625
| 3
|
[] |
no_license
|
# Autodifferentiation Library
# Written by Saul Gonzalez Resines
# Mail:sgr40@bath.ac.uk
import warnings
import numpy as np
def sin(a, owner=None):
    '''
    Sine (radians) of *a* as a graph Operator node.

    a     -- a DObject, or a plain int/float (then *owner* must be given).
    owner -- the DTrack graph that wraps a plain number into a Constant.
    Raises TypeError for any other combination.
    '''
    if isinstance(a, DObject):
        return a.sin()
    # BUG FIX: the original test `isinstance(a,float) or isinstance(a,int)
    # and isinstance(owner,DTrack)` binds 'and' tighter than 'or', so a bare
    # float with no owner slipped through; group the number check properly.
    elif isinstance(a, (float, int)) and isinstance(owner, DTrack):
        constant = owner.set_cte(value=a)
        return constant.sin()
    else:
        raise TypeError("Incompatible types, or the object DTrack not defined.")
def cos(a, owner=None):
    '''
    Cosine (radians) of *a* as a graph Operator node.

    a     -- a DObject, or a plain int/float (then *owner* must be given).
    owner -- the DTrack graph that wraps a plain number into a Constant.
    Raises TypeError for any other combination.
    '''
    if isinstance(a, DObject):
        return a.cos()
    # BUG FIX: parenthesise the number check — 'and' binds tighter than 'or',
    # so the original accepted a bare float with no owner.
    elif isinstance(a, (float, int)) and isinstance(owner, DTrack):
        constant = owner.set_cte(value=a)
        return constant.cos()
    else:
        raise TypeError("Incompatible types, or the object DTrack not defined.")
def logb(a, b, owner=None):
    '''
    Base-10 logarithm of *a* as a graph Operator node.

    NOTE(review): the base parameter *b* is currently ignored, and DObject
    defines no ``log10`` method, so calling this raises AttributeError.
    Left flagged rather than silently rewired — a proper fix needs a
    Log10/LogBase Operator class.
    '''
    if isinstance(a, DObject):
        return a.log10()
    # BUG FIX: parenthesise the number check — 'and' binds tighter than 'or',
    # so the original accepted a bare float with no owner.
    elif isinstance(a, (float, int)) and isinstance(owner, DTrack):
        constant = owner.set_cte(value=a)
        return constant.log10()
    else:
        raise TypeError("Incompatible types, or the object DTrack not defined.")
def logn(a, owner=None):
    '''
    Natural logarithm of *a* as a graph Operator node.

    a     -- a DObject, or a plain int/float (then *owner* must be given).
    owner -- the DTrack graph that wraps a plain number into a Constant.
    Raises TypeError for any other combination.
    '''
    if isinstance(a, DObject):
        return a.logn()
    # BUG FIX: parenthesise the number check — 'and' binds tighter than 'or',
    # so the original accepted a bare float with no owner.
    elif isinstance(a, (float, int)) and isinstance(owner, DTrack):
        constant = owner.set_cte(value=a)
        return constant.logn()
    else:
        raise TypeError("Incompatible types, or the object DTrack not defined.")
#def exp (a,owner=None):
# '''
# Exponentiation
# '''
# pass;
#
#def sqrt (a,owner=None):
# '''
# Square Root
# '''
# pass;
class DObject:
    """
    A class that encodes all the
    objects (variables,constants,operators)
    that are supported by the package.

    Subclasses: Constant, Variable, Operator. Arithmetic dunders build new
    Operator nodes in the owning DTrack graph rather than computing values.
    """
    def __init__(self,owner=None):
        # owner is the DTrack graph this node belongs to (None = detached).
        self.owner=owner
        pass
    # Operator overloading over the class DObject
    # for the sum and the multiplication.
    def _wrapper(self,operator,other):
        # Build a binary *operator* node combining self with *other*;
        # a plain int/float is first wrapped into a Constant.
        # Just create the object
        if self.owner is None:
            # NOTE(review): prints and returns None instead of raising —
            # callers receive None silently when a node has no owner.
            print("The variable with name "+self.name+" does not have any owner")
            return None;
        else:
            if isinstance(other,DObject):
                return self.owner.set_ope(operator,self,other);
            elif isinstance(other,float) or isinstance(other,int):
                constant = self.owner.set_cte(other);
                return self.owner.set_ope(operator,self,constant)
            else:
                raise TypeError("Incompatible Types");
    def _wrapper_single(self,operator):
        # Build a unary *operator* node over self (second input is None).
        return self.owner.set_ope(operator,self,None)
    def sin(self):
        return self._wrapper_single(Sine);
    def cos(self):
        return self._wrapper_single(Cosine);
    def logn(self):
        return self._wrapper_single(LogN);
    # NOTE(review): module-level logb() calls a .log10() method that is not
    # defined here — using logb currently raises AttributeError.
    #def cos(self):
    #    return self._wrapper_single(Cosine);
    def __add__(self,other):
        return self._wrapper(Add,other);
    def __sub__(self,other):
        return self._wrapper(Sub,other);
    def __mul__(self,other):
        return self._wrapper(Multiply,other);
    def __pow__(self,other):
        return self._wrapper(Power,other);
    def __truediv__(self,other):
        return self._wrapper(Divide,other);
    #def sin(self):
    #    return self._
class DTrack:
    """Computation-graph container.

    Owns every Constant/Variable/Operator node, produces a topological
    ordering, and runs numeric (Forward/Backward) and symbolic
    (Forward_Obj/Backward_Obj) autodiff passes.
    """
    def __init__(self, head_node=None):
        self.head_node = head_node   # output node of the expression
        self.cte = {}                # name -> Constant
        self.var = {}                # name -> Variable
        self.ope = {}                # name -> Operator
        # BUG FIX: was `ordering=[]` (a discarded local, missing `self.`);
        # GetGrad reads self.ordering before Topsort has necessarily run.
        self.ordering = []
        # Counters used to build unique node names.
        self.n_cte = 0
        self.n_var = 0
        self.n_add = 0
        self.n_sub = 0
        self.n_mul = 0
        self.n_pow = 0
        self.n_sine = 0
        self.n_cos = 0
        self.n_div = 0
        # BUG FIX: LogN increments owner.n_logn, which was never initialised.
        self.n_logn = 0

    def __repr__(self):
        return "DTrack Object"

    def set_var(self, value, name=None):
        """Create and register a Variable with the given value."""
        this = Variable(self, value, name)
        return this

    def set_cte(self, value, name=None):
        """Create and register a Constant with the given value."""
        this = Constant(self, value, name)
        return this

    def set_ope(self, operator, a, b):
        """Create and register an Operator node of class *operator* over (a, b)."""
        this = operator(self, a, b)
        return this

    def set_header(self, head):
        """Set the output (head) node of the graph."""
        self.head_node = head

    def get_var(self, var):
        """Look up a Variable by instance or by name.

        BUG FIX: the original called isinstance() with a single argument,
        which always raised TypeError.
        """
        if isinstance(var, Variable):
            return self.var[var.name]
        elif isinstance(var, str):
            return self.var[var]
        else:
            raise TypeError("Not a Variable or string")

    def reset(self):
        """Restart the graph: drop every node and reset all name counters."""
        self.head_node = None
        self.cte.clear()
        self.var.clear()
        self.ope.clear()
        self.ordering = []
        self.n_cte = 0
        self.n_var = 0
        self.n_add = 0
        self.n_sub = 0
        self.n_mul = 0
        self.n_pow = 0
        self.n_div = 0
        self.n_sine = 0
        self.n_cos = 0
        self.n_logn = 0

    def Topsort(self):
        """Depth-first post-order from the head node; stores and returns the
        topological ordering (leaves first, head node last)."""
        vis = set()
        order = []
        def _check(node):
            if node not in vis:
                # avoid re-visiting a shared variable or constant;
                # operators are always distinct nodes
                vis.add(node)
                if isinstance(node, Operator):
                    for inp in node.inputs:
                        _check(inp)
                order.append(node)
        if self.head_node is None:
            print("The head_node is None so not topological order executed")
            return None
        else:
            _check(self.head_node)
        # set ordering
        self.ordering = order
        return order

    def Forward(self, order=None):
        """Numeric forward pass: fill node.value for every Operator, in order."""
        if not order:
            order = self.ordering
        for node in order:
            if isinstance(node, Operator):
                node.value = node.forward(*[inp_node.value for inp_node in node.inputs])

    def Backward(self, order=None):
        """Numeric reverse pass: accumulate node.gradient (head gradient = 1).

        Returns the gradients of all nodes in topological order.
        """
        if not order:
            order = self.ordering
        # The head node's gradient is always one.
        order[-1].gradient = 1
        vis = set()
        for node in reversed(order):
            if isinstance(node, Operator):
                inputs = node.inputs
                grads = node.backward(*[inp.value for inp in inputs], dout=node.gradient)
                for inp, grad in zip(inputs, grads):
                    # First contribution overwrites; later ones accumulate.
                    if inp not in vis:
                        inp.gradient = grad
                    else:
                        inp.gradient += grad
                    vis.add(inp)
        return [node.gradient for node in order]

    def Forward_Obj(self, order=None):
        """Symbolic forward pass: build value_obj graph nodes.

        NOTE(review): this calls node.forward(), not forward_obj() — it works
        for operators whose forward() uses overloaded Python operators, but
        confirm intent for numpy-based ones (Power, Sine, ...).
        """
        if not order:
            order = self.ordering
        for node in order:
            if isinstance(node, Operator):
                node.value_obj = node.forward(*node.inputs)

    def Backward_Obj(self, order=None):
        """Symbolic reverse pass: accumulate gradient_obj graph nodes.

        NOTE(review): calls node.backward(), not backward_obj() — see
        Forward_Obj.
        """
        if not order:
            order = self.ordering
        vis = set()
        # The head node's symbolic gradient is the constant 1.
        order[-1].gradient_obj = self.set_cte(value=1.0)
        for node in reversed(order):
            if isinstance(node, Operator):
                grads = node.backward(*node.inputs, dout=node.gradient_obj)
                for inp, grad in zip(node.inputs, grads):
                    if inp not in vis:
                        inp.gradient_obj = grad
                    else:
                        inp.gradient_obj += grad
                    vis.add(inp)
        return [node.gradient_obj for node in order]

    def GetGradObj(self):
        """Run the symbolic forward and backward passes (ordering must exist)."""
        self.Forward_Obj()
        self.Backward_Obj()
        return None

    def Simplify(self):
        '''Try to reduce the amount of nodes for faster computation
        (e.g. fold operators whose inputs are both constants). Not implemented.'''
        pass

    def PlotGraph(self):
        ''' Plot the graph starting from the head node: operators as green
        circles, variables red, constants cyan, with value/gradient labels.
        '''
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        if self.head_node is None:
            raise TypeError("The head_node is equal to None. Maybe it was not set.")
        # Create The Figure and a single axis
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        start_xy = (0, 0)
        polygons = []
        maxnodes = []
        def _checknode(node, count=0):
            # Record the operator-depth of every root-to-leaf path.
            if isinstance(node, Operator):
                count += 1
                for inp in node.inputs:
                    _checknode(inp, count)
            else:
                maxnodes.append(count)
        # Compute the maximum depth to scale the layout.
        _checknode(self.head_node, 0)
        totlev = max(maxnodes)
        maxdist = 3 ** (max(maxnodes))
        def _plotnode(node, pos, level=1):
            # Draw one node (circle + label) and recurse into its inputs.
            size = 10 / level
            dist_y = maxdist / (level)
            dist_x = 6 * maxdist / totlev
            # Build the label text.
            if isinstance(node, DObject):
                if node.value is None:
                    valnode = ""
                else:
                    valnode = "{:.2f}".format(node.value)
                if node.gradient is None:
                    gradval = ""
                else:
                    gradval = "{:.2f}".format(node.gradient)
                info = node.name + "\n" + "val: " + valnode + \
                    "\n" + "grad: " + \
                    gradval
            # Plot the node.
            if isinstance(node, Operator):
                polygons.append(mpl.patches.Circle(pos, radius=1, color="lime"))
                ax.annotate(info, xy=pos, ha="center", va="center", fontsize=size)
                level += 1
                if len(node.inputs) == 2:
                    # binary operator: one child above, one below
                    post = (pos[0] + dist_x, pos[1] + dist_y)
                    polygons.append(mpl.patches.Polygon(([pos, post])))
                    _plotnode(node.inputs[0], post, level)
                    post = (pos[0] + dist_x, pos[1] - dist_y)
                    polygons.append(mpl.patches.Polygon([pos, post]))
                    _plotnode(node.inputs[1], post, level)
                elif len(node.inputs) == 1:
                    # unary operator
                    post = (pos[0] + dist_x, pos[1] + dist_y)
                    _plotnode(node.inputs[0], post, level)
                else:
                    warnings.warn("The input of the operator " + node.name + " is " + \
                        str(len(node.inputs)) + " which is a case not implemented in PlotGraph." + \
                        "Some nodes will be missing.")
            elif isinstance(node, Variable):
                polygons.append(mpl.patches.Circle(pos, radius=1, color="red"))
                ax.annotate(info, xy=pos, ha="center", va="center", fontsize=size)
            else:
                polygons.append(mpl.patches.Circle(pos, radius=1, color="cyan"))
                ax.annotate(info, xy=pos, ha="center", va="center", fontsize=size)
        # Lay out the whole tree.
        _plotnode(self.head_node, start_xy, level=1)
        # Patch the lines and the circles onto the plot.
        for pol in polygons:
            ax.add_patch(pol)
        ax.axis("equal")
        ax.autoscale_view()
        plt.tight_layout()
        plt.show()

    def GetGrad(self, which=None):
        """Run Topsort (if needed), Forward, then Backward; return all gradients.

        *which* is accepted for backward compatibility but currently unused
        (was a mutable default `which=[]`; changed to None — best practice).
        """
        if not self.ordering:
            self.Topsort()
        self.Forward()
        grads = self.Backward()
        return grads
class Constant(DObject):
    """A leaf node holding a fixed numeric value; registers itself in owner.cte."""
    def __init__(self,owner,value,name=None):
        super().__init__(owner);
        self.owner.n_cte+=1;
        # Auto-generate a name when none is given or the requested one is taken.
        if name is None:
            self.name="cte_"+str(self.owner.n_cte);
        else:
            if name in list(self.owner.cte.keys()):
                self.name="cte_"+str(self.owner.n_cte);
                warnings.warn("Warning the cte with name: "+name+" has been already defined, replacing with "+self.name)
            else:
                self.name=name;
        self.value=value;
        self.gradient=None;      # numeric gradient, set by DTrack.Backward
        self.gradient_obj=None;  # symbolic gradient node, set by Backward_Obj
        # Updating the owner dictionary
        # and assign the object to the dictionary position
        self.owner.cte[self.name]=self;
        # NOTE(review): the next line only rebinds the local name 'self' —
        # it has no effect and could be removed.
        self=self.owner.cte[self.name];
    def __repr__(self):
        return "The cte is : name "+self.name+" with value "+str(self.value)
class Variable(DObject):
    """A leaf node whose gradient is tracked; registers itself in owner.var."""
    def __init__(self, owner, value, name=None):
        super().__init__(owner)
        self.owner.n_var += 1
        # Auto-generate a name when none is given or the requested one is taken.
        if name is None:
            self.name = "var_" + str(self.owner.n_var)
        else:
            if name in list(self.owner.var.keys()):
                # BUG FIX: the duplicate-name fallback used a "cte_" prefix,
                # which collides with Constant names; use "var_" here.
                self.name = "var_" + str(self.owner.n_var)
                warnings.warn("Warning the variable with name: "+name+" has been already defined, replacing with "+self.name)
            else:
                self.name = name
        self.value = value
        self.gradient = None       # numeric gradient, set by DTrack.Backward
        self.gradient_obj = None   # symbolic gradient node, set by Backward_Obj
        # Register in the owning graph.
        self.owner.var[self.name] = self
    def __repr__(self):
        return "The variable is: name "+self.name+" with value "+str(self.value)
class Operator(DObject):
    """Base class for graph operation nodes.

    Subclasses set self.inputs and implement forward/backward (numeric) and,
    for binary operators, forward_obj/backward_obj (symbolic).
    """
    def __init__(self,owner,name="ope_"):
        super().__init__(owner);
        self.value=None;         # numeric result, filled by DTrack.Forward
        self.value_obj=None;     # symbolic result node, filled by Forward_Obj
        self.inputs=[];          # operand nodes, set by the subclass
        self.name=name;
        self.gradient=None;      # numeric gradient, set by DTrack.Backward
        self.gradient_obj=None;  # symbolic gradient node, set by Backward_Obj
    def __repr__(self):
        return "The name of the operator is, "+self.name ;
class Add(Operator):
    """Addition node: value = a + b; gradients d/da = d/db = dout."""
    def __init__(self, owner, a, b, name="add_"):
        super().__init__(owner, name)
        owner.n_add += 1
        self.name = "add_{}".format(owner.n_add)
        self.inputs = [a, b]
        # Register this node with its owning graph.
        owner.ope[self.name] = self
    def forward(self, a, b):
        return a + b
    def forward_obj(self, a, b):
        return a + b
    def backward(self, a, b, dout):
        return dout, dout
    def backward_obj(self, a, b, dout):
        return dout, dout
class Sub(Operator):
    """Subtraction node: value = a - b; gradients d/da = dout, d/db = -dout."""
    def __init__(self, owner, a, b, name="sub_"):
        super().__init__(owner, name)
        owner.n_sub += 1
        self.name = "sub_{}".format(owner.n_sub)
        self.inputs = [a, b]
        # Register this node with its owning graph.
        owner.ope[self.name] = self
    def forward(self, a, b):
        return a - b
    def forward_obj(self, a, b):
        return a - b
    def backward(self, a, b, dout):
        return dout, -dout
    def backward_obj(self, a, b, dout):
        return dout, -dout
class Multiply(Operator):
    """Multiplication node: value = a * b; d/da = dout*b, d/db = dout*a."""
    def __init__(self, owner, a, b, name="mul_"):
        super().__init__(owner, name)
        owner.n_mul += 1
        self.name = "mul_{}".format(owner.n_mul)
        self.inputs = [a, b]
        # Register this node with its owning graph.
        owner.ope[self.name] = self
    def forward(self, a, b):
        return a * b
    def forward_obj(self, a, b):
        return a * b
    def backward(self, a, b, dout):
        return dout * b, dout * a
    def backward_obj(self, a, b, dout):
        return dout * b, dout * a
class Power(Operator):
    """Power node: value = a ** b.

    Gradients: d/da = dout * b * a**(b-1);
               d/db = dout * ln(a) * a**b  (only valid for a > 0).
    """
    def __init__(self, owner, a, b, name="pow_"):
        super().__init__(owner, name)
        self.owner.n_pow += 1
        self.name = "pow_" + str(self.owner.n_pow)
        self.inputs = [a, b]
        # Register this node with its owning graph.
        self.owner.ope[self.name] = self
    def forward(self, a, b):
        return np.power(a, b)
    def forward_obj(self, a, b):
        return self.owner.set_ope(Power, a, b)
    def backward(self, a, b, dout):
        # NOTE(review): np.log(a) is undefined (nan/-inf) for a <= 0; the
        # original masking experiment was abandoned — confirm bases stay > 0.
        loga = np.log(a)
        return dout * b * np.power(a, b - 1.0), dout * loga * np.power(a, b)
    def backward_obj(self, a, b, dout):
        # BUG FIX: the original passed a DObject where set_ope expects an
        # Operator class, and called set_ope() with no arguments; build the
        # symbolic derivatives with Power and LogN nodes instead.
        d_a = dout * b * self.owner.set_ope(Power, a, b - 1.0)
        d_b = dout * self.owner.set_ope(LogN, a, None) * self.owner.set_ope(Power, a, b)
        return d_a, d_b
class Divide(Operator):
    """Division node: value = a / b; d/da = dout/b, d/db = -a*dout/b**2."""
    def __init__(self, owner, a, b, name="div_"):
        super().__init__(owner, name)
        owner.n_div += 1
        self.name = "div_{}".format(owner.n_div)
        self.inputs = [a, b]
        # Register this node with its owning graph.
        owner.ope[self.name] = self
    def forward(self, a, b):
        return a / b
    def forward_obj(self, a, b):
        return a / b
    def backward(self, a, b, dout):
        return dout / b, -a * dout / b ** 2.0
    def backward_obj(self, a, b, dout):
        return dout / b, -a * dout / b ** 2.0
class Sine(Operator):
    """Unary sine node: value = sin(a); d/da = dout * cos(a)."""
    def __init__(self, owner, a, b=None, name="sine_"):
        super().__init__(owner, name)
        owner.n_sine += 1
        self.name = "sine_{}".format(owner.n_sine)
        self.inputs = [a]
        # Register this node with its owning graph.
        owner.ope[self.name] = self
    def forward(self, a):
        return np.sin(a)
    def backward(self, a, dout):
        return [dout * np.cos(a)]
class Cosine(Operator):
    """Unary cosine node: value = cos(a); d/da = -dout * sin(a)."""
    def __init__(self, owner, a, b=None, name="cos_"):
        super().__init__(owner, name)
        owner.n_cos += 1
        self.name = "cos_{}".format(owner.n_cos)
        self.inputs = [a]
        # Register this node with its owning graph.
        owner.ope[self.name] = self
    def forward(self, a):
        return np.cos(a)
    def backward(self, a, dout):
        return [-dout * np.sin(a)]
class LogN(Operator):
    """Unary natural-log node: value = ln(a); d/da = dout / a."""
    def __init__(self, owner, a, b=None, name="logn_"):
        super().__init__(owner, name)
        # BUG FIX: DTrack.__init__ never defines n_logn, so the original
        # `self.owner.n_logn += 1` raised AttributeError on first use;
        # initialise it lazily here so this class works with any owner.
        self.owner.n_logn = getattr(self.owner, 'n_logn', 0) + 1
        self.name = "logn_" + str(self.owner.n_logn)
        self.inputs = [a]
        # Register this node with its owning graph.
        self.owner.ope[self.name] = self
    def forward(self, a):
        return np.log(a)
    def backward(self, a, dout):
        return [dout / a]
class Tangent(Operator):
    # TODO: Tangent operator is a placeholder — not yet implemented.
    # Here the Tangent
    pass
| true
|
ba8fd1adb20f102d67a7c517bdb15eb28567eeda
|
Python
|
hashkanna/codeforces
|
/ruf/yandex_algo_qual/rand.py
|
UTF-8
| 265
| 2.890625
| 3
|
[] |
no_license
|
# Read a winning-number set, then n candidate lines; print "Lucky" for each
# line that contains at least three winning numbers, otherwise "Unlucky".
a = set(map(int,input().split()))
n = int(input())
for i in range(n):
    l=list(map(int,input().split()))
    c=0
    r="Unlucky"
    for j in l:
        if j in a:
            c+=1
        if c==3:
            # three hits are enough — stop scanning this line
            r="Lucky"
            break
    print(r)
| true
|
59bc31cba2967b9a506ae6e81d2dd508ad45382f
|
Python
|
kwoneyng/beakjoon
|
/멀리뛰기.py
|
UTF-8
| 193
| 2.828125
| 3
|
[] |
no_license
|
def solution(n):
if n < 2:
return n
answer = 0
dp = [0]*n
dp[0] = 1
dp[1] = 2
for i in range(1,n-1):
dp[i+1] = dp[i-1]+ dp[i]
return dp[n-1]%1234567
| true
|
7b3b3b8b61838eded4b11aac9a55e4a7ac29b269
|
Python
|
nbardy/WordRelationship-finder
|
/graphBuilder.py
|
UTF-8
| 5,911
| 2.671875
| 3
|
[] |
no_license
|
import relationshipFinder
import sys
def __jsHeader(datastore, outputfile):
    """Write the page's <script> section: jQuery/Highcharts includes and the
    drilldown column-chart configuration, with the chart data for *datastore*
    embedded between the two script fragments."""
    outputfile.write("""
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js" type="text/javascript"></script>
<script src="highcharts.js" type="text/javascript"></script>
""")
    outputfile.write("""
<script>
var chart;
$(document).ready(function() {
var colors = Highcharts.getOptions().colors,
name = 'Common Words';
""")
    # Emit `var categories = ...; var data = ...;` from the datastore.
    __outputchartData(datastore, outputfile)
    outputfile.write("""
function setChart(name, categories, data, color) {
chart.xAxis[0].setCategories(categories);
chart.series[0].remove();
chart.addSeries({
name: name,
data: data,
color: color || 'white'
});
}
chart = new Highcharts.Chart({
chart: {
renderTo: 'wordGraph',
type: 'column'
},
title: {
text: 'Most Used Words'
},
subtitle: {
text: 'Click the columns to view the words used before and after. Click again to return.'
},
xAxis: {
categories: categories
},
yAxis: {
title: {
text: 'Total percent usage'
}
},
plotOptions: {
column: {
cursor: 'pointer',
point: {
events: {
click: function() {
var drilldown = this.drilldown;
if (drilldown) { // drill down
setChart(drilldown.name, drilldown.categories, drilldown.data, drilldown.color);
} else { // restore
setChart(name, categories, data);
}
}
}
},
dataLabels: {
enabled: true,
color: colors[0],
style: {
fontWeight: 'bold'
},
formatter: function() {
return this.y +'%';
}
}
}
},
tooltip: {
formatter: function() {
var point = this.point,
s = this.x +':<b>'+ this.y +'% usage</b><br/>';
if (point.drilldown) {
s += 'Click to view '+ point.category +' versions';
} else {
s += 'Click to return to common words';
}
return s;
}
},
series: [{
name: name,
data: data,
color: 'white'
}],
exporting: {
enabled: false
}
});
});
</script>
""")
def __printHeader(name, datastore, outputfile):
    """Write the HTML <head> (title = *name*) including the JS chart header."""
    outputfile.write("""
<html>
<head>
<title>""")
    outputfile.write(name)
    outputfile.write("</title>")
    __jsHeader(datastore, outputfile)
    outputfile.write("</head>")
def __printBody(name, outputfile):
    """Write the HTML <body>: GitHub ribbon, heading *name*, the chart's
    target div (#wordGraph) and the closing tags."""
    outputfile.write("<body>")
    outputfile.write('<a href="https://github.com/DivisibleZero/WordRelationship-finder"><img style="position: absolute; top: 0; right: 0; border: 0; z-index:9999" src="https://a248.e.akamai.net/assets.github.com/img/7afbc8b248c68eb468279e8c17986ad46549fb71/687474703a2f2f73332e616d617a6f6e6177732e636f6d2f6769746875622f726962626f6e732f666f726b6d655f72696768745f6461726b626c75655f3132313632312e706e67" alt="Fork me on GitHub"></a>')
    outputfile.write("<h1>" + name + "</h1>")
    outputfile.write("<div id=\"wordGraph\"></div>")
    outputfile.write("<br /><p>Graphing library providing by <a href='http://www.highcharts.com'>highcharts</a></p>")
    outputfile.write("</body></html>")
def __outputchartData(datastore, outputfile, size=30):
    """Emit the JS variables `categories` (top *size* words) and `data`
    (their Highcharts point dicts, usage as % of all words)."""
    topWordList = relationshipFinder.getTopWordList(datastore, size)
    # NOTE(review): str() of a Python list emits single-quoted strings —
    # valid as JS literals here, but not strict JSON.
    outputfile.write("var categories = " + str(topWordList) + ';');
    total = float(relationshipFinder.wordCount(datastore))
    datalist = []
    for word in topWordList:
        datalist.append(__getDataMap(datastore, word, total))
    outputfile.write("var data = " + str(datalist) + ';');
def __getDataMap(datastore, word, total):
    """Build the Highcharts point dict for *word*.

    y is the word's usage as a percentage of *total*; the drilldown sub-chart
    holds the top 30 words seen immediately BEFORE it, each as a percentage
    of the word's own count.
    """
    datamap = {}
    datamap['y'] = 100 * datastore[word]['count']/total
    datamap['y'] = truncate(datamap['y'])
    datamap['color'] = "#4572A7"
    datamap['drilldown'] = {}
    subchart = datamap['drilldown']
    subchart['name'] = word
    subchart['categories'] = relationshipFinder.getTopRelations(datastore, word, 'before', 30)
    subchart['data'] = [truncate(100*datastore[word]['before'][relword]/float(datastore[word]['count'])) for relword in subchart['categories']]
    subchart['color'] = "#4572A7"
    return datamap
def truncate(x):
    """Round *x* to three decimal places (via string formatting) and return it as a float."""
    return float("{0:.3f}".format(x))
def makeChart(outputfilename, datastore, title="Nick's Chat Logs"):
    """
    Write a drilldown chart HTML page.

    outputfilename -- path of the HTML file to create
    datastore      -- word-relationship data (see relationshipFinder)
    title          -- page/heading title (kept backward-compatible default)
    """
    # BUG FIX: the file handle was opened and never closed; use a context
    # manager so the output is flushed and released even on error.
    with open(outputfilename, "w") as outputfile:
        __printHeader(title, datastore, outputfile)
        __printBody(title, outputfile)
def chartFromPurpleDir(outputfilename, pdirectory):
    """
    Outputs a html file for a drilldown bar chart.

    outputfilename -- name of the HTML file to write
    pdirectory     -- directory of Purple (libpurple/Pidgin) style chat logs
    """
    datastore = relationshipFinder.processPurpleDir(pdirectory)
    makeChart(outputfilename, datastore)
def chartFromTextFile(outputfilename, txtfile):
    """Build the datastore from a plain text file and write the chart HTML."""
    datastore = relationshipFinder.processTextFile(txtfile)
    makeChart(outputfilename, datastore)
def main(argv=None):
    """Entry point: argv[1] = output HTML file, argv[2] = input text file."""
    # Idiom fix: compare to None with `is`, not `==`.
    if argv is None:
        argv = sys.argv
    chartFromTextFile(argv[1], argv[2])

if __name__=="__main__":
    sys.exit(main())
| true
|
eb04819e6bbb6b184da5c026f46effbbfa529a4a
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03618/s344164292.py
|
UTF-8
| 559
| 2.984375
| 3
|
[] |
no_license
|
import sys
import numpy as np
input = sys.stdin.readline
def main():
    """Read a string s from stdin and print (#pairs i<j with s[i] != s[j]) + 1.

    Competitive-programming solution (AtCoder p03618) — presumably counts the
    distinct strings reachable by reversing one substring of s; TODO confirm.
    """
    s = input().strip()
    # count[i][c] = occurrences of letter c within s[0..i] (prefix frequency table)
    count = np.zeros((len(s), ord("z") - ord("a") + 1), dtype=int)
    s_ = [0] * len(s)
    s_[0] = ord(s[0]) - ord("a")
    count[0][s_[0]] += 1
    for i in range(1, len(s)):
        s_[i] = ord(s[i]) - ord("a")
        count[i] += count[i-1]
        count[i][s_[i]] += 1
    ans = 0
    for i in range(len(s)-1):
        # positions after i holding a character different from s[i]
        tmp = len(s) - i - 1 - (count[-1, s_[i]] - count[i, s_[i]])
        ans += tmp
    print(ans+1)

if __name__ == "__main__":
    main()
| true
|
b2e58b57332fc2afc11f9d997cb4c814b4aa21aa
|
Python
|
akonopacka/CTF-and-HTB-Writeups
|
/Simple_Programming/check.py
|
UTF-8
| 356
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/python
# Code for CTFLearn challenge Simple Programming
# Counts lines of data.dat whose number of '1's is even OR whose number of
# '0's is a multiple of three, echoing each line as it goes.
f = open("data.dat", "r")
Lines = f.readlines()
counter = 0
for line in Lines:
    print(line.strip())
    number_of_zeroes = line.count("0")
    number_of_ones = line.count("1")
    if number_of_ones % 2 == 0 or number_of_zeroes % 3 == 0:
        counter = counter + 1
print(counter)
| true
|
1407315cc8614ec440cd4b66b72dd73d1fcf5cb2
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02679/s928628308.py
|
UTF-8
| 690
| 2.921875
| 3
|
[] |
no_license
|
from operator import itemgetter
from math import gcd
from collections import Counter
# Competitive-programming solution (mod 1e9+7): each vector (a, b) is reduced
# to a canonical direction so that perpendicular vectors land in the same
# bucket; the answer counts subsets containing no two perpendicular vectors
# (presumably AtCoder's "sardines" problem — TODO confirm). Zero vectors can
# only appear alone and are added separately at the end.
N = int(input())
mod = int(1e9+7)
zero = 0
P = {}
for i in range(N):
    a,b = list(map(int, input().split()))
    if a == 0 and b == 0:
        zero += 1
        continue
    # normalise by gcd and fix the sign so each direction has one form
    g = gcd(a,b)
    a,b = a//g, b//g
    if b < 0:
        a,b = -a,-b
    if b == 0 and a < 0:
        a,b = -a,b
    #rot90
    rot90 = a<=0
    if rot90:
        a,b = b,-a
    #add
    if not (abs(a),b) in P:
        P[(a,b)] = [0,0]
    # slot 0 = original direction, slot 1 = the perpendicular one
    if not rot90 :
        P[(a,b)][0] += 1
    else:
        P[(a,b)][1] += 1
ans = 1
for k,v in P.items():
    #print(k,v)
    s,t = v
    # choose any subset from one side OR the other (minus double-counted empty)
    wk = 1 + pow(2,s,mod) - 1 + pow(2,t,mod) - 1
    ans *= wk
    ans %= mod
ans += zero
# subtract 1 to exclude the empty selection
print((ans - 1) % mod)
| true
|
3cf873298996506018d752191af93c6fb13a0cad
|
Python
|
manasa-0982/circle-
|
/print positive number.py
|
UTF-8
| 222
| 3.640625
| 4
|
[] |
no_license
|
# Read n integers and print the non-negative ones comma-separated.
l=[]
n=int(input("Enter number of elements: "))
for i in range(1,n+1):
    ele=int(input())
    l.append(ele)
for i in l:
    # NOTE(review): this compares the element's VALUE to the count n to pick
    # comma vs final newline — formatting breaks whenever a non-last element
    # equals n (or the last one doesn't); position was probably intended.
    if(i!=n and i>=0):
        print(i,end=",")
    elif(i==n and i>=0):
        print(i)
| true
|
b668e39f3d0e4993ed8287d46fe7ba96fd0571f5
|
Python
|
MarceloSerra/registerStudents
|
/registerStudents.py
|
UTF-8
| 4,436
| 3.265625
| 3
|
[] |
no_license
|
# Student registration script: records three grades per student, classifies
# each student as approved (avg >= 7), exam (4 <= avg < 7) or failed
# (avg < 4), keeps overall and per-sex tallies and can print a summary.
alunoContador = 0      # total students registered
alunoAprovado = 0      # approved overall
alunoReprovado = 0     # failed overall
alunoExame = 0         # sent to exam overall
alunoMasculinoAprovado = 0
alunoMasculinoReprovado = 0
alunoMasculinoExame = 0
alunoFemininoAprovado = 0
alunoFemininoReprovado = 0
alunoFemininoExame = 0

# Final report subroutine (reads the module-level counters above).
def relatorioAluno():
    """Print the summary report; assumes alunoContador > 0 (callers check)."""
    print('****************************************************************************************')
    print('Total de alunos: '+str(alunoContador))
    print(str((alunoAprovado/alunoContador)*100)+'% Aprovados | '+str((alunoExame/alunoContador)*100)+' % de Exame | '+str((alunoReprovado/alunoContador)*100)+'% Reprovados')
    print('Alunos masculinos aprovados: '+str(alunoMasculinoAprovado)+' | de Exame: '+str(alunoMasculinoExame)+' | e Reprovados: '+str(alunoMasculinoReprovado))
    print('Alunas femininas aprovadas: '+str(alunoFemininoAprovado)+' | de Exame: '+str(alunoFemininoExame)+' | e Reprovadas: '+str(alunoFemininoReprovado))
    print('****************************************************************************************')
    input('Programa Finalizado...')

# Ask what to do; re-prompt until a valid option is given.
# BUG FIX: the original validated inside an `if` whose `elif` branches then
# never ran, so an invalid-then-valid answer was silently dropped.
cadastra = input('Gostaria de Cadastrar? 1=SIM | 2=NÃO | 3=Exibir Relatório ')
while cadastra != '1' and cadastra != '2' and cadastra != '3':
    print('Valor incorreto, tente novamente: ')
    cadastra = input('Gostaria de Cadastrar? 1=SIM | 2=NÃO | 3=Exibir Relatório ')
if cadastra == '2':
    input('Programa finalizado...')
elif cadastra == '3':
    if alunoContador > 0:
        relatorioAluno()
    else:
        input('Não há relatório válido, cadastre algum aluno antes. Programa finalizado...')
elif cadastra == '1':
    while cadastra == '1':
        # Read the student's data; grades must be in [0, 10].
        alunoNome = input('Digite o nome do aluno: ')
        alunoSexo = input('Digite seu sexo: M=Masculino | F=Feminino ')
        while alunoSexo != 'M' and alunoSexo != 'F' and alunoSexo != 'm' and alunoSexo != 'f':
            print('Valor incorreto, tente novamente: ')
            alunoSexo = input('Digite seu sexo: M=Masculino | F=Feminino ')
        alunoNota1 = int(input('Digite a nota 1 do aluno: '))
        while alunoNota1 > 10 or alunoNota1 < 0:
            print('Valor incorreto, tente novamente: ')
            alunoNota1 = int(input('Digite a nota 1 do aluno: '))
        alunoNota2 = int(input('Digite a nota 2 do aluno: '))
        while alunoNota2 > 10 or alunoNota2 < 0:
            print('Valor incorreto, tente novamente: ')
            alunoNota2 = int(input('Digite a nota 2 do aluno: '))
        alunoNota3 = int(input('Digite a nota 3 do aluno: '))
        while alunoNota3 > 10 or alunoNota3 < 0:
            print('Valor incorreto, tente novamente: ')
            alunoNota3 = int(input('Digite a nota 3 do aluno: '))
        alunoMedia = ((alunoNota1+alunoNota2+alunoNota3)/3)
        # Classify by average, keeping overall and per-sex tallies.
        if alunoMedia >= 7:
            if alunoSexo == 'M' or alunoSexo == 'm':
                alunoMasculinoAprovado += 1
            else:
                alunoFemininoAprovado += 1
            alunoAprovado += 1
        elif alunoMedia >= 4:
            if alunoSexo == 'M' or alunoSexo == 'm':
                alunoMasculinoExame += 1
            else:
                alunoFemininoExame += 1
            alunoExame += 1
        else:
            if alunoSexo == 'M' or alunoSexo == 'm':
                alunoMasculinoReprovado += 1
            else:
                alunoFemininoReprovado += 1
            # BUG FIX: the original incremented alunoFemininoReprovado twice
            # in the female branch and never the overall alunoReprovado.
            alunoReprovado += 1
        print('A média do '+alunoNome+' é '+str(alunoMedia))
        # Total student counter.
        alunoContador += 1
        # Ask again; re-prompt until valid (same fix as above).
        cadastra = input('Gostaria de Cadastrar? 1=SIM | 2=NÃO | 3=Exibir Relatório ')
        while cadastra != '1' and cadastra != '2' and cadastra != '3':
            print('Valor incorreto, tente novamente: ')
            cadastra = input('Gostaria de Cadastrar? 1=SIM | 2=NÃO | 3=Exibir Relatório ')
        if cadastra == '2':
            input('Programa finalizado...')
        elif cadastra == '3':
            if alunoContador > 0:
                relatorioAluno()
            else:
                input('Não há relatório válido, cadastre algum aluno antes. Programa finalizado...')
| true
|
a7aeb9bb0a8c06674608da23daf82c247afacd0a
|
Python
|
Jowashaha/gMock-Test-Framework-Project-Spring2020
|
/cpp_gen.py
|
UTF-8
| 6,213
| 2.953125
| 3
|
[] |
no_license
|
class CppFile:
    """Accumulates includes, using-namespace lines and generated components,
    then renders them (or writes them) as one C++ source file."""

    def __init__(self):
        self.includes = []
        self.namespaces = []
        self.components = []

    def add_include(self, include):
        """Queue an ``#include <...>`` line for *include*."""
        self.includes.append(f'#include <{include}>\n')

    def add_namespace(self, namespace):
        """Queue a ``using namespace ...;`` line."""
        self.namespaces.append(f'using namespace {namespace};\n')

    def add_component(self, component):
        """Render *component* now and queue its text; the object must expose
        a ``generate`` attribute (duck-typed), otherwise TypeError."""
        if 'generate' not in dir(component):
            raise TypeError(f'{component} does not have attribute generate')
        self.components.append(component.generate())

    def generate(self):
        """Return the whole file: includes, blank line, namespaces, blank
        line, then every component in insertion order."""
        sections = [
            ''.join(self.includes), '\n',
            ''.join(self.namespaces), '\n',
            ''.join(self.components),
        ]
        return ''.join(sections)

    def write_to_file(self, filename):
        """Write the generated source to *filename* with its extension
        replaced by ``.cpp`` (everything after the first dot is dropped)."""
        base = filename.split('.')[0]
        with open(f'{base}.cpp', 'w') as out:
            out.write(self.generate())
# static methods
def convert_params_to_str(params, sep=', '):
    """Join *params* into one string, stringifying each item and separating
    them with *sep* (default C++-style ``', '``)."""
    return sep.join(map(str, params))
class StatementGroup:
    """Accumulates C++ statement strings and renders them in order.

    Optional ``public:`` / ``private:`` access-specifier sub-groups can be
    opened; statements added before the first specifier are preserved and
    emitted first.
    """
    def __init__(self):
        self.statements = []
        # Statements added before any access specifier was opened; None until
        # a specifier exists (see preserve_statements_before_specifiers).
        self.statements_before_specifiers = None
        self.public_specifier = None
        self.private_specifier = None
    def add_statement(self, expression, indent=True, has_semicolon=True):
        """Append one line; tab-indented and ';'-terminated by default."""
        indent = '\t' if indent else ''
        semicolon = ';' if has_semicolon else ''
        self.statements.append(
            '{}{}{}\n'.format(indent, expression, semicolon))
    def preserve_statements_before_specifiers(self):
        """Move already-added statements aside the first time a specifier
        section is opened, so they still render before the sections."""
        if self.statements_before_specifiers is None:
            self.statements_before_specifiers = self.statements
            self.statements = []
    def add_public_specifier(self):
        """Open (replacing any previous) public: section; returns its group."""
        self.preserve_statements_before_specifiers()
        self.public_specifier = StatementGroup()
        self.public_specifier.add_statement('public:', indent=False,
                                            has_semicolon=False)
        return self.public_specifier
    def add_private_specifier(self):
        """Open (replacing any previous) private: section; returns its group."""
        self.preserve_statements_before_specifiers()
        self.private_specifier = StatementGroup()
        self.private_specifier.add_statement('private:', indent=False,
                                             has_semicolon=False)
        return self.private_specifier
    def add_comment(self, comment):
        """Append a ``// ...`` comment line (no semicolon)."""
        self.add_statement('// ' + comment, has_semicolon=False)
    def add_cout(self, message):
        """Append ``cout << "message" << endl;``."""
        self.add_statement('cout << "{}" << endl'.format(message))
    def add_function_call(self, name, *params, namespace=None):
        """Append a call statement, optionally namespace-qualified."""
        if namespace:
            name = '{}::{}'.format(namespace, name)
        params_as_str = convert_params_to_str(params)
        self.add_statement('{}({})'.format(name, params_as_str))
    def add_assert_eq(self, val_1, val_2):
        """Append a gtest ``ASSERT_EQ(val_1, val_2);`` statement."""
        self.add_function_call('ASSERT_EQ', val_1, val_2)
    def add_nice_mock(self, class_name='mock_class_name'):
        """Declare a ``NiceMock<class_name> var_name`` variable.

        FIX: this method (and add_strict_mock) was defined twice, the second
        definition silently shadowing the zero-argument first one; merged
        into a single method whose default reproduces the old no-arg form.
        """
        self.add_statement('NiceMock<{}> var_name'.format(class_name))
    def add_strict_mock(self, class_name='mock_class_name'):
        """Declare a ``StrictMock<class_name> var_name`` variable
        (see add_nice_mock for the duplicate-definition merge note)."""
        self.add_statement('StrictMock<{}> var_name'.format(class_name))
    def generate(self):
        """Render: pre-specifier statements, public section, private section,
        then current statements.

        NOTE: the specifier sections render to *strings*, which the list
        unpacking below splats character-by-character — harmless because
        everything is re-joined, so the quirk is kept as-is.
        """
        if self.statements_before_specifiers:
            # do not change self.statements_before_specifiers incase
            # the generate() method is called more than once
            before_statements = self.statements_before_specifiers
        else:
            before_statements = ['']
        if self.public_specifier:
            public_statements = self.public_specifier.generate()
        else:
            public_statements = ['']
        if self.private_specifier:
            private_statements = self.private_specifier.generate()
        else:
            private_statements = ['']
        return ''.join(
            [*before_statements, *public_statements, *private_statements,
             *self.statements])
class CodeBlock(StatementGroup):
    """A StatementGroup rendered inside braces, optionally terminated with a
    semicolon (needed e.g. for class definitions)."""

    def __init__(self, has_semicolon=False):
        super().__init__()
        self.has_semicolon = has_semicolon

    def generate(self):
        """Render as ``{\\n<statements>}<;>\\n\\n``."""
        inner = super().generate()
        terminator = ';' if self.has_semicolon else ''
        return '{\n' + inner + '}' + terminator + '\n\n'
class CppClass(CodeBlock):
    """A C++ class definition: header plus a ';'-terminated brace block."""

    def __init__(self, name, base_class=None):
        super().__init__(has_semicolon=True)
        self.header = self._generate_header(name, base_class)

    def _generate_header(self, name, base_class):
        # Only public derivation is supported, mirroring the original.
        derivation = ' : public {}'.format(base_class) if base_class else ''
        return 'class {}{} '.format(name, derivation)

    def generate(self):
        return self.header + super().generate()
class Function(CodeBlock):
    """A free-function definition with a typed header and a statement body."""

    def __init__(self, return_type, name, *params):
        super().__init__()
        self.header = self._generate_header(return_type, name, *params)

    def _generate_header(self, return_type, name, *params):
        return '{} {}({}) '.format(
            return_type, name, convert_params_to_str(params))

    def add_return(self, expression):
        """Append a ``return <expression>;`` statement."""
        self.add_statement('return ' + expression)

    def add_run_all_tests_and_return(self):
        """Convenience: the usual gtest ``return RUN_ALL_TESTS();`` tail."""
        self.add_return('RUN_ALL_TESTS()')

    def generate(self):
        return self.header + super().generate()
class MacroFunction(CodeBlock):
    """A macro invocation (e.g. gtest's ``TEST(...)``) followed by a brace
    block of statements."""

    def __init__(self, name, *params):
        super().__init__()
        self.header = self._generate_header(name, *params)

    def _generate_header(self, name, *params):
        return '{}({}) '.format(name, convert_params_to_str(params))

    def generate(self):
        return self.header + super().generate()
| true
|
3313937b10ac5b9f22182ec308a4a2872f02b236
|
Python
|
niveditarufus/SMAI-Homeworks
|
/Kmeans/extended_kmeans.py
|
UTF-8
| 1,762
| 2.96875
| 3
|
[] |
no_license
|
import numpy as np
import random
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from scipy.spatial import distance
import math
def shortest_distance(x1, y1, a, b=1):
    """Perpendicular distance from point (x1, y1) to the line a*x - b*y = 0
    (i.e. y = (a/b)*x through the origin; with the default b=1, `a` is the
    line's slope)."""
    return abs(b * y1 - a * x1) / math.sqrt(a * a + b * b)
# Two synthetic Gaussian clusters; points are iteratively reassigned to
# whichever cluster's dominant eigen-direction (a line through the origin)
# they lie closest to, then the result is plotted.
mean1 = [1,1]
cov1 = [[2,1],[1,3]]
mean2 = [3,8]
cov2 = [[2,1],[1,3]]
x1 = np.random.multivariate_normal(mean1, cov1, 500)
x2 = np.random.multivariate_normal(mean2, cov2, 500)
# BUG FIX: `xrange` is Python 2 only and this file otherwise uses print();
# `range` behaves identically here.
for j in range(1):
    print(j)
    y1 = []
    y2 = []
    C1 = np.cov(x1.T)
    C2 = np.cov(x2.T)
    eigenValues, eigenVectors = np.linalg.eig(C1)
    idx = eigenValues.argsort()[::-1]
    e1 = eigenValues[idx][0]
    # NOTE(review): this takes the first ROW of the sorted eigenvector
    # matrix, not the first column (np.linalg.eig stores eigenvectors as
    # columns) — confirm intent.
    v1 = eigenVectors[:,idx][0]
    m1 = v1[1]/v1[0]
    eigenValues, eigenVectors = np.linalg.eig(C2)
    idx = eigenValues.argsort()[::-1]
    e2 = eigenValues[idx][0]
    v2 = eigenVectors[:,idx][0]
    m2 = v2[1]/v2[0]
    a = np.linspace(-5,5,30)
    b1 = m1*a
    b2 = m2*a
    # Assign every point of cluster 1 to the nearer eigen-line.
    for i in range(len(x1)):
        d1 = shortest_distance(x1[i][0], x1[i][1],m1)
        d2 = shortest_distance(x1[i][0], x1[i][1],m2)
        if d1<d2:
            y1.append(x1[i])
        else:
            y2.append(x1[i])
    # Assign every point of cluster 2 likewise.
    for i in range(len(x2)):
        d1 = shortest_distance(x2[i][0], x2[i][1],m1)
        d2 = shortest_distance(x2[i][0], x2[i][1],m2)
        if d1<d2:
            y1.append(x2[i])  # BUG FIX: was x1[i] (copy-paste), which dropped the x2 points being classified
        else:
            y2.append(x2[i])  # BUG FIX: was x1[i]
    y1 = np.asarray(y1)
    y2 = np.asarray(y2)
    print(y1.shape, y2.shape)
    # Converged when cluster 1 no longer changes; otherwise iterate again.
    if(np.array_equal(x1,y1)):
        break
    else:
        x1 = y1
        x2 = y2
plt.plot(a,b1,label = "Eigen vector of cluster 1 after convergence")
plt.plot(a,b2,label = "Eigen vector of cluster 2 after convergence")
plt.scatter(x1[:, 0], x1[:, 1], c= 'b',marker = '^',label = "cluster 1 after convergence")
plt.scatter(x2[:, 0], x2[:, 1], c= 'g', label = "cluster 2 after convergence")
plt.legend()
plt.show()
| true
|
7780aec52bdd8699fcc6a5f6d593b6c959e54af2
|
Python
|
AmirBitaraf/CodeforcesBestComments
|
/main.py
|
UTF-8
| 1,565
| 2.703125
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup as Soup
import requests
from sortedcontainers import SortedSet
import threading,sys
start = 1      # first blog entry id to scan
end = 15000    # last blog entry id (scan runs to end+1 per worker)
num = 300      # keep only the top/bottom `num` comments
ss = SortedSet(key=lambda a:-a[0])  # best comments, sorted by rating descending
ws = SortedSet()                    # worst comments, natural ascending order
_threads = 7   # number of scraper worker threads
thread = []    # worker Thread objects, joined at the end
def check_list():
    """Background trimmer: every 5 s, cap both result sets at `num` entries.

    NOTE(review): the `checker` thread built from this function is created
    below but never started, so this loop does not currently run.
    """
    # BUG FIX: the module only imports threading and sys, so time.sleep
    # raised NameError the moment this function ran; import locally to keep
    # the fix self-contained.
    import time
    while True:
        if len(ss) > num:
            del(ss[num:])
        if len(ws) > num:
            del(ws[num:])
        time.sleep(5)
def call(start,end,cnt,ss,ws):
    # Worker: scan blog entries start, start+cnt, start+2*cnt, ... and record
    # (rating, comment_id, page, user_name) for every comment found into both
    # shared sorted sets.
    for page in range(start,end+1,cnt):
        sys.stdout.flush()
        url = "http://codeforces.com/blog/entry/%s" % page
        r = requests.get(url,allow_redirects = False)
        if r.status_code != 200:
            print("Blog Entry %s Not Found" % page)
        else:
            # NOTE(review): no explicit parser given to BeautifulSoup — the
            # result depends on which parsers are installed.
            soup = Soup(r.text)
            for comment in soup.find_all("div",{"class":"comment"}):
                name = comment.find("a",{"class":"rated-user"}).text
                # NOTE(review): {"class","commentRating"} is a *set*, not a
                # dict — likely meant {"class": "commentRating"}; confirm.
                rating = int(comment.find("span",{"class","commentRating"}).text)
                _id = int(comment.find("table").attrs["commentid"])
                ws.add((rating,_id,page,name))
                ss.add((rating,_id,page,name))
# NOTE(review): `checker` is created but never started (no checker.start()),
# so check_list's trimming never runs; if started it would also need to be a
# daemon thread or the process would never exit.
checker = threading.Thread(target=check_list)
# Fan out the scan across _threads workers with interleaved page ranges.
for i in range(_threads):
    thread.append(threading.Thread(target=call,args=(start+i,end,_threads,ss,ws)))
    thread[-1].start()
for t in thread:
    t.join()
# Report: ss iterates best-first (rating descending), ws worst-first.
print("BEST")
for item in ss:
    print("[user:%s] %s http://codeforces.com/blog/entry/%s#comment-%s" % (item[3],item[0],item[2],item[1]))
print("WORST")
for item in ws:
    print("[user:%s] %s http://codeforces.com/blog/entry/%s#comment-%s" % (item[3],item[0],item[2],item[1]))
| true
|
153c94624945370384d8e79b84432395e6efb866
|
Python
|
nhs04047/opencv_python_study
|
/P14/threshold_otsu.py
|
UTF-8
| 471
| 2.84375
| 3
|
[] |
no_license
|
import sys
import cv2

# Otsu binarization demo: load a grayscale image and threshold it with an
# automatically chosen threshold, showing source and result side by side.
src = cv2.imread('image.jpg', cv2.IMREAD_GRAYSCALE)
if src is None:
    print('Image load failed')
    sys.exit()

# OR the OTSU flag into the threshold type: the passed threshold (0) is
# ignored and computed automatically. cv2.threshold returns two values: the
# computed Otsu threshold (float) and the destination image.
# Passing cv2.THRESH_OTSU alone would also work.
th, dst = cv2.threshold(src, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
print("otsu's threshold:", th)

cv2.imshow('src', src)
cv2.imshow('dst', dst)
cv2.waitKey()
# BUG FIX: cv2.destroyWindow() requires a window name and raises TypeError
# when called with no argument; destroyAllWindows() closes both windows.
cv2.destroyAllWindows()
| true
|
eec432be5e8567773e717138fb0c003a09d3e8da
|
Python
|
AlexYangLong/SimpleSpiders
|
/QSBK-spider/qsbk-spider.py
|
UTF-8
| 3,253
| 3.015625
| 3
|
[] |
no_license
|
from user_agent import get_random_useragent
import requests
from bs4 import BeautifulSoup
class HtmlDownloader(object):
    """Downloader: fetches a page with browser-like headers on a shared
    requests session; returns the response text or None on failure."""
    def download(self, session, url, referer=None):
        print(session.cookies.get_dict())
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Host': 'www.qiushibaike.com',
            'User-Agent': get_random_useragent()
        }
        if referer:
            headers['Referer'] = referer
        try:
            resp = session.get(url, headers=headers)
            return resp.text
        except BaseException as e:
            # NOTE(review): BaseException also swallows KeyboardInterrupt /
            # SystemExit; requests.RequestException would be narrower.
            print('请求失败!爬取失败!', e)
            return None
class HtmlParser(object):
    """Parser: extracts joke links and joke text from fetched HTML."""
    def get_content_urls(self, server_url, html):
        """
        Extract the URL of each joke from a listing page.
        :param server_url: site root used to absolutize the relative hrefs
        :param html: HTML string
        :return: list of absolute joke URLs
        """
        urls = []
        soup = BeautifulSoup(html, 'html.parser')
        jokes_as = soup.find_all('a', class_='contentHerf')
        for a in jokes_as:
            urls.append(server_url + a.get('href'))
        return urls
    def get_data_content(self, html):
        """
        Extract the joke text from a joke detail page.
        :param html: HTML string
        :return: joke text, or None when no content node is found
        """
        soup = BeautifulSoup(html, 'html.parser')
        # The site uses either .content or .content-text depending on page type.
        joke_content = soup.find('div', class_='content') if soup.find('div', class_='content') \
            else soup.find('div', class_='content-text')
        if joke_content:
            return joke_content.get_text()
        else:
            return None
class SpiderMain(object):
    """Main program: builds the listing URL for `page`, then crawls every
    joke link found on that listing."""
    def __init__(self, target_url, server_url, page=1):
        # Page 1 is the bare listing URL; later pages append page/<n>/.
        if page == 1:
            self.target_url = target_url
        else:
            self.target_url = target_url + 'page/' + str(page) + '/'
        self.server_url = server_url
        self.content_urls = []
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
    def craw(self):
        # Prime cookies with a visit to the site root, then fetch the listing
        # and every joke page it links to.
        session = requests.session()
        session.get(self.server_url)
        print(session.cookies.get_dict())
        html_content = self.downloader.download(session, self.target_url, self.target_url)
        if html_content:
            self.content_urls = self.parser.get_content_urls(self.server_url, html_content)
            print('urls count:', len(self.content_urls))
            for i, url in enumerate(self.content_urls):
                print('craw %d: %s' % ((i + 1), url))
                html_content = self.downloader.download(session, url, self.target_url)
                data_content = self.parser.get_data_content(html_content)
                print(data_content)
        else:
            # (runtime string left unchanged; "爬去" looks like a typo of 爬取)
            print('爬去失败!')
def main():
    # Entry point: crawl page 1 of the text-jokes listing.
    target_url = 'https://www.qiushibaike.com/text/'
    page = 1
    server_url = 'https://www.qiushibaike.com'
    spider = SpiderMain(target_url, server_url, page)
    spider.craw()

if __name__ == '__main__':
    main()
| true
|
dda6f4ceacce040968033cc8404c2024957aa15e
|
Python
|
abolisetti/Project-Euler-Solutions
|
/ProjectEuler9.py
|
UTF-8
| 180
| 3.109375
| 3
|
[] |
no_license
|
#Project Euler 9
# Find the Pythagorean triplet (a, b, c) with a + b + c = 1000 and print the
# product a*b*c. c is forced by the perimeter constraint, so exact integer
# arithmetic replaces the original float-sqrt comparison, and we stop after
# the first hit — the original's inner-loop-only break let the mirrored
# (b, a) pair print the same product a second time.
found = False
for a in range(1, 750):
    for b in range(1, 750):
        c = 1000 - a - b
        if c > 0 and a * a + b * b == c * c:
            print(a * b * c)
            found = True
            break
    if found:
        break
|
be5dbc4e864ac2ff700e990bcd45b2f54ba97ed9
|
Python
|
satire6/Anesidora
|
/toontown/src/char/LocalChar.py
|
UTF-8
| 782
| 2.578125
| 3
|
[] |
no_license
|
"""LocalChar module: contains the LocalChar class"""
import DistributedChar
from otp.avatar import LocalAvatar
from otp.chat import ChatManager
import Char
class LocalChar(DistributedChar.DistributedChar, LocalAvatar.LocalAvatar):
"""LocalChar class:"""
def __init__(self, cr):
"""
Local char constructor
"""
try:
self.LocalChar_initialized
except:
self.LocalChar_initialized = 1
DistributedChar.DistributedChar.__init__(self, cr)
LocalAvatar.LocalAvatar.__init__(self, cr)
# Is this redundant with LocalAvatar: ---> self.chatMgr = ChatManager.ChatManager()
self.setNameVisible(0)
# Init the avatar sounds
Char.initializeDialogue()
| true
|
89426f19634d7c133cb81159159bb3ba4f54f3a9
|
Python
|
PyProProgramming/Password_manager
|
/src/db.py
|
UTF-8
| 931
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
# Imports
import os
import sqlite3

# Make sure the parent directory exists; sqlite3.connect() then creates the
# database file itself. BUG FIX: the original open("data/data.db", "w")
# "touch" crashed with FileNotFoundError whenever data/ was missing, and was
# redundant anyway since connect() creates the file.
os.makedirs("data", exist_ok=True)

# Connecting to the database
conn = sqlite3.connect("data/data.db")
# Getting the cursor object
cursor = conn.cursor()

# Creating the table if it already hasn't been made
cursor.execute("""CREATE TABLE IF NOT EXISTS everything(
    password text,
    app_url text,
    app_name text,
    email text,
    username text
)""")

# Function to add a password record; parameterized SQL, committed immediately.
def add_to_db(username, email, password, app_url, app_name):
    cursor.execute("INSERT INTO everything(password, app_url, app_name, email, username) VALUES (?, ?, ?, ?, ?)",
                   (password, app_url, app_name, email, username))
    conn.commit()

# Function to list all records as tuples in column order
# (password, app_url, app_name, email, username).
def list_all():
    cursor.execute("SELECT * FROM everything")
    return cursor.fetchall()
| true
|
81866f1bdeb96b167537b7447a3c97dc600250c7
|
Python
|
qqiang2006/Stock
|
/class_search_stock.py
|
UTF-8
| 3,837
| 2.75
| 3
|
[] |
no_license
|
#coding=utf8
__author__ = 'lihuixian'
import json, urllib
from urllib import urlencode
import sys
import io
import string
import sqlite3
reload(sys)
sys.setdefaultencoding("utf8")
#股票查询函数
class Stock:
    """Stock lookup via the juhe.cn finance API.

    Python 2 code: relies on urllib.urlopen, string.atof and the utf-8
    default-encoding hack above. Results are cached on the instance in
    back_all (formatted text) and min_pic (intraday chart URL).
    """
    def __init__(self):
        self.min_pic=''
        self.back_all = ''
        self.appkey="×××××××××××"  # API key — obtain by registering at juhe.cn
        # NOTE(review): this also binds a throwaway local name `url`.
        self.url=url = "http://web.juhe.cn:8080/finance/stock/hs"
    # Update the API key / endpoint used by this instance.
    def modify_appkey_url(self,value1,value2):
        self.appkey=value1
        self.url=value2
    def search_stock(self,ID):
        # Accepts either a numeric stock code or a (Chinese) stock name.
        # Case 1: numeric code — infer the exchange prefix from the first digit
        if ID[0] in ['6','0','3','9']:
            ret =int(ID[0])
            if ret in [0, 3]:
                stock_num = "%s%s" % ("sz", ID)
            else:
                stock_num = "%s%s" % ("sh", ID)
        # Case 2: treat the input as a name
        else:
            # Open the local stock-list database (hard-coded Windows path)
            data_base_stock = sqlite3.connect('D:\Data base\stock.db')
            cu = data_base_stock.cursor()
            # Fuzzy-match the stock id by name. NOTE(review): the LIKE clause
            # is built by string concatenation — SQL-injection risk if ID is
            # ever untrusted; use a parameterized query.
            cu.execute("select stock_ID from stock_list where stock_name like '%"+ID+"%'")
            stock_num_list = cu.fetchall()
            stock_num =list(stock_num_list)[0][0]
        params = {
            "gid" : stock_num, # stock id: sh prefix for Shanghai, sz for Shenzhen, e.g. sh601009
            "key" : self.appkey, #APP Key
        }
        params = urlencode(params)
        f = urllib.urlopen("%s?%s" % (self.url, params))
        content = f.read()
        res = json.loads(content)
        if res:
            error_code = res["error_code"]
            if error_code == 0:
                # Successful request: unpack the result payload
                first_result = res["result"]
                second_result = first_result[0]
                mid_result =second_result["data"]
                stock_name =mid_result["name"]
                # Percentage change and absolute price change
                rate = mid_result["increPer"]
                price_change=mid_result["increase"]
                pic=second_result["gopicture"]
                # Intraday (minute) chart URL
                self.min_pic=pic["minurl"]
                # Build the display strings (runtime Chinese text unchanged)
                back_name= "股票名称为: " +stock_name
                # Latest price
                final_result =mid_result["nowPri"]
                back_price="股票最新价格为: " + final_result
                if string.atof(rate) > 0:
                    back_rate= "股票涨幅为:" + rate
                else:
                    back_rate="股票跌幅为: " + rate
                if string.atof(price_change) >0:
                    back_change="股票升值:" + price_change + "元"
                else:
                    back_change="股票贬值:" + price_change + "元"
                # Final multi-line query result
                self .back_all=back_name + "\r\n""\r\n" +back_price +"\r\n""\r\n" +back_rate +"\r\n""\r\n" +back_change
            else:
                self.back_all= "%s:%s" % (res["error_code"],res["reason"])
        else:
            self.back_all="request api error"
        return self.back_all
if __name__ == '__main__':
    # Demo: look up one stock by numeric code and print the formatted result.
    # The commented-out lines below are alternative example calls by name.
    stock_A=Stock()
    # ID=raw_input("请输入股票代码:")
    result=stock_A.search_stock("600518")
    print "----------------------------"
    print(stock_A.back_all)
    print "----------------------------"
    # result = stock_A.search_stock("中国建筑")
    # # print stock_A.min_pic
    # print(stock_A.back_all)
    # result = stock_A.search_stock("华天科技")
    # print "----------------------------"
    # print(stock_A.back_all)
    # print "----------------------------"
| true
|
b4ba39f0e29ea2fa6838bd2da2f06b9144cf304b
|
Python
|
Ace314159/AoC2020
|
/Day11.py
|
UTF-8
| 2,713
| 3.1875
| 3
|
[] |
no_license
|
def one(state):
    """AoC 2020 day 11, part 1: run the seating cellular automaton to a
    fixed point and return the number of occupied seats.

    Rules (8 immediate neighbours): an empty seat 'L' with no occupied
    neighbour becomes '#'; a seat with 4+ occupied neighbours becomes 'L';
    floor '.' never changes. The input grid is not mutated.
    """
    offsets = [(dy, dx)
               for dy in (-1, 0, 1) for dx in (-1, 0, 1)
               if (dy, dx) != (0, 0)]
    while True:
        nxt = [row[:] for row in state]
        for y, row in enumerate(state):
            for x, cur in enumerate(row):
                if cur == '.':
                    continue
                occ = 0
                for dy, dx in offsets:
                    ny, nx = y + dy, x + dx
                    if 0 <= ny < len(state) and 0 <= nx < len(row):
                        if state[ny][nx] == '#':
                            occ += 1
                if cur == 'L' and occ == 0:
                    nxt[y][x] = '#'
                elif occ >= 4:
                    nxt[y][x] = 'L'
        if nxt == state:
            break
        state = nxt
    return sum(row.count('#') for row in state)
def two(state):
    """AoC 2020 day 11, part 2: like part 1 but each seat looks along the 8
    directions to the first *visible* seat (skipping floor), and 5+ visible
    occupied seats empty a seat. Returns the occupied-seat count at the
    fixed point; the input grid is not mutated."""
    offsets = [(dy, dx)
               for dy in (-1, 0, 1) for dx in (-1, 0, 1)
               if (dy, dx) != (0, 0)]
    while True:
        nxt = [row[:] for row in state]
        for y, row in enumerate(state):
            for x, cur in enumerate(row):
                if cur == '.':
                    continue
                occ = 0
                for dy, dx in offsets:
                    # Walk over floor cells until a seat or the edge.
                    ny, nx = y + dy, x + dx
                    while (0 <= ny < len(state) and 0 <= nx < len(row)
                           and state[ny][nx] == '.'):
                        ny += dy
                        nx += dx
                    if (0 <= ny < len(state) and 0 <= nx < len(row)
                            and state[ny][nx] == '#'):
                        occ += 1
                if cur == 'L' and occ == 0:
                    nxt[y][x] = '#'
                elif occ >= 5:
                    nxt[y][x] = 'L'
        if nxt == state:
            break
        state = nxt
    return sum(row.count('#') for row in state)
if __name__ == '__main__':
    # Read the puzzle input as a grid of characters and print both answers.
    with open('inputs/11.txt') as f:
        lines = [list(line.strip()) for line in f.readlines()]
    print(one(lines))
    print(two(lines))
| true
|
000bfdc7ad83c7e365aa5c194183f6249b3a8836
|
Python
|
owen-saunders/met-office-hackathon
|
/LAT_LON_to_Nearby_Cyclones.py
|
UTF-8
| 1,901
| 2.640625
| 3
|
[] |
no_license
|
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
import folium
from IPython.display import HTML, display
from itertools import cycle
from geopy.distance import great_circle
import argparse
# Get data in portions to not overload memory
# fields = {'SID', 'YEAR', 'BASIN', 'SUBBASIN', 'NAME', 'LAT', 'LON', 'WMO_WIND' 'STORM_SPEED', 'STORM_DIR'} # -- {0,1,2,3,4,8,9,10,161,162}
df_chunks = pd.read_csv('./ibtracs.ALL.list.v04r00.csv', chunksize=2000, usecols=[0,1,2,3,4,7,8,9,10,161,162], engine='python')
mylist = []
for chunk in df_chunks:
    mylist.append(chunk)
df = pd.concat(mylist, axis= 0)
# Conversion to epsg:5387 not needed
crs = {'init':'epsg:4326'}  # WGS84 lat/lon
geometry = [Point(x,y) for x,y in zip(df['LON'], df['LAT'])]
geo_df_nearby = gpd.GeoDataFrame(df, crs = crs, geometry = geometry)

parser = argparse.ArgumentParser()
parser.add_argument('lats', help="Provides a Latitude")
parser.add_argument('lons', help="Provides a Longitude")
#### USER INPUT HERE - REQUIRES LAT and LON
args = parser.parse_args()
# NOTE(review): argparse yields strings here; they are passed unconverted to
# great_circle below — confirm geopy accepts string coordinates or add float().
lat = args.lats
lon = args.lons

def within_distance_of(lat,lon,distance):
    # Return (SID, LAT, LON) for every track point of the module-level
    # geo_df_nearby within `distance` km (great-circle) of (lat, lon).
    nearby = []
    for SID,LON,LAT in zip(geo_df_nearby["SID"],geo_df_nearby["LON"],geo_df_nearby["LAT"]):
        if great_circle((lat,lon),(LAT,LON)).km < distance: nearby.append((SID,LAT,LON))
    return nearby

# Plot every nearby point on a folium map, one colour per storm id (SID).
nearby = within_distance_of(lat,lon,100)
MapNearby = folium.Map(location=[lat,lon])
colors = cycle(["aqua", "black", "blue", "fuchsia", "gray", "green", "lime", "maroon", "navy", "olive", "purple", "red", "silver", "teal", "yellow"])
SIDColors = {}
for SID,LAT,LON in nearby:
    if SID not in SIDColors:
        SIDColors[SID] = next(colors)
    folium.vector_layers.CircleMarker(location=[LAT,LON], color=SIDColors[SID], popup=str(SID), radius=8, fill_color='blue').add_to(MapNearby)
display(MapNearby)
| true
|
33182d4b1ac794063c3b7e4d1c822cecb9f17b77
|
Python
|
kubrakosee/python-
|
/Temel Veri Yapıları ve Objeler/HİPOTENÜS_BULMA.py
|
UTF-8
| 402
| 3.09375
| 3
|
[] |
no_license
|
"""
KULLANICIDAN BİR DİK ÜÇGENİN DİK OLAN İKİ KENARINI(A,B)ALIN
VE HİPOTENÜS UZUNLUĞUNU BULMAYA ÇALIŞIN
"""
birinci_kenar=int(input("birinci kenarını giriniz:"))
ikinci_kenar=int(input("ikinci kenarı giriniz:"))
hipotonüs=(birinci_kenar **2+ikinci_kenar**2)**0.5
print("hipotenüs:",hipotonüs)
| true
|
14c957ed93ea6d4ceb3e5803e167c2475db423f3
|
Python
|
Leputa/Leetcode
|
/python/交换前缀.py
|
UTF-8
| 501
| 3.265625
| 3
|
[] |
no_license
|
def exchange_prefix(strs, n, m):
    """Given n strings of length m, return the product over every column of
    the number of distinct characters in that column, modulo 1_000_000_007."""
    MOD = 1000000007
    answer = 1
    for col in range(m):
        distinct = {strs[row][col] for row in range(n)}
        answer = answer * len(distinct) % MOD
    return answer
if __name__ == "__main__":
n, m = list(map(int, input().split(" ")))
strs = []
for i in range(n):
strs.append(input())
print(exchange_prefix(strs, n, m))
#print(exchange_prefix(["ABC", "DEF"], 2, 3))
| true
|
3071ca306cb112c0e216a9109fdeddca630921ee
|
Python
|
kwantam/GooSig
|
/libGooPy/rsa.py
|
UTF-8
| 4,091
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/python
#
# (C) 2018 Dan Boneh, Riad S. Wahby <rsw@cs.stanford.edu>
import hashlib
import sys
import libGooPy.primes as lprimes
import libGooPy.prng as lprng
import libGooPy.util as lutil
# RSA-OAEP enc/dec using SHA-256
# NOTE this is a non-standard implementation that you should probably not use except for benchmarking
class RSAPubKey(object):
    # Public key for this module's non-standard RSA-OAEP (SHA-256) scheme;
    # per the module header, benchmark-only — not for real use.
    HASHLEN = 32  # SHA256 digest size
    def __init__(self, n, e):
        self.n = n
        self.e = e
        n_octets = (self.n.bit_length() + 7) // 8
        if n_octets < 128:
            raise ValueError("RSAKey does not support <1024-bit moduli")
        # OAEP layout bookkeeping: max plaintext bytes, data-block length and
        # mask, and the fixed label hash aligned at the top of the data block.
        self.max_mlen = n_octets - 2 * self.HASHLEN - 2
        self.dblen = n_octets - 1 - self.HASHLEN
        self.dbmask = (1 << (8 * self.dblen)) - 1
        self.lhash = int(hashlib.sha256(b"libGooPy_RSA_OAEP_LABEL").hexdigest(), 16) << (8 * (self.dblen - self.HASHLEN))
    @staticmethod
    def mask_gen(seed, length):
        # Mask generation via the project's hash-based PRNG (an MGF1 stand-in).
        return lprng.HashPRNG.new(seed).getrandbits(8 * length)
    def encrypt(self, m):
        # OAEP-pad m (label hash | 0..0 1 | m), mask seed and data block with
        # the PRNG, then raw RSA-encrypt the padded integer.
        mlen = (m.bit_length() + 7) // 8  # round up to some number of bytes
        if mlen > self.max_mlen:
            raise ValueError("message is too long")
        data = self.lhash | (1 << (8 * mlen)) | m
        seed = lutil.rand.getrandbits(8 * self.HASHLEN)
        dbMask = self.mask_gen(seed, self.dblen)
        maskedDb = dbMask ^ data
        sMask = self.mask_gen(maskedDb, self.HASHLEN)
        maskedSeed = sMask ^ seed
        enc_msg = (maskedSeed << (8 * self.dblen)) | maskedDb
        return pow(enc_msg, self.e, self.n)
class RSAKey(RSAPubKey):
    # Private key built from two primes; derives a small public exponent e
    # coprime to carmichael(p*q) and the matching private exponent d.
    def __init__(self, p, q):
        self.p = p
        self.q = q
        assert p != q
        assert lprimes.is_prime(p)
        assert lprimes.is_prime(q)
        # find a decryption exponent (must be coprime to carmichael(p * q) == lcm(p-1, q-1)
        lam = (p - 1) * (q - 1) // lutil.gcd(p - 1, q - 1)
        for e in lprimes.primes_skip(1):
            if e > 1000:
                raise RuntimeError("could find a suitable exponent!")
            d = lutil.invert_modp(e, lam)
            if d is not None:
                break
        self.d = d
        # now that we have n and e, initialize the pubkey
        RSAPubKey.__init__(self, p * q, e)  # pylint: disable=undefined-loop-variable
        assert (self.d * self.e) % lam == 1
    def get_public_key(self):
        # Return a key object holding only the public parameters (n, e).
        return RSAPubKey(self.n, self.e)
    def decrypt(self, c):
        # Inverse of RSAPubKey.encrypt: raw RSA decrypt, then unmask and
        # validate the OAEP padding (label hash, 0..0 1 delimiter, length).
        enc_msg = pow(c, self.d, self.n)
        maskedDb = enc_msg & self.dbmask
        maskedSeed = enc_msg >> (8 * self.dblen)
        if maskedSeed.bit_length() > 8 * self.HASHLEN:
            raise ValueError("invalid ciphertext")
        sMask = self.mask_gen(maskedDb, self.HASHLEN)
        seed = maskedSeed ^ sMask
        dbMask = self.mask_gen(seed, self.dblen)
        data = dbMask ^ maskedDb
        data ^= self.lhash
        if data >> (8 * (self.dblen - self.HASHLEN)) != 0:
            raise ValueError("invalid ciphertext")
        dlen = (7 + data.bit_length()) // 8
        data ^= 1 << (8 * (dlen - 1))
        if data >> (8 * (dlen - 1)) != 0:
            raise ValueError("invalid padding")
        if data.bit_length() > 8 * self.max_mlen:
            raise ValueError("invalid message")
        return data
def main(nreps):
    # Self-test harness: round-trip encrypt/decrypt with 2048- and 4096-bit
    # moduli built from the project's cached test primes, repeated nreps times.
    import libGooPy.test_util as tu  # pylint: disable=bad-option-value,import-outside-toplevel
    def test_endec():
        "RSA endec,RSA2048,RSA4096"
        (p1, q1) = lutil.rand.sample(tu.primes_1024, 2)
        (p2, q2) = lutil.rand.sample(tu.primes_2048, 2)
        r1 = RSAKey(p1, q1)
        rp1 = r1.get_public_key()
        m1 = lutil.rand.getrandbits(512)
        c1 = rp1.encrypt(m1)
        d1 = r1.decrypt(c1)
        r2 = RSAKey(p2, q2)
        rp2 = r2.get_public_key()
        m2 = lutil.rand.getrandbits(512)
        c2 = rp2.encrypt(m2)
        d2 = r2.decrypt(c2)
        return (m1 == d1, m2 == d2)
    tu.run_all_tests(nreps, "RSA", test_endec)

if __name__ == "__main__":
    # Optional argv[1]: repetition count (default 32).
    try:
        nr = int(sys.argv[1])
    except:
        nr = 32
    main(nr)
| true
|
67d4839eb36e5a1d6b164da9e2cbfd28087bec59
|
Python
|
kokoa-naverAIboostcamp/algorithm
|
/Algorithm/Ryan/BOJ19637.py
|
UTF-8
| 919
| 3.4375
| 3
|
[] |
no_license
|
# 19637번: IF문 좀 대신 써줘
# 방법 1
import sys
def boundary(a, x):
    """Binary search over a list of (key, value) tuples sorted by key:
    return the leftmost index i with x <= a[i][0] (bisect_left semantics),
    or len(a) when every key is smaller than x."""
    lo, hi = 0, len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        if a[mid][0] < x:
            lo = mid + 1
        else:
            hi = mid
    return lo
# Read N (name, threshold) pairs; for each of M powers, print the name of
# the first threshold that is >= that power (hand-rolled binary search).
N,M=map(int,sys.stdin.readline().split())
style={}
for _ in range(N):
    name,value=sys.stdin.readline().strip().split()
    value=int(value)
    # first-wins on duplicate thresholds
    if value not in style:
        style[int(value)]=name
# sort
sort_style=sorted(style.items())
# search
for _ in range(M):
    power=int(sys.stdin.readline())
    print(sort_style[boundary(sort_style,power)][1])
# 방법 2
import sys
from bisect import bisect_left as bl
N,M=map(int,sys.stdin.readline().split())
styleN,styleV=[],[]
for _ in range(N):
name,value=sys.stdin.readline().strip().split()
styleN.append(name)
styleV.append(int(value))
# search
for _ in range(M):
power=int(sys.stdin.readline())
print(styleN[bl(styleV,power)])
| true
|
4f7e2163d840e808276bc390476c50f558503c76
|
Python
|
shivam-g10/PN-Sequence-based-encryption-using-Raspberry-Pi
|
/pn_rx.py
|
UTF-8
| 2,424
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
import time
import operator as op
import RPi.GPIO as GPIO
inputSignal= []
#Input helper functions
def getCode(n):
    """Return the 5-bit LFSR seed for decimal digit n (0-9): a leading 1
    followed by the 4-bit big-endian binary encoding of n — which is exactly
    what the original 10-branch lookup table spelled out. Any other input
    yields [] (matching the original's fall-through)."""
    if isinstance(n, int) and 0 <= n <= 9:
        return [1, (n >> 3) & 1, (n >> 2) & 1, (n >> 1) & 1, n & 1]
    return []
def getPN(init):
    """Generate the 2**5 = 32-bit output stream of a 5-stage LFSR seeded
    with `init`, feedback = stage0 XOR stage4.

    NOTE: `init` is shifted in place (as in the original), so the caller's
    list is mutated and holds the final register state on return."""
    sequence = []
    for _ in range(1 << 5):
        feedback = init[0] ^ init[4]
        sequence.append(init[4])
        init[1:] = init[:-1]  # shift the register right by one stage
        init[0] = feedback
    return sequence
def split(inputSignal):
    # Demodulate a received 136-bit frame: drop the 4-bit tail, take the two
    # payload halves (skipping their 2-bit headers), keep every other sample
    # as the PN chip stream for each half, decode each half to a digit and
    # combine as tens*10 + ones.
    sig1 = inputSignal[0:len(inputSignal)-4]
    # NOTE(review): '/' here is Python 2 integer division (this file uses
    # Python 2 print statements); under Python 3 these would need '//'.
    inproc0 = sig1[len(sig1)/2+2:]
    inproc1 = sig1[2:len(sig1)/2]
    i = 0
    inPN0 = []
    inPN1 = []
    while(i<len(inproc0)):
        if(i%2!=0):
            inPN0.append(inproc0[i])
            inPN1.append(inproc1[i])
        i=i+1
    inN0 = recoverPN(inPN0)
    inN1 = recoverPN(inPN1)
    Sig = 10*int(inN1) + int(inN0)
    return Sig
def recoverPN(inPN):
    # Brute-force match the received chip stream against the 32-bit PN
    # sequence of each digit 0-9 and return the matching digit.
    flag = 0  # NOTE(review): never changed; the loop only exits via return
    i = 0
    while(flag ==0):
        count = 0
        for x, y in zip(inPN, getPN(getCode(i))):
            count = count + 1
            if x != y:
                break
        if(count==32):
            return i
        i = i + 1
        if(i==10):
            # NOTE(review): after printing, i stays >= 10, getCode returns
            # [], zip is empty and count stays 0 — the loop spins forever.
            print "invalid sequence"
#Main
def processInput(check): #Get input
    # Sample GPIO pin 7 every 50 ms until 136 bits have been collected, then
    # publish them via the module-global inputSignal and set inFlag = 1.
    print "test"
    print "Check : ", check
    global inFlag
    if(inFlag == 0):
        inputSig = []
        while True:
            print "GPIO.input(7) : ",GPIO.input(7)
            # NOTE(review): a high read is recorded as 0 and a low read as 1
            # (inverted line) — confirm this matches the transmitter wiring.
            if(GPIO.input(7) == True):
                inputSig.append(0)
            else:
                inputSig.append(1)
            time.sleep(0.05)
            if(len(inputSig)==136):
                print "Input complete. Signal length : ",len(inputSig)
                break
        global inputSignal
        inputSignal = inputSig
        print "inputSignal : ",inputSig,len(inputSig)
        inFlag = 1
# State machine flag: 0 = waiting for input, 1 = frame captured, 2 = decoded.
inFlag = 0
#Setting up hardware. GPIO Pins
GPIO.setmode(GPIO.BOARD)
#Initialize pin 7 as input with pull down resistor
GPIO.setup(7,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)
try:
    print "GPIO.input(7) : ",GPIO.input(7)
    # Busy-wait for the line to go high, then capture one 136-bit frame.
    while(inFlag==0):
        if(GPIO.input(7)==1):
            processInput(1)
    random =1
    # Busy-wait until decoding finishes (inFlag == 2).
    while(inFlag != 2):
        random =1
        if(inFlag==1):
            recoveredSignal = split(inputSignal)
            print "Recovered Signal : ",recoveredSignal
            inFlag = 2
    GPIO.cleanup() #Resets GPIO pins of RPi when complete
except KeyboardInterrupt:
    GPIO.cleanup() #Resets GPIO pins of RPi when force close
| true
|
ea9225b0ea3d27655dcd2827cde34ebe7d80cbc4
|
Python
|
ericma1999/algo-coursework
|
/dijkstra/implementation/BFS.py
|
UTF-8
| 1,481
| 3.4375
| 3
|
[] |
no_license
|
class Queue():
    """Simple FIFO queue used by BFS.

    Backed by collections.deque so dequeue() is O(1); the original
    list.pop(0) shifted the whole list, making each dequeue O(n).
    The method interface (isEmpty / enqueue / dequeue) is unchanged.
    """
    def __init__(self):
        # Local import keeps this block self-contained; the module has no
        # import section of its own.
        from collections import deque
        self.queue = deque()
    def isEmpty(self):
        """True when no items are queued."""
        return len(self.queue) == 0
    def enqueue(self, item):
        """Add item at the tail."""
        self.queue.append(item)
    def dequeue(self):
        """Remove and return the head item (raises IndexError when empty)."""
        return self.queue.popleft()
class BFS:
    # Breadth-first search from source s over graph G. Assumes the project's
    # graph API: G.V (vertex count) and G.adjacencies(v) yielding edge
    # objects with otherEndPoint(v) — not visible in this file; confirm.
    def __init__(self, G, s):
        self.starting = s
        # distToSource[v]: edge count from s to v; -1 marks "unreached"
        self.distToSource = [-1 for v in range(0, G.V)]
        # edgeTo[v]: previous vertex on the shortest path from s to v
        self.edgeTo = [-1 for v in range(0, G.V)]
        self.__bfs(G, s)
    def __bfs(self, G, s):
        # Standard queue-driven BFS; distToSource doubles as the visited set.
        q = Queue()
        q.enqueue(s)
        self.distToSource[s] = 0
        while(not q.isEmpty()):
            v = q.dequeue()
            for w in G.adjacencies(v):
                index = w.otherEndPoint(v)
                if (self.distToSource[index] == -1):
                    q.enqueue(index)
                    self.distToSource[index] = self.distToSource[v] + 1
                    self.edgeTo[index] = v
    def hasPathTo(self, v):
        return self.distToSource[v] != -1
    ## BFS calculates shortest path so this is the shortest path
    def pathTo(self, v):
        # Returns the path as [v, ..., s] (reversed), or None if unreachable.
        if (not self.hasPathTo(v)): return None
        path = []
        x = v
        while (x != self.starting):
            path.append(x)
            x = self.edgeTo[x]
        path.append(self.starting)
        return path
    ## BFS calculates shortest path so this is the shortest length
    def lengthTo(self, v):
        # Number of edges on the shortest path, or None if unreachable.
        if (not self.hasPathTo(v)): return None
        return self.distToSource[v]
| true
|
4047579f1ab6f9bd3d99b104fdf7553e1dc62955
|
Python
|
canassa/falcor-python
|
/tests/path_syntax/test_parser.py
|
UTF-8
| 1,441
| 3.296875
| 3
|
[] |
no_license
|
"""
Based on:
https://github.com/Netflix/falcor-path-syntax/blob/master/test/parse-tree/parser.spec.js
"""
from falcor.path_syntax import parser
def test_parse_a_simple_key_string():
out = parser('one.two.three')
assert out == ['one', 'two', 'three']
def test_parse_a_string_with_indexers():
out = parser('one[0]')
assert out == ['one', 0]
def test_parse_a_string_with_indexers_followed_by_dot_separators():
out = parser('one[0].oneMore')
assert out == ['one', 0, 'oneMore']
def test_parse_a_string_with_a_range():
out = parser('one[0..5].oneMore')
assert out == ['one', {'from': 0, 'to': 5}, 'oneMore']
def test_parse_a_string_with_a_set_of_tokens():
out = parser('one["test", \'test2\'].oneMore')
assert out == ['one', ['test', 'test2'], 'oneMore']
def test_treat_07_as_7():
out = parser('one[07, 0001].oneMore')
assert out == ['one', [7, 1], 'oneMore']
def test_parse_out_a_range():
out = parser('one[0..1].oneMore')
assert out == ['one', {'from': 0, 'to': 1}, 'oneMore']
def test_parse_out_multiple_ranges():
out = parser('one[0..1,3..4].oneMore')
assert out == ['one', [{'from': 0, 'to': 1}, {'from': 3, 'to': 4}], 'oneMore']
def test_parse_paths_with_newlines_and_whitespace_between_indexer_keys():
out = parser("""one[
0, 1, 2, 3, 4,
5, 6, 7, 8, 9].oneMore""")
assert out == ['one', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'oneMore']
| true
|
8194bf5980cfe16bae604972d67dfc3fe8280427
|
Python
|
Ummamali/photonsBackend
|
/api.py
|
UTF-8
| 5,306
| 2.515625
| 3
|
[] |
no_license
|
from donorsUtils import get_updated_donors
from flask import Flask, request
from flask.json import jsonify
from utils import get_data_from_file, save_data_to_file, good_response, bad_response, pathString_to_contrObject
from flask_cors import cross_origin
from datetime import datetime, date
from time import sleep
app = Flask(__name__)
@app.route('/contributors')
@cross_origin()
def get_contributors():
    """Return the full contributors store as a JSON success response."""
    return good_response(get_data_from_file('./contributors.json'))
@app.route('/thismonth')
@cross_origin()
def get_status():
    """Return this month's total contribution amount per contributor.

    Sums every contribution recorded in the current "month/year" bucket of
    recents.json and returns {contributor_key: total} as a success response.
    """
    sleep(0.5)  # artificial latency, presumably for UI spinners -- TODO confirm
    contr_data = get_data_from_file('./contributors.json')
    recents = get_data_from_file('./recents.json')
    # The last key in insertion order is the newest "month/year" bucket.
    for key in recents:
        latest = key
    latest_month = [int(item) for item in latest.split('/')]
    current_month = [date.today().month, date.today().year]
    contr_total = {key: 0 for key in contr_data}
    # Start a fresh bucket when the newest recorded (year, month) is older
    # than today's.  BUG FIX: the original compared latest_month[1] with
    # itself (always False) and mishandled year boundaries (e.g. Dec -> Jan).
    if (latest_month[1], latest_month[0]) < (current_month[1], current_month[0]):
        latest = f'{current_month[0]}/{current_month[1]}'
        recents[latest] = []
        save_data_to_file('./recents.json', recents)
    # Each entry is a "user/index" pointer into that user's contribution list.
    for path_str in recents[latest]:
        [contribution_object, contr_name, _] = pathString_to_contrObject(
            contr_data, path_str)
        contr_total[contr_name] += contribution_object['amount']
    return good_response(contr_total)
@app.route('/recents')
@cross_origin()
def get_recents():
    """Return a page of up to 10 recent contribution strings, newest first.

    Query parameter ``index`` says how many of the newest entries to skip.
    """
    skip = int(request.args.get('index'))
    buckets = get_data_from_file('./recents.json')
    # Flatten the per-month buckets into one chronological list.
    flattened = [entry for bucket in buckets.values() for entry in bucket]
    end_index = len(flattened) - skip
    if end_index <= 0:
        return good_response(payload={'list': [], 'moreAvailable': False}, msg='Index is probably out of range')
    start_index = max(end_index - 10, 0)
    page = flattened[start_index:end_index]
    page.reverse()
    # More pages exist only while we haven't reached the oldest entry.
    return good_response({'list': page, 'moreAvailable': start_index != 0})
@app.route('/new/contribution', methods=('POST',))
@cross_origin()
def add_contribution():
    """
    API Reference
    Request Object:
    {
        *userName: String,
        *contObject: {*stamp: int, *amount: int}
    }

    Appends the contribution to the user's list and records a "user/index"
    pointer in the current month's recents bucket.
    """
    reqObj = request.get_json()
    recents = get_data_from_file('./recents.json')
    contributors = get_data_from_file('./contributors.json')
    sleep(0.5)  # artificial latency, presumably for UI spinners -- TODO confirm
    # The last key in insertion order is the newest "month/year" bucket.
    for key in recents:
        latest = key
    latest_month = [int(item) for item in latest.split('/')]
    current_month = [date.today().month, date.today().year]
    # Start a fresh bucket when the newest recorded (year, month) is older
    # than today's.  BUG FIX: the original compared latest_month[1] with
    # itself (always False) and mishandled year boundaries (e.g. Dec -> Jan).
    if (latest_month[1], latest_month[0]) < (current_month[1], current_month[0]):
        latest = f'{current_month[0]}/{current_month[1]}'
        recents[latest] = []
    # Validate before mutating so a bad request leaves the stores untouched.
    if (reqObj["userName"] not in contributors):
        return bad_response(msg='User Name not found!!!')
    # Append the contribution and record its "user/index" pointer.
    user_cont_list = contributors[reqObj["userName"]]["contributions"]
    user_cont_list.append(reqObj["contObject"])
    cont_string = f"{reqObj['userName']}/{len(user_cont_list) - 1}"
    recents[latest].append(cont_string)
    save_data_to_file('./recents.json', recents)
    save_data_to_file('./contributors.json', contributors)
    return good_response({'recentString': cont_string}, msg="New Contribution has been added")
@app.route('/check/username')
@cross_origin()
def check_username():
    """Report whether the ``userName`` query parameter is a known contributor."""
    requested = request.args.get('userName')
    known = get_data_from_file('./contributors.json')
    return good_response(payload={"isRegistered": requested in known})
@app.route("/donors/update", methods=('POST', ))
@cross_origin()
def update_donors():
"""
API REFERENCE:
---reqObject:
{
donorsDiff: dict (object)
}
"""
reqObj = request.get_json()
donors_diff = reqObj.get("donorDiff")
return good_response(get_updated_donors(donors_diff))
@app.route('/new/contributor', methods=('POST', ))
@cross_origin()
def add_contributor():
    """
    API Reference:
    Request Object:
    {
        *name: String,
        *id: String
    }
    """
    payload = request.get_json()
    store = get_data_from_file('./contributors.json')
    # New contributors are keyed by their id and start with no contributions.
    store[payload["id"]] = {
        "name": payload["name"],
        "id": payload["id"],
        "contributions": []
    }
    save_data_to_file('./contributors.json', store)
    return good_response(msg='New User Has been added')
@app.route('/donors')
@cross_origin()
def get_donors():
    """Return the donors store as a JSON success response."""
    return good_response(get_data_from_file('donors.json'))
@app.route('/check/donor')
@cross_origin()
def check_donor():
    """Report whether the ``name`` query parameter is still unused by donors."""
    donors = get_data_from_file('donors.json')
    requested = request.args.get('name', '')
    return good_response(payload={"available": requested not in donors})
if __name__ == '__main__':
    # Development entry point only; use a WSGI server in production.
    app.run(debug=True)
| true
|
1b4a72e079526a3eb125bfef540cb562f69840e7
|
Python
|
aadeshdhakal/python_assignment_jan5
|
/Prime_read.py
|
UTF-8
| 56
| 2.609375
| 3
|
[] |
no_license
|
fp = open("prime.txt", "r")
print(fp.read())
fp.close()
| true
|
6894bfb1d761a2048ccce33f59783ff04fffdf8a
|
Python
|
Justyouso/python_syntax
|
/my_decorator/class_decorator_no_param.py
|
UTF-8
| 799
| 3.78125
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Author: wangchao
# @Time: 19-10-23 上午11:23
"""
不带参数的类装饰器
"""
class logger(object):
    """Parameter-less class-based decorator.

    Wrapping a callable logs its name on every invocation, then delegates
    to the original callable and returns its result unchanged.
    """

    def __init__(self, func):
        # Remember the callable being decorated.
        self._wrapped = func

    def __call__(self, *args, **kwargs):
        name = self._wrapped.__name__
        print("[INFO]: {func} 正在运行...".format(func=name))
        return self._wrapped(*args, **kwargs)
@logger
def say(something):
    """Print "say <something>!"; decorated so each call is logged first."""
    print("say {}!".format(something))
def say_without_decorator(something):
    """Print "say <something>!" with no decoration applied."""
    message = "say {}!".format(something)
    print(message)
if __name__ == '__main__':
    print("======= 装饰器调用 =======")
    say("你好!")
    print("======= 未用装饰器调用 =======")
    # 1. Instantiate the decorator class by hand.
    logger_say_without_decorator = logger(say_without_decorator)
    # 2. Calling the instance invokes its __call__ method.
    logger_say_without_decorator("Hello")
| true
|
280c6defd96a32a8369d08e99f55710619b8a89a
|
Python
|
luigisaetta/model-catalogv2
|
/model-files/score.py
|
UTF-8
| 2,865
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
import io
import pandas as pd
import numpy as np
import json
import os
import pickle
import logging
# Filenames of the serialized artifacts shipped alongside this script.
model_name = 'model.pkl'
scaler_name = 'scaler.pkl'
"""
Inference script. This script is used for prediction by scoring server when schema is known.
"""
# Module-level cache: populated by load_model(), read by predict()/preprocess_data().
model = None
scaler = None
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger_pred = logging.getLogger('model-prediction')
logger_pred.setLevel(logging.INFO)
logger_feat = logging.getLogger('features')
logger_feat.setLevel(logging.INFO)
def load_model():
    """
    Loads model from the serialized format

    Also loads the companion scaler; both are stored in the module-level
    ``model`` and ``scaler`` globals for use by predict()/preprocess_data().

    Returns
    -------
    model: a model instance on which predict API can be invoked
    """
    global model, scaler
    # Both artifacts live next to this script; compute the directory once
    # (the original recomputed realpath(__file__) for every open()).
    model_dir = os.path.dirname(os.path.realpath(__file__))
    contents = os.listdir(model_dir)
    if model_name in contents:
        with open(os.path.join(model_dir, model_name), "rb") as file:
            with open(os.path.join(model_dir, scaler_name), "rb") as sfile:
                model = pickle.load(file)
                scaler = pickle.load(sfile)
        # Idiom fix: identity comparison with None, not '!='.
        assert model is not None
        assert scaler is not None
        logger_pred.info("Loaded model and scaler...")
    else:
        raise Exception('{0} is not found in model directory {1}'.format(model_name, model_dir))
    return model
# added for data scaling
def preprocess_data(x):
    """Apply the fitted scaler to the raw feature matrix.

    Requires load_model() to have populated the module-level ``scaler``.
    """
    global scaler
    logger_pred.info("Scaling features...")
    x = scaler.transform(x)
    return x
def predict(data, model=load_model()):
    """
    Returns prediction given the model and data to predict

    NOTE(review): the default ``model=load_model()`` is evaluated once at
    import time, which eagerly loads the pickled model -- this matches the
    usual scoring-script pattern, but confirm lazy loading is not desired.

    Parameters
    ----------
    model: Model instance returned by load_model API
    data: Data format as expected by the predict API of the core estimator. For eg. in case of sckit models it could be numpy array/List of list/Panda DataFrame

    Returns
    -------
    predictions: Output from scoring server
        Format: {'prediction':output from model.predict method}
    """
    logger_pred.info("In predict...")
    # some check
    assert model is not None, "Model is not loaded"
    # Incoming payload is a JSON string; parse it into a 2-D feature array.
    x = pd.read_json(io.StringIO(data)).values
    logger_feat.info("Logging features before scaling")
    logger_feat.info(x)
    logger_feat.info("...")
    # apply scaling
    x = preprocess_data(x)
    logger_feat.info("Logging features after scaling")
    logger_feat.info(x)
    logger_feat.info("...")
    logger_pred.info("Invoking model......")
    # Keep only the positive-class probability, rounded to 4 decimals.
    preds = model.predict_proba(x)
    preds = np.round(preds[:, 1], 4)
    preds = preds.tolist()
    logger_pred.info("Logging predictions")
    logger_pred.info(preds)
    return { 'prediction': preds }
| true
|
2b63a0d0531e9d1b17a34ed2c3e6502a663e7b03
|
Python
|
aenglander/python-concurrent-programming-examples
|
/time-api-standard-example.py
|
UTF-8
| 647
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
import http.client
import json
import time
def get_time(name):
    """Fetch the current time for this host's IP from worldtimeapi.org.

    Returns a "name: <datetime>" string, or None (after printing the error)
    when the API responds with a non-200 status.
    """
    client = http.client.HTTPConnection("worldtimeapi.org")
    try:
        client.request("GET", "/api/ip")
        http_response = client.getresponse()
        if http_response.status != 200:
            print(f"Error Response: {http_response.status} {http_response.reason}")
            return
        body = http_response.read()
    finally:
        # Bug fix: the connection was never closed, leaking a socket per call.
        client.close()
    response = json.loads(body)
    return f"{name}: {response['datetime']}"
def main():
    # Three sequential (blocking) requests -- this file is the serial
    # baseline that concurrent variants are compared against.
    print(get_time("Process 1"))
    print(get_time("Process 2"))
    print(get_time("Process 3"))


# Time the whole run so the serial cost is visible.
start = time.time()
main()
finish = time.time()
print(f"Total Time: {finish - start}s")
| true
|
47ab710b85d89b3febf3ec6177513161c90ab5ef
|
Python
|
BerkanR/Programacion
|
/Codewars/Detect Pangram.py
|
UTF-8
| 627
| 4.3125
| 4
|
[
"Apache-2.0"
] |
permissive
|
# A pangram is a sentence that contains every single letter of the alphabet at least once. For example, the sentence
# "The quick brown fox jumps over the lazy dog" is a pangram, because it uses the letters A-Z at least once (case is
# irrelevant).
# Given a string, detect whether or not it is a pangram. Return True if it is, False if not. Ignore numbers and
# punctuation.
def is_pangram(s):
    """Return True when *s* contains every letter a-z at least once (case-insensitive)."""
    alphabet = set('abcdefghijklmnopqrstuvwxyz')
    return alphabet.issubset(s.lower())
# Smoke tests.  NOTE(review): the failure messages describe the *expected*
# outcome rather than the failing condition -- consider rewording.
assert (is_pangram("The quick, brown fox jumps over the lazy dog!")) == True, "El texto es pangram"
assert (is_pangram("Hola")) == False, "El texto no es pangram"
| true
|
c9c75f1792cf38be19f93a651fbbbf3f9d1a7cab
|
Python
|
pieterbartsmit/realtimeassimilation
|
/coops.py
|
UTF-8
| 2,836
| 2.640625
| 3
|
[] |
no_license
|
#
# Python package to retrieve data from coops
# API documented at https://tidesandcurrents.noaa.gov/api/
#
import code
import numpy as np
def getTideData( epochtime, station='', workingdirectory='./'):
    """Return the observed water level nearest to ``epochtime``.

    Downloads the UTC day of water-level data containing ``epochtime`` from
    the CO-OPS API and picks the sample closest in time.
    """
    import time

    begin_date = time.strftime("%Y%m%d", time.gmtime(epochtime))
    end_date = begin_date
    # Bug fix: the computed dates were previously discarded (empty strings
    # were passed), so the query always fetched *today's* data regardless of
    # the requested epochtime.
    data = getData(station, kind=0, begin_date=begin_date, end_date=end_date,
                   workingdirectory=workingdirectory)
    it = (np.abs(data['t'] - epochtime)).argmin()
    return ({'z': data['z'][it]})
#
def getData( station, kind=0, begin_date='', end_date='', datum='MSL',
             time_zone='GMT', units='metric', workingdirectory='./'):
    """Download CO-OPS station data as epoch times plus values.

    kind=0 fetches water level, kind=1 fetches wind.  Empty ``station``
    defaults to 9412110; empty dates default to today (UTC).  Returns
    {'t', 'z'} for tides or {'t', 'U', 'Udir'} for wind, with ``t`` in
    epoch seconds.

    NOTE(review): if ``kind`` is neither 0 nor 1, ``product`` is never
    assigned and the URL build below raises NameError -- confirm callers
    only pass 0 or 1.
    """
    #
    # Grab wind (kind=1) or tide (kind=0) data from the coops servers for a given stationid.
    #
    import urllib.request
    import time
    import calendar
    import re
    import pandas as pd
    if kind == 0:
        #
        product = 'water_level'
        #
    elif kind == 1:
        #
        product = 'wind'
        #
    #endif
    #
    if station=='':
        #
        station='9412110'
        #
    #fi
    if begin_date=='':
        #
        begin_date = time.strftime("%Y%m%d" , time.gmtime() )
        #
    #fi
    if end_date=='':
        #
        end_date = time.strftime("%Y%m%d" , time.gmtime() )
        #
    #fi
    #
    # Generate the url
    #
    url = 'https://tidesandcurrents.noaa.gov/api/datagetter?'
    tideInfo = 'product=' + product + '&application=NOS.COOPS.TAC.WL' \
        + '&begin_date=' + begin_date + '&end_date='+end_date \
        + '&datum=' + datum + '&station=' +station \
        + '&time_zone=' + time_zone + '&units=' +units \
        + '&format=csv'
    url = url + tideInfo
    #
    # Get Data
    #
    #
    # Note on OSX python 3.6 this can throw an error due to missing certificates:
    # run Applications/Python 3.6/install Cert... to fix this
    #
    #
    # Dump to local text file before processing
    #
    [filename , header ] = urllib.request.urlretrieve(url, workingdirectory+'/temp.csv')
    #
    # Process with pandas
    #
    data = pd.read_csv( filename,sep='\s*,\s*',engine='python' )
    #
    # Convert the "Date Time" column to epoch seconds (input is GMT).
    t = data['Date Time']
    t = [ calendar.timegm( time.strptime( x , "%Y-%m-%d %H:%M")) for x in t ]
    t = np.array(t)
    #
    if kind == 0:
        # We only need time and water level...
        z = data['Water Level'].values
        return( {'t':t,'z':z} )
        #
    elif kind == 1:
        #
        #code.interact( local=locals() )
        U = data['Speed'].values
        Udir = data['Direction'].values
        return( {'t':t,'U':U,'Udir':Udir} )
    #code.interact( local=locals() )
    #
| true
|
3532e0e251aa2c6513b1cdd8274284178b34bd65
|
Python
|
Sharayu1071/Daily-Coding-DS-ALGO-Practice
|
/Hackerank/Python/Weighted Mean.py
|
UTF-8
| 249
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
n = int(input())
arr = list(map(int, input().split()))
weights = list(map(int, input().split()))
print(round(sum([arr[x]*weights[x] for x in range(len(arr))]) / sum(weights), 1))
| true
|
122f144dc8d6b32c16ee23f8d961ad38547d1208
|
Python
|
GXIU/data-driven-pdes
|
/datadrivenpdes/core/equations.py
|
UTF-8
| 12,301
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Equation classes describe differential equations.
Equation class encapsulate the relation between the spatial state derivatives
and time derivatives for different PDE. State derivatives can be used
combined differently to yield various update schemes. (e.g. finite differences
vs finite volumes).
"""
import collections
from typing import (
Any, Dict, Iterator, Mapping, Set, Tuple, Type, TypeVar, Union,
)
import numpy as np
from datadrivenpdes.core import grids
from datadrivenpdes.core import polynomials
from datadrivenpdes.core import states
from datadrivenpdes.core import tensor_ops
import tensorflow as tf
Shape = Union[int, Tuple[int]]
T = TypeVar('T')
class Equation:
  """Base class for PDEs.

  Defines method time_derivative that constructs time derivative of the
  current state using state derivatives provided by the model. The aim is
  to be able to use multiple models and the same integrator for uniform
  performance comparison and experimentation.

  Attributes:
    DISCRETIZATION_NAME: Name of the discretization method.
    METHOD: Discretization method type (finite difference or finite volume).
    MONOTONIC: Are dynamics guaranteed to be monotonic?
    key_definitions: a dict mapping strings to StateDefinitions, providing a map
      from keyword arguments required by time_derivative and take_time_step to
      StateDefintiion instances defining what these keys represent.
    evolving_keys: the set of variable names found in key_definitions that fully
      describe the time-dependent state of the equation.
    constant_keys: the set of variable names found in key_definitions that fully
      describe the time-independent state of the equation.
  """

  CONTINUOUS_EQUATION_NAME = ...  # type: str
  DISCRETIZATION_NAME = ...  # type: str
  METHOD = ...  # type: polynomials.Method
  MONOTONIC = ...  # type: bool

  key_definitions = ...  # type: Dict[str, states.StateDefinition]
  evolving_keys = ...  # type: Set[str]
  constant_keys = ...  # type: Set[str]

  def __init__(self):
    self._validate_keys()

  def get_parameters(self) -> Dict[str, Any]:
    """Return a dictionary of all parameters used to initialize this object."""
    return {}

  def _validate_keys(self):
    """Validate the key_definitions, evolving_keys and constant_keys attributes.
    """
    repeated_keys = self.evolving_keys & self.constant_keys
    if repeated_keys:
      raise ValueError('overlapping entries between evolving_keys and '
                       'constant_keys: {}'.format(repeated_keys))

    # BUG FIX: the original computed `self.derived_keys - self.all_keys`,
    # which is empty by construction (derived_keys is a subset of all_keys),
    # so missing definitions were never reported and the loop below raised a
    # bare KeyError instead.  The intended check is that every evolving or
    # constant key has a definition.
    missing_keys = self.base_keys - self.all_keys
    if missing_keys:
      raise ValueError('not all entries in evolving_keys and constant_keys '
                       'found in key_definitions: {}'.format(missing_keys))

    for key in self.base_keys:
      key_def = self.key_definitions[key]
      if key_def.derivative_orders != (0, 0, 0):
        raise ValueError('keys present in evolving keys and constant keys '
                         'cannot have derivatives, but {} is defined as {}'
                         .format(key, key_def))

    base_name_and_indices = []
    for key in self.base_keys:
      key_def = self.key_definitions[key]
      base_name_and_indices.append((key_def.name, key_def.tensor_indices))

    base_name_and_indices_set = set(base_name_and_indices)
    if len(base_name_and_indices_set) < len(base_name_and_indices):
      raise ValueError('(name, tensor_indices) pairs on each key found in '
                       'evolving_keys and constant keys must be unique, but '
                       'some are repeated: {}'
                       .format(base_name_and_indices))

    for key in self.derived_keys:
      key_def = self.key_definitions[key]
      name_and_indices = (key_def.name, key_def.tensor_indices)
      if name_and_indices not in base_name_and_indices_set:
        # Typo fix in the message: "pari" -> "pair".
        raise ValueError('all keys defined in key_definitions must have the '
                         'same (name, tensor_indices) pair as an entry found '
                         'in evolving_keys or state_keys, but this entry does '
                         'not: {}'.format(key))

  # TODO(shoyer): consider caching these properties, to avoid recomputing them.

  @property
  def all_keys(self) -> Set[str]:
    """The set of all defined keys."""
    return set(self.key_definitions)

  @property
  def base_keys(self) -> Set[str]:
    """Keys corresponding to non-derived entries in the state.

    Returns:
      The union of evolving and constant keys.
    """
    return self.evolving_keys | self.constant_keys

  @property
  def derived_keys(self) -> Set[str]:
    """Keys corresponding to derived entries in the state.

    These can be estimated from other states using either learned or fixed
    finite difference schemes.

    Returns:
      The set of defined keys not found in evolving_keys or constant_keys.
    """
    return set(self.key_definitions) - self.base_keys

  def find_base_key(self, key: str) -> str:
    """Find the matching "base" key from which to estimate this key."""
    definition = self.key_definitions[key]
    for candidate in self.base_keys:
      candidate_def = self.key_definitions[candidate]
      if (candidate_def.name == definition.name and
          candidate_def.tensor_indices == definition.tensor_indices):
        return candidate
    raise AssertionError  # should be impossible per _validate_keys()

  def time_derivative(
      self, grid: grids.Grid, **inputs: tf.Tensor
  ) -> Dict[str, tf.Tensor]:
    """Returns time derivative of the given state.

    Computes time derivatives of the state described by PDE using
    provided spatial derivatives.

    Args:
      grid: description of discretization parameters.
      **inputs: tensors corresponding to each key in key_definitions.

    Returns:
      Time derivative for each non-constant term in the state.
    """
    raise NotImplementedError

  def take_time_step(
      self, grid: grids.Grid, **inputs: tf.Tensor
  ) -> Dict[str, tf.Tensor]:
    """Take single time-step.

    The time step will be of size self.get_time_step().

    The default implementation is an (explicit) forward Euler method.

    Args:
      grid: description of discretization parameters.
      **inputs: tensors corresponding to each key in key_definitions.

    Returns:
      Updated values for each non-constant term in the state.
    """
    time_derivs = self.time_derivative(grid, **inputs)
    dt = self.get_time_step(grid)
    new_state = {k: inputs[k] + dt * time_derivs[k]
                 for k in self.evolving_keys}
    return new_state

  def random_state(
      self,
      grid: grids.Grid,
      params: Dict[str, Dict[str, Any]] = None,
      size: Shape = (),
      seed: int = None,
      dtype: Any = np.float32,
  ) -> Dict[str, np.ndarray]:
    """Returns a state with fully parametrized initial conditions.

    Generates initial conditions of `init_type`. All parameters of
    initialization will be overridden with values from `kwargs`.
    (e.g. position of the gaussian for InitialConditionMethods.GAUSSIAN is
    given by x_position and y_position arguments.) The intended use of this
    method is for testing evaluation on particular initial values. To generate
    random ensemble of initial conditions use initial_random_state.

    Args:
      grid: Grid object holding discretization parameters.
      params: initialization parameters.
      size: size of the batch dimension.
      seed: random seed.
      dtype: dtype of the resulting numpy arrays.

    Returns:
      State with initial values.
    """
    raise NotImplementedError

  def get_time_step(self, grid: grids.Grid) -> float:
    """Returns appropriate time step for time marching the equation on grid.

    Equation should implement custom logic for choosing the appropriate dt.

    Args:
      grid: Grid object holding discretization parameters.

    Returns:
      The value of an appropriate time step.
    """
    raise NotImplementedError

  def regrid(
      self,
      state: Dict[str, tf.Tensor],
      source: grids.Grid,
      destination: grids.Grid,
  ) -> Dict[str, tf.Tensor]:
    """Regrid this state to a coarser resolution.

    Equations should override this method if the default regridding logic
    (designed for finite volume methods) is not appropriate.

    Args:
      state: state(s) representing the initial configuration of the system
      source: fine resolution Grid.
      destination: coarse resolution Grid.

    Returns:
      Tensor(s) representing the input state at lower resolution.
    """
    return tensor_ops.regrid(state, self.key_definitions, source, destination)  # pytype: disable=bad-return-type

  def to_config(self) -> Dict[str, Any]:
    """Creates a configuration dict representing this equation."""
    return dict(
        continuous_equation=self.CONTINUOUS_EQUATION_NAME,
        discretization=self.DISCRETIZATION_NAME,
        parameters=self.get_parameters(),
    )

  @classmethod
  def from_config(cls: Type[T], config: Mapping[str, Any]) -> T:
    """Construct an equation from a configuration dict."""
    continuous_equation = config['continuous_equation']
    if continuous_equation != cls.CONTINUOUS_EQUATION_NAME:
      raise ValueError(
          'wrong continuous equation {} != {}'
          .format(continuous_equation, cls.CONTINUOUS_EQUATION_NAME))

    discretization = config['discretization']
    if discretization != cls.DISCRETIZATION_NAME:
      raise ValueError(
          'wrong discretization {} != {}'
          .format(discretization, cls.DISCRETIZATION_NAME))

    return cls(**config['parameters'])
def _breadth_first_subclasses(base: Type[T]) -> Iterator[Type[T]]:
  """Yields `base` and all its (transitive) subclasses, level by level."""
  # https://stackoverflow.com/questions/3862310
  level = [base]
  while level:
    next_level = []
    for klass in level:
      yield klass
      next_level.extend(klass.__subclasses__())
    level = next_level
def matching_equation_type(
    continuous_equation: str,
    discretization: str,
) -> Type[Equation]:
  """Find the matching equation type."""
  candidates = list(_breadth_first_subclasses(Equation))
  # Keep every subclass whose (equation, discretization) names both match.
  matches = [
      subclass for subclass in candidates
      if (subclass.CONTINUOUS_EQUATION_NAME == continuous_equation
          and subclass.DISCRETIZATION_NAME == discretization)
  ]
  if not matches:
    equations_list = [c.__name__ for c in candidates]
    raise ValueError(
        'continuous equation {!r} and discretization {!r} not found '
        'in equations list {}. Maybe you forgot to import the '
        'module that defines the equation first?'
        .format(continuous_equation, discretization, equations_list))
  elif len(matches) > 1:
    raise ValueError('too many matches found: {}'.format(matches))
  return matches[0]
def equation_from_config(
    config: Mapping[str, Any],
    discretization: str = None,
) -> Equation:
  """Constructs an equation from the Equation protocol buffer.

  Args:
    config: equation specific configuration dictionary.
    discretization: override the discretization scheme for the equation. Needed
      for testing different implementation in training and evaluation.

  Returns:
    Equation object.

  Raises:
    ValueError: Provided protocol buffer was not recognized, check proto names.
  """
  chosen_discretization = (config['discretization']
                           if discretization is None else discretization)
  equation_cls = matching_equation_type(config['continuous_equation'],
                                        chosen_discretization)
  return equation_cls.from_config(config)
| true
|
b74ed96fba5269c1c5be9626e7cb96074710d110
|
Python
|
MaratAG/HackerRankPy
|
/HR_PY_Sets_4.py
|
UTF-8
| 438
| 3.84375
| 4
|
[] |
no_license
|
"""HackerRank Sets 4 Set.add()"""
def problem_solution(N):
    """Read N country names from stdin and return how many are distinct."""
    # A set keeps only unique entries; read exactly N lines.
    countries = {input() for _ in range(N)}
    return len(countries)
def main():
    """Read a validated count N (0 < N < 1000), then print the answer."""
    # Initialization
    N = 0
    min_n = 0
    max_n = 1000
    # Bug fix: `not N > min_n and N < max_n` parsed as
    # `(not (N > 0)) and (N < 1000)`, so any N >= 1000 was accepted.
    # Re-prompt until N is strictly inside (min_n, max_n).
    while not (min_n < N < max_n):
        N = int(input())
    print(problem_solution(N))
| true
|
7cbbd0683121854f33a5410a5726615417431758
|
Python
|
cliffporter/lunarknights.github.io
|
/watch.py
|
UTF-8
| 878
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
'''
Watch a file for updates and run a callback.
Author: Sachin
Date created: 10/02/2021
'''
from os import stat
from sys import argv
from time import sleep
from compile import note
def watch(file, callback, wait=.5):
    '''
    Run a callback every time a file is modified.

    :param file: path to file
    :param callback: function to run on a filename
    :param wait: time in seconds to wait between watches. default=0.5s
    '''
    last_mtime = 0
    while True:
        current_mtime = stat(file).st_mtime
        # A newer modification time than we last saw means the file changed.
        if current_mtime > last_mtime:
            last_mtime = current_mtime
            print(f'Updating {file}...')
            callback(file)
        sleep(wait)
if __name__ == '__main__':
    # CLI: expects a single markdown file to watch and recompile via `note`.
    if len(argv) == 1: print('Pass a file as an argument')
    elif not argv[1].endswith('.md'): print('Must be a .md file')
    else: watch(file=argv[1], callback=note)
| true
|
a5b1f962f5c15470f746e6300637dd60821b5923
|
Python
|
ArcKnight01/bwsix
|
/navigation_block/6_simulating_sensors/1_testing_the_laser/BWSI_BuoyField.py
|
UTF-8
| 9,584
| 2.90625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 4 12:39:28 2021
@author: JO20993
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import utm
## Utility functions
def corridor_check(A, G, R):
    """True when point A lies strictly between the two lines perpendicular
    to segment G-R passing through G and R respectively (the gate corridor)."""
    gate_vec = np.array((R[0] - G[0], R[1] - G[1]))
    from_green = np.array((A[0] - G[0], A[1] - G[1]))
    from_red = np.array((A[0] - R[0], A[1] - R[1]))
    # Projections onto the gate axis have opposite signs only inside
    # the corridor.
    return (np.dot(gate_vec, from_green) * np.dot(gate_vec, from_red) < 0)


def gate_check(B, A, G, R):
    """True when the segment from A to B crosses the gate between G and R.

    Both endpoints must lie in the gate corridor, and A and B must sit on
    opposite sides of the line through G and R.
    """
    if not (corridor_check(A, G, R) and corridor_check(B, G, R)):
        return False
    gate_vec = np.array((R[0] - G[0], R[1] - G[1], 0))
    to_a = np.array((A[0] - G[0], A[1] - G[1], 0))
    to_b = np.array((B[0] - G[0], B[1] - G[1], 0))
    # Opposite-signed z components of the cross products mean A and B are
    # on opposite sides of line G-R.
    side_a = np.cross(gate_vec, to_a)[2]
    side_b = np.cross(gate_vec, to_b)[2]
    return (side_a * side_b < 0)
class Buoy(object):
    """A buoy tracked both as (lat, lon) and as an (easting, northing)
    offset in metres from a fixed datum.

    Exactly one of ``position`` (offset from the datum) or ``latlon`` must
    be supplied; the other representation is derived via UTM conversion.
    """

    def __init__(self,
                 datum,
                 position=None,
                 latlon=None):
        # Accept the historical empty-list defaults too: any falsy value
        # means "not supplied".  (Avoids mutable default arguments.)
        position = position or []
        latlon = latlon or []
        assert (latlon or position) and not (latlon and position), "Buoy.__init__: Must define either latlon or position!"
        self.__datum = datum
        # returns easting, northing, section #, section letter
        self.__datum_position = utm.from_latlon(self.__datum[0], self.__datum[1])
        if not latlon:
            self.__position = position
            # calculate its latlon
            self.__latlon = utm.to_latlon(self.__position[0] + self.__datum_position[0],
                                          self.__position[1] + self.__datum_position[1],
                                          self.__datum_position[2],
                                          self.__datum_position[3])
        else:
            self.__latlon = latlon
            absolute = utm.from_latlon(self.__latlon[0],
                                       self.__latlon[1],
                                       force_zone_number=self.__datum_position[2],
                                       force_zone_letter=self.__datum_position[3])
            self.__position = (absolute[0] - self.__datum_position[0],
                               absolute[1] - self.__datum_position[1])

    def update_position(self, newpos):
        """Move the buoy to a new (easting, northing) offset from the datum."""
        # Bug fix: the original built a 4-tuple using self.__position[2:4],
        # which raised IndexError because __position is always a 2-tuple.
        self.__position = (newpos[0], newpos[1])
        self.__latlon = utm.to_latlon(self.__position[0] + self.__datum_position[0],
                                      self.__position[1] + self.__datum_position[1],
                                      self.__datum_position[2],
                                      self.__datum_position[3])

    def update_latlon(self, newlatlon):
        """Move the buoy to a new (lat, lon) pair."""
        self.__latlon = newlatlon
        # Bug fixes: the keyword was misspelled ('force_zone_numer', a
        # TypeError at call time), and the datum offset was not subtracted,
        # which would have left __position in absolute UTM coordinates
        # instead of datum-relative metres (same convention as __init__).
        absolute = utm.from_latlon(self.__latlon[0],
                                   self.__latlon[1],
                                   force_zone_number=self.__datum_position[2],
                                   force_zone_letter=self.__datum_position[3])
        self.__position = (absolute[0] - self.__datum_position[0],
                           absolute[1] - self.__datum_position[1])

    # accessor functions
    def get_position(self):
        """Return the (easting, northing) offset from the datum, in metres."""
        return self.__position

    def get_latlon(self):
        """Return the (latitude, longitude) pair."""
        return self.__latlon
## Buoy field class
class BuoyField(object):
    """A course of paired green/red buoys forming navigation gates.

    Tracks which gates a moving platform has crossed and simulates which
    buoys a sensor at a given position/field-of-view could detect.
    """

    def __init__(self,
                 datum,
                 green_buoys=[],
                 red_buoys=[],
                 position_style='P'):
        # position_style = 'P' for position (metres from datum), 'L' for latlon
        self.__datum = datum
        self.__datum_position = utm.from_latlon(self.__datum[0], self.__datum[1])
        self.add_buoy_gates(green_buoys, red_buoys, position_style)

    def add_buoy_gates(self, green, red, position_style='P'):
        """Replace all gates; green[i] and red[i] form gate i."""
        assert len(green) == len(red), "Should be equal number of green and red buoys"
        assert position_style=='P' or position_style=='L', f"Unknown position style {position_style}"
        self.__green_buoys = list()
        self.__red_buoys = list()
        for i in range(len(green)):
            if position_style == 'P':
                self.__green_buoys.append(Buoy(self.__datum, position=green[i]))
                self.__red_buoys.append(Buoy(self.__datum, position=red[i]))
            else:
                self.__green_buoys.append(Buoy(self.__datum, latlon=green[i]))
                self.__red_buoys.append(Buoy(self.__datum, latlon=red[i]))
        # gates_passed[i] flips to True once the platform crosses gate i.
        self.gates_passed = np.zeros( (len(red),), dtype=bool)

    def get_buoy_positions(self):
        """Return ([green positions], [red positions]) as datum offsets."""
        G = [green.get_position() for green in self.__green_buoys]
        R = [red.get_position() for red in self.__red_buoys]
        return (G, R)

    def get_buoy_latlon(self):
        """Return ([green latlons], [red latlons])."""
        G = [green.get_latlon() for green in self.__green_buoys]
        R = [red.get_latlon() for red in self.__red_buoys]
        return (G, R)

    def check_buoy_gates(self, prev_pos, new_pos):
        """Mark any not-yet-passed gate crossed by the move prev_pos -> new_pos."""
        for i in range(self.gates_passed.size):
            if (self.gates_passed[i] == False):
                self.gates_passed[i] = gate_check(new_pos,
                                                  prev_pos,
                                                  self.__green_buoys[i].get_position(),
                                                  self.__red_buoys[i].get_position())

    def clearedBuoys(self):
        """Return the number of gates passed so far."""
        return np.count_nonzero(self.gates_passed)

    def isClear(self):
        """Return True when every gate has been passed."""
        return (np.count_nonzero(self.gates_passed) == self.gates_passed.size)

    def _detections(self, buoys, position, max_range, angle_left, angle_right,
                    sensor_type):
        """Collect sensor returns for one buoy list (shared green/red logic)."""
        hits = list()
        for buoy in buoys:
            pos = buoy.get_position()
            rng = np.sqrt( (position[0]-pos[0])**2 + (position[1]-pos[1])**2 )
            if not rng < max_range:
                continue
            # Absolute bearing in [0, 360); atan2 returns -pi:pi.
            angl = np.mod(np.degrees(np.arctan2(pos[0]-position[0], pos[1]-position[1]) ), 360)
            if (angle_left < angle_right):
                visible = (angl >= angle_left and angl <= angle_right)
            else:
                # Field of view wraps through north (e.g. 315..45 degrees).
                visible = (angl >= angle_left or angl <= angle_right)
            if not visible:
                continue
            if sensor_type == 'POSITION':
                hits.append(pos)
            elif sensor_type == 'RANGE_ANGLE':
                hits.append((rng, angl))
            elif sensor_type == 'ANGLE':
                hits.append(angl)
            elif sensor_type == 'RANGE':
                hits.append(rng)
            else:
                sys.exit()
        return hits

    # return all the buoys in the field that are within max_range of the platform,
    # and between angle_left and angle_right (in absolute bearing)
    def detectable_buoys(self,
                         position,
                         max_range,
                         angle_left,
                         angle_right,
                         sensor_type='POSITION'):
        # note: angle_left and angle_right are mod 360
        # BUG FIX: the original referenced self.green_buoys / self.red_buoys,
        # which do not exist (the attributes are name-mangled __green_buoys /
        # __red_buoys), and indexed Buoy objects as if they were tuples --
        # every call raised AttributeError.  The duplicated green/red bodies
        # are now shared via _detections().
        G = self._detections(self.__green_buoys, position, max_range,
                             angle_left, angle_right, sensor_type)
        R = self._detections(self.__red_buoys, position, max_range,
                             angle_left, angle_right, sensor_type)
        return G, R

    def show_field(self):
        """Scatter-plot the gates: green and red dots at buoy positions."""
        for i in range(len(self.__green_buoys)):
            Gpos = self.__green_buoys[i].get_position()
            plt.plot( Gpos[0], Gpos[1], 'go')
            Rpos = self.__red_buoys[i].get_position()
            plt.plot( Rpos[0], Rpos[1], 'ro')
        plt.show()

    ## return if all the gates are cleared
    def all_gates_cleared(self):
        if all(self.gates_passed == True):
            return True
        else:
            return False
| true
|
a5ec3bd43cede3be63b3861927525ecfe0537fb1
|
Python
|
ikalista/Stock-Data-Analysis
|
/tushare_test.py
|
UTF-8
| 4,961
| 2.53125
| 3
|
[] |
no_license
|
#coding=utf-8
import tushare as ts
import talib as ta
import numpy as np
import pandas as pd
import os,time,sys,re,datetime
import csv
import scipy
# 两个函数
# 1、get_stock_basics(),这里得到是对应的dataframe数据结构,code,代码 name,名称 industry,所属行业 area,地区 pe,市盈率
# outstanding,流通股本 totals,总股本(万) totalAssets,总资产(万)liquidAssets,流动资产 fixedAssets,固定资产 reserved,公积金
# reservedPerShare,每股公积金 eps,每股收益 bvps,每股净资 pb,市净率 timeToMarket,上市日期
# 2、get_hist_data(),获取每只股票的历史价格和成交量 对应的列有index列,0 - 6列是 date:日期 open:开盘价 high:最高价 close:收盘价
# low:最低价 volume:成交量 price_change:价格变动 p_change:涨跌幅
# 7-12列是 ma5:5日均价 ma10:10日均价 ma20:20日均价 v_ma5:5日均量v_ma10:10日均量 v_ma20:20日均量
# 1 、首先是获取沪深两市的股票列表
def Get_Stock_List():
    """Fetch the basic-info table (code, name, industry, ...) for all listed
    stocks via tushare and return it as a pandas DataFrame."""
    return ts.get_stock_basics()
# 然后定义通过MACD判断买入卖出
def Get_MACD(df_Code):
    """For each stock code in df_Code's index, download its price history and
    derive a buy/sell score from four MACD-based rules; the score is stored in
    a new 'MACD' column on df_Code (positive = buy bias, negative = sell bias).

    Positional columns referenced below (after the three appends):
    7-9 = ma5/ma10/ma20, 13 = DIFF (macd), 14 = DEA (macdsignal),
    15 = DIFF-DEA (macdhist).
    """
    operate_array = []
    stcok_code = df_Code.index
    for code in df_Code.index:
        df = ts.get_hist_data(code, start='2016-01-03')
        if df is None: # code belongs to a new stock that has not listed yet
            stcok_code = stcok_code.drop(code)
            continue
        dflen = df.shape[0]
        #print("11111111111111",df.head(10))
        operate = 0
        if dflen > 35:
            macd, macdsignal, macdhist = ta.MACD(np.array(df['close']), fastperiod=12, slowperiod=26, signalperiod=9)
            '''
            print("111",macd)
            print("222",macdsignal)
            print("333",macdhist)'''
            SignalMA5 = ta.MA(macdsignal, timeperiod=5, matype=0)
            SignalMA10 = ta.MA(macdsignal, timeperiod=10, matype=0)
            SignalMA20 = ta.MA(macdsignal, timeperiod=20, matype=0)
            # Append three columns (positions 13-15): DIFF, DEA, DIFF-DEA
            df['macd'] = pd.Series(macd, index=df.index) # DIFF
            df['macdsignal'] = pd.Series(macdsignal, index=df.index) # DEA
            df['macdhist'] = pd.Series(macdhist, index=df.index) # DIFF-DEA
            MAlen = len(SignalMA5)
            print(df.head(100))
            # Rule 1: DIFF and DEA both positive with DIFF above DEA -> buy signal.
            # Rule 2: DIFF and DEA both negative with DIFF below DEA -> sell signal.
            if df.iat[(dflen - 1), 13] > 0:
                if df.iat[(dflen - 1), 14] > 0:
                    if df.iat[(dflen - 1), 13] > df.iat[(dflen - 1), 14]:
                        operate = operate + 1 # buy
            else:
                if df.iat[(dflen - 1), 14] < 0:
                    if df.iat[(dflen - 1), 13] < 0:
                        operate = operate - 1 # sell
            # Rule 3: divergence between the DEA line and the K-line signals a reversal.
            if df.iat[(dflen - 1), 7] >= df.iat[(dflen - 1), 8] and df.iat[(dflen - 1), 8] >= df.iat[
                (dflen - 1), 9]: # K-line rising
                if SignalMA5[MAlen - 1] <= SignalMA10[MAlen - 1] and SignalMA10[MAlen - 1] <= SignalMA20[
                    MAlen - 1]: # DEA falling
                    operate = operate - 1
            elif df.iat[(dflen - 1), 7] <= df.iat[(dflen - 1), 8] and df.iat[(dflen - 1), 8] <= df.iat[
                (dflen - 1), 9]: # K-line falling
                if SignalMA5[MAlen - 1] >= SignalMA10[MAlen - 1] and SignalMA10[MAlen - 1] >= SignalMA20[
                    MAlen - 1]: # DEA rising
                    operate = operate + 1
            # Rule 4: MACD histogram flipping negative -> positive within the
            # last 25 candles -> buy signal.
            if df.iat[(dflen - 1), 15] > 0 and dflen > 30:
                for i in range(1, 26):
                    if df.iat[(dflen - 1 - i), 15] <= 0: #
                        operate = operate + 1
                        break
            # Histogram flipping positive -> negative -> sell signal.
            if df.iat[(dflen - 1), 15] < 0 and dflen > 30:
                for i in range(1, 26):
                    if df.iat[(dflen - 1 - i), 15] >= 0: #
                        operate = operate - 1
                        break
        operate_array.append(operate)
    df_Code['MACD'] = pd.Series(operate_array, index=stcok_code)
    return df_Code
# 输出CSV文件
def Output_Csv(df, Dist, industry_name):
    """Persist *df* to '<Dist><industry_name><YYYY-MM-DD>stock.csv' (gbk-encoded)."""
    today_str = datetime.date.today().strftime('%Y-%m-%d')
    # gbk encoding keeps the Chinese industry names readable on the author's setup.
    df.to_csv(Dist + industry_name + today_str + 'stock.csv', encoding='gbk')
# Demo driver: screen two industry groups and dump their MACD signals to CSV.
df = Get_Stock_List()
df1=df[df['industry']=='酒店餐饮']  # hotels & catering industry
df2=df[df['industry']=='电信运营']  # telecom operators industry
df11 = Get_MACD(df1)
df22 = Get_MACD(df2)
Dist = './data/'
name1 = '酒店餐饮'
name2 = '电信运营'
Output_Csv(df11,Dist,name1)
Output_Csv(df22,Dist,name2)
| true
|
04266d69d0c9c5fab3a50e8f57b11d4490b895a0
|
Python
|
Grrrigori/pythonProjectFiles
|
/try.py
|
UTF-8
| 1,975
| 3.0625
| 3
|
[] |
no_license
|
punctuation = "?,.!"
# with open('files/Descartes.txt', 'r', encoding='ANSI') as file:
#     file_content = file.read()
#     print(len(file_content))
#     # print(set(file_content))
#     print(len(set(file_content)))
#     # file_ammount = [line.split(',')for line in file_content]
#     # print (file_ammount)
#     text = str(file_content)
#     unique_text = text.strip("','?!.;")
#     print(len(set(unique_text.split())))
#     print(unique_text)
# Count total characters, distinct characters, and distinct lowercase words.
# NOTE(review): 'ANSI' is a Windows-notepad label, not a registered Python
# codec on most platforms -- confirm this opens; 'cp1252' was probably meant.
with open('files/Descartes.txt', 'r', encoding='ANSI') as file:
    file_content = file.read()
    print(len(file_content))
    # print(set(file_content))
    print(len(set(file_content)))
    # file_ammount = [line.split(',')for line in file_content]
    # print (file_ammount)
    text = str(file_content)
    # Drop punctuation and guillemets, then lowercase before word-counting.
    unique_text = text.replace(',', '').replace('!', '').replace('.', '').replace('«','').replace('»','')
    lowertext = unique_text.lower()
    print(len(set(lowertext.split())))
    print(lowertext)
#
# text_punct_removed = raw_text.replace(".", "")
# text_punct_removed = raw_text.replace("!", "")
# print("\ntext with punctuation characters removed:\n", text_punct_removed)
# print(file_content.split( ))
# print(len(set(file_content.split())))
# Word set is built on the raw text (punctuation still attached to tokens).
unique_words = set(file_content.split())
# print(unique_words)
# for letter in descartes:
# if letter == '"' or letter in punctuation:
# descartes = descartes.replace(letter, '')
# text= set(descartes.split( ))
# print(len(text))
#
# with open('files/Descartes.txt', 'r', encoding='ANSI') as file:
# file_content = file.read()
# print(len(file_content))
# # print(set(file_content))
# # print(len(set(file_content)))
# file_ammount= [line.split(',')for line in file_content]
# print (file_ammount)
#
# ascend = "bob, mod, pot, rot, cod, nod"
# for letter in ascend:
# if letter == '"' or letter in punctuation:
# descartes = ascend.replace(letter, '')
# text= set(ascend.split( ))
# print(len(text))
#
# print (ascend.split( ))
| true
|
def74606056fac0e604682a48965d80844320517
|
Python
|
AnMora/Master-Python-exercise
|
/exercises/12-Last_two_digits/app.py
|
UTF-8
| 331
| 4.5625
| 5
|
[] |
no_license
|
#Complete the function to print the last two digits of an interger greater than 9.
def last_two_digits(num):
    """Return the last two digits of an integer greater than 9.

    For values <= 9, print a usage message and return None (unchanged
    behaviour from the original).
    """
    value = int(num)
    if value <= 9:
        print("Ingrese un numero mayor a 9")
        return None
    return int(str(value)[-2:])
#Invoke the function with any integer greater than 9.
print(last_two_digits("1234"))  # expected output: 34
| true
|
1fa502ddd94b47ce4018ac95b211c4a3f7d37001
|
Python
|
IgorBavand/CODIGOS-URI
|
/Python/falta gravar/1113.py
|
UTF-8
| 187
| 4
| 4
|
[] |
no_license
|
# Read integer pairs until a pair is equal; for each unequal pair report
# whether it is decreasing ("Decrescente") or increasing ("Crescente").
x, y = 0, 1
while x != y:
    x, y = (int(token) for token in input().split())
    if x == y:
        break
    print("Decrescente" if x > y else "Crescente")
| true
|
15728437ab4bbe702b1a35a40c337a97560e5489
|
Python
|
ahoetker/pinch-analysis
|
/tests/test_intake.py
|
UTF-8
| 1,738
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
import pytest
import pkg_resources
import numpy as np
import pandas as pd
from pathlib import Path
from pinch import ureg, Q_
from pinch.intake import parse_column_units, attach_units, df_with_units
resources = Path(pkg_resources.resource_filename("tests", "resources"))
def test_parse_column_units():
    """parse_column_units reads the same unit mapping from csv and xlsx fixtures."""
    expected = {
        "Supply Temperature": ureg.celsius,
        "Target Temperature": ureg.celsius,
        "Heat Capacity Flowrate": ureg.kW / ureg.K,
        "Enthalpy": ureg.MJ / ureg.hour,
    }
    for table_path in (Path(resources / "input_table.csv"),
                       Path(resources / "input_table.xlsx")):
        assert parse_column_units(table_path) == expected
def test_attach_units():
    """attach_units tags a pandas Series with the requested pint unit."""
    raw = pd.Series([1, 2, 3, 4, 5])
    tagged = attach_units(raw, ureg["watt"])
    expected = Q_(np.array([1, 2, 3, 4, 5]), "watt")
    assert np.array_equal(tagged, expected)
def test_df_with_units():
    # Load the same fixture through both file formats and all four unit
    # systems, then spot-check converted values for one row.
    csv = Path(resources / "input_table.csv")
    xlsx = Path(resources / "input_table.xlsx")
    csv_mks = df_with_units(csv, "mks")
    csv_english = df_with_units(csv, "English")
    xlsx_si = df_with_units(xlsx, "SI")
    xlsx_imperial = df_with_units(xlsx, "Imperial")
    # mks (csv) and SI (xlsx) readings agree for the same source row.
    assert np.isclose(csv_mks["Supply Temperature"]["Compressor 1 out"], 159.2)
    assert np.isclose(csv_mks["Enthalpy"]["Compressor 1 out"], 41605.3)
    assert np.isclose(xlsx_si["Supply Temperature"]["Compressor 1 out"], 159.2)
    assert np.isclose(xlsx_si["Heat Capacity Flowrate"]["Compressor 1 out"], 101.2)
    # English/Imperial values are the unit-converted equivalents.
    assert np.isclose(csv_english["Supply Temperature"]["Compressor 1 out"], 318.56)
    assert np.isclose(xlsx_imperial["Enthalpy"]["Compressor 1 out"], 39434215.63577166)
| true
|
35918583c74b412840ad2b687fecc74c1e139738
|
Python
|
vikrantpotnis123/DS
|
/samples/sk1.py
|
UTF-8
| 1,310
| 2.90625
| 3
|
[] |
no_license
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn import svm
from sklearn import datasets
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import train_test_split
#%matplotlib inline
# ggplot-like styling for all matplotlib figures below.
plt.style.use('ggplot')
iris = datasets.load_iris()
print(iris.target_names)
print(iris.keys())
# iris.data / iris.target are numpy arrays.
print("iris data = ", str(type(iris.data)), type(iris.target))
# Exploratory data analysis: wrap the feature matrix in a DataFrame.
df = pd.DataFrame(iris.data, columns=iris.feature_names)
print(df.head())
#X, y = datasets.load_iris(return_X_y=True)
#print(type(X))
#print(type(y))
#exit(0)
# Visual EDA: pairwise scatter matrix colored by class label.
# NOTE(review): marker='G' is not a standard matplotlib marker code --
# confirm this call does not raise on current matplotlib versions.
_ = pd.plotting.scatter_matrix(df, c=iris.target, figsize=(8,8), s=200, marker='G')
plt.show()
# Titanic demo: bar chart of passenger counts per class.
sns.set(style="darkgrid")
titanic = sns.load_dataset("titanic")
ax = sns.countplot(x="class", data=titanic)
# Tips demo: total bill distribution per day (seaborn's restaurant dataset).
tips = sns.load_dataset("tips")
sns.catplot(x="day", y="total_bill", data=tips)
# In PyCharm an explicit show() is needed to render the figures.
plt.show()
|
26268795dee033b7f174b9e58949c23da32e6ba4
|
Python
|
kreativitea/Rosalind
|
/solutions/revc.py
|
UTF-8
| 363
| 3.6875
| 4
|
[] |
no_license
|
from string import translate, maketrans
def revc(dnastring):
    ''' The Secondary and Tertiary Structures of DNA

    Given: A DNA string s of length at most 1000 bp.
    Return: The reverse complement sc of s.

    >>> revc("AAAACCCGGT")
    'ACCGGGTTTT'
    '''
    # str.maketrans / str.translate replace the Python-2-only module-level
    # string.maketrans / string.translate helpers, which were removed in
    # Python 3 (the original import of them fails on Python 3).
    translation = str.maketrans("ATCG", "TAGC")
    return dnastring.translate(translation)[::-1]
| true
|
c10357ad1d0b91d7a554757e40b3d7d9a4a158fa
|
Python
|
sgiardl/LeafClassification
|
/data/DataHandler.py
|
UTF-8
| 8,908
| 3.515625
| 4
|
[] |
no_license
|
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
class DataHandler:
    """
    CLASS NAME:
        DataHandler

    DESCRIPTION:
        Reads the input database (a train.csv-style file) and manages the
        data, which can then be sent to the classifiers.
    """

    def __init__(self, path):
        """
        PARAMETERS:
            path : file path to the train.csv file

        DESCRIPTION:
            Loads the csv into:
            self.data : the original data with all features and labels plus
                        a derived 'genera' column, pandas dataframe
            self.X    : the feature matrix with no labels, numpy array
        """
        # Read the csv file specified in path
        self.data = pd.read_csv(path)
        # 'genera' is the text found before the first '_' in the species column
        self.data['genera'] = self.data.species.str.split('_').str[0]
        # Feature matrix without the 'species', 'id' and 'genera' columns
        self.X = np.array(self.data.drop(['species', 'id', 'genera'], axis=1))

    def get_y(self, label_col):
        """
        PARAMETERS:
            label_col : column containing the labels (self.data.species to
                        group by species, self.data.genera to group by genera)

        RETURNS:
            y : labels encoded into numerical values, numpy array
        """
        return LabelEncoder().fit(label_col).transform(label_col)

    def split_data(self, X, y, test_size, norm):
        """
        PARAMETERS:
            X : features data, 2D numpy array
            y : labels data, 1D numpy array
            test_size : test-set fraction as a float (ex. 0.2 for 20%)
            norm : normalizing method for X: 'none', 'min-max' or 'mean'

        RETURNS:
            None. Fills self.X_train_list / self.X_test_list /
            self.y_train_list / self.y_test_list with one numpy array per
            stratified shuffled K-fold split (K = 1 / test_size), and sets
            self.n_splits.

        NOTE:
            Normalization statistics are always computed from the TRAINING
            fold only and applied to both folds, exactly as before.  The
            original implementation computed each statistic twice and tiled
            it into a full 2D array per fold; numpy broadcasting of the 1D
            per-column statistics yields identical numeric results without
            the duplicated work and temporaries.
        """
        # Number of splits is the inverse of the test fraction
        n_splits = int(1 / test_size)
        self.n_splits = n_splits

        stratified_split = StratifiedKFold(n_splits=n_splits,
                                           shuffle=True,
                                           random_state=0)

        self.X_train_list = []
        self.X_test_list = []
        self.y_train_list = []
        self.y_test_list = []

        for index_train, index_test in stratified_split.split(X, y):
            X_train, X_test = X[index_train], X[index_test]
            y_train, y_test = y[index_train], y[index_test]

            if norm == 'min-max':
                # Per-column min/max of the training fold, computed once
                col_min = X_train.min(axis=0)
                col_max = X_train.max(axis=0)
                X_train = self.__normalize_min_max(X_train, col_min, col_max)
                X_test = self.__normalize_min_max(X_test, col_min, col_max)
            elif norm == 'mean':
                # Per-column mean/std of the training fold, computed once
                col_mean = X_train.mean(axis=0)
                col_std = X_train.std(axis=0)
                X_train = self.__normalize_mean(X_train, col_mean, col_std)
                X_test = self.__normalize_mean(X_test, col_mean, col_std)

            self.X_train_list.append(X_train)
            self.X_test_list.append(X_test)
            self.y_train_list.append(y_train)
            self.y_test_list.append(y_test)

    def __make_array(self, X, size):
        """
        Duplicate the 1D array X vertically 'size' times into a 2D array.
        Kept for backward compatibility; broadcasting in split_data made it
        unnecessary.
        """
        return np.array([X] * size)

    def __normalize_min_max(self, X, X_min, X_max):
        """
        Min-max normalize X column-wise; X_min/X_max may be 1D or 2D
        (numpy broadcasting handles both shapes identically).
        """
        return (X - X_min) / (X_max - X_min)

    def __normalize_mean(self, X, X_mean, X_std):
        """
        Mean/std normalize X column-wise; X_mean/X_std may be 1D or 2D.
        """
        return (X - X_mean) / X_std
| true
|
d79a9c059871cb9605be8403bec64ddb310ecc93
|
Python
|
pfuntner/toys
|
/bin/files-by-date-range
|
UTF-8
| 1,828
| 2.75
| 3
|
[] |
no_license
|
#! /usr/bin/env python3
import os
import re
import signal
import logging
import datetime
import argparse
def decode_date(s):
  """Parse 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS' into a datetime.

  None passes straight through; any other shape aborts via parser.error().
  """
  if s is None:
    return None
  if re.match(r'^\d{4}-\d{2}-\d{2}$', s):
    return datetime.datetime.strptime(s, '%Y-%m-%d')
  if re.match(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$', s):
    return datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
  parser.error(f'{s!r} is not a valid date date/time')
# Command-line interface: optional date bounds, file list, debug verbosity.
parser = argparse.ArgumentParser(description='List files after and before specified dates')
parser.add_argument('-a', '--after', help='Select files on or after the specified date')
parser.add_argument('-b', '--before', help='Select files on or before the specified date')
parser.add_argument('files', nargs='+', help='One or more files to search')
parser.add_argument('-v', '--verbose', action='count', help='Enable debugging')
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s')
log = logging.getLogger()
# Each -v lowers the threshold by one level: WARNING -> INFO -> DEBUG.
log.setLevel(logging.WARNING - (args.verbose or 0)*10)
# Exit quietly if stdout closes early (e.g. output piped into head).
signal.signal(signal.SIGPIPE, lambda signum, stack_frame: exit(0))
after = decode_date(args.after)
before = decode_date(args.before)
# At least one bound is required, and the bounds must be ordered.
if not after and not before:
  parser.error('Specify --after and/or --before')
if after is not None and before is not None and after > before:
  parser.error(f'{args.after!r} is after {args.before!r}')
# Print each regular file whose mtime falls inside [after, before].
for filename in args.files:
  if os.path.exists(filename):
    if os.path.isfile(filename):
      curr = datetime.datetime.fromtimestamp(os.path.getmtime(filename))
      if (after is None or after <= curr) and (before is None or curr <= before):
        print(filename)
    else:
      log.warning(f'{filename!r} is not a regular file')
  else:
    log.warning(f'{filename!r} cannot be found')
|
8d4d7bb6761cebbe9f9932bfa50906f86762f7e6
|
Python
|
d9yuan/stock_pattern
|
/src/stocks/utils.py
|
UTF-8
| 4,465
| 2.53125
| 3
|
[] |
no_license
|
import pandas as pd
import yfinance as yf
import numpy as np
import operator
import io
import math
import urllib, base64
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('agg')
import matplotlib.dates as mdates
from datetime import date, time, datetime, timedelta
from .models import Stock, DetailPage, Price
def stock_find_single(stk_name, lower_day, lowest_day, lower_control, lowest_control, allow_duplicates):
    """Scan one stock's price history for a fall-then-first-rise pattern.

    Starting at every falling candle, walk forward until the first rising
    candle at least `lower_day` candles later.  Optional filters:
    lowest_control -- require today's open below the minimum low of the
    previous `lowest_day` candles; lower_control -- require the close to
    recover above the highest open seen during the fall.  Each hit appends
    [date, close, max high of the following (up to) 7 candles, price ids].

    NOTE(review): indentation reconstructed from a flattened dump -- verify
    the nesting of the lower_control / allow_duplicates branches against the
    original file before relying on this annotation.
    """
    stk = Stock.objects.get(stock=stk_name)
    historical_data = stk.get_prices().all()
    historical_data = pd.DataFrame.from_dict(historical_data.values())
    young = []
    l = len(historical_data)
    i = -1
    while i < l - 1:
        i += 1
        # Classify candle i by close-vs-open direction.
        if ((historical_data.iloc[i]["close_price"] - historical_data.iloc[i]["open_price"]) > 0):
            m = "Rise"
        else:
            m = "Fall"
        if (m == "Fall"):
            # Highest open observed during the falling stretch so far.
            open_price_at_fall = historical_data.iloc[i]["open_price"]
            id_list = [ historical_data.iloc[i]["id"] ]
            for k in range(i + 1, l):
                id_list.append(historical_data.iloc[k]["id"])
                open_price_today = historical_data.iloc[k]["open_price"]
                open_price_at_fall = max(open_price_today, open_price_at_fall)
                if ((historical_data.iloc[k]["close_price"] - historical_data.iloc[k]["open_price"]) > 0):
                    m1 = "Rise"
                else:
                    m1 = "Fall"
                # A rise inside the minimum window invalidates this start point.
                if (k - i < lower_day):
                    if (m1 == "Rise"):
                        i = k + 1
                        break
                if (m1 == "Rise"):
                    if (lowest_control == True):
                        # Require today's open to undercut the minimum low of
                        # the previous `lowest_day` candles.
                        if (k >= lowest_day):
                            b = []
                            for m in range(1, lowest_day + 1):
                                b.append(
                                    historical_data.iloc[k - m]["low_price"])
                            bmin = min(b)
                            if (open_price_today >= bmin):
                                continue
                    if (lower_control == False):
                        # a = best high over the following (up to) 7 candles.
                        a = [-1]
                        for s in range(k + 1, min(k + 8, l)):
                            a.append(historical_data.iloc[s]["high_price"])
                        a = max(a)
                        young.append(
                            [historical_data.iloc[k]["date"],
                             historical_data.iloc[k]["close_price"], a, id_list])
                    else:
                        # Additionally require the close to clear the highest
                        # open recorded during the fall.
                        if ((historical_data.iloc[k]["close_price"] - open_price_at_fall) > 0):
                            a = [-1]
                            for s in range(k + 1, min(k + 8, l)):
                                a.append(historical_data.iloc[s]["high_price"])
                            a = max(a)
                            young.append([historical_data.iloc[k]["date"],
                                          historical_data.iloc[k]["close_price"], a, id_list])
                    if (not allow_duplicates):
                        i = k + 1
                        break
    return young
def create_stock_bar_chart(dates, y_open, y_close, days):
    """Render an open/close bar chart (red when close >= open, green otherwise)
    and return it as a URL-quoted base64 PNG string for HTML embedding.

    NOTE(review): the 'days' parameter is never used -- confirm it can be
    dropped or is reserved for future use.
    """
    plt.close()
    y_open = list(y_open)
    y_close = list(y_close)
    y_open = list(map(float, y_open))
    y_close = list(map(float, y_close))
    # Bar height is the signed open->close move; color encodes direction.
    height_list = list(map(operator.sub, y_close, y_open))
    color_list = list(map(lambda x: 'red' if x >= 0 else 'green',
                          height_list))
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m/%d'))
    plt.gca().xaxis.set_major_locator(mdates.DayLocator())
    print(dates)
    print(y_open)
    print(y_close)
    plt.bar(x=range(len(dates)), height=height_list, bottom=y_open, color=color_list, width=0.5)
    # Pad the y-limits slightly beyond the observed price range.
    ymax = math.ceil(max(max(y_open), max(y_close)) * 1.0005)
    ymin = math.floor(min(min(y_open), min(y_close)) / 1.0005)
    print((ymin, ymax))
    axes = plt.gca()
    axes.set_ylim([ymin, ymax])
    date_strings = []
    fmt = '%Y/%m/%d'
    # NOTE(review): the loop variable shadows the imported 'date' class.
    for date in dates:
        date_strings.append(date.strftime(fmt))
    axes.set_xticks(range(len(dates)))
    axes.set_xticklabels(date_strings, rotation=45, ha='right')
    plt.gcf().autofmt_xdate()
    fig = plt.gcf()
    # Serialize the figure into an in-memory PNG buffer, then base64 + URL-quote.
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    string =base64.b64encode(buf.read())
    uri = urllib.parse.quote(string)
    return uri
| true
|
b39a0fb7dc3eeee57b7637e39f072a2ced024508
|
Python
|
nancyvuong/DocumentGenerator
|
/main.py
|
UTF-8
| 370
| 3.078125
| 3
|
[] |
no_license
|
from DocumentGenerator import *
if __name__ == '__main__':
    # Prompt for a source file, then generate `wc` words into generated.txt.
    fp = input("Enter filename: ")
    dc = DocumentGenerator(fp)
    # NOTE(review): a constructor call never compares equal to None unless
    # DocumentGenerator overrides __new__/__eq__ -- confirm this retry loop
    # can actually trigger for a bad filename.
    while dc == None:
        fp = input("Enter another filename: ")
        dc = DocumentGenerator(fp)
    file1 = open("generated.txt", "w")
    wc = int(input("Enter word count: "))
    file1.write(dc.generateDocument(wc))
    file1.close()
| true
|
8c739768d8248e0f344b32656bdb01a935db51aa
|
Python
|
shivakrishnap/gitdemo
|
/Testdata/Hompagedata.py
|
UTF-8
| 808
| 2.84375
| 3
|
[] |
no_license
|
import openpyxl
class Hompagedata:
    # Inline sample rows mirroring the spreadsheet schema (firstname/email/gender).
    test_homepage_data = [{"firstname": "shiva", "email": "shivakrishna@gmail.com", "gender": "Male"},{"firstname": "sathya", "email": "sathyakrishna@gmail.com", "gender": "Female"}]
    @staticmethod
    def getstdata(test_case_name):
        """Return [row-dict] for the spreadsheet row whose first column equals
        test_case_name; the header row (row 1) supplies the dict keys.

        NOTE(review): the workbook path is an absolute Windows path, so this
        only runs on the author's machine -- consider making it configurable.
        Returns [{}] when no row matched; presumably callers expect a
        one-element list -- TODO confirm.
        """
        Dict = {}
        book = openpyxl.load_workbook("C:\\Users\\shiva\\OneDrive\\Documents\\pyexceldemo.xlsx")
        sheet = book.active
        # Debug print of the first data header cell.
        cell = sheet.cell(row=1, column=2)
        print(cell.value)
        for i in range(1, sheet.max_row + 1):
            if sheet.cell(row=i, column=1).value == test_case_name:
                for j in range(2, sheet.max_column + 1):
                    # print(sheet.cell(row=i,column=j).value)
                    Dict[sheet.cell(row=1, column=j).value] = sheet.cell(row=i, column=j).value
        return [Dict]
| true
|
ade2fe17b92142a7e5ec6ba56b2baec0fba62560
|
Python
|
hbtech-ai/ARPS
|
/tmsvm/src/example.py
|
UTF-8
| 3,734
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python
#_*_ coding: utf-8 _*_
#author:张知临 zhzhl202@163.com
#Filename: example.py
import tms
'''采用程序默认参数对模型训练、预测、结果分析'''
# (Train/predict/analyse with the program's default parameters.)
# Train: input file binary_seged.train, train on field 1 (field 0 is the
# label), save under data/; keep the top 10% of terms and use liblinear.
#tms.tms_train("../data/binary_seged.train")
# Predict with the trained model.
#tms.tms_predict("../data/binary_seged.test","../data/model/tms.config",result_save_path="../data/binary_seged.result")
# Analyse the prediction results.
#tms.tms_analysis("../data/binary_seged.result")
'''配置多个模型进行预测'''
# (Configure multiple models for prediction.)
#tms.tms_predict_multi("../data/binary_seged.test", ["../data/libsvm_model/tms.config","../data/libsvm_model/tms.config"],indexes_lists=[[1],[1]],result_save_path="../data/binary_seged.result")
#tms.tms_analysis("../data/binary_seged.result",indexes=[0,1,2,3,4],true_label_index=4)
'''对文件进行分词'''
# (Segment the input file into words.)
#tms.tms_segment("../data/binary.train", indexes=[1])
'''特征选择'''
# (Feature selection.)
#tms.tms_feature_select("../data/binary_seged.train", indexes=[1], global_fun="idf", dic_name="test.key", ratio=0.05, stopword_filename="")
'''将输入文件构造为libsvm和liblinear的输入格式'''
# (Convert the input file into libsvm/liblinear input format.)
#tms.cons_train_sample_for_svm("../data/binary_seged.train", "../data/model/dic.key", "../data/tms.train", [1])
'''对SVM模型选择最优的参数'''
'''对没有经过分词的文件进行训练'''
# (Grid-search SVM parameters / train on an unsegmented file.)
#tms.tms_train("../data/binary.train",seg=1)
'''假设data文件夹下有一个post.train和post.test的训练样本和测试样本,每一行有3个字段:label title content。样本都没有分词
该例子需要完成:
1、对title进行分词、训练,模型保存在../data/post/ 下,所有的文件都有title命名,SVM模型选择使用libsvm,核函数使用rbf,选择选择保留top 40%的词,特征权重使用tf*idf
2、对title和content一起进行分词、训练,模型保存在../data/post/ 下,所有的文件都有title_content命名,SVM模型选择使用libsvm,选择选择保留top 20%的词,特征权重使用tf
3、先对post.test进行分词,然后使用已经训练好的模型对post.test进行预测。结果以post.result命名,将原label与结果一同输出。
4、计算模型的预测F值、Recall、Precision,并将结果输出在屏幕上。
5、计算从[0,1]区间内各个阈值下对应的F值、Recall、Precision,将结果保存在post.analysis
'''
# (Worked exercise on post.train/post.test [label, title, content per line]:
#  1. segment+train on title only -- libsvm, rbf kernel, top 40% terms, tf*idf;
#  2. segment+train on title+content -- libsvm, top 20% terms, tf weights;
#  3. segment post.test and predict with the trained models -> post.result;
#  4. print F-score/Recall/Precision; 5. sweep thresholds -> post.analysis.)
# Only step 1's training call is live below; the rest are left commented out.
tms.tms_train("../data/post.train",indexes=[1],main_save_path="../data/",stopword_filename="../data/stopwords.txt",svm_type="libsvm",svm_param="-t 2",config_name="title.config",dic_name="title.key",model_name="title.model",train_name="title.train",param_name="title.param",ratio=0.4,seg=1,local_fun="tf",global_fun="idf")
#tms.tms_train("../data/post.train",indexes=[1,2],main_save_path="../data/",stopword_filename="../data/stopwords.txt",svm_type="libsvm",config_name="title_content.config",dic_name="title_content.key",model_name="title_content.model",train_name="title_content.train",param_name="title_content.param",ratio=0.2,seg=1,local_fun="tf",global_fun="one")
#tms.tms_predict_multi("../data/post.test",config_files=["../data/model/title.config","../data/model/title_content.config"],indexes_lists=[[1],[1,2]],result_save_path="../data/post.result",result_indexes=[0],seg=1)
#tms.tms_analysis("../data/post.result",step=2,output_file="",indexes=[0,1,2],predicted_label_index=0,predicted_value_index=1,true_label_index=2)
#tms.tms_analysis("../data/post.result",step=4,output_file="../data/post.analysis",min=0,max=1,indexes=[0,1,2],predicted_label_index=0,predicted_value_index=1,true_label_index=2)
| true
|
4916af2abeae90699f99a8bc8541e8b194404562
|
Python
|
wenxinjie/leetcode
|
/bit manipulation/python/leetcode190_Reverse_Bits.py
|
UTF-8
| 731
| 3.6875
| 4
|
[
"Apache-2.0"
] |
permissive
|
# Reverse bits of a given 32 bits unsigned integer.
# Example:
# Input: 43261596
# Output: 964176192
# Explanation: 43261596 represented in binary as 00000010100101000001111010011100,
# return 964176192 represented in binary as 00111001011110000010100101000000.
class Solution:
    # @param n, an integer
    # @return an integer
    def reverseBits(self, n):
        """Reverse the bits of a 32-bit unsigned integer (string-based).

        Zero-pads the binary representation to 32 bits, reverses it, and
        parses it back.  The original also called n.rstrip("0") and discarded
        the result -- dead code, removed.
        """
        bits = bin(n)[2:]
        bits = (32 - len(bits)) * "0" + bits  # zero-pad to 32 bits
        return int(bits[::-1], 2)

    def reverseBits2(self, n):
        """Reverse the bits of a 32-bit unsigned integer (bitwise).

        Shifts the low bit of n into the result 32 times.
        """
        res = 0
        for _ in range(32):
            res = (res << 1) | (n & 1)  # append n's low bit to the result
            n >>= 1
        return res

# Time: O(k)
# Space: O(k)
# Difficulty: easy
# Time: O(k)
# Space: O(k)
# Difficulty: easy
| true
|
7a56d14e8fb8743756ce3fdadd342f6c709e890e
|
Python
|
prasannamondhe/Python
|
/Practice/FileOperations.py
|
UTF-8
| 999
| 3.265625
| 3
|
[] |
no_license
|
import os
#Way to specify path in python
#print(os.path.join('home','prasanna','PythonRepo','Python','Game'))
#Open CSV file => replace commas with spaces => write into another CSV file.
# NOTE(review): the readTarget handles below are never closed -- consider
# 'with open(...)' for the read-back as well.
with open('Practice/insurance.csv') as infile, open('Practice/insuranceWithoutComma.csv','w') as outfile:
    for line in infile:
        outfile.write(line.replace(","," "))
readTarget = open('Practice/insuranceWithoutComma.csv')
print(readTarget.read())
# Same transformation, but written to a .pdf-named file.
# NOTE(review): the output is plain text, not a real PDF document.
with open('Practice/insurance.csv') as infile, open('Practice/target.pdf','w') as outfile:
    for line in infile:
        outfile.write(line.replace(","," "))
readTarget = open('Practice/target.pdf')
print(readTarget.read())
#Open CSV and write the de-comma'd details into a text file.
with open('Practice/insurance.csv') as infile, open('Practice/hello.txt','w') as outfile:
    for line in infile:
        outfile.write(line.replace(","," "))
readTarget = open('Practice/hello.txt')
print(readTarget.read())
| true
|
b4f095b0dcb08dc7e52fe0d5d11b4c31a1dfdafc
|
Python
|
eicksl/PAC-Free-Searcher
|
/pacfree_searcher/campaignsite.py
|
UTF-8
| 1,445
| 2.96875
| 3
|
[] |
no_license
|
import requests
from requests.exceptions import ConnectionError
from bs4 import BeautifulSoup
from pacfree_searcher.constants import FACEBOOK, TWITTER
from pacfree_searcher.dbmanager import DBManager
class CampaignSiteParser():
    """Handles the details of parsing through a candidate's campaign website
    to obtain additional info. Currently it will simply look for and add
    Facebook and Twitter URLs and then pass all the data to `DBManager`."""

    def __init__(self, data, db):
        """Initializes campaign website search process.

        data : dict with at least 'campaign_url' and 'name' keys (mutated in
               place with 'facebook'/'twitter' entries); db : passed through
               to DBManager unchanged.
        """
        self.data = data
        self.get_social_media_urls()
        DBManager(self.data, db)

    def get_social_media_urls(self):
        """Grabs Facebook and Twitter URLs from the candidate's homepage.

        Sets self.data['facebook'] / ['twitter'] to the first matching anchor
        href (lowercased) containing the candidate's last name, or leaves
        them None on no match / connection failure.
        """
        self.data['facebook'] = None
        self.data['twitter'] = None
        try:
            # NOTE(review): verify=False disables TLS certificate validation
            # -- a security risk; confirm it is really required here.
            resp = requests.get(self.data['campaign_url'], verify=False)
        except ConnectionError:
            # Unreachable site: leave both URLs as None.
            return
        html = BeautifulSoup(resp.text, 'lxml')
        for anchor in html.find_all('a'):
            try:
                url = anchor['href'].lower()
            except KeyError:
                # Anchor without an href attribute.
                continue
            # NOTE(review): rsplit(' ', 1)[1] raises IndexError for a
            # single-word name, and could be hoisted out of the loop.
            last_name = self.data['name'].rsplit(' ', 1)[1].lower()
            if last_name in url:
                if FACEBOOK in url:
                    self.data['facebook'] = url
                elif TWITTER in url:
                    self.data['twitter'] = url
| true
|
80e5afa56408b64d93c512222faa18d148c7256d
|
Python
|
lucashsouza/Desafios-Python
|
/CursoEmVideo/Aula09/ex022.py
|
UTF-8
| 316
| 4.40625
| 4
|
[
"MIT"
] |
permissive
|
# Read a full name and print several analyses of it (Portuguese prompts).
nome = (input('Digite seu nome completo: ')).strip()
print('Analisando seu nome..')
print('Em maiusculas: {}'.format(nome.upper()))
print('Em minusculas: {}'.format(nome.lower()))
# Letter count excludes the spaces between name parts.
print('Que possui {} letras'.format(len(nome) - nome.count(' ')))
# NOTE(review): nome.find(' ') is the index of the first space, i.e. the
# length of the first name -- but it returns -1 for a single-word name;
# confirm that edge case is acceptable.
print('E seu primeiro nome tem {} letras'.format(nome.find(' ')))
| true
|
4f8dfa4d015950c9739aff61b3e10d1ac21fce83
|
Python
|
T0aD/pyawstats
|
/lock.py
|
UTF-8
| 2,071
| 3.046875
| 3
|
[] |
no_license
|
#! /usr/bin/python3.1
# This module merely creates a lock file in order to lock the execution of a python script
# (avoids running twice the same script at the same time..)
import os.path
import sys
"""
=========================== Usage:
import lock
with lock.Lock(__file__):
main program
=========================== Or:
from lock import Lock
with Lock():
main program
"""
class Lock():
    """Context manager that serializes script execution via a PID lockfile.

    Usage::

        with Lock(__file__):
            ...  # main program
    """

    path = '/var/tmp'  # default directory for lockfiles

    # Just generate the full path to the lockfile
    def __init__(self, name=False, path=False):
        """Compute self.lockfile from a script name.

        name : script filename (defaults to sys.argv[0]); a trailing '.py'
               is stripped.  path : override the lockfile directory.
        """
        if name is False:
            name = sys.argv[0]
        name = os.path.basename(name)
        if path is not False:  # was the awkward 'not path is False'
            self.path = path
        if name.endswith('.py'):
            name = name[:-3]
        self.lockfile = os.path.realpath(os.path.join(self.path, name + '.lock'))

    # Create the lockfile and write the PID in it
    def __enter__(self):
        """Exclusively create the lockfile and write our PID into it.

        If the lockfile already exists, report the owning PID -- and, on
        Linux, whether a process with that PID is still running (via /proc)
        -- then exit(1).
        """
        try:
            fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        except OSError:
            # Narrowed from a bare 'except:'; only file-creation failures
            # mean the lock is held.  'with' replaces the old reuse of the
            # integer 'fd' name as a file object and guarantees close().
            with open(self.lockfile) as lockf:
                pid = lockf.read()
            # Try to see if a process is actually running with this PID (Linux):
            if os.path.exists('/proc/%s/cmdline' % pid):
                with open('/proc/%s/cmdline' % pid) as procf:
                    prog = procf.read()
                running = 'Program still running: %s' % prog.replace('\0', ' ')
            else:
                running = 'No process running'
            print("lockfile %s exists with PID %s (%s)" % (self.lockfile, pid, running))
            exit(1)
        os.write(fd, bytes(str(os.getpid()), 'ascii'))
        os.close(fd)

    # Only remove the lockfile when no exception was raised
    def __exit__(self, etype, evalue, traceback):
        """Remove the lockfile on clean exit; keep it for post-mortem on error."""
        if etype is None:
            os.unlink(self.lockfile)
| true
|
461eb81b21fc3070862531b7a122ea7c913fa73b
|
Python
|
carleton-cs257-spring18/books-alejandro-isaac
|
/books/booksdatasourcetest.py
|
UTF-8
| 4,169
| 3.0625
| 3
|
[] |
no_license
|
import booksdatasource
import unittest
class BooksDataSourceTest(unittest.TestCase):
    """Unit tests for booksdatasource.BooksDataSource, backed by the
    books.csv / authors.csv / books_authors.csv fixtures in the working
    directory.

    Cleanup: removed the redundant `pass` statements that followed real
    code in every test method (dead statements, no behavior change).
    """

    def setUp(self):
        self.books_data_source = booksdatasource.BooksDataSource('books.csv', 'authors.csv', 'books_authors.csv')

    def tearDown(self):
        pass

    def test_return_correct_book(self):
        self.assertEqual({'id':0, 'title':'All Clear', 'publication_year':2010},self.books_data_source.book(0))

    def test_invalid_book_id(self):
        self.assertRaises(ValueError, self.books_data_source.book, -3)

    def test_illegal_type_for_book(self):
        self.assertRaises(TypeError, self.books_data_source.book, 'hello')

    def test_books_all_parameters_none(self):
        self.assertEqual(47,len(self.books_data_source.books()))

    def test_books_search_by_author(self):
        book = self.books_data_source.books(author_id=10)
        self.assertEqual({'id':10,'title':'Main Street','publication_year':1920}, book[0])

    def test_books_invalid_author_id(self):
        self.assertRaises(ValueError, self.books_data_source.books, author_id = -3)

    def test_books_search_by_search_text(self):
        books = self.books_data_source.books(search_text='clear')
        for book in books:
            self.assertIn('clear',book['title'].lower())

    def test_books_search_by_end_year(self):
        books = self.books_data_source.books(end_year=1930)
        for book in books:
            self.assertTrue(book['publication_year'] <= 1930)

    def test_correct_author(self):
        austen_correct = {'id':4, 'last_name':'Austen', 'first_name':'Jane', 'birth_year':1775, 'death_year':1817}
        self.assertEqual(austen_correct, self.books_data_source.author(author_id=4))

    def test_invalid_author_id(self):
        self.assertRaises(ValueError, self.books_data_source.author, -3)

    def test_illegal_type_for_author(self):
        self.assertRaises(TypeError, self.books_data_source.author, 'hello')

    def test_authors_all_parameters_none(self):
        self.assertEqual(25, len(self.books_data_source.authors()))

    def test_authors_search_by_book_id(self):
        haruki_murakami = self.books_data_source.authors(book_id=30)
        self.assertEqual(16, haruki_murakami[0]['id'])

    def test_authors_search_by_seach_text(self):
        bronte_sisters = self.books_data_source.authors(search_text='Brontë')
        for sister in bronte_sisters:
            self.assertEqual('Brontë', sister['last_name'])

    def test_authors_search_by_start_year(self):
        # Authors still alive (death_year None) also count as "after start_year".
        authors_after_1875 = self.books_data_source.authors(start_year=1875)
        for author in authors_after_1875:
            try:
                self.assertTrue(author['death_year'] >= 1875)
            except TypeError:
                self.assertTrue(author['death_year'] == None)

    def test_authors_search_by_end_year(self):
        authors_before_1875 = self.books_data_source.authors(end_year=1875)
        for author in authors_before_1875:
            self.assertTrue(author['birth_year'] <= 1875)

    def test_authors_sort_by_last_name(self):
        authors_sorted_by_last_name = self.books_data_source.authors(sort_by="last_name")
        previous_author = authors_sorted_by_last_name[0]
        for author in authors_sorted_by_last_name:
            self.assertTrue(author['last_name'] >= previous_author['last_name'])
            previous_author = author

    def test_authors_sort_by_birth_year(self):
        # NOTE(review): this asserts non-increasing birth years (newest first)
        # while last_name sorting above asserts ascending — confirm the data
        # source's intended sort direction for birth_year.
        authors_sorted_by_birth_year = self.books_data_source.authors(sort_by="birth_year")
        previous_author = authors_sorted_by_birth_year[0]
        for author in authors_sorted_by_birth_year:
            self.assertTrue(author['birth_year'] <= previous_author['birth_year'])
            previous_author = author

    def test_authors_invalid_book_id(self):
        self.assertRaises(ValueError, self.books_data_source.authors, book_id = -3)
if __name__ == '__main__':
unittest.main()
| true
|
6a02ce4142a950d84c7830318e7e11f3f72b5c35
|
Python
|
atsss/nyu_deep_learning
|
/examples/contests/205/D.py
|
UTF-8
| 1,284
| 2.59375
| 3
|
[] |
no_license
|
# from collections import Counter
#
# N, Q = map(int, input().split())
# A = list(map(int, input().split()))
# K = [int(input()) for _ in range(Q)]
#
# st = set(K)
# col = Counter(A)
# col_index = 1
# k_index = 0
# length = 0
# ans = {}
#
# while True:
# if col[col_index] == 0:
# k_index += 1
#
# if k_index in st:
# length += 1
# ans[k_index] = col_index
#
# if length == Q:
# break
#
# col_index += 1
#
# for k in K: print(ans[k])
# N, Q = map(int, input().split())
# A = list(map(int, input().split()))
# K = [int(input()) for _ in range(Q)]
#
# arr = []
# A.sort()
# prev = 0
# for current in A:
# arr += list(range(prev+1, current))
# prev = current
#
# length = len(arr)
# ans = {}
# for k in K:
# if k > length:
# ans[k] = A[-1] + (k - length)
# else:
# ans[k] = arr[k-1]
#
# for k in K: print(ans[k])
import bisect
# Answer q queries of "what is the cq-th positive integer missing from arr?"
# arr is assumed sorted ascending (matches both commented-out attempts above).
n, q = map(int, input().split())
arr = list(map(int, input().split()))
# dp[i] = how many positive integers below arr[i] are missing from arr.
dp = []
dp.append(arr[0] - 1)
for i in range(1, n):
    # Each gap between consecutive elements contributes (gap - 1) missing numbers.
    dp.append(dp[-1] + arr[i] - arr[i - 1] - 1)
# Sentinel so bisect always finds an insertion point for queries past the array.
dp.append(float("inf"))
for _ in range(q):
    cq = int(input())
    # First index whose "missing count" reaches cq.
    k = bisect.bisect_left(dp, cq)
    if k == 0:
        # Fewer than arr[0]-1... i.e. the cq-th missing number precedes arr[0].
        print(cq)
    else:
        # cq - dp[k-1] missing numbers remain after arr[k-1]; step past it.
        print(arr[k - 1] + cq - dp[k - 1])
| true
|
e1f7546cb66a39f4cfdd8f58e1dbd9965cca15f7
|
Python
|
AlexSath/Fastq_Pipeline
|
/Archive/parseFastq.py
|
UTF-8
| 3,937
| 3.578125
| 4
|
[] |
no_license
|
import argparse
#Example use is
# python parseFastq.py --fastq /home/rbif/week6/hawkins_pooled_sequences.fastq
################################################
# You can use this code and put it in your own script
class ParseFastQ(object):
    """Returns a read-by-read fastQ parser analogous to file.readline()"""

    def __init__(self, filePath, headerSymbols=['@', '+']):
        """Returns a read-by-read fastQ parser analogous to file.readline().

        Exmpl: parser.next()
        -OR-
        Its an iterator so you can do:
        for rec in parser:
            ... do something with rec ...
        rec is tuple: (seqHeader,seqStr,qualHeader,qualStr)
        """
        if filePath.endswith('.gz'):
            import gzip  # fix: gzip was referenced without ever being imported
            # 'rt' yields str lines; gzip's default binary mode would break
            # the str-based parsing in __next__ (strip('\n'), startswith).
            self._file = gzip.open(filePath, 'rt')
        else:
            # fix: 'rU' mode was removed in Python 3.11; 'r' already does
            # universal newline translation.
            self._file = open(filePath, 'r')
        self._currentLineNumber = 0
        self._hdSyms = headerSymbols

    def __iter__(self):
        return self

    def __next__(self):
        """Reads in next element, parses, and does minimal verification.
        Returns: tuple: (seqHeader,seqStr,qualHeader,qualStr)

        Fix: renamed from ``next`` — Python 3's iterator protocol requires
        ``__next__``; with only ``next`` the ``for rec in parser`` loop in
        this file's __main__ would raise TypeError.
        """
        # ++++ Get Next Four Lines ++++
        elemList = []
        for i in range(4):
            line = self._file.readline()
            self._currentLineNumber += 1  # increment file position
            if line:
                elemList.append(line.strip('\n'))
            else:
                elemList.append(None)
        # ++++ Check Lines For Expected Form ++++
        trues = [bool(x) for x in elemList].count(True)
        nones = elemList.count(None)
        # -- Check for acceptable end of file --
        if nones == 4:
            raise StopIteration
        # -- Make sure we got 4 full lines of data --
        assert trues == 4,\
            "** ERROR: It looks like I encountered a premature EOF or empty line.\n\
            Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**" % (self._currentLineNumber)
        # -- Make sure we are in the correct "register" --
        assert elemList[0].startswith(self._hdSyms[0]),\
            "** ERROR: The 1st line in fastq element does not start with '%s'.\n\
            Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**" % (self._hdSyms[0],self._currentLineNumber)
        assert elemList[2].startswith(self._hdSyms[1]),\
            "** ERROR: The 3rd line in fastq element does not start with '%s'.\n\
            Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**" % (self._hdSyms[1],self._currentLineNumber)
        # -- Make sure the seq line and qual line have equal lengths --
        assert len(elemList[1]) == len(elemList[3]), "** ERROR: The length of Sequence data and Quality data of the last record aren't equal.\n\
            Please check FastQ file near line number %s (plus or minus ~4 lines) and try again**" % (self._currentLineNumber)
        # ++++ Return fatsQ data as tuple ++++
        return tuple(elemList)

    # Backward-compatible alias for Python-2-style callers using parser.next().
    next = __next__
##########################################################################
if __name__ == "__main__":
    # Command-line entry point: parse and pretty-print every record of the
    # given FastQ file.
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--fastq", required=True, help="Place fastq inside here")
    args = parser.parse_args()
    #This is an example of how to use the function in your own code
    fastqfile = ParseFastQ(args.fastq)
    #A fastq read contains 4 lines
    for fastq_obj in fastqfile:
        #This fastq_obj is a tuple that has length of 4 and corresponds to those 4 lines
        #This is the header
        print(fastq_obj[0])
        #This is the sequence
        print(fastq_obj[1])
        #This is the separator
        print(fastq_obj[2])
        #This is the quality score
        print(fastq_obj[3])
        #Just an indicator showing the fastq "blocks"
        print('*'*10 + '==='*10 + '*' * 10)
| true
|
76f0026086f6695cd09221ab8f08527c7d9ac3d7
|
Python
|
Rajahx366/Codewars_challenges
|
/6kyu_Constant_value.py
|
UTF-8
| 1,094
| 4.28125
| 4
|
[] |
no_license
|
"""
Given a lowercase string that has alphabetic characters only and no spaces, return the highest value of consonant substrings. Consonants are any letters of the alphabet except "aeiou".
We shall assign the following values: a = 1, b = 2, c = 3, .... z = 26.
For example, for the word "zodiacs", let's cross out the vowels. We get: "z o d ia cs"
-- The consonant substrings are: "z", "d" and "cs" and the values are z = 26, d = 4 and cs = 3 + 19 = 22. The highest is 26.
solve("zodiacs") = 26
For the word "strength", solve("strength") = 57
-- The consonant substrings are: "str" and "ngth" with values "str" = 19 + 20 + 18 = 57 and "ngth" = 14 + 7 + 20 + 8 = 49. The highest is 57.
For C: do not mutate input.
More examples in test cases. Good luck!
If you like this Kata, please try:
Word values
"""
import string
def solve(s):
    """Return the highest value among maximal consonant runs in *s*.

    Letters are valued a=1 .. z=26; vowels split the string into runs and
    contribute nothing. An all-vowel (or empty) string yields 0.
    """
    # Blank out vowels so split() isolates the consonant runs.
    vowel_table = str.maketrans({v: " " for v in "aeiou"})
    runs = s.translate(vowel_table).split()
    best = 0
    for run in runs:
        value = sum(string.ascii_lowercase.index(ch) + 1 for ch in run)
        if value > best:
            best = value
    return best
| true
|
8e5be679ba3661d1faccce1bccff70c55c4a1455
|
Python
|
gmagannaDevelop/MorphoImg
|
/hit_or_miss.py
|
UTF-8
| 1,406
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
"""
Example taken from the docs :
https://docs.opencv.org/master/db/d06/tutorial_hitOrMiss.html
"""
import cv2 as cv
import numpy as np
def main():
    """Demonstrate OpenCV's hit-or-miss transform on a tiny binary image,
    then display the (zoomed) kernel, input, and result in three windows."""
    src = np.array((
        [0, 0, 0, 0, 0, 0, 0, 0],
        [0, 255, 255, 255, 0, 0, 0, 255],
        [0, 255, 255, 255, 0, 0, 0, 0],
        [0, 255, 255, 255, 0, 255, 0, 0],
        [0, 0, 255, 0, 0, 0, 0, 0],
        [0, 0, 255, 0, 0, 255, 255, 0],
        [0,255, 0, 255, 0, 0, 255, 0],
        [0, 255, 255, 255, 0, 0, 0, 0]), dtype="uint8")
    # Structuring element: 1 = must be foreground, -1 = must be background,
    # 0 = don't care.
    probe = np.array((
        [0, 1, 0],
        [1, -1, 1],
        [0, 1, 0]), dtype="int")
    result = cv.morphologyEx(src, cv.MORPH_HITMISS, probe)
    zoom = 50
    # Map kernel values {-1, 0, 1} onto displayable grey levels {0, 127, 254}.
    probe_img = np.uint8((probe + 1) * 127)
    probe_img = cv.resize(probe_img, None, fx=zoom, fy=zoom, interpolation=cv.INTER_NEAREST)
    cv.imshow("kernel", probe_img)
    cv.moveWindow("kernel", 0, 0)
    src = cv.resize(src, None, fx=zoom, fy=zoom, interpolation=cv.INTER_NEAREST)
    cv.imshow("Original", src)
    cv.moveWindow("Original", 0, 200)
    result = cv.resize(result, None, fx=zoom, fy=zoom, interpolation=cv.INTER_NEAREST)
    cv.imshow("Hit or Miss", result)
    cv.moveWindow("Hit or Miss", 500, 200)
    cv.waitKey(0)
    cv.destroyAllWindows()
if __name__ == "__main__":
main()
| true
|
76c87d1fbe0cf0ace370bbefb48ac7374a1add68
|
Python
|
udayt-7/Data-Structures-Python
|
/Linked_Lists/p6.py
|
UTF-8
| 758
| 3.9375
| 4
|
[] |
no_license
|
#6. Create two separate single lists. Check two list are same.
##If the two lists have the same number of elements in the same order, then they are treated as same.
import link1
# Build two linked lists with identical contents:
# addfirst prepends, addlast appends -> 11 -> 1 -> 33 -> 43 -> 53 -> 63
l1 = link1.Llist()
l2 = link1.Llist()
l1.addfirst(1)
l1.addfirst(11)
l1.addlast(33)
l1.addlast(43)
l1.addlast(53)
l1.addlast(63)
print("LIST 1")
l1.displaylist()
# Same construction sequence, so l2 should compare equal to l1.
l2.addfirst(1)
l2.addfirst(11)
l2.addlast(33)
l2.addlast(43)
l2.addlast(53)
l2.addlast(63)
print("LIST 2")
l2.displaylist()
def lcheck(l1,l2):
    """Print whether two singly linked lists hold the same values in the
    same order.

    Fix: the original while/else reported lists of *different lengths* as
    equal, because the loop also exits (without break) as soon as the
    shorter list runs out. Now both cursors must be exhausted together.
    """
    cur1 = l1.head
    cur2 = l2.head
    while cur1 is not None and cur2 is not None:
        if cur1.data != cur2.data:
            print("\nDifferent Linked lists")
            return
        cur1 = cur1.next
        cur2 = cur2.next
    # Equal only if BOTH lists were fully consumed (same length).
    if cur1 is None and cur2 is None:
        print("\nlists are equal")
    else:
        print("\nDifferent Linked lists")
lcheck(l1,l2)
| true
|
72b6902a63e5ea18efc88caa68cd3631af0addae
|
Python
|
ComiteMexicanoDeInformatica/OMI-Archive
|
/2022/OMIPS-presencial/OMIPS-2022-mundo-espejo/validator.py3
|
UTF-8
| 1,813
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import libkarel
def valida(mundo, salida) -> int:
    """Score a "mirror world" solution.

    The output world must be a left/right mirror of itself (and the middle
    column empty); if it is not, the score is 0. Otherwise the score is
    derived from how many beeper moves (cambios) the contestant made,
    compared against the theoretical minimum and the total beeper count.

    Fix: removed the unreachable ``return 100`` that followed the exhaustive
    if/elif/else — every path already returned.
    """
    ancho = mundo.w + 1
    mitad = ancho // 2
    cambios = 0
    minimo = 0
    total = 0
    # First validate that the output world is correctly mirrored.
    for fil in range(1, mundo.h + 1):
        for col in range(1, mitad):
            if salida.zumbadores(col, fil) != salida.zumbadores(ancho - col, fil):
                return 0
            # The middle column must stay empty.
            if salida.zumbadores(mitad, fil) > 0:
                return 0
            # Mirror is valid here: tally the changes against the input world.
            cambios += abs(salida.zumbadores(col, fil) - mundo.zumbadores(col, fil))
            cambios += abs(salida.zumbadores(ancho - col, fil) - mundo.zumbadores(ancho - col, fil))
            minimo += abs(mundo.zumbadores(col, fil) - mundo.zumbadores(ancho - col, fil))
            total += mundo.zumbadores(col, fil) + mundo.zumbadores(ancho - col, fil)
    logging.debug("%s", cambios)
    logging.debug("%s", minimo)
    logging.debug("%s", total)
    # Partial credit based on the number of changes made.
    if cambios <= minimo:
        return 100
    elif cambios >= total:
        return 0
    else:
        # NOTE(review): this formula awards MORE points for MORE changes,
        # yet cambios == minimo scores 100 — confirm it shouldn't be
        # (total - cambios) * 100 / (total - minimo).
        return (cambios - minimo) * 100 / (total - minimo)
def _main():
    """Entry point for the grader: load the Karel case via libkarel,
    validate the contestant's output world, and print the score as a
    fraction in [0, 1]. Any runtime failure is logged and scored 0."""
    logging.basicConfig(level=logging.DEBUG)
    score = 0
    try:
        result = libkarel.load_dict()
        mundo = result['case_input']
        contestant = result['contestant_output']
        if contestant.error:
            # Contestant's program errored: no credit.
            score = 0
        else:
            score = valida(mundo, contestant)
    except Exception:
        logging.exception('Karel Runtime Error')
    finally:
        # valida() returns 0-100; the judge expects 0.0-1.0.
        print(score / 100)
        logging.shutdown()
| true
|
3e6fa1998a890be1da61eb0798254226305028b3
|
Python
|
msvilaite/Naive-Bayes
|
/pre-process.py
|
UTF-8
| 3,697
| 3.328125
| 3
|
[] |
no_license
|
import string
import os
import json
givenVocabulary = {}
trainDict = {}
testList = []
# This function reads all the files in a given directory that contains movie reviews
# Then the reviews are cleaned up ("<br /><br />" symbols removed, some punctuation removed, ? and ! separated)
# Everything is lowercased
# And then all the reviews from a given directory are concatenated into one .txt file
# Absolute paths have to be used when looping through filenames and opening files
def concatenateReviews(reviewDir, outputDir):
    """Clean every review file under ~/Desktop/<reviewDir> and append them,
    one review per line, to ~/Desktop/<outputDir>.

    Cleaning: strips HTML line breaks and most punctuation, separates '?'
    and '!' into their own tokens, and lowercases every word.

    Fixes: files are now closed via context managers (the output file was
    never closed), and the newline is written once PER REVIEW so that
    makeTestList's splitlines() sees one review per line.
    """
    print(os.path.expanduser("~/Desktop/" + reviewDir + "/"))
    with open(os.path.expanduser("~/Desktop/" + outputDir), 'a') as outputFile:
        for filename in os.listdir(os.path.expanduser("~/Desktop/" + reviewDir)):
            path = os.path.expanduser("~/Desktop/" + reviewDir + "/" + filename)
            with open(path) as file:
                text = file.read()
            text = text.replace("<br /><br />", " ")
            # Drop punctuation that should not become part of a token.
            for ch in ('"', '.', ',', ';', '(', ')'):
                text = text.replace(ch, "")
            # Keep ? and ! but as standalone tokens.
            text = text.replace("?", " ?")
            text = text.replace("!", " !")
            for word in text.split():
                outputFile.write(word.lower() + " ")
            # One review per output line.
            outputFile.write("\n")
# The vocabulary from the imdb.vocab file is stored in a global dictionary, for efficient queries
def getVocab():
    """Load aclImdb/imdb.vocab into the global givenVocabulary dict
    (word -> 1) so later membership checks are O(1).

    Fix: dropped the redundant explicit close() — the with-statement
    already closes the file.
    """
    with open("aclImdb/imdb.vocab") as vocFile:
        for line in vocFile:
            givenVocabulary[line.replace("\n", "")] = 1
# A structure of training data is produced, to be later stored in "movie-review-BOW.JSON" file, and to be passed to NB.py
def makeTrainDict(reviewFile, classIndex):
    """Accumulate per-class word counts from *reviewFile* into the global
    trainDict: trainDict[word] is a two-slot list [pos_count, neg_count]
    and *classIndex* selects the slot (0 = positive, 1 = negative).

    Only words present in the global givenVocabulary are counted.

    Fix: the original initialized NEW words in the wrong slot
    (classIndex == 1 produced [1, 0]), inconsistent with the increment
    branch which bumps index *classIndex*. New entries now put the first
    count in slot *classIndex*. Also reads the file via a context manager.
    """
    with open(reviewFile) as f1:
        listOfWords = f1.read().split()
    for word in listOfWords:
        if word in trainDict:
            trainDict[word][classIndex] += 1
        elif word in givenVocabulary:
            counts = [0, 0]
            counts[classIndex] = 1
            trainDict[word] = counts
# Two testing data structures are produced, one for pos, one for neg
# They will also be stored in JSON files and passed to NB.py
def makeTestList(reviewFile):
    """Append one bag-of-words dict (word -> count) per review line of
    *reviewFile* to the global testList.

    NOTE(review): testList is a module-level list that is never cleared
    between calls, so a second call appends to the reviews of the first —
    confirm the resulting JSON dumps are meant to accumulate.

    Fix: reads the file via a context manager.
    """
    with open(reviewFile) as f1:
        listOfReviews = f1.read().splitlines()
    for line in listOfReviews:
        review = {}
        for word in line.split():
            review[word] = review.get(word, 0) + 1
        testList.append(review)
# This function is used to export data structures into JSON files
def createJSON(JSONfile, structure):
    """Serialize *structure* as JSON to the path *JSONfile* (overwriting).

    Fix: dropped the redundant explicit close() inside the with-block.
    """
    with open(JSONfile, 'w') as outfile:
        json.dump(structure, outfile)
# Build the training bag-of-words: concatenate/clean both classes, load the
# vocabulary, count words per class, and dump the result to JSON.
concatenateReviews("aclImdb/train/pos", "movie-review-BOW-train-pos.txt")
concatenateReviews("aclImdb/train/neg", "movie-review-BOW-train-neg.txt")
getVocab()
makeTrainDict("movie-review-BOW-train-pos.txt", 0)
makeTrainDict("movie-review-BOW-train-neg.txt", 1)
createJSON("movie-review-BOW.JSON", trainDict)
# Build the test structures per class.
concatenateReviews("aclImdb/test/pos", "movie-review-BOW-test-pos.txt")
concatenateReviews("aclImdb/test/neg", "movie-review-BOW-test-neg.txt")
makeTestList("movie-review-BOW-test-pos.txt")
createJSON("movie-review-test-pos.JSON", testList)
# NOTE(review): testList is not reset here, so the "neg" JSON below also
# contains all the positive reviews appended above — confirm intent.
makeTestList("movie-review-BOW-test-neg.txt")
createJSON("movie-review-test-neg.JSON", testList)
| true
|
9041cfe61e8df0f08d57c32fb169c5c8c4c71793
|
Python
|
ccmoradia/fastbt
|
/utils.py
|
UTF-8
| 1,544
| 3
| 3
|
[] |
no_license
|
import pandas as pd
import itertools
def multi_args(function, constants, variables, isProduct=None):
    """Run *function* over several argument scenarios in parallel and
    aggregate the results into a pandas Series.

    function
        callable to be parametrized
    constants
        dict of argument name -> value held fixed across all scenarios
    variables
        dict of argument name -> list of values; the lists are zipped
        together, so the i-th scenario takes the i-th value of each list
    isProduct
        list of variables whose Cartesian product should be tried.
        Not implemented; zipping is always used.

    Returns a Series named 'values' whose MultiIndex levels are the
    variable names and whose entries are the per-scenario results.
    """
    from functools import partial
    import concurrent.futures

    bound = partial(function, **constants)
    names = variables.keys()
    # One tuple of values per scenario, in declaration order.
    scenarios = [combo for combo in zip(*variables.values())]
    with concurrent.futures.ProcessPoolExecutor() as pool:
        futures = [
            pool.submit(bound, **{name: value for name, value in zip(names, combo)})
            for combo in scenarios
        ]
        outcomes = [future.result() for future in futures]
    series = pd.Series(outcomes)
    series.name = 'values'
    series.index = pd.MultiIndex.from_tuples(scenarios, names=names)
    return series
if __name__ == "__main__":
pass
| true
|
d80dedec7a371396b045fa289bd267ed07a59729
|
Python
|
vaibhavyesalwad/Basic-Python-and-Data-Structure
|
/Logical Programs/FindNumsForSum.py
|
UTF-8
| 344
| 4.0625
| 4
|
[] |
no_license
|
"""Find pairs of numbers adding to given sum"""
arr = [10, 20, 25, 30, 40, 50, 60]
print(arr)
summation = int(input('Enter sum to find numbers:'))
for i in range(len(arr)-1): # this block find combinations of 2 terms
for j in range(i+1, len(arr)):
if arr[i]+arr[j] == summation:
print(arr[i], arr[j])
| true
|
e24c750f0e885c968f7b1d36bc636cbbae54d705
|
Python
|
muzaferxp/PythonProgramming
|
/Day8/app.py
|
UTF-8
| 461
| 3.203125
| 3
|
[] |
no_license
|
#method 1: plain `import module` — everything is accessed via the module name
'''
import sample_module
print(sample_module.state)
print(sample_module.name)
num = sample_module.mult(10,5)
print(num)
'''
#method 2: `from module import ...` — names are pulled into this namespace
'''
#from sample_module import mult,sub
from sample_module import *
num = mult(5,6)
print(num)
print(sub(5,4))
print(add(6,8))
'''
#method 3: import with an alias (`as`) — the only method actually executed here
from sample_module import division as dv
from datetime import *
date = datetime.now()
print(date)
print(dv(8,4))
| true
|
9461d901f79e28fe9ead42f882824a1fdb3f80d6
|
Python
|
rohanpahwa1/hacker_rank_solutions
|
/library_fine.py
|
UTF-8
| 278
| 3.140625
| 3
|
[] |
no_license
|
# Library fine calculator: compare the actual return date with the due date.
d1,m1,y1=map(int,input().split()) # actual return date (day month year)
d2,m2,y2=map(int,input().split()) # due date: returning by this date avoids a fine
fine=0
# Late by days only (same month and year): 15 per day.
if d1>d2 and m1==m2 and y1==y2:
    fine=(d1-d2)*15
# Late by months within the same year: 500 per month, days ignored.
elif m1>m2 and y1==y2:
    fine=(m1-m2)*500
# Late by years.
# NOTE(review): the usual HackerRank spec charges a FLAT 10000 once the
# year is late — confirm whether (y1-y2)*10000 is intended here.
elif y1>y2:
    fine=(y1-y2)*10000
print(fine)
| true
|
cb40a8796587e4f9579f87ad792801bbbb28e7dc
|
Python
|
user01/dask-spark-experiment
|
/nnpair_nump.py
|
UTF-8
| 32,454
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
import pandas as pd
import numpy as np
import json
from numba import jit
# #############################################################################
# Heavily modified from: https://github.com/fhirschmann/rdp
# Copyright (c) 2014 Fabian Hirschmann <fabian@hirschmann.email>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def pldist(point, start, end):
    """
    Calculates the distance from ``point`` to the line *segment* given
    by the points ``start`` and ``end`` (the projection is clamped to the
    segment, so this is point-to-segment, not point-to-infinite-line).
    :param point: a point
    :type point: numpy array
    :param start: a point of the line
    :type start: numpy array
    :param end: another point of the line
    :type end: numpy array
    """
    # Degenerate (zero-length) segment: plain point-to-point distance,
    # and avoids the division by segment_sqr_length below.
    if np.all(np.equal(start, end)):
        return np.linalg.norm(point - start)
    segment = end - start
    point_start = point - start
    segment_sqr_length = np.sum(segment * segment)
    # t is the projection parameter of `point` onto the segment
    # (0 at start, 1 at end).
    t = np.sum(segment * point_start) / segment_sqr_length
    if t < 0.0:
        near_pt = start
    elif t >= 1.0:
        near_pt = end
    else:
        near_pt = start + t * segment
    return np.linalg.norm(point - near_pt)
# pldist(np.array([0,0.]),np.array([0,0.]),np.array([1,0.]))
# pldist(np.array([1,0.]),np.array([0,0.]),np.array([1,0.]))
# pldist(np.array([0.5,0.]),np.array([0,0.]),np.array([1,0.]))
# pldist(np.array([2,0.]),np.array([0,0.]),np.array([1,0.]))
# pldist(np.array([2,1.]),np.array([0,0.]),np.array([1,0.]))
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def _rdp_iter(M, start_index, last_index, epsilon):
    # Iterative (explicit-stack) core of Ramer-Douglas-Peucker: returns a
    # boolean keep-mask over M[start_index..last_index].
    stk = []
    stk.append([start_index, last_index])
    global_start_index = start_index
    # Start with every point kept; interior points are knocked out below.
    indices = np.ones(last_index - start_index + 1) > 0
    while stk:
        start_index, last_index = stk.pop()
        dmax = 0.0
        index = start_index
        # Find the still-kept point farthest from the chord start->last.
        for i in range(index + 1, last_index):
            if indices[i - global_start_index]:
                d = pldist(M[i], M[start_index], M[last_index])
                if d > dmax:
                    index = i
                    dmax = d
        if dmax > epsilon:
            # Farthest point is too far to drop: split at it and process
            # both halves (stack replaces recursion for nopython mode).
            stk.append([start_index, index])
            stk.append([index, last_index])
        else:
            # Whole span lies within epsilon of the chord: drop its interior.
            for i in range(start_index + 1, last_index):
                indices[i - global_start_index] = False
    return indices
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def rdp_iter(M, epsilon):
    """
    Simplifies a given array of points with the Ramer-Douglas-Peucker
    algorithm (iterative version) and returns the retained points.
    :param M: an array of points
    :type M: numpy array
    :param epsilon: epsilon in the rdp algorithm
    :type epsilon: float
    :returns: the subset of M kept by the simplification
    """
    mask = rdp_mask(M, epsilon)
    return M[mask]
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def rdp_mask(M, epsilon):
    """
    Computes the boolean keep-mask for Ramer-Douglas-Peucker simplification
    of ``M`` (iterative version); ``M[mask]`` is the simplified sequence.
    :param M: an array of points
    :type M: numpy array
    :param epsilon: epsilon in the rdp algorithm
    :type epsilon: float
    :returns: boolean mask, same length as M
    """
    mask = _rdp_iter(M, 0, len(M) - 1, epsilon)
    return mask
# #############################################################################
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def np_dot(x, y, axis=1):
    """
    Axis based dot product of vectors
    (element-wise multiply, then sum along ``axis``).
    """
    return np.sum(x * y, axis=axis)
#
# @jit(nopython=True, fastmath=True, cache=True, parallel=False)
# def plane_masks(normals, pts_plane, pts_forward, pts_test):
# """
# """
# correct_side_signs = np_dot(pts_forward - pts_plane, normals, axis=1)
# diff = pts_test.reshape(-1, 1, 3) - pts_plane.reshape(1, -1, 3)
# masks = np_dot(diff, normals.reshape(1, -1, 3), axis=2) * correct_side_signs.reshape(1, -1) >= 0
# return masks
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def _plane_masks(pts_plane, pts_forward, pts_test):
    """
    Given points on the plane, a point forward from the
    plane (normal via the first), and points to test as forward of the plane,
    return the mask of points that test as in front

    pts_plane / pts_forward are (k, 3) and pts_test is (m, 3); the returned
    boolean mask is (m, k) — one column per plane.
    """
    # Each plane's normal points from its plane point toward its "forward" point.
    normals = pts_forward - pts_plane
    # Sign of the forward side (normal . (forward - plane)); kept explicit so
    # the >= 0 test below is orientation-independent.
    correct_side_signs = np_dot(pts_forward - pts_plane, normals, axis=1)
    diff = pts_test.reshape(-1, 1, 3) - pts_plane.reshape(1, -1, 3)
    # A point is "in front" when its offset projects onto the normal with the
    # same sign as the forward point (zero counts as in front).
    masks = np_dot(diff, normals.reshape(1, -1, 3), axis=2) * correct_side_signs.reshape(1, -1) >= 0
    return masks
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def generate_all_masks(sequence, pts_test):
    """
    Generate all the masks for the sequence (a series of 3d points)
    To be 'adjacent' to a segment section, the point must be ahead of the
    'ahead' planes (ie in front of the starting plane) and behind the 'behind'
    planes (ie behind the ending plane)
    The planes are both orthogonal (flat capped for just this segment) and
    bisection (angled between the segment and it's neighboring segment). The
    orthogonal are always valid, but bisection is only valid when there is a
    neighboring segment
    If a point is ahead of either ahead planes and behind either behind
    planes, then the point is adjacent.

    Returns a boolean array of shape (n_points, n_segments).
    """
    assert pts_test.shape[1] == 3
    # need to compute ahead masks and behind masks
    # for each there's the orthogonal and the bisection
    # orthogonal exist for every segment, while bisections for all the first and last
    # so bisections automatically are false at the edges
    mask_ahead_ortho = _plane_masks(pts_plane=sequence[:-1], pts_forward=sequence[1:], pts_test=pts_test)
    mask_behind_ortho = _plane_masks(pts_plane=sequence[1:], pts_forward=sequence[:-1], pts_test=pts_test)
    # f"There are {mask_behind_ortho.shape[1]} segments in this sequence"
    # Bisection planes sit at the midpoint between each vertex and its
    # second neighbour.
    pts_plane = sequence[:-2] + (0.5 * (sequence[2:] - sequence[:-2]))
    mask_ahead_bisect = np.concatenate(
        (
            np.array([False] * mask_ahead_ortho.shape[0]).reshape(-1, 1),
            _plane_masks(pts_plane=pts_plane, pts_forward=sequence[:-2], pts_test=pts_test),
        ),
        axis=1,
    )
    mask_behind_bisect = np.concatenate(
        (
            _plane_masks(pts_plane=pts_plane, pts_forward=sequence[2:], pts_test=pts_test),
            np.array([False] * mask_ahead_ortho.shape[0]).reshape(-1, 1),
        ),
        axis=1,
    )
    # bisection masks at the extreme fail automatically - they have nothing to compare against
    mask_points_per_segment = (mask_ahead_ortho | mask_ahead_bisect) & (mask_behind_ortho | mask_behind_bisect)
    assert mask_points_per_segment.shape == (pts_test.shape[0], sequence.shape[0] - 1)
    return mask_points_per_segment
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def closests_pt(p, a_s, b_s):
    """Find the closest point on series of segments to the point.

    ``a_s``/``b_s`` are (k, 3) start/end points of k segments; returns
    ``(distance, point)`` for the nearest location on any of them.
    """
    # given multiple segments, pick the closest point on any to the point p
    assert a_s.shape == b_s.shape
    ab_s = b_s - a_s
    ap_s = p.reshape(1, 3) - a_s
    ab_sqr_len = np.sum(ab_s * ab_s, axis=1)
    # Projection parameter of p onto each segment (0 at a, 1 at b).
    t_s = np.sum(ab_s * ap_s, axis=1) / ab_sqr_len
    assert t_s.shape[0] == a_s.shape[0]
    # -1.0 is a sentinel meaning "no candidate seen yet".
    smallest_distance = -1.0
    for idx in range(t_s.shape[0]):
        t = t_s[idx]
        if t < 0:
            picked = a_s[idx] # start
        elif t > 1:
            picked = b_s[idx] # end
        else:
            # pick along segment
            picked = a_s[idx] + t * (b_s[idx] - a_s[idx])
        distance = np.linalg.norm(picked - p)
        if smallest_distance < 0 or smallest_distance > distance:
            smallest_distance = distance
            best_point = picked
    return smallest_distance, best_point
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def pick_points(sequence, points, wbt_id, api_ids, mds, threshold: float):
    """
    Given a WBT sequence, finds points within the planer masks and finds
    closest point on WBT
    Returns wbt_id, api_id, distance (m), md (m), wbt_pt(xyz), nns_pt(xyz)
    as the 10 columns of an array with one row per point whose nearest
    location on the sequence is within ``threshold``.
    """
    assert points.shape[0] == mds.shape[0]
    assert points.shape[1] == 3
    assert len(mds.shape) == 1
    mask_points_per_segment = generate_all_masks(sequence, pts_test=points)
    # -1 marks "no match"; matched rows are overwritten, then filtered below.
    results = np.ones((mask_points_per_segment.shape[0], 10)) * -1
    # Segment start/end vertices.
    vs = sequence[:-1]
    ws = sequence[1:]
    for idx in range(mask_points_per_segment.shape[0]):
        mask_for_segments = mask_points_per_segment[idx]
        # Skip points that are not adjacent to any segment.
        if not np.any(mask_for_segments):
            continue
        # Closest location on any segment this point is adjacent to.
        distance, point = closests_pt(points[idx], vs[mask_for_segments], ws[mask_for_segments])
        if distance <= threshold:
            results[idx] = np.array(
                [
                    wbt_id, # 0
                    api_ids[idx], # 1
                    distance, # 2
                    mds[idx], # 3
                    point[0], # 4
                    point[1], # 5
                    point[2], # 6
                    points[idx][0], # 7
                    points[idx][1], # 8
                    points[idx][2], # 9
                ]
            )
    return results[results[:, 0] > -1]
#
#
# x_ = np.linspace(-2., 7., 40)
#
# points = []
# for x in x_:
# for y in x_:
# for z in x_:
# points.append((x,y,z))
# points = np.stack(points)
#
# mds = np.random.RandomState(451).randn(points.shape[0])
# sequence = np.array([
# [0,0,0],
# [0,1,0],
# [0,2,1],
# [0,3,4],
# [1,4,5],
# ])
#
# # vector style
# np.stack([sequence[idx:idx+2] for idx in range(sequence.shape[0] - 1)]).reshape(-1, 3).tolist()
#
# # points.shape
# %timeit pick_points(sequence=sequence.astype(np.float64), mds=mds, api_ids=mds, points=points.astype(np.float64), threshold=1.5)
# # res = pick_points.py_func(sequence=sequence.astype(np.float64), mds=mds, api_ids=mds, points=points.astype(np.float64), threshold=1.5)
# # %timeit pick_points.py_func(sequence=sequence.astype(np.float64), mds=mds, points=points.astype(np.float64), threshold=1.5)
# # # results = pick_points(sequence=sequence.astype(np.float64), points=points.astype(np.float64), threshold=1.5)
# # # results = pick_points(sequence=sequence.astype(np.float64), points=points.astype(np.float64), threshold=1.5)
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def np_cross(a, b):
    """
    Simple numba compatible cross product of two 3-vectors
    (hand-expanded determinant form of a x b).
    """
    return np.array([
        a[1] * b[2] - a[2] * b[1],
        a[2] * b[0] - a[0] * b[2],
        a[0] * b[1] - a[1] * b[0],
    ])
# a = np.array([-0.23193776, -0.70617841, -0.66896706])
# b = np.array([-0.14878152, -0.64968963, 0.74549812])
# np.allclose(np.cross(a, b), np_cross(a, b))
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def np_linalg_norm(data, axis=0):
    """
    Simple numba compatible Euclidean norm (sqrt of sum of squares)
    along ``axis``. (Docstring previously said "cross product" — copy/paste.)
    """
    return np.sqrt(np.sum(data * data, axis=axis))
# data = np.array([
# [0,0,0],
# [0,1,0],
# [1,1,0],
# [3,4,5],
# ])
# np.allclose(np_linalg_norm(data), np.sqrt(np.sum(data * data, axis=1)))
# np.allclose(np_linalg_norm(data), np.linalg.norm(data, axis=1))
# np.allclose(np_linalg_norm(np.array([3,4,0])), 5)
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def np_clip(arr, a_min, a_max):
    """
    Simple numba compatible clipping of array. Both min and max are required.
    """
    # Two-pass where(): first raise values below a_min, then cap above a_max.
    lower = np.where(arr < a_min, a_min, arr)
    upper = np.where(lower > a_max, a_max, lower)
    return upper
# arr = np.arange(5)
# np.allclose(np.clip(arr, a_min=1, a_max=3), np.array([1, 1, 2, 3, 3]))
# np.allclose(np_clip(arr, a_min=1, a_max=3), np.array([1, 1, 2, 3, 3]))
# arr = np.array([
# [1,0,1],
# [4,-4,9.0],
# [90,-9000.023,6],
# ])
# expected = np.array([
# [1,0,1],
# [1,0,1.0],
# [1,0,1],
# ])
# np.allclose(np.clip(arr, a_min=0, a_max=1), expected)
# np.allclose(np_clip(arr, a_min=0, a_max=1), expected)
# ####################################################################
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def np_min(arr):
    """
    np.min across axis 0; note the result is cast to float32.
    """
    # Interestingly, for small sets, this is 5x the default np.min(axis=0)
    return np.array([arr[:, axis].min() for axis in range(arr.shape[1])], dtype=np.float32)
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def np_max(arr):
    """
    np.max across axis 0; note the result is cast to float32.
    """
    # Interestingly, for small sets, this is 5x the default np.max(axis=0)
    return np.array([arr[:, axis].max() for axis in range(arr.shape[1])], dtype=np.float32)
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def relevant_coordinates_mask(api_ids, points, sequence, threshold:float):
    """
    Row-wise mask for the apis that are within the simple bounding box
    for consideration.

    Fix: the mask was initialized with ``np.zeros(...) <= 0.0`` — i.e.
    all-True — so every subsequent OR was a no-op and the prefilter kept
    every row unconditionally. It now starts all-False and ORs in only
    the APIs whose min/max corners touch the padded bounding box.
    """
    # Axis-aligned bounding box of the sequence, padded by the threshold.
    buffer_min = np_min(sequence) - threshold
    buffer_max = np_max(sequence) + threshold
    # Start with nothing selected.
    msk = np.zeros(api_ids.shape) > 0.0
    for api_id in np.unique(api_ids):
        msk_current = api_ids == api_id
        points_api = points[msk_current]
        # NOTE(review): np.any over the 3 axes marks an API relevant when ANY
        # single axis of its min/max corner overlaps — a deliberately loose
        # prefilter; confirm this matches the intended coarse screen.
        local_min = np_min(points_api)
        if np.any((buffer_min <= local_min) & (local_min <= buffer_max)):
            msk = msk | msk_current
            continue
        local_max = np_max(points_api)
        if np.any((buffer_min <= local_max) & (local_max <= buffer_max)):
            msk = msk | msk_current
    return msk
# ####################################################################
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def wbt_coordinate_prepare(wbts_api_id, coordinates_np, epsilon):
    """
    Prepare the coordinates for processing.

    Splits ``coordinates_np`` (columns: api_id, md, x, y, z — inferred from
    the [:, 0], [:, 1], [:, 2:] slices below) into the RDP-simplified xyz
    sequence of the target well and the api/md/xyz arrays of all other
    wells, returned as contiguous arrays for the downstream numba kernels.
    """
    wbt_mask = coordinates_np[:, 0] == wbts_api_id
    coordinates_wbt = coordinates_np[wbt_mask, :]
    coordinates_other = coordinates_np[~wbt_mask, :]
    # Simplify the target well path so fewer segments are tested downstream.
    xyz_sequence = rdp_iter(coordinates_wbt[:, 2:], epsilon=epsilon)
    apis_others = coordinates_other[:, 0]
    md_others = coordinates_other[:, 1]
    xyz_other = coordinates_other[:, 2:]
    return (
        np.ascontiguousarray(xyz_sequence),
        np.ascontiguousarray(md_others),
        np.ascontiguousarray(apis_others),
        np.ascontiguousarray(xyz_other),
    )
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def nnpairs(
    wbts_api_ids: np.array,
    coordinates: np.array,
    spi: np.array,
    threshold: float = 914.0,
    segment_length: float = 15.0,
):
    """
    Calculate the pairwise relationships between WBTs and NNs.

    Returns the vector relations of nns position to wbt and the derived statistics.
    Works only with float32.

    wbts_api_ids: numeric API_IDs of the target wells
    coordinates: rows of (API_ID, MD, X, Y, Z)
    spi: rows of (API_ID, wellhead xyz, east xyz, north xyz, lateral angle)
    threshold: neighbour search radius passed to pick_points (meters)
    segment_length: interpolation spacing along measured depth (meters)
    """
    spi_values = spi.astype(np.float32)
    # resample every well to evenly spaced points before pairing
    coordinates_np = interpolate_coords(coordinates.astype(np.float32), segment_length=segment_length)
    vectors_lst = []
    stats_lst = []
    for wbts_api_id in wbts_api_ids.astype(np.float32):
        sequence, mds, api_ids, points_all = wbt_coordinate_prepare(wbts_api_id, coordinates_np, epsilon=segment_length)
        # # benchmarks find this to be slightly slower under a test set
        # points_msk = relevant_coordinates_mask(api_ids, points_all, sequence, threshold=threshold)
        # points = points_all[points_msk]
        points = points_all
        vectors = pick_points(
            wbt_id=wbts_api_id,
            sequence=sequence,
            mds=mds,
            api_ids=api_ids,
            points=points,
            threshold=threshold,
        ).astype(
            np.float32
        )
        # Vector rows are: api_id, distance (m), md (m), wbt_pt(xyz), nns_pt(xyz)
        assert vectors.shape[1] == 10, "Improper vector size"
        vectors_lst.append(vectors)
        # vector style outputs - use this code to plot in view-points
        # if False:
        #     vectors[vectors[:, 0] == 4, 3:9].reshape(-1, 3).round(0).tolist()
        # Compute the common WBT values
        spi_value = spi_values[spi_values[:, 0] == wbts_api_id][0]
        vector_cos_angle_lat_wbt = spi_value[10]  # angle in degrees
        wellhead = spi_value[1:4]
        east = spi_value[4:7]
        north = spi_value[7:10]
        east_delta = east - wellhead
        north_delta = north - wellhead
        # local 'up' unit vector from the east/north reference frame
        local_up = np_cross(east_delta, north_delta)
        local_up_len = np.linalg.norm(local_up)
        local_up_unit = local_up / local_up_len
        wbt = vectors[:, 4:7]
        nns = vectors[:, 7:10]
        delta = nns - wbt
        distance_3d = np_linalg_norm(delta, axis=1)
        distance_3d_valid = distance_3d > 1e-9
        # guard the divisions below against coincident points
        distance_3d_local_safe = np.where(distance_3d_valid, distance_3d, 1)
        # component of delta along local 'up' = the vertical separation
        projected_vertical = local_up_unit.reshape(-1, 3) * (np.sum(local_up_unit * delta, axis=1)).reshape(-1, 1)
        distance_vertical = np_linalg_norm(projected_vertical, axis=1)
        assert not (distance_vertical > distance_3d_local_safe).any()
        theta_valid = distance_3d_valid & (distance_3d > distance_vertical)
        theta = np.where(
            theta_valid, np.arcsin(np_clip(distance_vertical / distance_3d_local_safe, a_min=-1, a_max=1)), np.pi / 2
        )
        distance_2d = (distance_3d ** 2 - distance_vertical ** 2) ** 0.5
        wbt_heel_xyz = sequence[0]
        wbt_toe_xyz = sequence[-1]
        lateral = wbt_toe_xyz - wbt_heel_xyz
        lateral_unit = lateral / np_linalg_norm(lateral)
        lateral_normal = np_cross(lateral_unit, local_up_unit)
        # correct_side_sign = np.dot(lateral_normal, lateral_normal)
        # # this will always be positive - dotting a vector with itself
        # # the correct side is the 'right' side. funny, right?
        nns_ids = np.unique(vectors[:, 1])
        # 34 stat columns per neighbour; -50 marks never-written values
        stats = np.ones((nns_ids.shape[0], 34)) * -50
        # percentiles used for every distance metric below (loop-invariant)
        PERCENTILES = [0, 25, 50, 75, 100]
        for idx, nns_id in enumerate(nns_ids):
            stats[idx, 0] = nns_id
            stats[idx, 1] = wbts_api_id
            mask_nns = vectors[:, 1] == nns_id
            vectors_nns = vectors[mask_nns]
            # span of column 3 ('Distance' per process_vectors; the original
            # comment said 'md diffs' - confirm intended column with pick_points)
            distance = vectors_nns[-1, 3] - vectors_nns[0, 3]
            stats[idx, 2] = distance
            vector_cos_angle_lat_nns = spi_values[spi_values[:, 0] == nns_id][0, 10]
            stats[idx, 3] = angle_diff(vector_cos_angle_lat_wbt, vector_cos_angle_lat_nns)
            # TODO: Azimuth delta
            # BUGFIX: use this neighbour's own first/last rows (vectors_nns);
            # the original read vectors[0] / vectors[-1], i.e. the first/last
            # rows across ALL neighbours, so sidenns_heel/toe were computed
            # from the wrong well.
            nns_heel_xyz = vectors_nns[0, 7:10]
            nns_toe_xyz = vectors_nns[-1, 7:10]
            # the vector 'shadow' on the right facing normal is the distance from the
            # lateral plane, the plane that touches the heel, toe, and a position up
            # up being determined by the wellhead
            # 1.0 == right
            # 2.0 == left
            sidenns_heel = 1.0 if np.dot(lateral_normal, nns_heel_xyz) > 0 else 2.0
            sidenns_toe = 1.0 if np.dot(lateral_normal, nns_toe_xyz) > 0 else 2.0
            stats[idx, 4] = sidenns_heel
            stats[idx, 5] = sidenns_toe
            distance_2d_nns = distance_2d[mask_nns]
            stats[idx, 6] = np.mean(distance_2d_nns)
            stats[idx, 7] = np.std(distance_2d_nns)
            stats[idx, 8:13] = np.percentile(distance_2d_nns, PERCENTILES)
            distance_3d_nns = distance_3d[mask_nns]
            stats[idx, 13] = np.mean(distance_3d_nns)
            stats[idx, 14] = np.std(distance_3d_nns)
            stats[idx, 15:20] = np.percentile(distance_3d_nns, PERCENTILES)
            distance_vertical_nns = distance_vertical[mask_nns]
            stats[idx, 20] = np.mean(distance_vertical_nns)
            stats[idx, 21] = np.std(distance_vertical_nns)
            stats[idx, 22:27] = np.percentile(distance_vertical_nns, PERCENTILES)
            theta_nns = theta[mask_nns]
            stats[idx, 27] = np.mean(theta_nns)
            stats[idx, 28] = np.std(theta_nns)
            stats[idx, 29:34] = np.percentile(theta_nns, PERCENTILES)
        stats_lst.append(stats.astype(np.float32))
    # Manual concatenation (numba-compatible replacement for np.concatenate)
    # TODO: pull this out into numba function
    size = 0
    idx = 0
    for arr in stats_lst:
        size += arr.shape[0]
    stats_all = np.empty(shape=(size, stats_lst[0].shape[1]), dtype=np.float32)
    for arr in stats_lst:
        size = arr.shape[0]
        stats_all[idx:idx+size, :] = arr
        idx += size
    size = 0
    idx = 0
    for arr in vectors_lst:
        size += arr.shape[0]
    vectors_all = np.empty(shape=(size, vectors_lst[0].shape[1]), dtype=np.float32)
    for arr in vectors_lst:
        size = arr.shape[0]
        vectors_all[idx:idx+size, :] = arr
        idx += size
    return vectors_all, stats_all
# ##################################################################
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def interpolate(coors, segment_length):
    """
    Resample one well's (id, md, x, y, z) rows at evenly spaced measured
    depths of roughly ``segment_length``.
    """
    # roughly 10x in numba
    first_md = coors[:,1][0]
    last_md = coors[:,1][-1]
    n_segments = int((last_md - first_md) / segment_length)
    new_mds = np.linspace(first_md, last_md, n_segments + 1)
    out = np.empty((new_mds.shape[0], 5), dtype=np.float32)
    out[:, 0] = coors[0,0]   # carry the well id through
    out[:, 1] = new_mds      # the resampled measured depths
    for row, md in enumerate(new_mds):
        below = coors[:,1] <= md
        data_below = coors[below,:][-1]
        pt_below = data_below[2:]
        if below.all():
            # md is at/past the last sample: copy it and stop
            out[row, 2:] = pt_below
            break
        data_above = coors[~below,:][0]
        pt_above = data_above[2:]
        md_below = data_below[1]
        md_above = data_above[1]
        # linear blend between the bracketing samples
        t = (md - md_below) / (md_above - md_below)
        out[row, 2:] = t * (pt_above - pt_below) + pt_below
    return out
# %timeit interpolate.py_func(coors)
# %timeit interpolate(coors)
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def malloc_interpolate_coords(coordinates, segment_length):
    """Allocate the output array sized for all wells' interpolated rows."""
    total = 0
    for well_id in np.unique(coordinates[:,0]):
        rows = coordinates[coordinates[:, 0] == well_id]
        first_md = rows[:,1][0]
        last_md = rows[:,1][-1]
        # one extra row because linspace includes both endpoints
        total += 1 + int((last_md - first_md) / segment_length)
    return np.empty((total, 5), dtype=np.float32)
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def interpolate_coords(coordinates, segment_length):
    """Interpolate every well's coordinates to ``segment_length`` spacing."""
    out = malloc_interpolate_coords(coordinates, segment_length)
    cursor = 0
    for well_id in np.unique(coordinates[:,0]):
        well_rows = coordinates[coordinates[:, 0] == well_id]
        block = interpolate(well_rows, segment_length)
        out[cursor:cursor + block.shape[0], :] = block
        cursor += block.shape[0]
    return out
# interpolate_coords(coordinates_np, segment_length=segment_length)
# %timeit interpolate_coords(coordinates_np, segment_length=segment_length)
# %timeit interpolate_coords(coordinates_np, segment_length=segment_length)
# #############################################################
def side_np_to_str(srs: pd.Series):
    """Map the numeric side encoding (1.0 == right, 2.0 == left) to labels."""
    values = srs.to_numpy()
    return np.where(values < 1.5, "RIGHT", "LEFT")
# https://stackoverflow.com/a/7869457
# angle differences - in degrees
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def mod_flat(a, n):
    """Floored modulo that yields a non-negative result for negative inputs."""
    remainder = a % n
    return (remainder + n) % n
@jit(nopython=True, fastmath=True, cache=True, parallel=False)
def angle_diff(a, b):
    """Compute the absolute degree angle difference between two directions."""
    delta = a - b
    # shift by 180, wrap into [0, 360), shift back, take magnitude
    wrapped = mod_flat(delta + 180, 360)
    return np.abs(wrapped - 180)
# #############################################################
def blank_stats():
    """A blank instance of the data set returned by the pairs operation."""
    # four distance metrics x seven summary statistics, in fixed order
    metrics = ["distance_2d", "distance_3d", "distance_vertical", "theta"]
    suffixes = ["mean", "std", "min", "25percentile", "50percentile", "75percentile", "max"]
    columns = ["NNS", "WBT", "distance_segment", "azimuth_delta", "sidenns_heel", "sidenns_toe"]
    for metric in metrics:
        for suffix in suffixes:
            columns.append(metric + "_" + suffix)
    object_columns = {"NNS", "WBT", "sidenns_heel", "sidenns_toe"}
    return pd.DataFrame({
        name: pd.Series([], dtype="object" if name in object_columns else "float64")
        for name in columns
    })
def blank_vectors():
    """A blank instance of the raw output of the pairs operation."""
    float_columns = ["NNS_MD", "Distance", "WBT_X", "WBT_Y", "WBT_Z", "NNS_X", "NNS_Y", "NNS_Z"]
    # dict insertion order fixes the column order: WBT, NNS, then the floats
    data = {
        "WBT": pd.Series([], dtype="object"),
        "NNS": pd.Series([], dtype="object"),
    }
    for name in float_columns:
        data[name] = pd.Series([], dtype="float64")
    return pd.DataFrame(data)
# #############################################################
def process_vectors(vectors_np: np.array, api_mapping: pd.DataFrame):
    """
    Convert the raw float vector array into a DataFrame, restoring the
    string API labels via ``api_mapping`` (columns: API, API_ID).
    """
    if vectors_np.shape[0] < 1:
        return blank_vectors()
    columns = ['WBT', 'NNS', 'NNS_MD', 'Distance', 'WBT_X', 'WBT_Y', 'WBT_Z', 'NNS_X', 'NNS_Y', 'NNS_Z']
    frame = pd.DataFrame(vectors_np.astype(np.float64), columns=columns)

    def _to_api(col):
        # numeric id -> API string via a left merge on API_ID
        ids = frame[[col]].astype(np.int64)
        return ids.merge(api_mapping, how='left', left_on=col, right_on='API_ID')['API']

    return frame.assign(WBT=_to_api('WBT'), NNS=_to_api('NNS'))
def process_stats(stats_np: np.array, api_mapping: pd.DataFrame):
    """
    Convert the raw float stats array into a DataFrame, restoring the
    string API labels and the RIGHT/LEFT side labels.
    """
    if stats_np.shape[0] < 1:
        return blank_stats()
    # rebuild the 34-column layout: 6 fixed columns, then 4 metrics x 7 stats
    metrics = ["distance_2d", "distance_3d", "distance_vertical", "theta"]
    suffixes = ["mean", "std", "min", "25percentile", "50percentile", "75percentile", "max"]
    columns = ['NNS', 'WBT', 'distance_segment', 'azimuth_delta', 'sidenns_heel', 'sidenns_toe']
    for metric in metrics:
        for suffix in suffixes:
            columns.append(metric + "_" + suffix)
    frame = pd.DataFrame(stats_np.astype(np.float64), columns=columns)

    def _to_api(col):
        # numeric id -> API string via a left merge on API_ID
        ids = frame[[col]].astype(np.int64)
        return ids.merge(api_mapping, how='left', left_on=col, right_on='API_ID')['API']

    return frame.assign(
        WBT=_to_api('WBT'),
        NNS=_to_api('NNS'),
        # side encoding: 1.0 == right, 2.0 == left
        sidenns_heel=np.where(frame['sidenns_heel'].values < 1.5, "RIGHT", "LEFT"),
        sidenns_toe=np.where(frame['sidenns_toe'].values < 1.5, "RIGHT", "LEFT"),
    )
def nnpairs_numpy(
    coordinates:pd.DataFrame,
    straight_perforation_interval:pd.DataFrame,
    apis:pd.DataFrame,
    radius_range:float=914.0, # meters
    segment_length:float=15.0, # meters
):
    """
    Pandas-facing wrapper around the numba ``nnpairs`` kernel.

    coordinates: per-well trajectory rows; must carry MD, X, Y, Z plus the
        merge key shared with the SPI frame (presumably API - TODO confirm)
    straight_perforation_interval: one row per well with API, PerfFrom/PerfTo,
        wellhead/east/north coordinates and Vector_Cos_Angle_Lat
    apis: the target (WBT) wells to pair; must be mergeable with the API mapping
    radius_range: neighbour search radius in meters
    segment_length: trajectory interpolation spacing in meters

    Returns (stats_results, vector_results) as DataFrames.
    """
    # pandas building: the SPI frame's positional index becomes the numeric
    # API_ID used inside the float-only numba kernel
    spi_mapping = straight_perforation_interval.reset_index().rename(columns={"index": "API_ID"})
    api_mapping = spi_mapping[["API", "API_ID"]]
    wbts_api_ids = apis.merge(api_mapping)["API_ID"].values.astype(np.float64)
    # coordinates.merge(api_mapping).head(2)
    # keep only trajectory rows inside each well's perforated interval
    coordinates_np = (
        coordinates.merge(spi_mapping[["API_ID", "API", "PerfFrom", "PerfTo"]])
        .pipe(lambda idf: idf[(idf["MD"] >= idf["PerfFrom"]) & (idf["MD"] <= idf["PerfTo"])])[
            ["API_ID", "MD", "X", "Y", "Z"]
        ]
        .values.astype(np.float64)
    )
    # per-well reference frame data consumed positionally by nnpairs
    spi_values = spi_mapping[
        ["API_ID", "X", "Y", "Z", "X_East", "Y_East", "Z_East", "X_North", "Y_North", "Z_North", 'Vector_Cos_Angle_Lat']
    ].values
    vectors_np, stats_np = nnpairs(
        wbts_api_ids,
        coordinates_np,
        spi_values,
        threshold=radius_range,
        segment_length=segment_length,
    )
    # map the numeric ids back to API strings for the caller
    vector_results = process_vectors(vectors_np, api_mapping)
    stats_results = process_stats(stats_np, api_mapping)
    return stats_results, vector_results
# --- Script entry: load the local parquet fixtures and run the pairing ---
# NOTE(review): these statements execute on import of this module; if that is
# unintended, wrap them in an `if __name__ == "__main__":` guard.
apis = pd.read_parquet("apis.pq")
coordinates = pd.read_parquet("coordinates.pq")
spi = pd.read_parquet("spi.pq")
res = nnpairs_numpy(coordinates=coordinates, straight_perforation_interval=spi, apis=apis)
# (removed a stray no-op `0` expression - notebook-cell residue)
#
# f16s = np.random.RandomState(451).randn(1_000,5_000).astype(np.float16)
# f32s = np.random.RandomState(451).randn(1_000,5_000).astype(np.float32)
# f64s = np.random.RandomState(451).randn(1_000,5_000).astype(np.float64)
#
# %timeit np.std(np.mean(f16s, axis=1))
# %timeit np.std(np.mean(f32s, axis=1))
# %timeit np.std(np.mean(f64s, axis=1))
#
# f16s = np.random.RandomState(451).randn(5_000,5_000).astype(np.float16)
# f32s = np.random.RandomState(451).randn(5_000,5_000).astype(np.float32)
# f64s = np.random.RandomState(451).randn(5_000,5_000).astype(np.float64)
#
# %timeit np.std(np.mean(f16s, axis=1))
# %timeit np.std(np.mean(f32s, axis=1))
# %timeit np.std(np.mean(f64s, axis=1))
#
#
# [rdp(coordinates_np[coordinates_np[:, 0] == wbts_api_id, :], 15) for wbts_api_id in wbts_api_ids]
# [coordinates_np[coordinates_np[:, 0] == wbts_api_id, :][:, 2:].shape for wbts_api_id in wbts_api_ids]
|