blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a1cb3f75b1eb3d32a6beed159b4ce8d07f359856 | Python | libxx1/CCeventcapture | /allseeingpievent.py | UTF-8 | 2,853 | 2.703125 | 3 | [] | no_license | from gpiozero import Button
from picamera import PiCamera
from time import gmtime, strftime
from overlay_functions import *
from guizero import App, PushButton, Text, Picture, TextBox
from twython import Twython
from auth import(
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
def next_overlay():
global overlay
overlay = next(all_overlays)
preview_overlay(camera, overlay)
def take_picture():
global output
output = strftime("/home/pi/Documents/allseeingpi/image-%d-%m %H:%M.png", gmtime())
camera.capture(output)
camera.stop_preview()
remove_overlays(camera)
output_overlay(output, overlay)
size = 400, 400
gif_img = Image.open(output)
gif_img.thumbnail(size, Image.ANTIALIAS)
gif_img.save(latest_photo, 'gif')
your_pic.set(latest_photo)
def new_picture():
camera.start_preview(alpha=128)
preview_overlay(camera, overlay)
def send_tweet():
twitter = Twython(
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
name = name_box.get()
twitterhandle = twitterhandle_box.get()
emailaddress = emailaddress_box.get()
postcode = postcode_box.get()
#Creating one variable storing all the information input
concatenated = name + ", " + twitterhandle + ", " + emailaddress + ", " + postcode
#apending latest input to the data file ready for analysis at later point
fh = open("data.csv","a")
fh.write(concatenated)
fh.write("\n")
fh.close()
message = twitterhandle + " Hi there! Here's some more information about Code Club www.codeclub.org.uk"
with open(output, 'rb') as photo:
twitter.update_status_with_media(status=message, media=photo)
name_box.clear()
twitterhandle_box.clear()
emailaddress_box.clear()
postcode_box.clear()
next_overlay_btn = Button(23)
next_overlay_btn.when_pressed = next_overlay
take_pic_btn = Button(25)
take_pic_btn.when_pressed = take_picture
camera = PiCamera()
camera.resolution = (800, 480)
camera.hflip = True
output = ""
latest_photo = '/home/pi/Documents/allseeingpi/latest.gif'
app = App("The All-Seeing Pi", 800, 600)
##app.attributes("-fullscreen", True)
message = Text(app, "Nice to meet you!")
your_pic = Picture(app, latest_photo)
new_pic = PushButton(app, new_picture, text="New picture")
name_label = Text(app, "What's your Name? ")
name_box = TextBox(app, "", width=30)
twitterhandle_label = Text(app, "What's your Twitter handle? ")
twitterhandle_box = TextBox(app, "", width=30)
emailaddress_label = Text(app, "What's your Email address? ")
emailaddress_box = TextBox(app, "", width=30)
postcode_label = Text(app, "What's your Postcode? ")
postcode_box = TextBox(app, "", width=30)
tweet_pic = PushButton(app, send_tweet, text="Tweet picture")
app.display()
| true |
954af84bd0e42c5acdcb865168ce5727a298fe76 | Python | finben/djattendance | /ap/services/models/__init__.py | UTF-8 | 1,540 | 2.5625 | 3 | [] | no_license | from seasonal_service_schedule import *
from service import *
from worker import *
from workergroup import *
from exception import *
from assignment import *
from week_schedule import *
from service_hours import *
""" services models.py
The services model defines both weekly and permanent (designated) services used in the training.
Data Models:
- Category: This is a broad category that contains specific services. For
example,Cleanup is a category that contains services such as Tuesday
Breakfast Cleanup or Saturday Lunch Cleanup. Guard contains Guards A, B, C,
and D.
- Service: This refers to a specific service that repeats on a weekly basis.
I.e. Tuesday Breakfast Prep is a service. It repeats every week. A specific
instance of that service is defined in the service scheduler module as a
service Instance.
- SeasonalServiceSchedule: This is a period in which services are active and generally
changes with the schedule of the training. Most of the time, the regular
FTTA schedule will be in effect, but there are exceptions such as Service
Week and the semiannual training.
"""
"""
Worker Specs
- gender
- qualifications
- WORKER_ROLE_TYPES
- term_types
- worker_group
- count
- workload
worker_group join
class Assignment(models.Model):
ROLES = WORKER_ROLE_TYPES
# schedule = models.ForeignKey('Schedule')
instance = models.ForeignKey(Instance)
worker = models.ForeignKey(Worker)
role = models.CharField(max_length=3, choices=ROLES, default='wor')
"""
| true |
6d9d79fea634071c4aa4cde10e74c19ef419cb56 | Python | Nakxxgit/PyQt5_Tutorial | /widgets/splitter.py | UTF-8 | 1,479 | 2.8125 | 3 | [] | no_license | import sys
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QFrame, QSplitter, QStyleFactory, QApplication
from PyQt5.QtCore import Qt
class Example(QWidget):
    """Demo window showing four panes arranged with nested QSplitters."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build a 2x2 resizable pane layout and show the window."""
        hbox = QHBoxLayout(self)
        # Use StyledPanel so the boundary of each pane is visible.
        topleft = QFrame(self)
        topleft.setFrameShape(QFrame.StyledPanel)
        topright = QFrame(self)
        topright.setFrameShape(QFrame.StyledPanel)
        bottomright = QFrame(self)
        bottomright.setFrameShape(QFrame.StyledPanel)
        bottomleft = QFrame(self)
        bottomleft.setFrameShape(QFrame.StyledPanel)
        # Horizontal splitter holding the two top frames.
        splitter1 = QSplitter(Qt.Horizontal)
        splitter1.addWidget(topleft)
        splitter1.addWidget(topright)
        # Horizontal splitter holding the two bottom frames.
        splitter2 = QSplitter(Qt.Horizontal)
        splitter2.addWidget(bottomleft)
        splitter2.addWidget(bottomright)
        # Vertical splitter stacking the two horizontal splitters.
        splitter3 = QSplitter(Qt.Vertical)
        splitter3.addWidget(splitter1)
        splitter3.addWidget(splitter2)
        hbox.addWidget(splitter3)
        self.setLayout(hbox)
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('QSplitter')
        self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) | true |
8ed1611e1c9b82bd1236a1e3f6a49f8c24081fdc | Python | FedML-AI/FedML | /python/fedml/model/linear/lr_cifar10.py | UTF-8 | 544 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | import torch
class LogisticRegression_Cifar10(torch.nn.Module):
    """Logistic-regression model for CIFAR-10-style image batches.

    Each sample is flattened to a feature vector and passed through a
    single linear layer followed by an element-wise sigmoid.

    Args:
        input_dim: number of features per flattened sample
            (e.g. 3 * 32 * 32 = 3072 for CIFAR-10).
        output_dim: number of output classes.
    """

    def __init__(self, input_dim, output_dim):
        super(LogisticRegression_Cifar10, self).__init__()
        self.linear = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # Flatten every sample in the batch to a 1-D feature vector.
        x = x.view(x.size(0), -1)
        # NOTE: sigmoid yields independent per-class scores in (0, 1);
        # original behaviour kept (softmax would be the usual choice for
        # single-label classification).
        outputs = torch.sigmoid(self.linear(x))
        return outputs
| true |
b57b2daf808085520565605593f53c0f5c0979ac | Python | Helumpago/SimplePhysics | /model.py | UTF-8 | 1,958 | 3.171875 | 3 | [] | no_license |
import threading
from .base_obj import BaseObj
from .drawable import Drawable
from .event import Event
from .eventless_object import ParentError
"""
" Controls the flow of the simulation. In other words,
" this object defines the event-model-render loop.
" This object is the root of the scene graph for all
" simulations
"""
class Model(BaseObj, Drawable, threading.Thread):
    """Scene-graph root: runs the event -> model -> render loop on its own thread."""
    """
    " CONSTRUCTOR
    " @param string Name: Name for this object.
    " @param int fps: Maximum number of frames per second allowable.
    """
    def __init__(self, Name = "Model", fps = 60):
        BaseObj.__init__(self, parent = None, Name = Name)
        Drawable.__init__(self)
        threading.Thread.__init__(self)
        self.fps = fps
        Event(parent = self.events, Name = "onQuit") # Fired when the Model thread is ready to shut down
        # onQuit auto-closes the model; onStep drives step() every frame.
        self.events.getFirst("onQuit").regcb(self.close).Name = "AutoClose"
        self.events.getFirst("onStep").regcb(self.step).Name = "MainLoop"
    """
    " Prevent this object from being parented to anything
    """
    def setParent(self, parent = None):
        if parent == None:
            # Bypass any __setattr__ hook so the root's parent stays None.
            object.__setattr__(self, "parent", None)
        else:
            raise ParentError("Can't parent a Model to any object")
    """
    " ABSTRACT
    " Limits the number of frames per second to the given number
    " and gets the number of miliseconds since the last frame.
    " @param number t: Maximum FPS
    " @return: Number of miliseconds since the last frame
    """
    def tick(self, t):
        raise NotImplementedError("Model's tick() method left unimplemented")
    """
    " Calculate this object's next frame.
    """
    def step(self, event):
        for ev in self.events.getChildren():
            ev.run()
    """
    " Main execution loop for the simulation. Separates itself
    " into a separate process
    """
    def run(self):
        # Infinite draw/collect/step loop; dt comes from the subclass tick().
        while True:
            self.dt = self.tick(self.fps)
            ## Step the simulation ##
            self.__draw__()
            self.__collectEvents__()
            self.__step__(self.dt)
    """
    " Closes the simulation thread
    """
    def close(self, event):
        # NOTE(review): exit() raises SystemExit inside this thread;
        # presumably intended to stop only the simulation thread -- confirm.
        exit()
498f7712287058cda39912a91dae234e2c6b219f | Python | vikrembhagi/gardening-iot | /DHT/TempHumid/startDAC.py | UTF-8 | 3,120 | 2.5625 | 3 | [] | no_license | import smbus
import time
import dht11
import RPi.GPIO as GPIO
import paho.mqtt.publish as publish
import psutil
# ThingSpeak Channel Settings
# The ThingSpeak Channel ID
# Replace this with your Channel ID
channelID = "305122"
# The Write API Key for the channel
# Replace this with your Write API key
apiKey = "1NUYN01J6DD4W5KJ"
# MQTT Connection Methods
# Set useUnsecuredTCP to True to use the default MQTT port of 1883
# This type of unsecured MQTT connection uses the least amount of system resources.
useUnsecuredTCP = False
# Set useUnsecuredWebSockets to True to use MQTT over an unsecured websocket on port 80.
# Try this if port 1883 is blocked on your network.
useUnsecuredWebsockets = False
# Set useSSLWebsockets to True to use MQTT over a secure websocket on port 443.
# This type of connection will use slightly more system resources, but the connection
# will be secured by SSL.
useSSLWebsockets = True
# Standard mqtt host
mqttHost = "mqtt.thingspeak.com"
# Set up the connection parameters based on the connection type
if useUnsecuredTCP:
tTransport = "tcp"
tPort = 1883
tTLS = None
if useUnsecuredWebsockets:
tTransport = "websockets"
tPort = 80
tTLS = None
if useSSLWebsockets:
import ssl
tTransport = "websockets"
tTLS = {'ca_certs':"/etc/ssl/certs/ca-certificates.crt",'tls_version':ssl.PROTOCOL_TLSv1}
tPort = 443
# Create the topic string
topic = "channels/" + channelID + "/publish/" + apiKey
#define GPIO 14 as DHT11 data pin
Temp_sensor=4
#ENABLE = 0b00000100 # Enable bit
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
#Open I2C interface
#bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1) # Rev 2 Pi uses 1
def main():
    """Poll the DHT11 sensor forever, publishing readings to ThingSpeak over MQTT.

    Python 2 script (print statements).  The loop exits only when a
    KeyboardInterrupt is raised during publishing.
    NOTE(review): indentation reconstructed from a flattened dump -- confirm
    block nesting against the original file.
    """
    # Main program block
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
    # Initialise display
    instance = dht11.DHT11(pin = Temp_sensor)
    while True:
        #get DHT11 sensor value
        result = instance.read()
        # Send some test
        if result.is_valid():
            # build the payload string (ThingSpeak field1=temperature, field2=humidity)
            tPayload = "field1=" + str(result.temperature) + "&field2=" + str(result.humidity)
            # attempt to publish this data to the topic
            try:
                publish.single(topic, payload=tPayload, hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)
            except (KeyboardInterrupt):
                break
            except:
                print ("There was an error while publishing the data.")
            print "temp:"+str(result.temperature)+" C"
            print "humid:"+str(result.humidity)+"%"
# NOTE(review): this block duplicates the connection setup already executed
# earlier at import time; it recomputes tTransport/tPort/tTLS and the topic
# string with identical values and appears to be redundant dead code --
# confirm before removing.
# Set up the connection parameters based on the connection type
if useUnsecuredTCP:
    tTransport = "tcp"
    tPort = 1883
    tTLS = None
if useUnsecuredWebsockets:
    tTransport = "websockets"
    tPort = 80
    tTLS = None
if useSSLWebsockets:
    import ssl
    tTransport = "websockets"
    tTLS = {'ca_certs':"/etc/ssl/certs/ca-certificates.crt",'tls_version':ssl.PROTOCOL_TLSv1}
    tPort = 443
# Create the topic string
topic = "channels/" + channelID + "/publish/" + apiKey
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
raise
| true |
930e8c2d848a72c8ddbcdebe1b9af8899720b2b9 | Python | Lucas-Guimaraes/Reddit-Daily-Programmer | /Easy Problems/41-50/49easy.py | UTF-8 | 3,060 | 3.765625 | 4 | [] | no_license | # https://www.reddit.com/r/dailyprogrammer/comments/tb2h0/572012_challenge_49_easy/
import random
def monty_hall():
    """Play one interactive Monty Hall game on the console (Python 2: raw_input).

    Randomly hides a car behind one of three doors, lets the player pick,
    reveals a goat door, offers the stay/switch choice, then announces the
    result.
    """
    winner = random.randint(1, 3)
    choices = [1, 2, 3]
    # result_lst[i-1] describes door i; goat_doors lists the two goat doors.
    result_lst = ['car' if i == winner else 'goat' for i in range(1, 4)]
    goat_doors = [i for i in range(1, 4) if result_lst[i-1] == 'goat']
    invalid_answer = True
    while invalid_answer:
        # NOTE(review): int() raises ValueError on non-numeric input.
        first_answer = int(raw_input("""Pick a door! \n\n1\n2\n3\n\nYour answer here: """))
        if first_answer not in choices:
            print("{} is invalid. Please pick a valid answer!".format(first_answer))
            continue
        else:
            print("You've chosen door number {}!".format(first_answer))
            invalid_answer = False
    # Pick which goat door to reveal; never reveal the player's own door.
    random_goat = random.randint(0, 1)
    if random_goat == 0:
        if goat_doors[0] == first_answer:
            reveal_goat_door = goat_doors[1]
        else:
            reveal_goat_door = goat_doors[0]
    else:
        if goat_doors[1] == first_answer:
            reveal_goat_door = goat_doors[0]
        else:
            reveal_goat_door = goat_doors[1]
    # The single door that is neither revealed nor currently chosen.
    check_door = [reveal_goat_door, first_answer]
    remaining_door = set(choices) - set(check_door)
    remaining_door = list(remaining_door)
    remaining_door = remaining_door[0]
    print("You have revealed that there is a goat behind door number {}".format(reveal_goat_door))
    # Python 2 print statement: the expression printed is the formatted string.
    print("Would you like to Switch to Door {0} or stick to Door {1}?").format(remaining_door, first_answer)
    print("Type 'stay' to Stay, and 'switch' to Switch")
    invalid_answer_2 = True
    correct_answer = result_lst.index("car")+1
    while invalid_answer_2:
        second_answer = raw_input("Will you stay or will you go?: ")
        if second_answer == 'stay':
            invalid_answer_2 = False
        elif second_answer == 'switch':
            first_answer = remaining_door
            invalid_answer_2 = False
        else:
            # NOTE(review): bare string is a no-op; presumably meant to be printed.
            "That's not a valid input!"
    if first_answer == correct_answer:
        print("\nYou've won a brand new car!")
    else:
        print("\nSorry. The correct answer was Door {}").format(correct_answer)
# dothisalllater
def monty_hall_sim(n):
    """Simulate *n* Monty Hall games and summarise stay-vs-switch wins.

    Staying wins exactly when the initial pick is the winning door;
    otherwise switching (after a goat is revealed) would have won.
    Returns a human-readable summary string.
    """
    doors = [1, 2, 3]
    stay_wins = 0
    switch_wins = 0
    for _ in range(n):
        winning_door = random.choice(doors)
        first_pick = random.choice(doors)
        if first_pick == winning_door:
            stay_wins += 1
        else:
            switch_wins += 1
    total = float(n)
    stay_pct = stay_wins / total * 100
    switch_pct = switch_wins / total * 100
    return "After {0} runs, the amount of times it produced a win for staying is {1}% with {2} wins and a win for switching is {3}% with {4} wins".format(n, stay_pct, stay_wins, switch_pct, switch_wins)
monty_hall()
print("")
sim = int(raw_input("How many times would you like to try running the simulation? Try a real big number, like, something with at least 6 digits.\n"))
print(monty_hall_sim(sim))
raw_input("\nPress enter to exit")
| true |
581b8813826d95430361793e944c0f1e9e681b7f | Python | gayoung0838/bioinfo-lecture-2021 | /bioinfo_python/015-1.py | UTF-8 | 254 | 3.421875 | 3 | [] | no_license | #!/usr/bin/python3
# N = int(input())
# print(N * 2)
import sys
def make_double(num):
    """Return *num* doubled."""
    doubled = num + num
    return doubled
# Command-line entry: expects exactly one numeric argument, doubles it.
if len(sys.argv) != 2:
    print(f"#usage: python {sys.argv[0]} [number]")
    sys.exit()
# NOTE(review): int() raises ValueError on non-numeric input.
num = int(sys.argv[1])
result = make_double(num)
print(result)
4d23f479c0d52b5aac75b40db94716144ea4dbc8 | Python | xingya1/tensorflow | /2/init.py | UTF-8 | 693 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 14:51:13 2018
@author: yao
"""
from sklearn import preprocessing
from sklearn import datasets
from numpy import *
def normalization(data, target):
    """Min-max scale *data* to [0, 1] and one-hot encode *target*.

    Args:
        data: array-like of shape (n_samples, n_features).
        target: integer class labels of shape (n_samples,); label k maps to a
            one-hot row with a 1 in column k.

    Returns:
        (scaled_data, one_hot_labels)
    """
    min_max_scaler = preprocessing.MinMaxScaler()
    data = min_max_scaler.fit_transform(data)
    # Previously hard-coded to 150 samples / 3 classes (the iris data set);
    # derive both from the labels so any data set works.
    n_samples = len(target)
    n_classes = int(max(target)) + 1
    label = zeros([n_samples, n_classes])
    for i in range(n_samples):
        label[i][target[i]] = 1
    return data, label
def loadData():
    """Load the iris data set, min-max scale features and one-hot encode labels.

    Returns:
        (data, label): scaled feature matrix and one-hot label matrix, as
        produced by normalization().
    """
    iris = datasets.load_iris()
    #n_samples,n_features=iris.data.shape
    #print("Number of sample:",n_samples)
    #print("Number of feature",n_features)
    data = iris.data
    label = iris.target
    data,label = normalization(data,label)
    return data,label
| true |
38156292902f372260d965946fee6d5ec35ab0d3 | Python | Aileenshanhong/NLTK | /ex3.py | UTF-8 | 3,990 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 24 21:42:52 2017
@author: aileenlin
"""
import nltk, re, pprint
from nltk import word_tokenize
from urllib import request
url = "http://www.gutenberg.org/files/2554/2554.txt"
response = request.urlopen(url)
raw = response.read().decode('utf8')
type(raw)
len(raw)
raw[:75]
tokens = word_tokenize(raw)
type(tokens)
tokens[:10]
text = nltk.Text(tokens)
type(text)
text[1024:1062]
tokens[1024:1062]
text.collocations()
text.concordance('young')
"""Processing HTML file"""
url = "http://news.bbc.co.uk/2/hi/health/2284783.stm"
html = request.urlopen(url).read().decode('utf8')
html[:60]
from bs4 import BeautifulSoup
raw = BeautifulSoup(html).get_text()
tokens = word_tokenize(raw)
tokens
text = nltk.Text(tokens)
text
"""Processing RSS Feeds"""
import feedparser
llog = feedparser.parse("http://languagelog.ldc.upenn.edu/nll/?feed=atom")
llog['feed']['title']
s = input("Enter some text:")
print("You typed", len(word_tokenize(s)), "words.")
couplet = '''Squirrel has a new job.
I am happy for him.'''
print(couplet)
a = [1, 2, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1]
b = [' ' * 2 * (7 - i) + 'very' * i for i in a]
for line in b:
print(line)
path = nltk.data.find('corpora/unicode_samples/polish-lat2.txt')
f = open(path, encoding = 'latin2')
a = ""
for line in f:
line = line.strip()
a = a+line
print(line)
import re
wordlist = [w for w in nltk.corpus.words.words('en') if w.islower()]
[w for w in wordlist if re.search('..j..t..', w)]
re.search('^m*i*e*$','me')
a = r'\band\b'
print(a)
"""Regular expression"""
raw = """'When I'M a Duchess,' she said to herself, (not in a very hopeful tone
though), 'I won't have any pepper in my kitchen AT ALL. Soup does very
well without--Maybe it's always pepper that makes people hot-tempered,'"""
re.split(r' ',raw)
re.split(r'[ \n\t]',raw)
re.split(r'\s+', raw)
re.split(r'\W+',raw)
[int(n) for n in re.findall('[0-9]{2,}', '2009-12-31')]
regexp = r'^[AEIOUaeiou]+|[AEIOUaeiou]+$|[^AEIOUaeiou]'
english_udhr = nltk.corpus.udhr.words('English-Latin1')
re.findall(regexp, english_udhr[0])
rotokas_words = nltk.corpus.toolbox.words('rotokas.dic')
cv_word_pairs = [(cv, w) for w in rotokas_words
for cv in re.findall(r'[ptksvr][aeiou]', w)]
cv_index = nltk.Index(cv_word_pairs)
text = nltk.corpus.gutenberg.raw('chesterton-thursday.txt')
sents = nltk.sent_tokenize(text)
pprint.pprint(sents[79:89])
text = "doyouseethekittyseethedoggydoyoulikethekittylikethedoggy"
seg1 = "0000000000000001000000000010000000000000000100000000000"
seg2 = "0100100100100001001001000010100100010010000100010010000"
def segment(text, segs):
    """Split *text* at every position where *segs* holds a '1'.

    ``segs[i] == '1'`` marks a word boundary immediately after ``text[i]``;
    the trailing remainder is always emitted as the final word.
    """
    pieces = []
    start = 0
    for pos, flag in enumerate(segs):
        if flag == '1':
            pieces.append(text[start:pos + 1])
            start = pos + 1
    pieces.append(text[start:])
    return pieces
def evaluate(text, segs):
    """Score a segmentation: word count plus the size of its lexicon.

    Lower is better.  Each distinct word costs ``len(word) + 1`` (the +1
    models a separator character in the stored lexicon).
    """
    words = segment(text, segs)
    unique_cost = 0
    for w in set(words):
        unique_cost += len(w) + 1
    return len(words) + unique_cost
evaluate(text, seg1)
from random import randint
def flip(segs, pos):
    """Return a copy of the digit string *segs* with position *pos* toggled (0<->1)."""
    chars = list(segs)
    chars[pos] = str(1 - int(chars[pos]))
    return ''.join(chars)
def flip_n(segs, n):
    """Return *segs* with *n* randomly chosen positions toggled (repeats allowed)."""
    for i in range(n):
        segs = flip(segs, randint(0, len(segs)-1))
    return segs
def anneal(text, segs, iterations, cooling_rate):
    """Simulated-annealing search for a low-cost segmentation of *text*.

    Starting from boundary string *segs*, each round perturbs the current
    segmentation with roughly `temperature` random bit flips, keeping any
    guess whose evaluate() score improves.  The temperature shrinks by
    *cooling_rate* per round; progress is printed each round.  Returns the
    final boundary string.
    """
    temperature = float(len(segs))
    while temperature > 0.5:
        best_segs, best = segs, evaluate(text, segs)
        for i in range(iterations):
            # Fewer flips as the system cools.
            guess = flip_n(segs, round(temperature))
            score = evaluate(text, guess)
            if score < best:
                best, best_segs = score, guess
        score, segs = best, best_segs
        temperature = temperature / cooling_rate
        print(evaluate(text, segs), segment(text, segs))
    print()
    return segs
import os
os.getcwd()
os.chdir('/Users/aileenlin/Documents/NLTK/output') | true |
8ffeb3c81c11f91cf7af3c133b49f1d905bb6daa | Python | coderZsq/coderZsq.practice.data | /study-notes/py-collection/11_列表/09_列表推导式_练习.py | UTF-8 | 1,674 | 3.78125 | 4 | [
"MIT"
] | permissive | import random as r
# Method 2: draw `size` random integers in [0, edge) and tally how often
# each value appears, using a frequency list indexed by the value itself.
# Exclusive upper bound of the random numbers.
edge = 10
# How many random numbers to generate.
size = 20
# Generate the random sample.
nos = [r.randrange(edge) for _ in range(size)]
# Count the occurrences of every value.
all_times = [0 for _ in range(edge)]
for no in nos:
    all_times[no] += 1
# Print the sample and the per-value counts.
print(nos)
for no, times in enumerate(all_times):
    print(f'{no}出现了{times}次')
# all_times[no] is the occurrence count of the random number `no`,
# e.g. all_times[0] counts how many times 0 was drawn and
# all_times[9] how many times 9 was drawn.
# 方法1
# 统计每一个随机数的出现次数
# nos = [0 for _ in range(20)]
# for i in range(len(nos)):
# nos[i] = r.randrange(10)
# times0 = 0
# times1 = 0
# times2 = 0
# times3 = 0
# times4 = 0
# times5 = 0
# times6 = 0
# times7 = 0
# times8 = 0
# times9 = 0
# for no in nos:
# if no == 0:
# times0 += 1
# elif no == 1:
# times1 += 1
# elif no == 2:
# times2 += 1
# elif no == 3:
# times3 += 1
# elif no == 4:
# times4 += 1
# elif no == 5:
# times5 += 1
# elif no == 6:
# times6 += 1
# elif no == 7:
# times7 += 1
# elif no == 8:
# times8 += 1
# elif no == 9:
# times9 += 1
#
# print(nos)
# print(f'0出现了{times0}次')
# print(f'1出现了{times1}次')
# print(f'2出现了{times2}次')
# print(f'3出现了{times3}次')
# print(f'4出现了{times4}次')
# print(f'5出现了{times5}次')
# print(f'6出现了{times6}次')
# print(f'7出现了{times7}次')
# print(f'8出现了{times8}次')
# print(f'9出现了{times9}次')
| true |
4f54301558345fbe03df70aef130418be6cf770e | Python | emistern/EC601_Robotic_Guidedog | /path_planning/draw.py | UTF-8 | 1,601 | 2.546875 | 3 | [] | no_license | import cv2
import numpy as np
def draw_max_conn(grid, idx, lines=False):
    """Render an occupancy grid and a connected component as a BGR image.

    Args:
        grid: 2-D list; cell value 1 is an obstacle, 0 is free space.
        idx: indices into the sequence of free cells (row-major enumeration
            order) forming the connected component to highlight.
        lines: when True, draw the cell grid lines.

    Returns:
        numpy uint8 image, vertically flipped so row 0 appears at the bottom.
    """
    unit_size = 10
    height = len(grid)
    width = len(grid[0])
    t_h = unit_size * height
    t_w = unit_size * width
    # Light-gray canvas of t_h x t_w pixels, 3 channels.
    world = np.array([[[240] * 3] * (t_w)] * (t_h)).astype(np.uint8)
    if lines:
        for x in range(0, t_w, unit_size):
            pt1 = (x, 0)
            pt2 = (x, t_h)
            world = cv2.line(world, pt1, pt2, (255, 0, 0))
        for y in range(0, t_h, unit_size):
            pt1 = (0, y)
            pt2 = (t_w, y)
            world = cv2.line(world, pt1, pt2, (255, 0, 0))
    # Draw Obstacles (red rectangles, inset by ofs pixels inside each cell).
    ofs = int(unit_size / 5)
    for i, row in enumerate(grid):
        for j, e in enumerate(row):
            if (e == 1):
                # Draw an obstacle in world
                pt1 = (j * unit_size + ofs, i * unit_size + ofs)
                pt2 = ((j+1) * unit_size - ofs, (i+1) * unit_size - ofs)
                cv2.rectangle(world, pt1, pt2, (0, 0, 200), 3)
    # Enumerate free cells in row-major order: free-cell index -> (row, col).
    sqr_dict = {}
    count = 0
    for i in range(height):
        for j in range(width):
            if (grid[i][j] == 0):
                sqr_dict[count] = (i, j)
                count += 1
    # Draw connected compoment (blue rectangles).
    for i in range(len(idx)):
        _id = idx[i]
        _j = sqr_dict[_id][1]
        _i = sqr_dict[_id][0]
        pt1 = (_j * unit_size + ofs, _i * unit_size + ofs)
        pt2 = ((_j+1) * unit_size - ofs, (_i+1) * unit_size - ofs)
        cv2.rectangle(world, pt1, pt2, (200, 0, 0), 3)
    # Flip vertically so the origin is at the bottom-left when displayed.
    world = np.flip(np.array(world), 0)
    #cv2.imshow("path", world)
    return world
0534c72b00bbde586b46f8a69ad706458937586b | Python | piyuid/my-bangkit-repos | /google-it-automation-with-python/A3-crash-course-on-python/string1.py | UTF-8 | 313 | 3.265625 | 3 | [] | no_license | email = "leopuji17@gmail.com"
old_domain = "gmail.com"
new_domain = "rf.com"
def replace_domain(email, old_domain, new_domain):
    """Return *email* with *old_domain* swapped for *new_domain*.

    The domain is replaced only when the address actually ends with
    "@old_domain".  The previous substring match ("@old_domain" anywhere
    in the string) could corrupt addresses such as
    "user@gmail.com.example.org".  Unmatched addresses are returned
    unchanged.
    """
    suffix = "@" + old_domain
    if email.endswith(suffix):
        return email[:-len(suffix)] + "@" + new_domain
    return email
# Demo: print the rewritten address (previously printed the function object
# itself because the call arguments were missing).
print(replace_domain(email, old_domain, new_domain))
de45877b66c69ffe4cbeab8d807b7d87d00d2cc5 | Python | hujinxinb/test202007 | /1.py | UTF-8 | 2,062 | 3.421875 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
from concurrent.futures import ThreadPoolExecutor
import threading
import time
# 定义一个准备作为线程任务的函数
def action(max, a):
    """Worker task: print 0..max-1 prefixed with the thread name; return their sum.

    The second argument *a* is accepted for signature compatibility but unused.
    """
    total = 0
    for value in range(max):
        print(threading.current_thread().name + ' ' + str(value))
        total += value
    return total
for i in range(4):
pool = ThreadPoolExecutor(2)
future1 = pool.submit(action, 5,1)
future2 = pool.submit(action, 5,1)
def get_result(future):
print(future.result())
future1.add_done_callback(get_result)
future2.add_done_callback(get_result)
print('--------------')
pool.shutdown(wait=True)
# # 创建一个包含2条线程的线程池
# with ThreadPoolExecutor(max_workers=2) as pool:
# # 向线程池提交一个task, 50会作为action()函数的参数
# future1 = pool.submit(action, 5)
# # 向线程池再提交一个task, 100会作为action()函数的参数
# future2 = pool.submit(action, 5)
# def get_result(future):
# print(future.result())
# # 为future1添加线程完成的回调函数
# future1.add_done_callback(get_result)
# # 为future2添加线程完成的回调函数
# future2.add_done_callback(get_result)
# print('--------------')
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# class Parent: # 定义父类
# parentAttr = 100
# def __init__(self):
# print ("调用父类构造函数")
# def parentMethod(self):
# print ('调用父类方法')
# def setAttr(self, attr):
# Parent.parentAttr = attr
# def getAttr(self):
# print ("父类属性 :", Parent.parentAttr)
#
# class Child(Parent): # 定义子类
# def __init__(self):
# print ("调用子类构造方法")
# def childMethod(self):
# print ('调用子类方法')
#
# c = Child() # 实例化子类
# c.childMethod() # 调用子类的方法
# c.parentMethod() # 调用父类方法
# c.setAttr(200) # 再次调用父类的方法 - 设置属性值
# c.getAttr() # 再次调用父类的方法 - 获取属性值 | true |
8cd9f91ab738fa6a0e6fa5a92ee12807d37c563c | Python | su-de-sh/HandWrittenAlphabetRecognition | /pyimagesearch/nn/conv/shallownet.py | UTF-8 | 884 | 2.6875 | 3 | [] | no_license | from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.core import Activation
from keras.layers.core import Dense
from keras.layers.core import Flatten
from keras import backend as K
class ShallowNet:
    """Minimal Keras CNN: CONV(32, 3x3) -> ReLU -> Flatten -> FC -> softmax."""
    @staticmethod
    def build(width,height,depth,classes):
        """Assemble and return the (uncompiled) Sequential model.

        Args:
            width, height, depth: input image dimensions; depth is the
                number of channels.
            classes: number of output classes for the softmax layer.
        """
        #initialize the model along with the input shape to be
        # "channel last"
        model=Sequential()
        inputShape = (height, width, depth)
        # if we are using "channels first ", update the input shape
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
        model.add(Conv2D(32, (3,3), padding="same",input_shape = inputShape))
        model.add(Activation("relu"))
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
| true |
f92a4d99de563125ed4746036794d7d349b7d66c | Python | muhit04/xero_connection | /connection.py | UTF-8 | 4,067 | 2.984375 | 3 | [] | no_license | '''This script tries to connect to Xero without using any python wrapper
created by Muhit Anik <muhit@convertworx.com.au>
For xero reference use this guide: https://developer.xero.com/documentation/api
To access another endpoint for instance accessing Name which is found inside Contact, we must call it like Contact.Name
The following example demonstrates that. Keep in mind, we are doing percent encoding. So there exists difference between
'' (single quote) and "" (double quote).
The settings are inside config.cfg.
The sample response will be written in output.json
'''
import requests
from requests_oauthlib import OAuth1
import simplejson as json
from urllib2 import quote
import ConfigParser
def Xero(url, requestType="GET", body=""):
    """Perform an authenticated GET/POST/PUT against the Xero API (Python 2).

    OAuth1 credentials come from config.cfg (consumer key) and
    privatekey.pem (RSA key); the private-application flow uses the consumer
    key as both consumer key and access token.  GET responses are written to
    output.json.
    NOTE(review): indentation reconstructed from a flattened dump -- confirm
    whether the output.json write applies to all request types or GET only.
    """
    config = ConfigParser.ConfigParser()
    config.readfp(open('config.cfg'))
    consumer_key = config.get("xero_api", "consumer_key") ##consumer secret is NOT used for private companies.
    with open("privatekey.pem", "rb") as rsafile:
        rsakey = rsafile.read()
    ### consumer key is used both as consumer key and auth token.
    oauth = OAuth1(consumer_key, resource_owner_key=consumer_key, rsa_key=rsakey, signature_method='RSA-SHA1', signature_type='auth_header')
    if requestType == "POST":
        headers = {'Content-Type': 'application/json'}
        if body == "":
            print "Empty body. Nothing to post."
            exit()
        resp = requests.post(url=url, auth=oauth, headers=headers, data=body)
    if requestType == "PUT":
        headers = {'Content-Type': 'application/json'}
        if body == "":
            print "Empty body. Nothing to put."
            exit()
        resp = requests.put(url=url, auth=oauth, headers=headers, data=body)
    if requestType == "GET":
        ### this will allow the output in json
        headers = {'Accept': 'application/json'}
        resp = requests.get(url=url, auth=oauth, headers=headers)
        with open("output.json", "wb") as f:
            f.write(resp.text)
def filter_invoice_by_contact_name():
    """GET invoices whose Contact.Name equals "ABCD"; filter is percent-encoded."""
    ### API reference: https://developer.xero.com/documentation/api/invoices
    ### Example-1: Getting invoices where Contact Name is "ABCD"
    base_url = "https://api.xero.com/api.xro/2.0/Invoices?where="
    filter_url = 'Contact.Name=="ABCD"' ### value must be double quoted, single quoting will fail.
    url = base_url + quote(filter_url)
    Xero(url)
def filter_invoice_by_trackingCategory():
    """GET tracking categories named "Region" via the TrackingCategories endpoint."""
    ### API reference: https://developer.xero.com/documentation/api/tracking-categories#Options
    ### Example-2: Using trackingCategories end point and filtering by category name
    base_url = "https://api.xero.com/api.xro/2.0/TrackingCategories?where="
    filter_url = 'Name=="Region"'
    url = base_url + quote(filter_url)
    Xero(url)
def startswith_contains_endswith():
    """GET invoices whose contact name starts with "B" (StartsWith filter demo)."""
    ### API reference: https://developer.xero.com/documentation/api/requests-and-responses#get-modified
    ### Example-3 usage of Name.Contains
    base_url = "https://api.xero.com/api.xro/2.0/Invoices?where="
    filter_url = 'Contact.Name.StartsWith("B")'
    ### similarly we can use Contact.Name.Contains("B") and Contact.Name.EndsWith("B") etc
    url = base_url + quote(filter_url)
    Xero(url)
def journals_by_sourceType():
    """GET journals filtered by SourceType == "ACCREC" (accounts receivable)."""
    ### API reference: https://developer.xero.com/documentation/api/journals
    ### Example-4 getting a journal by using the sourceType attribute
    base_url = "https://api.xero.com/api.xro/2.0/Journals?where="
    filter_url = 'SourceType=="ACCREC"'
    url = base_url + quote(filter_url)
    Xero(url)
def new_invoice():
    """POST a minimal ACCREC invoice with a single line item to Xero.

    NOTE(review): json.dumps(..., encoding=...) is Python 2 only; the keyword
    was removed in Python 3.
    """
    ### API reference: https://developer.xero.com/documentation/api/invoices#post
    ### Example-5 demonstrates how to make post/put requests to Xero.
    url = "https://api.xero.com/api.xro/2.0/Invoices"
    body = {
        "Type" : "ACCREC",
        "Contact" : {"Name": "TESTABCD"},
        "LineItems" : [{"Description" : "TEST Item 1", "Quantity" : 5, "UnitAmount" : 30}]
    }
    body = json.dumps(body, encoding="utf-8")
    Xero(url, "POST", body)
| true |
d60b39dbff7166a0f2b842b4ab7d9c85cd41c8f7 | Python | antgouri/IPP2MCA | /For April1st Class/fnCount.py | UTF-8 | 82 | 2.859375 | 3 | [] | no_license | import sys
fn = sys.argv[0]
# len(fn) - 3 strips the ".py" extension; NOTE(review): argv[0] may include
# directory components, so this measures the path length, not just the name.
print("The length of the file name is ", len(fn)-3)
| true |
b265c0a97d9a11e36ef4a0c45a4814634022532b | Python | geekbitcreations/illinoistech | /ITMD_513/hw5/SortedList.py | UTF-8 | 1,341 | 4.625 | 5 | [] | no_license | '''
Deborah Barndt
2-20-19
SortedList.py
hw5: Question 1 Sorted List
This program will prompt the user to enter a list and display whether the list
is sorted or not sorted.
Written by Deborah Barndt.
'''
# Function that returns true if the list is already sorted in increasing order.
def isSorted(lst):
    """Return True if *lst* is in non-decreasing order (equal neighbours allowed)."""
    return all(lst[i] <= lst[i + 1] for i in range(len(lst) - 1))
# Function that will prompt the user to enter a list and then displays whether
# the list is sorted or is not sorted.
def main():
    """Repeatedly read a space-separated list of ints and report whether it is sorted.

    Loops until the user answers something other than 'y' to the
    "enter another list" prompt.
    NOTE(review): int() raises ValueError on non-numeric tokens (including
    the empty string produced by double spaces); input is not validated.
    """
    enterAgain = 'y'
    while (enterAgain == 'y'):
        lst = input('Please enter a list of numbers with spaces: ')
        lst = lst.split(' ')
        for i in range(len(lst)):
            lst[i] = int(lst[i])
        if isSorted(lst):
            print('The list is already sorted.')
            # Ask the user if they would like to enter another list.
            enterAgain = input('\nWould you like to enter another list? (y/n) ')
        else:
            print('The list is not sorted.')
            # Ask the user if they would like to enter another list.
            enterAgain = input('\nWould you like to enter another list? (y/n) ')
    if (enterAgain == 'n'):
        print('\nThank you. Please come again.')
# Call the main function to begin the test program.
main()
| true |
140749aed0d3401f192f236b1838143955230257 | Python | sheetalkaktikar/csvttlconvertor | /rdfsample.py | UTF-8 | 269 | 2.84375 | 3 | [] | no_license | import rdflib
g=rdflib.Graph()
result = g.parse("http://www.w3.org/People/Berners-Lee/card")
print("Graph has %s statements." %len(g))
for subj,pred,obj in g:
if (subj,pred,obj) not in g:
raise Exception("It better be!")
s=g.serialize(format='turtle')
| true |
681439c28db01d6c85a1452b66ef3a86bf96abfe | Python | LucasVanWijk/ABD | /Group/CBS_csv_to_groupinfo.py | UTF-8 | 1,536 | 3.359375 | 3 | [] | no_license | def get_info_piramide():
    def index_containing_substring(the_list, substring):
        '''
        Return the index of the first element of *the_list* that contains
        *substring*, or -1 when no element matches.
        https://stackoverflow.com/questions/2170900/get-first-list-index-containing-sub-string
        '''
        for i, s in enumerate(the_list):
            if substring in s:
                return i
        return -1
# https://opendata.cbs.nl/statline/#/CBS/nl/dataset/7461bev/table?ts=1614870199236
info = open("Bevolking.csv", "r+").readlines()
info = info[index_containing_substring(info,"0 tot 5"):-1]
info = [x.rstrip() for x in info]
age_dictionary = {
"Child" : 0,
"Student" : 0,
"Adult":0,
"Elderly":0,
}
total = 0
for row in info:
age,amount = row.replace('"','').split(";")
age = int(age.split(" ")[0])
total += int(amount)
if age < 15: # 0-15, boven 15 telt niet mee
age_dictionary["Child"] += int(amount)
elif age < 25: # 15-25, boven 25 telt niet mee
age_dictionary["Student"] += int(amount)
elif age < 65: # 25-65, boven 25 telt niet mee
age_dictionary["Adult"] += int(amount)
else: # en alles boven 65
age_dictionary["Elderly"] += int(amount)
prev_percentage = 0
for i in age_dictionary.keys():
age_group = age_dictionary[i]
age_dictionary[i] = float(prev_percentage)
prev_percentage += (age_group / total)
return age_dictionary | true |
from enum import Enum


# Define an Enum subclass: each class attribute becomes a member with a
# .name and a .value, which allows lookups/routing by either.
class Country(Enum):
    StarWars = 10
    LOTR = 100
    GOT = 1000
    Walking_Dead = 1250


# Access one member's name and value.
print('\nMember name: {}'.format(Country.GOT.name))
print('Member value: {}'.format(Country.GOT.value))
1ea0535f139eb116eca8df0e0b7b8a9e736444c6 | Python | venkat-narahari/Opinion-Mining-on-Twitter-Data-using-Machine-learning | /src/SentimentAnalysis/sentiment.py | UTF-8 | 7,266 | 3.078125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import re
import nltk
from sklearn.externals import joblib
import tweepy
from tweepy import OAuthHandler
import matplotlib.pyplot as plt
import datetime
class TwitterClient(object):
    """Twitter client that fetches tweets for a query and scores their
    sentiment with a pre-trained scikit-learn classifier.

    Per-tweet pipeline: normalise the text (URLs, @handles, hashtags,
    repeated letters, emoticon markers), stem the words, then either
    short-circuit on an emoticon marker or ask the classifier.
    """

    def __init__(self):
        """Authenticate against the Twitter API and build the tweepy client."""
        # keys and tokens from the Twitter Dev Console
        # NOTE(review): credentials should be loaded from the environment or
        # a config file rather than hard-coded in source.
        consumer_key = '1qRm35j3kskUyITp8FquUk3Sj'
        consumer_secret = 'bdzrMnivVpi5ku4i1Dd4Dpmxdyo1oWjsnQNUvHPAZWRaKuAroi'
        access_token = '158240218-M7DsUlvQKmxOtjfnKxFNKBTEmheuvNn4vi0MM6BP'
        access_token_secret = 'oWY5G9sTxnH81tFbaicN5DKs1AjkD2WsWM5oCyoSh8NoR'
        # attempt authentication
        try:
            # create OAuthHandler object
            self.auth = OAuthHandler(consumer_key, consumer_secret)
            # set access token and secret
            self.auth.set_access_token(access_token, access_token_secret)
            # create tweepy API object to fetch tweets
            self.api = tweepy.API(self.auth)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # still propagate.
            print("Error: Authentication Failed")

    def preprocessTweets(self, tweet):
        """Normalise raw tweet text and return the lower-cased result.

        URLs become 'URL', @mentions become '__HANDLE', '#tag' loses its
        '#', runs of 3+ repeated characters collapse to two, and emoticons
        are replaced by ' __positive__ ' / ' __negative__ ' markers.
        """
        # Convert www.* or https?://* to URL
        tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))', 'URL', tweet)
        # Convert @username to __HANDLE
        tweet = re.sub('@[^\s]+', '__HANDLE', tweet)
        # Replace #word with word
        tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
        # trim surrounding quotes
        tweet = tweet.strip('\'"')
        # Collapse repeated characters, e.g. "happyyyyyy" -> "happyy".
        rpt_regex = re.compile(r"(.)\1{1,}", re.IGNORECASE)
        tweet = rpt_regex.sub(r"\1\1", tweet)
        # Emoticon tables.  BUG FIX: the negative list used to repeat the
        # smileys '(:' and '(-:' (already handled as positive); they are
        # replaced here by the reversed frowns '):' and ')-:' so those are
        # tagged negative instead of being ignored.
        emoticons = \
            [
                ('__positive__', [':-)', ':)', '(:', '(-:', ':-D', ':D', 'X-D', 'XD', 'xD', '<3', ':\*', ';-)', ';)', ';-D', ';D', '(;', '(-;', ]),
                ('__negative__', [':-(', ':(', '):', ')-:', ':,(', ':\'(', ':"(', ':((', ]),
            ]

        def replace_parenth(arr):
            # Turn literal parentheses into character classes that also
            # match braces and brackets.
            return [text.replace(')', '[)}\]]').replace('(', '[({\[]') for text in arr]

        def regex_join(arr):
            return '(' + '|'.join(arr) + ')'

        emoticons_regex = [(repl, re.compile(regex_join(replace_parenth(regx)))) for (repl, regx) in emoticons]
        # Positive patterns are applied before negative ones (list order).
        for (repl, regx) in emoticons_regex:
            tweet = re.sub(regx, ' ' + repl + ' ', tweet)
        # Convert to lower case
        tweet = tweet.lower()
        return tweet

    def stem(self, tweet):
        """Drop words shorter than 3 characters, lower-case non-'__' words,
        then Porter-stem every remaining word."""
        stemmer = nltk.stem.PorterStemmer()
        tweet_stem = ''
        words = [word if (word[0:2] == '__') else word.lower()
                 for word in tweet.split()
                 if len(word) >= 3]
        words = [stemmer.stem(w) for w in words]
        tweet_stem = ' '.join(words)
        return tweet_stem

    def predict(self, tweet, classifier):
        """Classify one tweet: returns 1 (positive) or 0 (negative).

        Emoticon markers short-circuit the classifier.
        NOTE(review): this assumes the '__positive__'/'__negative__' markers
        survive Porter stemming unchanged -- verify.
        """
        tweet_processed = self.stem(self.preprocessTweets(tweet))
        if (('__positive__') in (tweet_processed)):
            sentiment = 1
            return sentiment
        elif (('__negative__') in (tweet_processed)):
            sentiment = 0
            return sentiment
        else:
            X = [tweet_processed]
            sentiment = classifier.predict(X)
            return (sentiment[0])

    def get_tweets(self, classifier, query, count=1000):
        '''
        Main function to fetch tweets and parse them.

        Returns a list of {'text', 'sentiment'} dicts (retweets are
        de-duplicated), or None when the Twitter API call fails.
        '''
        # empty list to store parsed tweets
        tweets = []
        try:
            # call twitter api to fetch tweets
            fetched_tweets = self.api.search(q=query, count=count)
            # parsing tweets one by one
            for tweet in fetched_tweets:
                # empty dictionary to store required params of a tweet
                parsed_tweet = {}
                # saving text of tweet
                parsed_tweet['text'] = tweet.text
                # saving sentiment of tweet
                parsed_tweet['sentiment'] = self.predict(tweet.text, classifier)
                # appending parsed tweet to tweets list
                if tweet.retweet_count > 0:
                    # if tweet has retweets, ensure that it is appended only once
                    if parsed_tweet not in tweets:
                        tweets.append(parsed_tweet)
                else:
                    tweets.append(parsed_tweet)
            # return parsed tweets
            return tweets
        except tweepy.TweepError as e:
            # print error (if any); falls through to an implicit None return,
            # so callers must handle a None result.
            print("Error : " + str(e))
# Main function
def main():
    """Interactive driver: load the classifier, then repeatedly prompt for
    a topic, fetch/classify tweets and plot the results until the user
    answers anything other than "1" at the continue prompt.
    """
    print('Loading the Classifier, please wait....')
    classifier = joblib.load('svmClassifier.pkl')
    # creating object of TwitterClient Class
    api = TwitterClient()
    while True:
        query = input("Enter the Topic for Opinion Mining: ")
        tweets = api.get_tweets(classifier, query, count=1000)
        # get_tweets returns None when the Twitter API call failed; an
        # empty list would also crash the percentage division below.
        if not tweets:
            print("No tweets could be fetched for this topic.")
        else:
            ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 0]
            ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 1]
            neg = (100 * len(ntweets) / len(tweets))
            pos = (100 * len(ptweets) / len(tweets))
            # console output of sentiment
            print("Opinion Mining on ", query)
            # Running-total "opinion" curve: +1 per positive, -1 per negative.
            ax1 = plt.axes()
            ax1.clear()
            xar = []
            yar = []
            x = 0
            y = 0
            for tweet in tweets:
                x += 1
                if tweet['sentiment'] == 1:
                    y += 1
                elif tweet['sentiment'] == 0:
                    y -= 1
                xar.append(x)
                yar.append(y)
            ax1.plot(xar, yar)
            ax1.arrow(x, y, 0.5, 0.5, head_width=1.5, head_length=4, fc='k', ec='k')
            plt.title('Graph')
            plt.xlabel('Time')
            plt.ylabel('Opinion')
            plt.show()
            # Pie chart of positive vs negative share (negative exploded).
            labels = 'Positive Tweets', 'Negative Tweets'
            sizes = [pos, neg]
            explode = (0, 0.1)
            fig1, ax2 = plt.subplots()
            ax2.pie(sizes, explode=explode, labels=labels, autopct='%2.3f%%', shadow=False, startangle=180)
            # Equal aspect ratio ensures that pie is drawn as a circle.
            ax2.axis('equal')
            plt.title('Pie Chart')
            plt.show()
            # percentage of negative tweets
            print("Negative tweets percentage: ", neg)
            # percentage of positive tweets
            print("Positive tweets percentage: ", pos)
            now = datetime.datetime.now()
            print("Date and Time analysed: ", str(now))
        # BUG FIX: input() returns a string, but the original compared it to
        # the int 0 ("while (q == 0)" / "if(q == 0)"), so the loop always
        # exited after one pass regardless of the answer.  Repeat only on an
        # explicit "1".
        q = input("Do you want to continue[Press 1 for Yes/ 0 for No]? ")
        if q.strip() != "1":
            break
if __name__ == "__main__":
    # Run the interactive opinion-mining loop when executed as a script.
    main()
| true |
import tensorflow as tf
from tensorflow.keras.backend import batch_flatten
import os
from tqdm import tqdm

# Training hyper-parameters.
BATCH_SIZE = 3
optimizer = tf.optimizers.Adam(1e-4)

# MNIST images, scaled from [0, 255] to [0, 1]; labels are loaded but unused.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train/255
x_test = x_test/255

trainset = tf.data.Dataset.from_tensor_slices(x_train)
testset = tf.data.Dataset.from_tensor_slices(x_test)
trainset = trainset.batch(BATCH_SIZE)
class ae(tf.keras.Model):
    """Minimal dense autoencoder for 28x28 MNIST images.

    The encoder compresses a flattened image through a 4-unit layer to a
    2-d latent code; the decoder maps the code back to a 28x28 image.
    """

    def __init__(self):
        super(ae, self).__init__()
        self.encoder = tf.keras.Sequential(
            [
                tf.keras.layers.Flatten(),
                tf.keras.layers.Dense(4),
                tf.keras.layers.Dense(2),
            ]
        )
        self.decoder = tf.keras.Sequential(
            [
                tf.keras.layers.Dense(4),
                tf.keras.layers.Dense(28*28),
                tf.keras.layers.Reshape(target_shape=(28, 28)),
            ]
        )

    @tf.function
    def encode(self, x):
        """Map a batch of images to latent codes."""
        return self.encoder(x)

    @tf.function
    def decode(self, z):
        """Map a batch of latent codes back to 28x28 images."""
        return self.decoder(z)

    def saver(self, tag):
        """Save encoder/decoder weights under ./saved/<tag>/.

        BUG FIX: this is intentionally *not* a @tf.function -- it performs
        pure-Python filesystem side effects, which would only execute once
        during graph tracing.  os.makedirs also creates the missing parent
        directory ./saved (os.mkdir would raise) and is race-free with
        exist_ok=True.
        """
        directory = './saved/{0}'.format(tag)
        os.makedirs(directory, exist_ok=True)
        self.encoder.save(directory+'/inf', save_format='h5')
        self.decoder.save(directory+'/gen', save_format='h5')
@tf.function
def mse(input, output):
    """Mean squared error between an input batch and its reconstruction,
    computed over the flattened pixels of each example."""
    #flatten the tensors, maintaining batch dim
    return tf.losses.MSE(batch_flatten(input), batch_flatten(output))
@tf.function
def train_step(input, model):
    """Run one optimisation step: encode/decode one batch and apply Adam
    gradients of the reconstruction MSE to the model's variables."""
    with tf.GradientTape() as tape:
        z = model.encode(input)
        output = model.decode(z)
        loss = mse(input, output)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
def train(model, trainset):
    """Train *model* for a single pass over *trainset* with a progress bar.

    The bar total is the number of samples (x_train.shape[0]) and it is
    advanced by BATCH_SIZE per batch, so it assumes batched iteration.
    """
    end = x_train.shape[0]
    with tqdm(total = end) as pbar:
        for batch in tqdm(trainset):
            train_step(batch, model)
            pbar.update(BATCH_SIZE)
if __name__ == "__main__":
    # Build the autoencoder and train it for one pass over batched MNIST.
    model = ae()
    train(model, trainset)
| true |
# Dictionary basics demo: build a dict, read values, remove/add keys and
# iterate over values, keys and items.
adrian = {
    'nombre': "Adrian",
    'apellido': 'Eguez',
    "edad": 29,
    "sueldo": 1.01,
    "hijos": [],
    "casado": False,
    "loteria": None,
    "mascota": {
        "nombre": "Cachetes",
        "edad": 3
    },
}
print(adrian)
print(adrian["nombre"])  # Adrian
print(adrian["mascota"]["nombre"])  # Cachetes
print(adrian.get("apellido"))
# Remove a key in place.
adrian.pop("casado")
print(adrian)
print(adrian.values())
for valor in adrian.values():
    print(f"Valor: {valor}")
for llave in adrian.keys():
    print(f"Llave: {llave} valor: {adrian.get(llave)}")
for clave, valor in adrian.items():
    print(f"clave: {clave} valor: {valor}")
# Add a new key, then merge several keys at once with update().
adrian["profesion"] = "Maistro"
print(adrian)
nuevos_valores = {"peso": 0, "altura": 1}
# NOTE(review): the literal is passed again instead of nuevos_valores;
# behaviour is identical, preserved as-is.
adrian.update({"peso": 0, "altura": 1})
print(adrian)
| true |
7f39954f125c3c4f9288cdd2870428d36394169a | Python | zhiwenliang/archive | /python_crash_course/basics/utils/json_utils.py | UTF-8 | 237 | 2.90625 | 3 | [
"MIT"
] | permissive | import json
def dump_json_to_file(json_obj, file_path):
    """Serialize *json_obj* as JSON into the file at *file_path*."""
    # Write UTF-8 explicitly: JSON interchange is UTF-8, and relying on the
    # platform default encoding can corrupt non-ASCII text (e.g. on Windows).
    with open(file_path, "w", encoding="utf-8") as f:
        json.dump(json_obj, f)
def load_json_file(file_path):
    """Parse and return the JSON document stored at *file_path*."""
    # Read UTF-8 explicitly so parsing does not depend on the platform's
    # default text encoding.
    with open(file_path, encoding="utf-8") as f:
        result = json.load(f)
    return result
| true |
'''
Write a program that prints the numbers from 1 to 50. But for
multiples of three print "Fizz" instead of the number and for
the multiples of five print "Buzz". For numbers which are
multiples of both three and five print "FizzBuzz"
'''
# Classic FizzBuzz over 1..50; the combined case must be tested first.
for fizzbuzz in range(1, 51):
    fizz = (fizzbuzz % 3 == 0)
    buzz = (fizzbuzz % 5 == 0)
    if fizz and buzz:
        print('FizzBuzz')
    elif fizz:
        print('Fizz')
    elif buzz:
        print('Buzz')
    else:
        print(fizzbuzz)
b69749332300b91e646adac10be7cee9bb4f9482 | Python | krishshah99615/Single-Hand-Gesture | /dataset.py | UTF-8 | 2,202 | 3.0625 | 3 | [] | no_license | ####################### LIBRARRIES ##########################
import cv2
import numpy
import os
import argparse
####################### INITIALIZING CAPTURE ##########################
# Base directory under which one sub-folder per gesture is created.
BASE_DIR = "Dataset"
# Open the default webcam (device 0).
cap = cv2.VideoCapture(0)
# Requested capture resolution.
HEIGHT, WIDTH = 360, 640
cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)
####################### Getting arguments from command prompt ##########################
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--name", required=True,
                help="Name of the Gesture")
args = vars(ap.parse_args())
g_name = args["name"]
# Create the gesture's directory if it does not exist yet.
g_dir = os.path.join(BASE_DIR, g_name)
os.makedirs(g_dir, exist_ok=True)
print("Made folder name "+str(g_name))
counter = 0  # number of images saved so far
####################### Starting capturing ##########################
print("Starting Capturing .......")
while(1):
    # Grab the next webcam frame.
    ret, frame = cap.read()
    # Mirror horizontally so the preview behaves like a mirror.
    frame = cv2.flip(frame, 1)
    # Crop region [x1, y1, x2, y2] (bounding box from previous script).
    bbox = [346, 123, 544, 312]
    # Draw the rectangle as a visual guide for the user.
    cv2.rectangle(frame, (bbox[0], bbox[1]),
                  (bbox[2], bbox[3]), (255, 0, 0), 2)
    # Crop the region of interest that gets written to disk.
    rect = frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]
    # Pressing "s" saves the current crop into the gesture folder.
    # NOTE(review): waitKey is called twice per frame (here and for "q"),
    # so each call consumes a separate key event.
    if cv2.waitKey(1) & 0xFF == ord('s'):
        # Dynamic file name: <gesture>-<index>.jpg
        f_name = f"{g_name}-{counter}.jpg"
        # writing the image
        cv2.imwrite(os.path.join(g_dir, f_name), rect)
        print("Saving "+str(f_name))
        # updating count
        counter = counter+1
    # Overlay the running count on the preview.
    # NOTE(review): (255, 0, 0, 2) looks like the colour (255, 0, 0) and a
    # thickness of 2 merged into one tuple -- confirm the intended call.
    cv2.putText(frame, f"Counter for {g_name} : {counter}",
                (100, 38), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0, 2))
    cv2.imshow("frame", frame)
    #cv2.imshow("bbox", rect)
    # Pressing "q" quits the capture loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
#[561, 83, 384, 287]
| true |
class enemy:
    """A level-scaled enemy, either melee or ranged."""

    # Class-level default / lookup of the supported enemy kinds.
    etype = ""
    selectedType = ["melee", "ranged"]

    def __init__(self, level, etype):
        """Create an enemy.

        level: 0, 1 or 2 -- indexes levelMultiplyer to scale the stats.
        etype: 0 for melee, 1 for ranged (index into selectedType).
        """
        self.level = level
        # Converts level to a multiplyer.
        self.levelMultiplyer = [1, 1.5, 2]
        self.etype = self.selectedType[etype]
        self.numberInGroup = 0
        self.etypeI = ""
        # Variables important for enemy: melee units are sturdier but
        # weaker, ranged units are stronger but more fragile.
        if self.etype == "melee":
            self.strength = 10 * self.levelMultiplyer[self.level]
            self.health = 15 * self.levelMultiplyer[self.level]
            self.etypeI = "M"
        elif self.etype == "ranged":
            self.strength = 15 * self.levelMultiplyer[self.level]
            self.health = 10 * self.levelMultiplyer[self.level]
            self.etypeI = "R"
b1f8d7198d8967bdacd133797959f84f7fff58df | Python | mikelty/algos | /solutions/computational_geometry/max_darts_in_circular_board_line_sweeping.py | UTF-8 | 1,087 | 3.3125 | 3 | [] | no_license | #solves https://leetcode.com/problems/maximum-number-of-darts-inside-of-a-circular-dartboard/
from math import acos, atan2
class Solution:
def numPoints(self, points):
best=1
for px,py in points:
angles=[] #all angles where a q touches p's sweeping line's circle
for qx,qy in points:
if (px,py)!=(qx,qy):
pq=((px-qx)**2+(py-qy)**2)**0.5
if pq<=2*r: #if p and q share a circle
#calculate angles relative to x-axis and the sweeping line
ab=atan2((qy-py),(qx-px))
b=acos(pq/(2.0*r))
angles.append((ab-b,+1)) #go in at alpha, one more q
angles.append((ab+b,-1)) #go out at alpha + 2 * beta, one less q
angles.sort(key=lambda x:(x[0],-x[1])) #in comes before out to maximize count
count=1 #need to count p
for _, value in angles:
best=max(best,count:=count+value) #q touches p's circle, update best?
return best | true |
655bfc8dec5989b549e3eacdd09851bdce54ff63 | Python | mindthegrow/cafelytics | /simulate.py | UTF-8 | 5,153 | 3.046875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | import datetime
import matplotlib.pyplot as plt
from cafe.farm import (
Config,
Event,
Farm,
guate_harvest_function,
predict_yield_for_farm,
)
def simulateCoOp(plotList, numYears, pruneYear=None, growthPattern=None, strategy=None):
"""
Uses a list of plots, `plotList`, to simulate a cooperative over `numFarms` number of years.
Returns a list of two lists: `simulation`
list one, `harvestYear`, represents the year range in the simulation.
list two, `annualHarvest`, represents the amount of coffee (in lbs) harvested for that year
"""
# numPlots = len(plotList)
annualHarvest = []
harvestYear = []
start_year = min([plot.start.year for plot in plotList])
# start_year = 2020
for current_year in range(start_year, start_year + numYears + 1):
configs = (
Config("e14", name="e14", output_per_crop=125, unit="cuerdas"),
Config("borbon", name="borbon", output_per_crop=200, unit="cuerdas"),
Config("catuai", name="catuai", output_per_crop=125, unit="cuerdas"),
Config("catura", name="catura", output_per_crop=125, unit="cuerdas"),
)
species_list = [config.species for config in configs]
scopes = {
species: {"type": "species", "def": species} for species in species_list
}
harvest_functions = {
"e14": guate_harvest_function(lifespan=15, mature=5),
"catura": guate_harvest_function(lifespan=16, mature=4),
"catuai": guate_harvest_function(lifespan=17, mature=4),
"borbon": guate_harvest_function(lifespan=30, mature=5),
}
events = [
Event(
name=f"{species} harvest",
impact=harvest_functions[species],
scope=scopes[species],
)
for species in species_list
]
start_year = datetime.datetime(2020, 1, 1)
end_year = datetime.datetime(2021, 1, 1)
events.append(
Event(
"catastrophic overfertilization",
impact=0.001,
scope={"type": "species", "def": "borbon"},
start=start_year,
end=end_year,
)
)
farm = Farm(plotList)
thisYearsHarvest = predict_yield_for_farm(
farm=farm,
configs=configs,
events=events,
time=datetime.datetime(current_year, 1, 1),
)
harvestYear.append(current_year)
# annualHarvest.append(thisYearsHarvest[0]) # inspect single plot
annualHarvest.append(sum(thisYearsHarvest))
simulation = [harvestYear, annualHarvest]
return simulation
def main(args):
import os
farmData = args.farm
# trees = args.trees
# strategy = args.strategy
years = args.years
output = args.output
if not os.path.exists(farmData):
raise ValueError(
(
f"File: {farmData} does not exist.\n"
"If you are running default commands and this is your first time"
"running the simulation, assure you have run:\n"
"`python3 src/cafe/fakeData.py --farms 100"
"--year 2020 --output data/fakeData.csv`\n"
"in the core directory before calling"
"simulateCoOp.py from the command line."
)
)
print("Importing Data")
farm_example = Farm.from_csv(farmData)
farmList = farm_example.plots
print("Simulating Cooperative")
simData = simulateCoOp(farmList, years)
print("Plotting")
pltYears, pltHarvests = simData
# get parameters for axes
mnYear, mxYear = min(pltYears), max(pltYears)
mxHarvest = max(pltHarvests)
plt.rcParams["figure.figsize"] = (20, 10)
fsize = 20 # font size
plt.axes(xlim=(mnYear, mxYear), ylim=(0, (mxHarvest + (mxHarvest * 0.10))))
plt.plot(pltYears, pltHarvests, linewidth=4)
plt.style.use("ggplot")
plt.title("Yield Forecast", fontsize=(fsize * 1.25))
plt.xlabel("Year", fontsize=fsize)
plt.xticks(pltYears, rotation=45)
plt.ylabel("Total pounds of green coffee produced", fontsize=fsize)
plt.savefig(output, dpi=100)
# plt.show()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Parse growth data for simulation.")
parser.add_argument(
"-f",
"--farm",
default="data/fakeData.csv",
type=str,
help="""
Path to data containing plot details.
e.g., cuerdas, tree types, etc.\n
""",
)
parser.add_argument(
"-y",
"--years",
default=75,
type=int,
help="""
Number of years that should be iterated
through in the simulation (default=30).\n
""",
)
parser.add_argument(
"-o",
"--output",
default="testNewFarm.png",
type=str,
help="Desired name of plot output file (default=testNewFarm.png).",
)
args = parser.parse_args()
main(args)
| true |
4535bd3f5ce24b87fb38a21042659b6de4472fc2 | Python | SGT103/med_segmentation | /models/metrics.py | UTF-8 | 6,778 | 3 | 3 | [
"Apache-2.0"
] | permissive | import tensorflow as tf
import tensorflow.keras.backend as K
class Metric:
"""
Extension of evaluation metrics not yet existing in keras and/or Tensorflow
"""
"""
per class metrics
"""
# sensitivity, recall, hit rate, true positive rate
# TPR = TP/P = TP/(TP+FN) = 1-FNR
def recall_per_class(self, selected_class, y_true, y_pred, config):
smooth = 1e-16
print('line17 metrics',y_true.shape, y_pred.shape)
y_pred = tf.one_hot(tf.argmax(y_pred, axis=-1), config['channel_label_num'])
print('selected_class',selected_class)
true_positive = K.sum((y_true[..., selected_class] * y_pred[..., selected_class]))
return (true_positive + smooth) / (K.sum(y_true[..., selected_class]) + smooth)
# precision, positive predictive value (PPV)
# PPV = TP/(TP+FP) = 1-FDR
def precision_per_class(self, selected_class, y_true, y_pred, config):
smooth = 1e-16
y_pred = tf.one_hot(tf.argmax(y_pred, axis=-1), config['channel_label_num'])
true_positive = K.sum((y_true[..., selected_class] * y_pred[..., selected_class]))
return (true_positive + smooth) / (K.sum(y_pred[..., selected_class]) + smooth)
# specificity, selectivity, true negative rate (TNR)
# TNR = TN/N = TN/(TN+FP) = 1-FPR
def specificity_per_class(self, selected_class, y_true, y_pred, config):
smooth = 1e-16
y_pred = tf.one_hot(tf.argmax(y_pred, axis=-1), config['channel_label_num'])
true_negative = K.sum((y_true[..., selected_class] - 1) * (y_pred[..., selected_class] - 1))
return (true_negative + smooth) / (K.abs(K.sum(y_true[..., selected_class] - 1)) + smooth)
# F1 score
# F1 = 2* (PPV*TPR)/(PPV+TPR)
def f1_score_per_class(self, selected_class, y_true, y_pred, config):
smooth = 1e-16
recall_func = getattr(self, 'recall_all')
precision_func = getattr(self, 'precision_all')
recall = recall_func(self, selected_class, y_true, y_pred, config)
precision = precision_func(self, selected_class, y_true, y_pred, config)
return (2 * recall * precision + smooth) / (recall + precision + smooth)
def dice_coef_per_class(self, selected_class, y_true, y_pred, config):
""" Dice coefficient for Melanoma network
y_true: true targets tensor.
y_pred: predictions tensor.
Dice calculation with smoothing to avoid division by zero
"""
# smooth = 1E-16
# assert y_true.shape == y_pred.shape
smooth = K.epsilon()
sum_metric, weight_sum = 0, 0
y_t = y_true[..., selected_class]
y_p = y_pred[..., selected_class]
intersection = tf.math.reduce_sum(y_t * y_p) * config['loss_channel_weight'][selected_class]
denominator = tf.math.reduce_sum(y_t) + tf.math.reduce_sum(y_p) + smooth
dice_coef = (2. * intersection / denominator)
#y_mean = sum_metric / weight_sum
return dice_coef
"""
one against-rest metrics
"""
# sensitivity, recall, hit rate, true positive rate
# TPR = TP/P = TP/(TP+FN) = 1-FNR
def recall_all(self, y_true, y_pred, config):
smooth = 1e-16
y_pred = tf.one_hot(tf.argmax(y_pred, axis=-1), config['channel_label_num'])
true_positive = K.sum(y_true * y_pred)
return (true_positive + smooth) / (K.sum(y_true) + smooth)
# precision, positive predictive value (PPV)
# PPV = TP/(TP+FP) = 1-FDR
def precision_all(self, y_true, y_pred, config):
smooth = 1e-16
y_pred = tf.one_hot(tf.argmax(y_pred, axis=-1), config['channel_label_num'])
true_positive = K.sum(y_true * y_pred)
return (true_positive + smooth) / (K.sum(y_pred) + smooth)
# specificity, selectivity, true negative rate (TNR)
# TNR = TN/N = TN/(TN+FP) = 1-FPR
def specificity_all(self, y_true, y_pred, config):
smooth = 1e-16
y_pred = tf.one_hot(tf.argmax(y_pred, axis=-1), config['channel_label_num'])
true_negative = K.sum((y_true - 1) * (y_pred - 1))
return (true_negative + smooth) / (K.abs(K.sum(y_true - 1)) + smooth)
# F1 score
# F1 = 2* (PPV*TPR)/(PPV+TPR)
def f1_score_all(self, y_true, y_pred, config):
smooth = 1e-16
recall_func = getattr(self, 'recall_all')
precision_func = getattr(self, 'precision_all')
recall, precision = recall_func(self, y_true, y_pred, config), precision_func(self, y_true, y_pred, config)
return (2 * recall * precision + smooth) / (recall + precision + smooth)
def dice_coef_all(self, y_true, y_pred, config):
""" Dice coefficient for Melanoma network
y_true: true targets tensor.
y_pred: predictions tensor.
Dice calculation with smoothing to avoid division by zero
"""
# smooth = 1E-16
# assert y_true.shape == y_pred.shape
smooth = K.epsilon()
#assert len(y_true.shape) == 5
sum_metric, weight_sum = 0, 0
for class_index in range(config['num_classes']):
y_t = y_true[..., class_index]
y_p = y_pred[..., class_index]
intersection = tf.math.reduce_sum(y_t * y_p) * config['loss_channel_weight'][class_index]
denominator = tf.math.reduce_sum(y_t) + tf.math.reduce_sum(y_p) + smooth
metric = (2. * intersection / denominator)
sum_metric += metric ## this returns a tensor
weight_sum += config['loss_channel_weight'][class_index] ## this returns a tensor too
y_mean = sum_metric / weight_sum
return y_mean
def get_custom_metrics(amount_classes, name_metric, config):
"""
Get list of metric functions by their name, and amount of class
:param amount_classes: type int: amount of channel
:param name_metric: type string: name of the metric
:param config: type dict: config parameter.
:return: list_metric: type list of function: list of metric funtions from class Metric()
"""
metric_func = getattr(Metric, name_metric)
list_metric = []
if '_per_class' in name_metric:
metric_func_per_class = lambda c: lambda y_true, y_pred: metric_func(Metric, c, y_true, y_pred, config)
list_metric = [metric_func_per_class(c) for c in range(amount_classes)]
for j, f in enumerate(list_metric):
f.__name__ = name_metric + '_channel_' + str(j)
if '_all' in name_metric:
metric_func_all = lambda y_true, y_pred: metric_func(Metric, y_true, y_pred, config)
metric_func_all.__name__ = name_metric
list_metric = [metric_func_all]
return list_metric
| true |
349c7147aeb65d6fccd9f8a7c323b9d5670f0eff | Python | pongtr/charm | /src/design_rules.py | UTF-8 | 5,161 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python3
'''
design_rules.py
design rules
'''
from collections import defaultdict
n_layers = 5 # number of metal layers
# == SPACING ==================
material_spacing = {
'm1': 3, # m1-m1
'm2': 3, # m2-m2
'm3': 3, # m2-m2
'm4': 3, # m2-m2
'm5': 3, # m2-m2
'poly': 3, # poly-poly
'pdiff': 1, # pdiff0- poly
'ndiff': 1 # ndiff-poly
}
material_spacing['pc'] = max(material_spacing['m1'],material_spacing['poly'])
material_spacing['m2c'] = max(material_spacing['m1'],material_spacing['m2'])
material_spacing['m3c'] = max(material_spacing['m2'],material_spacing['m3'])
material_spacing['m4c'] = max(material_spacing['m3'],material_spacing['m4'])
material_spacing['m5c'] = max(material_spacing['m4'],material_spacing['m5'])
material_spacing['pdc'] = max(material_spacing['m1'],material_spacing['pdiff'])
material_spacing['ndc'] = max(material_spacing['m1'],material_spacing['ndiff'])
# == WIDTH =====================
material_width = {
'm1': 3, # make this the same as contact size
'm2': 3,
'm3': 3,
'm4': 3,
'm5': 3,
'poly':2,
'pc': 4,
'pdc': 4,
'ndc': 4,
'm2c': 4,
'm3c': 4,
'm4c': 4,
'm5c': 4
}
# == COST ======================
material_cost = {
'm1': 2,
'm2': 2,
'm3': 2,
'm4': 2,
'm5': 2,
'm2c': 2,
'm3c': 5,
'm4c': 5,
'm5c': 5,
'poly': 5,
'pc': 5
}
# == CONTACT MATERIALS ==========
contact_materials = {
'ndc': ['m1'],
'pdc': ['m1'],
'pc' : ['poly','m1'],
'm2c': ['m2','m1'],
'm3c': ['m2','m3'],
'm4c': ['m3','m4'],
'm5c': ['m4','m5'],
}
def get_contact(materials):
"""Given a list of two materials,
returns the contact material between the two
or None if one does not exist
"""
for contact, mat in contact_materials.items():
if set(mat) == set(materials):
return contact
return None
# material_order = ['poly','m1','m2'] # just these two for now
# == MATERIAL IN EACH LAYER =======
'''
mat_layers = [['poly','pc','ndiff','ndc','pdiff','pdc'],
['m1','ndc','pdc','pc','m2c'],
['m2','m2c','m3c'],
['m3','m3c']]
'''
def get_mat_layer(mat):
return layers_mat[mat]
diff_mats = {
'pdiff': 0,
'ndiff': 0,
'pdc': 1,
'ndc': 1
}
mat_layers, layers_mat = [], {}
routing_materials = []
def generate_mat_layers(n_layers):
global mat_layers, layers_mat
mat_layers.append('poly')
routing_materials.append('poly')
mat_layers.append('pc')
for i in range(1, n_layers + 1):
if i > 1:
mat_layers.append('m{}c'.format(i))
mat_layers.append('m{}'.format(i))
routing_materials.append('m{}'.format(i))
for i,mat in enumerate(mat_layers):
layers_mat[mat] = i
for k,v in diff_mats.items():
layers_mat[k] = v
generate_mat_layers(n_layers)
connected_mats = [
['poly','pc'],
['m1','pc','m2c'],
['m2','m2c','m3c'],
['m3','m3c']
]
def get_other_mats():
other_mats = defaultdict(list)
for mats in connected_mats:
for m in mats:
other_mats[m] += [om for om in mats if om != m]
return other_mats
other_mats = get_other_mats()
# == MATERIAL DIRECTIONS ============
'''
Options:
- s: straight only
- x: only horizontal
- y: only vertical
- xy: both horizontal and vertical
'''
material_directions = {
'poly': 'xy',
'm1': 'xy',
'm2': 'xy',
'm3': 'xy',
'm4': 'xy',
'm5': 'xy'
}
# == FIVE NEW RULES =============
# Line End Threshold (line vs joint)
line_end = {
'poly': 3, # arbitrary for now
'm1': 3, # 0.04 microns
'm2': 2, # 0.02 microns
'm3': 2 # arbirary for now
}
# End of line
end_of_line = {
'poly': 4, # arbitrary for now
'm1': 4, # rules 506 : both sides >= 0.065 microns
'm2': 4, # rules 606 : both sides >= 0.065 microns
'm3': 4, # rules 606 : both sides >= 0.065 microns
'm4': 4, # rules 606 : both sides >= 0.065 microns
'm5': 4, # rules 606 : both sides >= 0.065 microns
}
# Point to edge
point_to_edge = {
'poly': 5, # allow poly to turn for now
'm1': 3, # SE4 0.05 micron (same as min width)
'm2': 3, # SE5 0.05 micron (same as min width)
'm3': 3, # SE5 0.05 micron (same as min width)
'm4': 3, # SE5 0.05 micron (same as min width)
'm5': 3 # SE5 0.05 micron (same as min width)
}
# Min area
min_area = {
'poly': 4, # arbitrary
'm1' : 36, # 501d 0.01 micron2 (relative to min width 3)
'm1se': 108, # 501aSE all edges less than 0.130 micron (8 lambda)
'm2' : 40, # 601d 0.01 micron2 (relative to min width 3)
'm2se': 108, # 601aSE all edges less than 0.130 micron (8 lambda)
'm2' : 40, # 601d 0.01 micron2 (relative to min width 3)
'm3' : 40, # 601d 0.01 micron2 (relative to min width 3)
'm4' : 40, # 601d 0.01 micron2 (relative to min width 3)
'm5' : 40, # 601d 0.01 micron2 (relative to min width 3)
}
# short edges for area
area_se = {
'm1': 8,
'm2': 8,
'm3': 8
}
# Fat wire
# Coloring
| true |
fbdd6173d82f53604e5dc86c2f66f57283a0193a | Python | LukeTempleman/Personal_projects | /Lukes_song_Downloader/Lukes_song_Downloader.py | UTF-8 | 564 | 2.59375 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
PATH ='C:\Program Files (x86)\EdgeDriver\msedgedriver.exe'
driver = webdriver.Edge(PATH)
# users_input = input("Input The song you want to Download")
driver.get("https://www.mp3juices.cc")
search = driver.find_element_by_name("query")
search.send_keys("yeet")
search_button = driver.find_element_by_id("button")
time.sleep(2)
search_button.click()
first_result = driver.find_element_by_css_selector("")
first_result.click
print(first_result)
| true |
77b9f099a653b37f2a26469c1d297bd5646669e1 | Python | dwillist/ProjectEuler | /Euler483/maxCycleLength.py | UTF-8 | 1,017 | 3.484375 | 3 | [] | no_license | # here we whish to count the max cycle length as well as number of cycles of this length
import math
def isPrime(k):
for i in range(2,int(math.sqrt(k)) + 1):
if k % i == 0:
return False
return True
def calculate_max():
pSet = []
for i in range(2,350 + 1):
if isPrime(i):
pSet.append(i)
# now we have a prime set
index = 0
summation = 0
length = 1
count = 1
print(pSet)
while summation + pSet[index] + pSet[index+1] < 350:
summation += pSet[index]
length *= pSet[index]
index += 1
count *= math.factorial(pSet[index] -1)
#
prevIndex = index
while(summation + pSet[prevIndex+1] < 350):
prevIndex += 1
summation += pSet[prevIndex]
length *= pSet[prevIndex]
count *= math.factorial(pSet[prevIndex] - 1)
index = prevIndex
cycleCount = math.factorial(350)//count
print(cycleCount,summation,length,pSet[index])
print(length**2 * cycleCount)
calculate_max()
| true |
0bc9e7b0baca233795067e429bc04f86ab6f06ff | Python | vivequeramji/hackathon_Princeton_F16 | /plot_location.py | UTF-8 | 263 | 2.734375 | 3 | [] | no_license | import time
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
TIME_CONSTANT = 3600*6
def plot(place, timestamp):
size = time.time() - timestamp
alp = 0.5 + (size/(2*TIME_CONSTANT))
plt.scatter(x=place.x, y=place.y, s=150, alpha=alp) | true |
cd44a99c9e2b109677701f5233c793317d70985e | Python | BristolTopGroup/DailyPythonScripts | /tests/utils/test_Fitting_RooFitFit.py | UTF-8 | 3,154 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | '''
Created on 31 Oct 2012
@author: kreczko
'''
import unittest
from dps.utils.Fitting import RooFitFit, FitData, FitDataCollection
from rootpy.plotting import Hist
from math import sqrt
import numpy as np
N_bkg1 = 9000
N_signal = 1000
N_bkg1_obs = 10000
N_signal_obs = 2000
N_data = N_bkg1_obs + N_signal_obs
mu1, mu2, sigma1, sigma2 = 100, 140, 15, 5
x1 = mu1 + sigma1 * np.random.randn(N_bkg1)
x2 = mu2 + sigma2 * np.random.randn(N_signal)
x1_obs = mu1 + sigma1 * np.random.randn(N_bkg1_obs)
x2_obs = mu2 + sigma2 * np.random.randn(N_signal_obs)
class Test(unittest.TestCase):
    """Tests for RooFitFit: template normalisation and fitted signal/background
    yields on a simple two-Gaussian toy dataset.

    NOTE(review): the Fill calls below rely on ``map()`` being eager; under
    Python 3 ``map`` is lazy and the histograms would stay empty. This suite
    appears to target Python 2 / rootpy-era ROOT -- confirm before porting.
    """
    def setUp(self):
        # create histograms (100 bins over the fit range [40, 200])
        h_bkg1_1 = Hist(100, 40, 200, title='Background')
        h_signal_1 = h_bkg1_1.Clone(title='Signal')
        h_data_1 = h_bkg1_1.Clone(title='Data')
        # fill the histograms with our distributions
        map(h_bkg1_1.Fill, x1)
        map(h_signal_1.Fill, x2)
        map(h_data_1.Fill, x1_obs)
        map(h_data_1.Fill, x2_obs)
        histograms_1 = {'signal': h_signal_1,
                        'bkg1': h_bkg1_1,
                        # 'data': h_data_1
                        }
        fit_data_1 = FitData(h_data_1, histograms_1, fit_boundaries=(40, 200))
        self.single_fit_collection = FitDataCollection()
        self.single_fit_collection.add( fit_data_1 )
        # self.roofitFitter = RooFitFit(histograms_1, dataLabel='data', fit_boundries=(40, 200))
        self.roofitFitter = RooFitFit(self.single_fit_collection)
    def tearDown(self):
        pass
    def test_normalisation(self):
        # The fitter's bookkeeping should reproduce the generated event counts
        # within Poisson (sqrt(N)) fluctuations.
        normalisation = self.roofitFitter.normalisation
        self.assertAlmostEqual(normalisation["data"], N_data, delta=sqrt(N_data))
        self.assertAlmostEqual(normalisation["bkg1"], N_bkg1, delta=sqrt(N_bkg1))
        self.assertAlmostEqual(normalisation["signal"], N_signal, delta=sqrt(N_signal))
    def test_signal_result(self):
        # Fitted yields should match the observed counts within 2 sigma of the
        # reported fit uncertainty.
        self.roofitFitter.fit()
        results = self.roofitFitter.readResults()
        self.assertAlmostEqual(N_signal_obs, results['signal'][0], delta=2 * results['signal'][1])
        self.assertAlmostEqual(N_bkg1_obs, results['bkg1'][0], delta=2 * results['bkg1'][1])
    def test_constraints(self):
        # Re-run the fit with relative normalisation constraints applied; the
        # fitted yields should still recover the observed counts.
        self.single_fit_collection.set_normalisation_constraints({'signal': 0.8, 'bkg1': 0.5})
        self.roofitFitter = RooFitFit(self.single_fit_collection)
        # self.roofitFitter.set_fit_constraints({'signal': 0.8, 'bkg1': 0.5})
        self.roofitFitter.fit()
        results = self.roofitFitter.readResults()
        self.assertAlmostEqual(N_signal_obs, results['signal'][0], delta=2 * results['signal'][1])
        self.assertAlmostEqual(N_bkg1_obs, results['bkg1'][0], delta=2 * results['bkg1'][1])
    # def test_relative_error(self):
    #     results = self.roofitFitter.readResults()
    #     self.roofitFitter.saved_result.Print("v");
    #     self.assertLess(results['signal'][1]/results['signal'][0], 0.1)
    #     self.assertLess(results['bkg1'][1]/results['bkg1'][0], 0.1)
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testTemplates']
    unittest.main()
| true |
01b012c1cb56604e6bd843ddb11a03cd1cbe7eda | Python | hugoladret/submissionJHEPC20 | /fig/generate_cloud.py | UTF-8 | 937 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Hugo Ladret
This file can be used to generate the MC.png image
MotionClouds library can be installed with a simple ' pip install MotionClouds '
paper : https://journals.physiology.org/doi/full/10.1152/jn.00737.2011
"""
import MotionClouds as mc
import numpy as np
import imageio
def generate_cloud(theta, b_theta, sf_0, N_X, N_Y, seed, contrast=1):
    """Render one static Motion Cloud frame.

    :param theta: mean orientation of the Gabor envelope (radians)
    :param b_theta: orientation bandwidth (radians)
    :param sf_0: mean spatial frequency (also used as the frequency bandwidth)
    :param N_X, N_Y: frame size in pixels
    :param seed: RNG seed for the random-phase cloud
    :param contrast: contrast passed to mc.rectif
    :return: 2-D array (N_X, N_Y), the single rectified frame
    """
    # Frequency grids for a single-frame (static) stimulus.
    fx, fy, ft = mc.get_grids(N_X, N_Y, 1)
    # Static Gabor envelope: all motion parameters are zero.
    envelope = mc.envelope_gabor(fx, fy, ft,
                                 V_X=0., V_Y=0., B_V=0.,
                                 sf_0=sf_0, B_sf=sf_0,
                                 theta=theta, B_theta=b_theta)
    cloud = mc.random_cloud(envelope, seed=seed)
    frame = mc.rectif(cloud, contrast=contrast)
    # Drop the singleton time axis.
    return frame[:, :, 0]


if __name__ == "__main__" :
    # Demo: oblique, narrow-band cloud saved next to this script.
    im = generate_cloud(np.pi/4, np.pi/36, .1, 512, 512, 42)
    imageio.imsave('./MC.png', im)
d9742adcc82423db27e0bc43ccc4dd1a4228b4e2 | Python | pengliang1226/model_procedure | /feature_preprocess/Encoding.py | UTF-8 | 6,513 | 3.1875 | 3 | [] | no_license | # encoding: utf-8
"""
@author: pengliang.zhao
@time: 2020/12/7 11:09
@file: Encoding.py
@desc: 特征编码
"""
from typing import List
from category_encoders import OrdinalEncoder, OneHotEncoder, HashingEncoder, HelmertEncoder, SumEncoder, \
TargetEncoder, MEstimateEncoder, JamesSteinEncoder, WOEEncoder, LeaveOneOutEncoder, CatBoostEncoder
from pandas import DataFrame, Series
from sklearn.base import TransformerMixin
class FeatureEncoding(TransformerMixin):
    """Scikit-learn style wrapper around the ``category_encoders`` package.

    Call one of the ``*_Encoding()`` configuration methods first to pick the
    underlying encoder, then use :meth:`fit` / :meth:`transform`.
    """

    def __init__(self, cols: List = None):
        """
        Initializer.
        :param cols: list of column names to encode
        """
        self.cols = cols
        self.encoder = None

    def Ordinal_Encoding(self):
        """
        Ordinal encoding maps a categorical variable to a single ordinal
        column containing integers from 1 to the number of categories.
        :return:
        """
        self.encoder = OrdinalEncoder(cols=self.cols)

    def OneHot_Encoding(self, handle_missing='indicator', handle_unknown='indicator'):
        """
        One-hot encoding converts a categorical feature with n_categories
        possible values into n_categories binary features, exactly one of
        which is 1 and all others 0.
        :param handle_missing: default 'value' replaces missing values with
            all zeros; 'indicator' adds an extra column for missing values
        :param handle_unknown: default 'value' replaces unknown values with
            all zeros; 'indicator' adds an extra column for unknown values
        :return:
        """
        self.encoder = OneHotEncoder(cols=self.cols, handle_missing=handle_missing, handle_unknown=handle_unknown)

    def Hashing_Encoding(self, n_components: int = 8):
        """
        Hash encoding maps an arbitrary number of categories onto a fixed
        number of output variables via a hash function. Collisions between
        features are possible. The size/complexity of the encoder does not
        grow with the number of categories.
        :param n_components: number of bits (output columns) used to
            represent the feature
        :return:
        """
        self.encoder = HashingEncoder(cols=self.cols, n_components=n_components)

    def Helmert_Encoding(self, handle_missing='indicator', handle_unknown='indicator'):
        """
        Helmert encoding: each value of the categorical feature corresponds
        to a row of the Helmert matrix.
        :param handle_missing: default 'value' replaces missing values with
            all zeros; 'indicator' adds an extra column for missing values
        :param handle_unknown: default 'value' replaces unknown values with
            all zeros; 'indicator' adds an extra column for unknown values
        :return:
        """
        self.encoder = HelmertEncoder(cols=self.cols, handle_unknown=handle_unknown, handle_missing=handle_missing)

    def Devaition_Encoding(self, handle_missing='indicator', handle_unknown='indicator'):
        """
        Deviation (sum) encoding. After encoding, a linear model's
        coefficients reflect the difference between the dependent variable's
        mean for a given category value and its global mean.
        :param handle_missing: default 'value' replaces missing values with
            all zeros; 'indicator' adds an extra column for missing values
        :param handle_unknown: default 'value' replaces unknown values with
            all zeros; 'indicator' adds an extra column for unknown values
        :return:
        """
        self.encoder = SumEncoder(cols=self.cols, handle_missing=handle_missing, handle_unknown=handle_unknown)

    def Target_Encoding(self, min_samples_leaf: int = 1, smoothing: float = 1.0):
        """
        Target encoding encodes a categorical variable using not just the
        feature value itself but the corresponding target variable.
        For classification: replaces the category with a blend of the
        posterior probability of the target given that category value and
        the prior probability of the target over all training data.
        For continuous targets: replaces the category with a blend of the
        conditional target mean given that category value and the global
        target mean over all training data.
        Strongly depends on the target distribution, but greatly reduces the
        number of generated features.
        :param min_samples_leaf:
        :param smoothing:
        :return:
        """
        self.encoder = TargetEncoder(cols=self.cols, min_samples_leaf=min_samples_leaf, smoothing=smoothing)

    def MEstimate_Encoding(self, m: float = 1.0, sigma: float = 0.05, randomized: bool = False):
        """
        M-estimate encoding is a simplified version of target encoding.
        :param m:
        :param sigma:
        :param randomized:
        :return:
        """
        self.encoder = MEstimateEncoder(cols=self.cols, m=m, sigma=sigma, randomized=randomized)

    def JamesStein_Encoding(self, model: str = 'independent', sigma: float = 0.05, randomized: bool = False):
        """
        James-Stein encoding, another target-based method that also balances
        the prior probability against the observed conditional probability
        via a parameter B. Unlike target encoding and M-estimate encoding,
        it balances the two probabilities using the variance ratio rather
        than the sample size.
        :param model:
        :param sigma:
        :param randomized:
        :return:
        """
        self.encoder = JamesSteinEncoder(cols=self.cols, model=model, sigma=sigma, randomized=randomized)

    def WOE_Encoding(self, regularization: float = 1.0, sigma: float = 0.05, randomized: bool = False):
        """
        Weight-of-evidence (WOE) encoding.
        :param regularization:
        :param sigma:
        :param randomized:
        :return:
        """
        self.encoder = WOEEncoder(cols=self.cols, regularization=regularization, randomized=randomized, sigma=sigma)

    def LeaveOneOut_Encoding(self, sigma: float = 0.05):
        """
        Leave-one-out encoding.
        :param sigma:
        :return:
        """
        self.encoder = LeaveOneOutEncoder(cols=self.cols, sigma=sigma)

    def CatBoost_Encoding(self, sigma: float = None, a: float = 1):
        """
        CatBoost is a tree-based gradient-boosting model that performs very
        well on datasets with many categorical features.
        Shuffle the training data randomly before using the CatBoost
        encoder: the encoding is based on a notion of "time", i.e. the order
        of observations in the dataset.
        :param sigma:
        :param a:
        :return:
        """
        self.encoder = CatBoostEncoder(cols=self.cols, a=a, sigma=sigma)

    def fit(self, X: DataFrame, y: Series = None):
        """
        Fit the configured encoder (one of the *_Encoding methods must have
        been called first, otherwise ``self.encoder`` is still None).
        :param X:
        :param y:
        :return:
        """
        if y is None:
            self.encoder.fit(X)
        else:
            self.encoder.fit(X, y)

    def transform(self, X: DataFrame):
        """
        Transform ``X`` with the fitted encoder.
        :param X:
        :return:
        """
        res = self.encoder.transform(X)
        return res
| true |
2d121bc009c9feec59b0bb2279ee467f76ed11c0 | Python | qq184861643/pytorch-CapsNet | /PrimaryLayer.py | UTF-8 | 1,158 | 2.734375 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import torch
import torch.nn as nn
import numpy as np
from utilFuncs import squash
# In[3]:
class PrimaryLayer(nn.Module):
    """Primary capsule layer of a CapsNet.

    Applies one convolution and regroups the resulting feature maps into
    capsule vectors of length ``caps_dims``, each squashed along the capsule
    dimension.
    """

    def __init__(self, in_channels=256, out_channles=256, kernel_size=5, stride=1, caps_dims=8):
        # NOTE(review): 'out_channles' is misspelled but kept as-is for
        # backward compatibility with keyword callers.
        super(PrimaryLayer, self).__init__()
        self.in_channels = in_channels
        self.out_channles = out_channles
        self.kernel_size = kernel_size
        self.stride = stride
        self.caps_dims = caps_dims
        self.capsules = nn.Conv2d(in_channels=in_channels,
                                  out_channels=out_channles,
                                  kernel_size=kernel_size,
                                  stride=stride)

    def forward(self, x):
        """
        input:
            x: feature tensor, NCHW layout as required by Conv2d
        output:
            squashed capsules of shape [batch, num_capsules, 1, caps_dims, 1]
        """
        conv_out = self.capsules(x)
        # Regroup channels/positions into capsule vectors of caps_dims entries.
        capsules = conv_out.view(x.size(0), -1, 1, self.caps_dims, 1)
        return squash(capsules, axis=-2)
| true |
b136121e505a7449654d752d6687037d2de05b5d | Python | luoyawen/Python_learning | /杂乱的爬/爬取_百度百科.py | UTF-8 | 608 | 2.75 | 3 | [] | no_license | import urllib.request as u
import re
from bs4 import BeautifulSoup
def main():
    """Fetch a Baidu Baike page and print every 'view' link it contains.

    Prints one "<link text> -> <absolute url>" line per matching anchor.
    """
    url = 'http://baike.baidu.com/view/284853.htm'
    req = u.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36')
    # BUGFIX: open the prepared Request (which carries the User-Agent header)
    # instead of the bare URL string -- the header was silently dropped before.
    response = u.urlopen(req)
    html = response.read().decode('utf-8')
    soup = BeautifulSoup(html, 'lxml')
    # Anchors whose href contains "view" are other encyclopedia entries.
    for each in soup.find_all(href=re.compile("view")):
        print(' -> '.join([each.text, ''.join(["https://baike.baidu.com", each['href']])]))
if __name__ == "__main__":
    main()
| true |
d6bb22726850bf00d2609a3d29ab6df3c739ea84 | Python | jchernjr/code | /advent2021/day2.py | UTF-8 | 717 | 3.734375 | 4 | [] | no_license | if __name__ == "__main__":
with open("day2input.txt", "r") as f:
lines = f.readlines()
commands = [s.split(" ") for s in lines] # should be ('forward'|'up'|'down', number) list
x_pos = 0 # horizontal (forward) distance
depth = 0 # depth
for direction, dist_str in commands:
dist = int(dist_str)
if direction == 'forward':
x_pos += dist
elif direction == 'up':
depth -= dist
elif direction == 'down':
depth += dist
else:
print("unknown direction: " + str(direction))
print(f"Horz: {x_pos}, Depth: {depth}")
print(x_pos * depth)
| true |
02fce433068642b0fc94eba7b3f0c9685696d60b | Python | maedoc/epsilon_free_inference | /demos/lotka_volterra_demo/lv_main.py | UTF-8 | 7,935 | 2.796875 | 3 | [
"BSD-2-Clause"
] | permissive | """
Lotka volterra demo, main file. Sets up the simulations. Should be imported by all other lotka volterra demo files.
"""
from __future__ import division
import time
import numpy as np
import numpy.random as rng
import matplotlib
import matplotlib.pyplot as plt
import util.MarkovJumpProcess as mjp
import util.helper as helper
# parameters that globally govern the simulations
init = [50, 100]      # initial populations; presumably [predators, prey] per the plot labels -- TODO confirm
dt = 0.2              # sampling interval for the recorded states
duration = 30         # total simulated time
true_params = [0.01, 0.5, 1.0, 0.01]  # ground-truth Lotka-Volterra rates used to create "observed" data
log_prior_min = -5    # uniform prior bounds in log space for all four rates
log_prior_max = 2
max_n_steps = 10000   # abort a simulation after this many reaction events

# directory names for saving results
datadir = 'demos/lotka_volterra_demo/results/data/'
netsdir = 'demos/lotka_volterra_demo/results/nets/'
plotsdir = 'demos/lotka_volterra_demo/results/plots/'
def calc_summary_stats(states):
    """Reduce a simulated trajectory to a 9-vector of summary statistics.

    Assumes the states are uniformly sampled in time. The statistics are:
    the two means, the two log-variances (log(s^2 + 1)), lag-1 and lag-2
    autocorrelation coefficients of each (standardized) series, and their
    cross-correlation coefficient.
    """
    n_samples = states.shape[0]
    preds = states[:, 0].copy()
    prey = states[:, 1].copy()

    # First two moments of each series.
    mean_x = np.mean(preds)
    mean_y = np.mean(prey)
    var_x = np.var(preds, ddof=1)
    var_y = np.var(prey, ddof=1)

    # Standardize before computing the correlation coefficients.
    preds = (preds - mean_x) / np.sqrt(var_x)
    prey = (prey - mean_y) / np.sqrt(var_y)

    denom = n_samples - 1
    # Order matters: predator lags 1,2 then prey lags 1,2 (matches downstream use).
    autocorrs = [np.dot(series[:-lag], series[lag:]) / denom
                 for series in (preds, prey)
                 for lag in (1, 2)]
    cross = np.dot(preds, prey) / denom

    return np.array([mean_x, mean_y, np.log(var_x + 1), np.log(var_y + 1)]
                    + autocorrs + [cross])
def sim_prior_params(num_sims=1):
    """Draw parameter sets from the prior: uniform in the log domain over
    [log_prior_min, log_prior_max]. Returns a flat 4-vector for a single
    draw, otherwise an array of shape (num_sims, 4)."""
    shape = (4,) if num_sims == 1 else (num_sims, 4)
    draws = rng.rand(*shape)
    return np.exp((log_prior_max - log_prior_min) * draws + log_prior_min)
def calc_dist(stats_1, stats_2):
    """Euclidean distance between two summary-statistic vectors."""
    squared_diffs = (stats_1 - stats_2) ** 2
    return np.sqrt(squared_diffs.sum())
def test_LotkaVolterra(savefile=None):
    """
    Runs and plots a single simulation of the lotka volterra model.

    @param savefile: basename (without extension) under ../nips_2016/figs/lv/
        for saving the figure as a pdf; if None the figure is only shown.
    """
    params = true_params
    #params = sim_prior_params()
    lv = mjp.LotkaVolterra(init, params)
    states = lv.sim_time(dt, duration)
    times = np.linspace(0.0, duration, int(duration / dt) + 1)
    sum_stats = calc_summary_stats(states)
    print sum_stats
    fontsize = 20
    if savefile is not None:
        # LaTeX text rendering and larger fonts only when saving for the paper.
        matplotlib.rcParams.update({'font.size': fontsize})
        matplotlib.rc('text', usetex=True)
        savepath = '../nips_2016/figs/lv/'  # only defined on the save path, used below
    fig = plt.figure()
    plt.plot(times, states[:, 0], lw=3, label='Predators')
    plt.plot(times, states[:, 1], lw=3, label='Prey')
    plt.xlabel('Time')
    plt.ylabel('Population counts')
    plt.ylim([0, 350])
    #plt.title('params = {0}'.format(params))
    plt.legend(loc='upper right', handletextpad=0.5, labelspacing=0.5, borderaxespad=0.5, handlelength=2.0, fontsize=fontsize)
    plt.show(block=False)
    if savefile is not None: fig.savefig(savepath + savefile + '.pdf')
def get_obs_stats():
    """Simulate the Lotka-Volterra model once with the true parameters and
    persist the resulting summary statistics as the 'observed' data that
    later inference targets. Also plots the simulated trajectories."""
    simulator = mjp.LotkaVolterra(init, true_params)
    trajectory = simulator.sim_time(dt, duration)
    helper.save(calc_summary_stats(trajectory), datadir + 'obs_stats.pkl')

    # Visual sanity check of the simulated populations.
    plt.figure()
    time_grid = np.linspace(0.0, duration, int(duration / dt) + 1)
    plt.plot(time_grid, trajectory[:, 0], label='predators')
    plt.plot(time_grid, trajectory[:, 1], label='prey')
    plt.xlabel('time')
    plt.ylabel('counts')
    plt.title('params = {0}'.format(true_params))
    plt.legend(loc='upper right')
    plt.show()
def do_pilot_run():
    """
    Runs a number of simulations, and it calculates and saves the mean and standard deviation of the summary statistics
    across simulations. The intention is to use these to normalize the summary statistics when doing distance-based
    inference, like rejection or mcmc abc. Due to the different scales of each summary statistic, the euclidean distance
    is not meaningful on the original summary statistics. Note that normalization also helps when using mdns, since it
    normalizes the neural net input.
    """
    n_sims = 1000
    stats = []
    i = 1
    # while-loop with manual counter: a simulation that exceeds max_n_steps is
    # retried (continue without incrementing i), so exactly n_sims successful
    # simulations are collected.
    while i <= n_sims:
        params = sim_prior_params()
        lv = mjp.LotkaVolterra(init, params)
        try:
            states = lv.sim_time(dt, duration, max_n_steps=max_n_steps)
        except mjp.SimTooLongException:
            continue
        stats.append(calc_summary_stats(states))
        print 'pilot simulation {0}'.format(i)
        i += 1
    stats = np.array(stats)
    # Per-statistic mean/std across the pilot runs (sample std, ddof=1).
    means = np.mean(stats, axis=0)
    stds = np.std(stats, axis=0, ddof=1)
    helper.save((means, stds), datadir + 'pilot_run_results.pkl')
def sum_stats_hist():
    """
    Runs several simulations with given parameters and plots a histogram of the resulting normalized summary statistics.

    Requires do_pilot_run() to have been executed first (reads
    pilot_run_results.pkl for the normalization constants).
    """
    n_sims = 1000
    sum_stats = []
    i = 1
    pilot_means, pilot_stds = helper.load(datadir + 'pilot_run_results.pkl')
    # Retry failed (too long) simulations so exactly n_sims are collected.
    while i <= n_sims:
        lv = mjp.LotkaVolterra(init, true_params)
        try:
            states = lv.sim_time(dt, duration, max_n_steps=max_n_steps)
        except mjp.SimTooLongException:
            continue
        sum_stats.append(calc_summary_stats(states))
        print 'simulation {0}'.format(i)
        i += 1
    sum_stats = np.array(sum_stats)
    # Normalize each statistic with the pilot-run mean/std.
    sum_stats -= pilot_means
    sum_stats /= pilot_stds
    # One histogram per statistic on a 3x3 grid (9 statistics).
    _, axs = plt.subplots(3, 3)
    nbins = int(np.sqrt(n_sims))
    for i, ax in enumerate(axs.flatten()):
        ax.hist(sum_stats[:, i], nbins, normed=True)
        ax.set_title('stat ' + str(i+1))
    plt.show()
def run_sims_from_prior():
    """
    Runs several simulations with parameters sampled from the prior. Saves the parameters, normalized summary statistics
    and distances with the observed summary statistic. Intention is to use the data for rejection abc and to train mdns.

    Requires obs_stats.pkl and pilot_run_results.pkl to exist (see
    get_obs_stats() and do_pilot_run()).
    """
    num_sims = 100000
    pilot_means, pilot_stds = helper.load(datadir + 'pilot_run_results.pkl')
    obs_stats = helper.load(datadir + 'obs_stats.pkl')
    obs_stats -= pilot_means
    obs_stats /= pilot_stds
    params = []
    stats = []
    dist = []
    # NOTE: unlike the pilot run, failed simulations are skipped (not retried),
    # so fewer than num_sims results may be saved.
    for i in xrange(num_sims):
        prop_params = sim_prior_params()
        lv = mjp.LotkaVolterra(init, prop_params)
        try:
            states = lv.sim_time(dt, duration, max_n_steps=max_n_steps)
        except mjp.SimTooLongException:
            continue
        sum_stats = calc_summary_stats(states)
        # Normalize with the pilot constants so distances are meaningful.
        sum_stats -= pilot_means
        sum_stats /= pilot_stds
        params.append(prop_params)
        stats.append(sum_stats)
        dist.append(calc_dist(sum_stats, obs_stats))
        print 'simulation {0}, distance = {1}'.format(i, dist[-1])
    params = np.array(params)
    stats = np.array(stats)
    dist = np.array(dist)
    # Timestamped filename so repeated runs never overwrite each other.
    filename = datadir + 'sims_from_prior_{0}.pkl'.format(time.time())
    helper.save((params, stats, dist), filename)
def load_sims_from_prior(n_files=12):
    """Loads the huge file(s) that store the results from simulations from the prior."""
    all_params = np.empty([0, 4])
    all_stats = np.empty([0, 9])
    all_dists = np.empty([0])
    for file_index in xrange(n_files):
        chunk_params, chunk_stats, chunk_dists = helper.load(
            datadir + 'sims_from_prior_{0}.pkl'.format(file_index))
        all_params = np.concatenate([all_params, chunk_params], axis=0)
        all_stats = np.concatenate([all_stats, chunk_stats], axis=0)
        all_dists = np.concatenate([all_dists, chunk_dists], axis=0)
    # Sanity check: one stats row and one distance per parameter set.
    assert all_params.shape[0] == all_stats.shape[0]
    assert all_params.shape[0] == all_dists.shape[0]
    return all_params, all_stats, all_dists
| true |
2f0dedfd30215235dd264a0cdf3b54ea12d298cc | Python | KadinTucker/Hunters | /map_editor.py | UTF-8 | 1,156 | 2.921875 | 3 | [] | no_license | import pygame
from pygame.locals import *
import sys
import objects
import math
pygame.init()
display = pygame.display.set_mode((1000, 800))  # editor window surface
# Palette of placeable enemy templates; objs[0] is the current selection.
objs = [objects.bandit1, objects.bandit2]
# Placed enemies; tuple layout mirrors objs entries plus a position --
# presumably (id, sprite-data, position, extra). TODO confirm against loader.
enemies = []
def save():
    """Dump the placed enemies to savedworld.txt."""
    # BUGFIX: use a context manager so the file is flushed and closed; the
    # previous open() without close() could leave the save truncated.
    with open('savedworld.txt', 'w') as world:
        world.write(str(enemies))
# Main editor loop: left click places the selected template, right click
# deletes nearby enemies, TAB cycles the palette, S saves.
while True:
    display.fill((75, 35, 35))
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == MOUSEBUTTONDOWN:
            mouse = pygame.mouse.get_pos()
            if event.button == 1:
                # Place the currently selected template at the cursor.
                enemies.append((objs[0][0], objs[0][1], mouse, objs[0][2]))
            elif event.button == 3:
                # BUGFIX: iterate over a copy -- removing from the list being
                # iterated skips the element after each removal, leaving some
                # in-radius enemies undeleted.
                for i in list(enemies):
                    # 48px radius around the sprite center (32px offset).
                    if math.hypot(mouse[0] - (i[2][0] + 32), mouse[1] - (i[2][1] + 32)) <= 48:
                        enemies.remove(i)
        elif event.type == KEYDOWN:
            if event.key == K_TAB:
                # Rotate the palette: move the current selection to the back.
                objs.append(objs[0])
                objs.remove(objs[0])
            elif event.key == K_s:
                save()
    for i in enemies:
        # NOTE(review): loads the image from disk every frame for every enemy;
        # consider caching loaded surfaces if the editor starts to lag.
        display.blit(pygame.image.load(i[1][0]), i[2])
    pygame.display.update()
| true |
70825887ff8e44517cbfc58318f1f57d9aba0f6e | Python | jmbaker94/qoc | /qoc/core/common.py | UTF-8 | 11,035 | 2.6875 | 3 | [
"MIT"
] | permissive | """
common.py - This module defines methods that are used by
multiple core functionalities.
"""
import numpy as np
from qoc.standard import(complex_to_real_imag_flat,
real_imag_to_complex_flat)
def clip_control_norms(max_control_norms, controls):
    """
    Rescale, in place, every control entry whose magnitude exceeds its
    per-control cap so that its magnitude equals the cap (phase/sign kept).

    Args:
    max_control_norms :: ndarray (control_count) - maximum allowed magnitude
        for each control, applied at every time step
    controls :: ndarray - the controls to be clipped, one column per control

    Returns: none (controls is modified in place)
    """
    for control_index, cap in enumerate(max_control_norms):
        column = controls[:, control_index]  # view into `controls`
        magnitudes = np.abs(column)
        # Entries strictly above the cap get scaled down onto it.
        too_big = np.nonzero(np.less(cap, magnitudes))
        rescaled = np.divide(column[too_big], magnitudes[too_big]) * cap
        column[too_big] = rescaled
    #ENDFOR
def gen_controls_cos(complex_controls, control_count, control_step_count,
                     evolution_time,
                     max_control_norms, periods=10.):
    """
    Create a discrete control set shaped like a cosine wave.

    Args:
    complex_controls :: bool - whether the controls should be complex
    control_count :: int - number of controls handed to the hamiltonian per step
    control_step_count :: int - number of discretized time steps
    evolution_time :: float - duration of the system evolution (unused here)
    max_control_norms :: ndarray (control_count) - per-control maximum norm
    periods :: float - number of full cosine periods over the evolution

    Returns:
    controls :: ndarray (control_step_count, control_count)
    """
    # Angular step so that `periods` full periods fit into the step count.
    angular_freq = np.divide(2 * np.pi, np.divide(control_step_count, periods))
    steps = np.arange(control_step_count)
    controls = np.zeros((control_step_count, control_count))
    for control_index in range(control_count):
        cap = max_control_norms[control_index]
        # Cosine about y=0 with amplitude half the maximum norm.
        wave = np.divide(cap, 2) * np.cos(angular_freq * steps)
        # Exact zeros are replaced by a small nonzero value so no control
        # starts out identically zero.
        wave = np.where(wave, wave, cap * 1e-1)
        controls[:, control_index] = wave
    #ENDFOR
    # Mirror the cosine into the imaginary part and normalize.
    if complex_controls:
        controls = (controls - 1j * controls) / np.sqrt(2)
    return controls
def gen_controls_flat(complex_controls, control_count, control_step_count,
                      evolution_time,
                      max_control_norms, periods=10.):
    """
    Create a discrete control set shaped like a flat line at one tenth of
    each control's maximum norm. Signature mirrors gen_controls_cos; the
    `evolution_time` and `periods` arguments are unused.
    """
    controls = np.zeros((control_step_count, control_count))
    for control_index in range(control_count):
        small_value = max_control_norms[control_index] * 1e-1
        controls[:, control_index] = np.repeat(small_value, control_step_count)
    #ENDFOR
    # Mirror the flat line into the imaginary part and normalize.
    if complex_controls:
        controls = (controls - 1j * controls) / np.sqrt(2)
    return controls
_NORM_TOLERANCE = 1e-10
def initialize_controls(complex_controls,
                        control_count,
                        control_step_count,
                        evolution_time,
                        initial_controls, max_control_norms,):
    """
    Validate the user-supplied `initial_controls` against
    `max_control_norms`, generating defaults for whichever was omitted.

    Args:
    complex_controls :: bool - whether the controls should be complex
    control_count :: int - number of controls per step
    control_step_count :: int - number of pulse steps
    evolution_time :: float - duration of the pulse
    initial_controls :: ndarray (control_step_count, control_count) or None
    max_control_norms :: ndarray (control_count) or None

    Returns:
    controls :: ndarray - the initial controls
    max_control_norms :: ndarray - the maximum control norms

    Raises ValueError when initial_controls disagree with complex_controls
    or exceed max_control_norms.
    """
    if max_control_norms is None:
        max_control_norms = np.ones(control_count)

    # Nothing supplied: fall back to a small flat pulse.
    if initial_controls is None:
        controls = gen_controls_flat(complex_controls, control_count,
                                     control_step_count,
                                     evolution_time, max_control_norms)
        return controls, max_control_norms

    # Guard: the supplied controls must match the requested dtype family.
    supplied_complex = np.iscomplexobj(initial_controls)
    if complex_controls and not supplied_complex:
        raise ValueError("The program expected that the initial_controls specified by "
                         "the user conformed to complex_controls, but "
                         "the program found that the initial_controls were not complex "
                         "and complex_controls was set to True.")
    if (not complex_controls) and supplied_complex:
        raise ValueError("The program expected that the initial_controls specified by "
                         "the user conformed to complex_controls, but "
                         "the program found that the initial_controls were complex "
                         "and complex_controls was set to False.")

    # Guard: every step must respect the norm caps (within tolerance).
    for control_step, step_controls in enumerate(initial_controls):
        if not (np.less_equal(np.abs(step_controls), max_control_norms + _NORM_TOLERANCE).all()):
            raise ValueError("The program expected that the initial_controls specified by "
                             "the user conformed to max_control_norms, but the program "
                             "found a conflict at initial_controls[{}]={} and "
                             "max_control_norms={}"
                             "".format(control_step, step_controls, max_control_norms))
    #ENDFOR
    return initial_controls, max_control_norms
def slap_controls(complex_controls, controls, controls_shape,):
    """
    Convert controls from optimizer format (flat, real) back to cost-function
    format (shaped, possibly complex).

    Args:
    complex_controls :: bool - whether the controls are complex
    controls :: ndarray - flat optimizer-format controls
    controls_shape :: tuple - target shape

    Returns:
    new_controls :: ndarray - reshaped (and complexified) controls
    """
    # Real/imag pairs back to C when the controls are complex.
    if complex_controls:
        controls = real_imag_to_complex_flat(controls)
    return np.reshape(controls, controls_shape)
def strip_controls(complex_controls, controls):
    """
    Convert controls from cost-function format (shaped, possibly complex) to
    optimizer format (flat, real).

    Args:
    complex_controls :: bool - whether the controls are complex
    controls :: ndarray - shaped controls

    Returns:
    new_controls :: ndarray - flattened (and realified) controls
    """
    flat = controls.flatten()
    # Complex values are split into real/imag pairs (R^2) for the optimizer.
    return complex_to_real_imag_flat(flat) if complex_controls else flat
### MODULE TESTS ###
_BIG = 100


def _test():
    """
    Run tests on the module's methods.
    """
    from qoc.models.dummy import Dummy

    # Round-trip strip/slap with complex controls.
    pstate = Dummy()
    pstate.complex_controls = True
    shape_range = np.arange(_BIG) + 1
    for step_count in shape_range:
        for control_count in shape_range:
            pstate.controls_shape = controls_shape = (step_count, control_count)
            pstate.max_control_norms = np.ones(control_count) * 2
            controls = np.random.rand(*controls_shape) + 1j * np.random.rand(*controls_shape)
            # BUGFIX: strip_controls/slap_controls take the complex flag (and,
            # for slap_controls, the target shape) -- not the pstate object.
            stripped_controls = strip_controls(pstate.complex_controls, controls)
            assert(stripped_controls.ndim == 1)
            assert(not (stripped_controls.dtype in (np.complex64, np.complex128)))
            transformed_controls = slap_controls(pstate.complex_controls, stripped_controls,
                                                 pstate.controls_shape)
            assert(np.allclose(controls, transformed_controls))
            assert(controls.shape == transformed_controls.shape)
    #ENDFOR

    # Same round trip with real controls.
    pstate.complex_controls = False
    for step_count in shape_range:
        for control_count in shape_range:
            pstate.controls_shape = controls_shape = (step_count, control_count)
            pstate.max_control_norms = np.ones(control_count)
            controls = np.random.rand(*controls_shape)
            stripped_controls = strip_controls(pstate.complex_controls, controls)
            assert(stripped_controls.ndim == 1)
            assert(not (stripped_controls.dtype in (np.complex64, np.complex128)))
            transformed_controls = slap_controls(pstate.complex_controls, stripped_controls,
                                                 pstate.controls_shape)
            assert(np.allclose(controls, transformed_controls))
            assert(controls.shape == transformed_controls.shape)
    #ENDFOR

    # Test control clipping.
    # BUGFIX: the clipping helper is named clip_control_norms, not the
    # nonexistent clip_controls this test previously called.
    for step_count in shape_range:
        for control_count in shape_range:
            controls_shape = (step_count, control_count)
            max_control_norms = np.ones(control_count)
            controls = np.random.rand(*controls_shape) * 2
            clip_control_norms(max_control_norms, controls)
            for step_controls in controls:
                assert(np.less_equal(step_controls, max_control_norms).all())
            controls = np.random.rand(*controls_shape) * -2
            clip_control_norms(max_control_norms, controls)
            for step_controls in controls:
                assert(np.less_equal(-max_control_norms, step_controls).all())
        #ENDFOR
    #ENDFOR

    # Control norm clipping on a hand-computed complex example.
    controls = np.array(((1+2j, 7+8j), (3+4j, 5), (5+6j, 10,), (1-3j, -10),))
    max_control_norms = np.array((7, 8,))
    expected_clipped_controls = np.array(((1+2j, (7+8j) * np.divide(8, np.sqrt(113))),
                                          (3+4j, 5),
                                          ((5+6j) * np.divide(7, np.sqrt(61)), 8,),
                                          (1-3j, -8)))
    clip_control_norms(max_control_norms, controls)
    assert(np.allclose(controls, expected_clipped_controls))


if __name__ == "__main__":
    _test()
| true |
505c0d03dc52750dfc72242c7333a0d5dbbcbd63 | Python | blester125/LAFF_Cython | /src/test_laff_copy.py | UTF-8 | 1,521 | 2.734375 | 3 | [] | no_license | import unittest
import numpy as np
from .copy import copy
class LaffCopyTest(unittest.TestCase):
    """Tests for laff copy() across row/column vector orientations and
    mismatched sizes (test method names, including the 'worong' typos, are
    kept for stable unittest discovery/reporting)."""

    def setUp(self):
        # x and y share a random length so copies between them are legal.
        length = np.random.randint(1, 20)
        self.x = np.random.uniform(0, 10, length)
        self.x = np.reshape(self.x, [1, length])
        self.y = np.random.uniform(0, 10, length)
        self.y = np.reshape(self.y, [1, length])
        # z gets a different (but non-negative) length so size checks fail.
        offset = 0
        while offset == 0 or length + offset < 0:
            offset = np.random.randint(-5, 6)
        self.z = np.random.uniform(0, 10, length + offset)
        self.z = np.reshape(self.z, [1, length + offset])

    def test_column_column_copy(self):
        np.testing.assert_allclose(copy(self.x, self.y), self.x)

    def test_column_row_copy(self):
        np.testing.assert_allclose(copy(self.x, self.y.T), self.x.T)

    def test_row_column_copy(self):
        np.testing.assert_allclose(copy(self.x.T, self.y), self.x)

    def test_row_row_copy(self):
        np.testing.assert_allclose(copy(self.x.T, self.y.T), self.x.T)

    def test_column_column_worong_size(self):
        self.assertRaises(Exception, copy, self.x, self.z)

    def test_column_row_worong_size(self):
        self.assertRaises(Exception, copy, self.x, self.z.T)

    def test_row_column_worong_size(self):
        self.assertRaises(Exception, copy, self.x.T, self.z)

    def test_row_row_worong_size(self):
        self.assertRaises(Exception, copy, self.x.T, self.z.T)


if __name__ == "__main__":
    unittest.main()
| true |
9715f9fafab122eb55d6ce4b819c8fb6b076c816 | Python | ayushi8795/Python-Training | /PythonTask4/5.py | UTF-8 | 290 | 3.171875 | 3 | [] | no_license | def function():
l = []
l2 =[]
l =input("Enter space separated input: ").split()
for a in l:
l1=[]
for p in a:
x = p.capitalize()
l1.append(x)
y = "".join(l1)
l2.append(y)
return (" ".join(l2))
print(function()) | true |
def bubble_sort(list):
    """Sort `list` in ascending order in place using bubble sort, and return it.

    The parameter name `list` shadows the builtin; it is kept for backward
    compatibility with keyword callers.
    """
    n = len(list)
    for i in range(n - 1):
        swapped = False
        # After pass i the largest i+1 elements are in place at the end.
        for j in range(n - i - 1):
            if list[j + 1] < list[j]:
                # Tuple swap instead of a temp variable.
                list[j], list[j + 1] = list[j + 1], list[j]
                swapped = True
        if not swapped:
            # No swaps in a full pass: already sorted, stop early.
            break
    return list
| true |
bc6b995c8662307a08ead6c2d16a25f45264f31b | Python | max65536/CloudServer | /Client/oldcode/md5_check.py | UTF-8 | 920 | 3.1875 | 3 | [] | no_license | import hashlib
def md5_check(file_list, file_dir):
    """Return the hex MD5 digest of `file_dir` concatenated with every name
    in `file_list` (in order); prints the file count and the digest."""
    print('The number of files are is: %d' % len(file_list))
    digest = hashlib.md5(file_dir.encode('ascii'))
    for name in file_list:
        digest.update(name.encode('ascii'))
    hex_digest = digest.hexdigest()
    print('MD5 is: %s' % (hex_digest,))
    return hex_digest


def md5_file_content_check(file_list, file_dir):
    """Compute the MD5 of each file's contents under `file_dir`, write the
    digests (one per line) to md5_client01_file_content.txt in that
    directory, and return them as a list."""
    digests = []
    for name in file_list:
        with open(file_dir + '/' + name, 'rb') as f:
            digests.append(hashlib.md5(f.read()).hexdigest())
    with open(file_dir + '/md5_client01_file_content.txt', 'w') as f:
        for line in digests:
            f.write(line + '\n')
    return digests
| true |
0cf2c6d09cd1445f70c92b16207d99db5ccb501e | Python | mrhhug/CS4520 | /Assignment_2/Loan/run.py | UTF-8 | 583 | 2.65625 | 3 | [] | no_license | #!/usr/bin/python2
import pdb
'''
@author: Michael Hug hmichae4@students.kennesaw.edu
Created for Dr Setzer's Fall 2013 4520 Distributed Systems Development
Assignment 2
9 September 2013
'''
import loanClass
import sys
# Build a loan from the command line: years, principal, rate[, optional 4th arg].
if (len(sys.argv)==4):
    loan=loanClass.Loanclass(int(float(sys.argv[1])),float(sys.argv[2]),float(sys.argv[3]))
elif (len(sys.argv)==5):
    loan=loanClass.Loanclass(int(float(sys.argv[1])),float(sys.argv[2]),float(sys.argv[3]),float(sys.argv[4]))
# NOTE(review): this hard-coded loan unconditionally overwrites the one built
# from argv above (and is the only definition when argv has neither 4 nor 5
# entries) -- looks like leftover debug code; confirm before relying on argv.
loan=loanClass.Loanclass(int(float(20)),float(10000),float(10))
#print loan.interestAccrued(8)
print loan.remainingBalance(8)
sys.exit(0)
| true |
f3df94251f99b87844c2d2849da7150dfcad16b2 | Python | bennymuller/glTools | /data/apfData.py | UTF-8 | 4,018 | 2.84375 | 3 | [] | no_license | import maya.cmds as cmds
import os
import data
class ApfData(data.Data):
    """
    Apf data class definition.

    Parses Massive-style .apf files into self._data as
    {character_name: {object_name: [float, ...]}}.
    """

    def __init__(self, apfFile=''):
        """
        Apf data class initializer
        @param apfFile: Apf file to load.
        @type apfFile: str
        """
        # Execute Super Class Initializer
        super(ApfData, self).__init__()
        # Initialize Data Type
        self.dataType = 'ApfData'
        if apfFile:
            self.read(apfFile)
        # Channel order used when applying the parsed values as animation.
        self.apfChan = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']

    def read(self, apfFile):
        """
        Parse an apf file into self._data.
        @param apfFile: Apf file to load.
        @type apfFile: str
        """
        # Check File
        if not os.path.isfile(apfFile):
            raise Exception('Apf file "' + apfFile + '" is not a valid path!')

        # BUGFIX: use a context manager so the file handle is always closed
        # (it was previously opened and never closed).
        with open(apfFile, 'r') as f:
            # Sort Data
            char = ''
            for line in f:
                # Get Line Data
                lineData = line.split()
                # Skip Empty Lines
                if not lineData:
                    continue
                # "BEGIN <character>" starts a new character section.
                if lineData[0] == 'BEGIN':
                    char = lineData[1]
                    self._data[char] = {}
                    continue
                # Ignore data lines before any BEGIN was seen.
                if not char:
                    continue
                # Parse Line Data: "<object> <float> <float> ..."
                lineObj = lineData[0]
                lineVal = [float(i) for i in lineData[1:]]
                self._data[char][lineObj] = lineVal
def processDir(srcDir):
    """
    Convert all apf files in a specified directory to ApfData object files (*.bpf)
    @param srcDir: Source directory to process apf files for.
    @type srcDir: str
    @return: list of written .bpf file paths
    """
    # Check Source Directory
    if not os.path.isdir(srcDir):
        raise Exception('Source directory "' + srcDir + '" is not a valid path!')
    # Start Timer
    timer = cmds.timerX()
    # Find all APF files (sorted so frames are processed in order)
    apfFiles = [i for i in os.listdir(srcDir) if i.endswith('.apf')]
    apfFiles.sort()
    bpfFiles = []
    for apfFile in apfFiles:
        # Check File
        srcFile = srcDir + '/' + apfFile
        if not os.path.isfile(srcFile):
            raise Exception('Apf file "' + srcFile + '" is not a valid path!')
        print apfFile
        # Parse the apf and save it next to the source as a .bpf
        apfData = ApfData(srcFile)
        bpfFile = apfData.save(srcFile.replace('.apf', '.bpf'))
        bpfFiles.append(bpfFile)
    # Print Result
    totalTime = cmds.timerX(st=timer)
    print 'Total time: ' + str(totalTime)
    # Return Result
    return bpfFiles
def loadAnim(srcDir, agentNS):
    """
    Load animation from apf file data
    @param srcDir: Source directory to load bpf files from.
    @type srcDir: str
    @param agentNS: Agent namespace to apply animation to.
    @type agentNS: str
    """
    # Validate the source directory.
    if not os.path.isdir(srcDir):
        raise Exception('Source directory "' + srcDir + '" is not a valid path!')
    # Time the import.
    timer = cmds.timerX()
    # Frame indices are encoded in the filenames as "frame.<index>.bpf".
    bpfFiles = [name for name in os.listdir(srcDir) if name.endswith('.bpf')]
    bpfIndex = sorted(int(name.split('.')[1]) for name in bpfFiles)
    apfChan = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    for ind in bpfIndex:
        frame = ApfData().load(srcDir + '/frame.' + str(ind) + '.bpf')
        if agentNS not in frame._data:
            continue
        for item in frame._data[agentNS]:
            node = agentNS + ':' + item
            # Skip items that do not exist in the scene.
            if not cmds.objExists(node):
                continue
            # Only the Hips item carries keyable channel values.
            if item == 'Hips':
                # Translate (0-2) and rotate (3-5) channels in one pass.
                for c in range(6):
                    cmds.setKeyframe(node, at=apfChan[c], t=ind, v=frame._data[agentNS][item][c])
    # Report elapsed time.
    totalTime = cmds.timerX(st=timer)
    print('Total time: ' + str(totalTime))
| true |
e8f5efb4e7bdc0da4baf3db236e0dab42d189d3c | Python | Lycos-Novation/PyEngine4 | /pyengine/common/components/text_component.py | UTF-8 | 1,954 | 2.8125 | 3 | [] | no_license | from pyengine.common.components.component import Component
from pyengine.common.utils import Color
class TextComponent(Component):
def __init__(self, game_object):
super().__init__(game_object)
self.name = "TextComponent"
self.text = ""
self.background_transparent = True
self.background_color = Color.from_rgb(0, 0, 0)
self.font_name = "arial"
self.font_size = 16
self.font_bold = False
self.font_italic = False
self.font_underline = False
self.font_color = Color.from_rgb(0, 0, 0)
self.font_antialias = False
def to_dict(self):
return {
"name": self.name,
"text": self.text,
"background_transparent": self.background_transparent,
"background_color": self.background_color.rgba(),
"font_name": self.font_name,
"font_size": self.font_size,
"font_bold": self.font_bold,
"font_italic": self.font_italic,
"font_underline": self.font_underline,
"font_color": self.font_color.rgba(),
"font_antialias": self.font_antialias
}
@classmethod
def from_dict(cls, game_object, values):
comp = cls(game_object)
comp.text = values.get("text", "")
comp.background_transparent = values.get("background_transparent", True)
comp.background_color = Color.from_rgba(*values.get("background_color", (0, 0, 0, 255)))
comp.font_name = values.get("font_name", "arial")
comp.font_size = values.get("font_size", 16)
comp.font_bold = values.get("font_bold", False)
comp.font_italic = values.get("font_italic", False)
comp.font_underline = values.get("font_underline", False)
comp.font_color = Color.from_rgba(*values.get("font_color", (0, 0, 0, 255)))
comp.font_antialias = values.get("font_antialias", False)
return comp
| true |
95a17dfdffa94bed5764ce4626ed19d0cad58fef | Python | ericyeung/PHY407 | /Lab4/Lab4_q2a.py | UTF-8 | 1,524 | 3.390625 | 3 | [] | no_license | # PHY407, Fall 2015, Lab 4, Q2a
# Author: DUONG, BANG CHI
# Computes the Ising mean-field magnetisation m = tanh(m/T) over a range of
# temperatures with two root-finding schemes (relaxation and "Newton"), and
# plots both the magnetisation curves and the iteration counts.
from numpy import tanh, cosh, linspace
from pylab import figure, subplot, plot, show, title, ylim, xlabel, ylabel, legend
import scipy.optimize
# NOTE(review): scipy.optimize is imported but never used below.
Tmax = 2.0          # largest temperature sampled
points = 1000       # number of temperature samples
accuracy = 1e-6     # convergence tolerance for both methods
mag_relaxation = []
mag_newton = []
iter_relaxation = []
iter_newton = []
temp = linspace (0.01,Tmax,points)
#-------------------------Relaxation method-----------------------
for T in temp:
    m1 = 1.0
    error = 1.0
    iter_num = 0
    while error>accuracy:
        # Fixed-point step m <- tanh(m/T); keep the previous iterate in m2.
        m1,m2 = tanh(m1/T),m1
        # Error estimate eps = (x - x')/(1 - 1/f'(x)) with f'(m) = 1/(T cosh^2(m/T)).
        error = abs((m1-m2)/(1-T*cosh(m1/T)**2))
        iter_num += 1
    mag_relaxation.append(m1)
    iter_relaxation.append(iter_num)
#---------------------------Newton's method------------------------
for T in temp:
    m = 1.0
    delta = 1.0
    iter_num = 0
    while abs(delta)>accuracy:
        # NOTE(review): for true Newton on f(m) = m - tanh(m/T) the denominator
        # should be the derivative 1 - 1/(T*cosh(m/T)**2); the code divides by
        # (1/T)*cosh(m/T)**2 instead — confirm whether this damped variant is
        # intentional.
        delta = (m - tanh(m/T))/(1/T*cosh(m/T)**2)
        m -= delta
        iter_num += 1
    iter_newton.append(iter_num)
    mag_newton.append(m)
# Plot Magnetization vs Temperature
figure()
plot(temp, mag_relaxation, label='Relaxation method')
plot(temp, mag_newton, label='Newton method')
ylim(-0.1, 1.1)
xlabel('Temperature')
ylabel('Magnetization')
legend(loc='upper right')
# Plot Number of Iteration for 2 methods
figure()
subplot(211)
plot(temp, iter_newton)
title("Number of iterations for Newton's method")
ylabel("Count")
subplot(212)
plot(temp, iter_relaxation)
title("Number of iterations for Relaxation method")
xlabel("Temperature")
ylabel("Count")
show()
747401bce0c737593c58306b9eba2013153b311e | Python | ShashankSinha98/Leet-Code-Solutions | /Problems/153. Find Minimum in Rotated Sorted Array-(READ).py | UTF-8 | 523 | 3.09375 | 3 | [] | no_license | from typing import List
class Solution:
    def findMin(self, nums: List[int]) -> int:
        """Return the minimum element of a rotated sorted array.

        Binary search on the pivot.  BUG FIX: the original only handled
        ``nums[mid] < nums[r]``, which gives wrong answers when duplicates are
        present (its own sample input [4, 5, 5, 5] returned 5 instead of 4).
        When ``nums[mid] == nums[r]`` we cannot tell which half holds the
        minimum, but ``nums[r]`` is duplicated at ``mid``, so shrinking ``r``
        by one is always safe.
        """
        if not nums:
            return -1  # preserve the original fallback for empty input
        l, r = 0, len(nums) - 1
        while l < r:
            mid = (l + r) // 2
            if nums[mid] < nums[r]:
                r = mid          # minimum is at mid or to its left
            elif nums[mid] > nums[r]:
                l = mid + 1      # pivot (and minimum) is to the right of mid
            else:
                r -= 1           # duplicate of nums[r] at mid: discard nums[r]
        return nums[l]
# Quick manual check against a rotated array containing duplicates.
sample = [4, 5, 5, 5]
solver = Solution()
print(solver.findMin(sample))
5e8f689fb017a88a855b65dd6f7a20314a7d5a66 | Python | BhavikDudhrejiya/Python-Hands-on | /7. Variable Concatenat.py | UTF-8 | 317 | 4.21875 | 4 | [] | no_license | # Assigning Variables
# Sample values of several built-in types.
var1 = 'Hello World'                # str
var2 = 4                            # int
var3 = 36.7                         # float
var4 = 'This is a Python Tutorial'  # str
var5 = '32'                         # str that merely looks numeric

# '+' adds numbers (int + float promotes to float) and concatenates strings.
print(var2 + var3)
print(var1 + ' ' + var4)
print(var1 + var5)

# '+' only combines compatible operand types: str + int would raise TypeError.
fa0fba2c1737029736c6aa2ee24c522d955cb556 | Python | keumdohoon/STUDY | /keras/keras61_cifar10_dnn.py | UTF-8 | 2,088 | 2.6875 | 3 | [] | no_license | from keras.datasets import cifar10
from keras.utils import np_utils
from keras.models import Sequential, Model
from keras.layers import Dense, LSTM, Conv2D, Input
from keras.layers import Flatten, MaxPooling2D, Dropout
import matplotlib.pyplot as plt

# 1. Load CIFAR-10 and inspect the raw shapes.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

print(x_train[0])
print('y_train[0] :', y_train[0])   # e.g. [6]
print(x_train.shape)                # (50000, 32, 32, 3)
print(x_test.shape)                 # (10000, 32, 32, 3)
print(y_train.shape)                # (50000, 1)
print(y_test.shape)                 # (10000, 1)

# Show one sample image (blocks until the window is closed).
plt.imshow(x_train[0])
plt.show()

# Flatten each 32x32x3 image into a 3072-length vector for the dense network.
x_train = x_train.reshape(x_train.shape[0], 3072)
print('x_train:', x_train)
print('x_train_shape: ', x_train.shape)  # (50000, 3072)

# Preprocessing: one-hot encode the labels, scale pixels to [0, 1].
from keras.utils import np_utils
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print(y_train.shape)                # (50000, 10)
x_train = x_train.reshape(50000, 3072,).astype('float32') / 255
x_test = x_test.reshape(10000, 3072,).astype('float32') / 255

# 2. Model: fully-connected network built with the functional API.
input1 = Input(shape=(3072,))
dense1_1 = Dense(12)(input1)
dense1_2 = Dense(24)(dense1_1)
dense1_2 = Dense(24)(dense1_2)
dense1_2 = Dense(24)(dense1_2)
dense1_2 = Dense(24)(dense1_2)
dense1_2 = Dense(24)(dense1_2)
dense1_2 = Dense(24)(dense1_2)
output1_2 = Dense(32)(dense1_2)
output1_2 = Dense(16)(output1_2)
output1_2 = Dense(8)(output1_2)
output1_2 = Dense(4)(output1_2)
# BUG FIX: the final layer had no activation, so the network emitted raw
# logits into a probability-based loss; softmax gives 10-class probabilities.
output1_3 = Dense(10, activation='softmax')(output1_2)

model = Model(inputs = input1,
              outputs = output1_3)

# 3. Train
# BUG FIX: binary_crossentropy is for binary/multi-label targets; these labels
# are one-hot over 10 exclusive classes, which requires categorical_crossentropy.
model.compile(loss = 'categorical_crossentropy',
              optimizer = 'adam', metrics = ['accuracy'])
model.fit(x_train, y_train, epochs = 15, batch_size = 50, verbose= 2)

# Goal from the original exercise: reach ~75% accuracy.
# 4. Evaluate on the held-out test set.
loss,acc = model.evaluate(x_test,y_test,batch_size=30)
print(f"loss : {loss}")
print(f"acc : {acc}")
| true |
bec8de7eb445923328f9ff64e8187950d4c52000 | Python | dhanushraparthy/HeartDiseaseClassifier | /Heart_Disease_Model.py | UTF-8 | 2,161 | 3.3125 | 3 | [] | no_license | # Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import json
import requests
# Load dataset: features are all columns but the last, target is the last column.
dataset = pd.read_csv('heart.csv')

X = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]

print(X.shape)
print(y.shape)

# Split into training and test sets.
from sklearn.model_selection import train_test_split
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.3, random_state=0)

# Feature scaling: fit on the training features only, then apply to test.
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
train_X = sc_X.fit_transform(train_X)
test_X = sc_X.transform(test_X)
# BUG FIX: the original also ran StandardScaler on train_y.  Class labels must
# not be scaled for a classifier (and StandardScaler rejects 1-D input), so
# that step is removed.

# Baseline KNN classifier with k=1.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(train_X, train_y)

test_prediction = knn.predict(test_X)

# Report metrics.  NOTE: the labels say "AUC" but accuracy_score is plain accuracy.
from sklearn import metrics
print("AUC score: {:.5f}".format(metrics.accuracy_score(test_y, test_prediction)))
print("MAE score: {:.5f}".format(metrics.mean_absolute_error(test_y, test_prediction)))

# Sweep k = 1..49 and plot test accuracy to pick the best value.
v = []
k_range = list(range(1,50))
for i in k_range:
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(train_X, train_y)
    pred = knn.predict(test_X)
    v.append(metrics.accuracy_score(test_y, pred))
plt.plot(k_range, v, c='orange')
plt.show()

# Retrain with the chosen k and persist the model for the web app.
knn = KNeighborsClassifier(n_neighbors=6)
knn.fit(train_X, train_y)
test_prediction = knn.predict(test_X)
# BUG FIX: close the pickle file handle (the original left it open).
with open('model.pkl', 'wb') as model_file:
    pickle.dump(knn, model_file)
print("AUC score: {:.5f}".format(metrics.accuracy_score(test_y, test_prediction)))
print("MAE score: {:.5f}".format(metrics.mean_absolute_error(test_y, test_prediction)))
2c72513e473495fe65fde5fdd128cbc32c0d1776 | Python | thenagababupython/python_modules | /oops2/using one number class to another classs.py | UTF-8 | 406 | 3.75 | 4 | [] | no_license | class Engine:
    # Class attribute shared by all Engine instances.
    a=10
    def __init__(self):
        # Per-instance attribute complementing the class attribute ``a``.
        self.b=20
    def m1(self):
        # Engine-specific behaviour invoked by composing classes (e.g. Car.m2).
        print("Engine specfic functionality")
class Car:
    # NOTE(review): this print runs once, when the class body is executed at
    # import time — it is not per-instance behaviour.
    print("Engine Functionality")
    def __init__(self):
        # Composition: every Car owns its own Engine instance.
        self.engine=Engine()
    def m2(self):
        # Demonstrates a Car delegating to its composed Engine's state and method.
        print("car using engine function ")
        print(self.engine.a)
        print(self.engine.b)
        self.engine.m1()
# Exercise the composed Engine through the Car wrapper.
car = Car()
car.m2()
9ac4e296726c744831486efd288617a4e389389c | Python | haldron/python-projects | /simplepython/class.py | UTF-8 | 705 | 4.3125 | 4 | [] | no_license | """
This python script contains one class and one inherited class
with each having its own functions
and testing for the functions
"""
class Dog():
    """A simple dog that knows its name and can sit."""

    def __init__(self, name):
        """Remember the dog's name."""
        self.name = name

    def sit(self):
        """Announce that the dog is sitting."""
        print(f"{self.name} is sitting.")
# Demonstrate the base class.
my_dog = Dog('Peso')
print(f"{my_dog.name} is a great dog!")
my_dog.sit()
# Search-and-rescue dog: inherits everything from Dog and adds search().
class SARDog(Dog):
    def search(self):
        """Announce that the dog is searching."""
        print(f"{self.name} is searching.")
# Demonstrate the subclass: inherited sit() plus its own search().
my_dog = SARDog('Willie')
print(f"{my_dog.name} is a search dog.")
my_dog.search()
my_dog.sit()
ce7b7b825cf0891244f5a02ed7c47b9d1a4bfcb2 | Python | Dominik-Kaczor/epitech_mathematique_2019 | /203hotline_2019/203hotline | UTF-8 | 2,755 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python3
from sys import*
from math import*
import random
import time
def _report_distribution(title, prob):
    """Print P(k) for k = 0..50 under ``prob``, five values per row, followed
    by the overload probability (P(k > 25)) and the elapsed computation time.

    Args:
        title: section heading printed first.
        prob: callable k -> probability of exactly k simultaneous calls.
    """
    print(title)
    overload = 0
    start = time.time()
    for count in range(51):
        value = prob(count)
        if count > 25:
            overload = overload + value
        # Blank line between rows of five values.
        if count % 5 == 0 and count != 0:
            print("")
        # First value of a row has no leading tab; the rest do.
        prefix = "" if count % 5 == 0 else "\t"
        print("%s%d -> %0.3f" % (prefix, count, value), end='')
    print("")
    print("Overload: %0.1f%%" % (overload * 100))
    end = time.time()
    print("Computation time: %.2f ms" % ((end - start) * 1000))


def compute_1(argv):
    """Handle the single-argument form: -h prints usage; an integer duration d
    prints the binomial and Poisson call-load distributions.

    Returns 0 on success, 84 on invalid input.
    """
    if (argv[1] == "-h"):
        print("USAGE\n\t./203hotline [n k | d]\n\nDESCRIPTION\nn\tn value for the computation of C(n, k)\nk\tk value for the computation of C(n, k)\nd\taverage duration of calls (in seconds)")
        return 0
    if not argv[1].isdigit():
        print("Args have to be ints")
        return(84)
    d = int(argv[1])
    # Probability that any one of the 3500 subscribers is calling at a given
    # instant, and the matching Poisson mean.
    med = d / (3600 * 8)
    medp = 3500 * (d / (3600 * 8))
    # The two report loops were duplicated in the original; only the
    # probability formula differs, so it is passed in as a callable.
    _report_distribution(
        "Binomial distribution:",
        lambda k: (factorial(3500) // (factorial(k) * factorial(3500 - k))) * pow(med, k) * pow((1 - med), (3500 - k)))
    print("")
    _report_distribution(
        "Poisson distribution:",
        lambda k: (exp(-medp) * pow(medp, k)) / factorial(k))
    return 0
def compute_2(argv):
    """Handle the two-argument form: print C(n, k), the number of
    k-combinations of an n-element set.

    Returns 0 on success, 84 when either argument is not an integer.
    """
    if not (argv[1].isdigit() and argv[2].isdigit()):
        print("Args have to be ints")
        return 84
    n, k = int(argv[1]), int(argv[2])
    print("%d" % k + "-combinations of a set of size %d:" % n)
    # C(n, k) = n! / (k! (n-k)!), computed with exact integer arithmetic.
    print("%d" % (factorial(n) // (factorial(k) * factorial(n - k))))
    return 0
def main():
    """Dispatch on argument count: one extra arg -> compute_1, two -> compute_2,
    anything else is an error (exit code 84)."""
    argc = len(argv)
    if argc == 2:
        return compute_1(argv)
    if argc == 3:
        return compute_2(argv)
    print("Wrong args")
    return 84

if __name__ == "__main__":
    exit(main())
| true |
29994ec1f593eed989f0069839d2758bdf63044a | Python | liaohhhhhh/denoisy | /Method.py | UTF-8 | 7,151 | 2.5625 | 3 | [] | no_license | import numpy as np
import cv2 as cv
import math as m
m1 = np.array([[-1, 0, 1],
[-1, 0, 1],
[-1, 0, 1]])
m2 = np.array([[-1,-1,-1],
[ 0, 0, 0],
[ 1, 1, 1]])
m3 = np.array([[-1, 0, 0, 0, 1],
[-1, 0, 0, 0, 1],
[-1, 0, 0, 0, 1],
[-1, 0, 0, 0, 1],
[-1, 0, 0, 0, 1],])
m4 = np.array([[-1,-1,-1,-1,-1],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0],
[ 1, 1, 1, 1, 1]])
def canny(f, r, i, j):
    """Estimate the local gradient direction at (i, j) with 3x3 Prewitt-style
    kernels m1/m2 on the grayscale image, then interpolate the missing pixel
    along the matching direction (insert0/45/90/135).

    ``r`` is the mask consumed by the insert* helpers (255 marks pixels to
    repair — assumption based on those helpers; confirm with callers).
    """
    # Falls through unchanged if no angle branch below matches.
    result = np.zeros((3))
    # print(i,',',j,',',f.shape)
    g = cv.cvtColor(f,cv.COLOR_BGR2GRAY)
    theta = 0
    # Horizontal / vertical gradient responses of the 3x3 neighbourhood
    # (row-vector . kernel*patch . column-vector collapses the patch to a scalar).
    Gx = (np.dot(np.array([[1, 1, 1]]), (m1 * g[i-1:i+2, j-1:j+2]))).dot(np.array([[1], [1], [1]]))
    Gy = (np.dot(np.array([[1, 1, 1]]), (m2 * g[i-1:i+2, j-1:j+2]))).dot(np.array([[1], [1], [1]]))
    if Gx[0] == 0:
        # NOTE(review): this branch passes the grayscale image ``g`` while the
        # angle branches below pass the colour image ``f`` — the return shape
        # therefore differs (scalar vs 3-vector); verify which is intended.
        result = insert90(g, r, i, j)
    else:
        # Gradient angle in degrees, mapped into (-180, 180] by the sign logic.
        temp = (np.arctan(Gy[0] / Gx[0])) * 180 / np.pi
        if Gx[0]*Gy[0] > 0:
            if Gx[0] > 0:
                theta = np.abs(temp)
            else:
                theta = np.abs(temp) - 180
        if Gx[0]*Gy[0] < 0:
            if Gx[0] > 0:
                theta = (-1) * np.abs(temp)
            else:
                theta = 180 - np.abs(temp)
        # Quantise the angle to one of four interpolation directions.
        if( ((theta >= -22.5) and (theta < 22.5)) or
            ((theta <= -157.5) and (theta > -180)) or
            ((theta >= 157.5) and (theta < 180))):
            result = insert0(f, r, i, j)
        elif( ((theta >= 22.5) and (theta < 67.5)) or
            ((theta <= -112.5) and (theta > -157.5))):
            result = insert45(f, r, i, j)
        elif( ((theta >= 67.5) and (theta < 122.5)) or
            ((theta <= -67.5) and (theta > -122.5))):
            result = insert90(f, r, i, j)
        elif( ((theta >= 122.5) and (theta < 157.5)) or
            ((theta <= -22.5) and (theta > -67.5))):
            result = insert135(f, r, i, j)
    return result
def canny1(f, r, i, j):
    """Variant of :func:`canny` that interpolates along a line of integer
    slope Gy/Gx (via :func:`insert`) instead of quantising to four angles.
    Always works on the grayscale image."""
    # NOTE(review): initialised to shape (5,) but every branch below overwrites
    # it with the insert* result — the initial value appears unused.
    result = np.zeros((5))
    # print(i,',',j,',',f.shape)
    g = cv.cvtColor(f,cv.COLOR_BGR2GRAY)
    theta = 0
    # Commented-out 5x5 gradient kernels kept from an earlier experiment.
    # Gx = (np.dot(np.array([[1, 1, 1, 1, 1]]), (m3 * g[i-2:i+3, j-2:j+3]))).dot(np.array([[1], [1], [1], [1], [1]]))
    # Gy = (np.dot(np.array([[1, 1, 1, 1, 1]]), (m4 * g[i-2:i+3, j-2:j+3]))).dot(np.array([[1], [1], [1], [1], [1]]))
    Gx = (np.dot(np.array([[1, 1, 1]]), (m1 * g[i-1:i+2, j-1:j+2]))).dot(np.array([[1], [1], [1]]))
    Gy = (np.dot(np.array([[1, 1, 1]]), (m2 * g[i-1:i+2, j-1:j+2]))).dot(np.array([[1], [1], [1]]))
    if Gx[0] == 0:
        # Vertical gradient: interpolate horizontally.
        result = insert90(g, r, i, j)
    else:
        temp = Gy[0] / Gx[0]
        if abs(temp) > 4:
            # Slope too steep to step column-wise: treat as vertical.
            result = insert90(g, r, i, j)
        else:
            # Round the slope to an integer column step per row.
            temp = round(float(temp))
            result = insert(g, r, i ,j, temp)
    return result
def insert(g, r, i, j, temp):
    """Interpolate pixel (i, j) along the line of integer slope ``temp``
    (rows step by 1, columns by ``temp``), between the two nearest pixels on
    that line where the mask ``r`` is not 255.

    NOTE(review): the blend weights grow with the distance to the anchor pixel
    itself (matching insert0..insert135); assumes it is called where
    r[i][j] == 255, otherwise both weights are zero and the division fails.
    """
    x_step = 1
    y_step = temp
    XL = XH = i
    YL = YH = j
    # Walk backwards along the line while pixels are flagged (255).
    while XL > 1 and YL > abs(y_step) and YL < (r.shape[1] - abs(y_step)):
        if r[XL][YL] == 255:
            XL -= x_step
            YL -= y_step
        else:
            break
    # Walk forwards likewise.
    while XH < (r.shape[0] - 1) and YH > abs(y_step) and YH < (r.shape[1] - abs(y_step)):
        if r[XH][YH] == 255:
            XH += x_step
            YH += y_step
        else:
            break
    # Euclidean distances to the two anchors.
    # BUG FIX: the original wrote ``** 1/2`` which parses as (x ** 1) / 2 —
    # half the squared distance — not the intended square root.
    d1 = ((XL - i) ** 2 + (YL - j) ** 2) ** 0.5
    d2 = ((XH - i) ** 2 + (YH - j) ** 2) ** 0.5
    d1 = d1 ** 5
    d2 = d2 ** 5
    return ((g[XL][YL].astype(float) * d1 + g[XH][YH].astype(float) * d2) // (d1 + d2)).astype(int)
def insert0(g, r, i, j):
    """Interpolate pixel (i, j) along the row-index axis: walk both ways in
    column j while the mask ``r`` is 255, then blend the two boundary pixels
    with distance**5 weighting (each anchor weighted by its own distance)."""
    up = down = i
    while r[up][j] == 255 and up > 0:
        up -= 1
    while r[down][j] == 255 and down < (r.shape[0] - 1):
        down += 1
    w_up = (i - up) ** 5
    w_down = (down - i) ** 5
    return ((g[up][j].astype(float) * w_up + g[down][j].astype(float) * w_down)
            // (w_up + w_down)).astype(int)
def insert45(g, r, i, j):
    """Interpolate pixel (i, j) along the 45-degree diagonal (row-1/col+1 one
    way, row+1/col-1 the other) while the mask ``r`` is 255, blending the two
    boundary pixels with distance**5 weighting."""
    row_a, col_a = i, j
    while r[row_a][col_a] == 255 and row_a > 0 and col_a < (r.shape[1] - 1):
        row_a -= 1
        col_a += 1
    row_b, col_b = i, j
    while r[row_b][col_b] == 255 and row_b < (r.shape[0] - 1) and col_b > 0:
        row_b += 1
        col_b -= 1
    w_a = (2 * (i - row_a)) ** 5
    w_b = (2 * (row_b - i)) ** 5
    return ((g[row_a][col_a].astype(float) * w_a + g[row_b][col_b].astype(float) * w_b)
            // (w_a + w_b)).astype(int)
def insert90(g, r, i, j):
    """Interpolate pixel (i, j) along the column-index axis: walk both ways in
    row i while the mask ``r`` is 255, then blend the two boundary pixels with
    distance**5 weighting."""
    left = right = j
    while r[i][left] == 255 and left > 0:
        left -= 1
    while r[i][right] == 255 and (right < r.shape[1] - 1):
        right += 1
    w_left = (j - left) ** 5
    w_right = (right - j) ** 5
    return ((g[i][left].astype(float) * w_left + g[i][right].astype(float) * w_right)
            // (w_left + w_right)).astype(int)
def insert135(g, r, i, j):
    """Interpolate pixel (i, j) along the 135-degree diagonal (both indices
    decreasing one way, both increasing the other) while the mask ``r`` is
    255, blending the two boundary pixels with distance**5 weighting."""
    row_a = col_a = None
    row_a, col_a = i, j
    while r[row_a][col_a] == 255 and row_a > 0 and col_a > 0:
        row_a -= 1
        col_a -= 1
    row_b, col_b = i, j
    while r[row_b][col_b] == 255 and row_b < (r.shape[0] - 1) and col_b < (r.shape[1] - 1):
        row_b += 1
        col_b += 1
    w_a = (2 * (i - row_a)) ** 5
    w_b = (2 * (row_b - i)) ** 5
    return ((g[row_a][col_a].astype(float) * w_a + g[row_b][col_b].astype(float) * w_b)
            // (w_a + w_b)).astype(int)
def median(f, i, j, size=3):
    """Per-channel median of the size x size window centred on (i, j),
    clamped to the image bounds.

    Args:
        f: H x W x 3 image array.
        i, j: centre pixel.
        size: window edge length (default 3).
    Returns:
        np.ndarray of shape (3,): per-channel median over the window.
    """
    k = int(size / 2)
    channels = [[], [], []]
    row_lo = max(0, i - k)
    row_hi = min(f.shape[0], i + k + 1)
    col_lo = max(0, j - k)
    col_hi = min(f.shape[1], j + k + 1)
    for row in range(row_lo, row_hi):
        for col in range(col_lo, col_hi):
            for c in range(3):
                channels[c].append(f[row][col][c])
    result = np.zeros((3))
    for c in range(3):
        channels[c].sort()
        # BUG FIX: the original indexed with (size**2 - 1) / 2, which is out of
        # range when the window is clipped at the image border; index the middle
        # of the values actually collected instead (as c_median does).
        result[c] = channels[c][len(channels[c]) // 2]
    return result
def c_median(f, i, j, size=5):
    """Per-channel median over the PERIMETER of the size x size window centred
    on (i, j), clamped to the image bounds.

    Args:
        f: H x W x 3 image array.
        i, j: centre pixel.
        size: window edge length (default 5).
    Returns:
        np.ndarray of shape (3,): per-channel median of the perimeter values.
    """
    k = int(size / 2)
    channels = [[], [], []]
    row_lo = max(0, i - k)
    row_hi = min(f.shape[0], i + k + 1)
    col_lo = max(0, j - k)
    col_hi = min(f.shape[1], j + k + 1)

    def grab(row, col):
        for c in range(3):
            channels[c].append(f[row][col][c])

    # BUG FIX: the original walk compared ``col < YL`` (always false) instead
    # of ``col < YH``, so it only sampled the left column — twice — and could
    # index one row past the window.  Walk the window perimeter once instead.
    if row_hi - row_lo <= 1 or col_hi - col_lo <= 1:
        # Degenerate (single row/column) window: take every cell once.
        for row in range(row_lo, row_hi):
            for col in range(col_lo, col_hi):
                grab(row, col)
    else:
        row, col = row_lo, col_lo
        while row < row_hi - 1:          # left edge, top -> bottom
            grab(row, col)
            row += 1
        while col < col_hi - 1:          # bottom edge, left -> right
            grab(row, col)
            col += 1
        while row > row_lo:              # right edge, bottom -> top
            grab(row, col)
            row -= 1
        while col > col_lo:              # top edge, right -> left
            grab(row, col)
            col -= 1
    result = np.zeros((3))
    for c in range(3):
        channels[c].sort()
        result[c] = channels[c][len(channels[c]) // 2]
    return result
def mean(f, i, j, size=3):
    """Per-channel mean of the size x size window centred on (i, j), clamped
    to the image bounds and truncated to int.

    Args:
        f: H x W x 3 image array.
        i, j: centre pixel.
        size: window edge length (default 3).
    Returns:
        np.ndarray of shape (3,), int: per-channel window mean.
    """
    k = int(size / 2)
    # BUG FIX: the original sliced without clamping (negative indices silently
    # wrap in numpy) and indexed window[c] — the c-th ROW of the window, not
    # the c-th colour channel.
    row_lo = max(0, i - k)
    row_hi = min(f.shape[0], i + k + 1)
    col_lo = max(0, j - k)
    col_hi = min(f.shape[1], j + k + 1)
    window = f[row_lo:row_hi, col_lo:col_hi]
    result = np.zeros((3))
    for c in range(3):
        result[c] = np.mean(window[:, :, c])
    return result.astype(int)
def c_mean(g, i, j, size = 5):
    """Per-channel mean over the PERIMETER of the size x size window centred
    on (i, j), clamped to the image bounds and truncated to int.

    Args:
        g: H x W x 3 image array (converted to float internally).
        i, j: centre pixel.
        size: window edge length (default 5).
    Returns:
        np.ndarray of shape (3,), int: per-channel mean of the perimeter values.
    """
    f = g.astype(float)
    k = int(size / 2)
    channels = [[], [], []]
    row_lo = max(0, i - k)
    row_hi = min(f.shape[0], i + k + 1)
    col_lo = max(0, j - k)
    col_hi = min(f.shape[1], j + k + 1)

    def grab(row, col):
        for c in range(3):
            channels[c].append(f[row][col][c])

    # BUG FIX: the original walk compared ``col < YL`` (always false) instead
    # of ``col < YH``, so it only sampled the left column — twice — and could
    # index one row past the window.  Walk the window perimeter once instead.
    if row_hi - row_lo <= 1 or col_hi - col_lo <= 1:
        # Degenerate (single row/column) window: take every cell once.
        for row in range(row_lo, row_hi):
            for col in range(col_lo, col_hi):
                grab(row, col)
    else:
        row, col = row_lo, col_lo
        while row < row_hi - 1:          # left edge, top -> bottom
            grab(row, col)
            row += 1
        while col < col_hi - 1:          # bottom edge, left -> right
            grab(row, col)
            col += 1
        while row > row_lo:              # right edge, bottom -> top
            grab(row, col)
            row -= 1
        while col > col_lo:              # top edge, right -> left
            grab(row, col)
            col -= 1
    result = np.zeros((3))
    for c in range(3):
        result[c] = np.mean(channels[c])
    return result.astype(int)
def equalize(f, r, i, j, size = 10):
    """Redistribute the intensity of the 3-vector ``r`` across channels in
    proportion to the image's global per-channel means, preserving sum(r).

    NOTE(review): here ``r`` is a pixel value (3-vector), unlike the mask
    ``r`` used elsewhere in this module; ``i``, ``j``, ``size`` and ``k``
    are unused — confirm whether a local-window version was intended.
    """
    k = int(size/2)
    # Global mean of each colour channel of the whole image.
    R,G,B = cv.split(f)
    R = np.mean(R)
    G = np.mean(G)
    B = np.mean(B)
    # Total intensity of the pixel being equalised.
    r_sum = sum(r)
    # Fraction of total intensity assigned to each channel.
    f0 = R / (R + G + B)
    f1 = G / (R + G + B)
    f2 = B / (R + G + B)
    r[0] = r_sum * f0
    r[1] = r_sum * f1
    r[2] = r_sum * f2
    return r
| true |
c5059f6fc23389fcaed8178ffe3ea353bae95246 | Python | acgoularthub/Curso-em-Video-Python | /desafio022.py | UTF-8 | 362 | 3.90625 | 4 | [] | no_license | nome = input('Digite seu nome completo: ')
separa = nome.split()
print('Seu nome com todas as letras maiúsculas: {}'.format(nome.upper()))
print('Seu nome completo tem {} letras'.format(len(nome.replace(" ", ""))))
# ou: print('Seu nome completo tem {} letras'.format(len(nome) - nome.count(' ')))
print('Seu primeiro nome tem {} letras'.format(len(separa[0]))) | true |
5766a35c8399fa6e6211f82abdd2c7811b55588a | Python | csJd/dg_text_contest_2018 | /embedding_model/w2v_model.py | UTF-8 | 4,459 | 2.703125 | 3 | [
"MIT"
] | permissive | # coding: utf-8
# created by deng on 7/25/2018
from utils.path_util import from_project_root, exists
from utils.data_util import load_raw_data, load_to_df
from gensim.models.word2vec import Word2Vec, Word2VecKeyedVectors
from sklearn.externals import joblib
from collections import OrderedDict
from time import time
import numpy as np
DATA_URL = from_project_root("processed_data/phrase_level_data.csv")
TRAIN_URL = from_project_root("data/train_set.csv")
TEST_URL = from_project_root("data/test_set.csv")
N_JOBS = 4
def train_w2v_model(data_url=None, kwargs=None):
    """ get or train a new d2v_model

    Args:
        data_url: url to data file, None to train use
        kwargs: args for d2v model (required: args_to_url(kwargs) is called
            unconditionally, so passing None will fail there)

    Returns:
        w2v_model

    """
    # A model trained with these args is cached on disk; reuse it if present.
    model_url = args_to_url(kwargs)
    if exists(model_url):
        return Word2Vec.load(model_url)

    if data_url is not None:
        _, sequences = load_raw_data(data_url)
    # use data from all train text and test text
    else:
        train_df = load_to_df(TRAIN_URL)
        test_df = load_to_df(TEST_URL)
        sequences = train_df['word_seg'].append(test_df['word_seg'], ignore_index=True)
        sequences = sequences.apply(str.split)

    print("Word2Vec model is training...\n trained model will be saved at \n ", model_url)
    s_time = time()
    # more info here [https://radimrehurek.com/gensim/models/word2vec.html#gensim.models.word2vec.Word2Vec]
    model = Word2Vec(sequences, workers=N_JOBS, **kwargs)
    e_time = time()
    print("training finished in %.3f seconds" % (e_time - s_time))
    model.save(model_url)

    # save wv of model (text format alongside the binary model)
    wv_save_url = model_url.replace('.bin', '.txt').replace('w2v', 'wv')
    model.wv.save_word2vec_format(wv_save_url, binary=False)
    return model
def load_wv(url):
    """Load KeyedVectors saved in word2vec text format.

    Args:
        url: path to the saved wv file

    Returns:
        Word2VecKeyedVectors: the loaded word vectors
    """
    wv = Word2VecKeyedVectors.load_word2vec_format(url, binary=False)
    return wv
def args_to_url(args, prefix='w2v_word_seg_'):
    """ generate model_url from args

    Args:
        args: args dict
        prefix: filename prefix to save model

    Returns:
        str: model_url for train_w2v_model

    """
    # Join the arg values in key-sorted order so the same args always map to
    # the same filename.  (The original round-tripped through dict + OrderedDict
    # to rely on insertion order for the same effect.)
    filename = '_'.join(str(value) for _, value in sorted(args.items())) + '.bin'
    return from_project_root("embedding_model/models/" + prefix + filename)
def avg_wv_of_words(wv_url, words):
    """ get avg word vector of words

    Args:
        wv_url: url to wv file
        words: word list

    Returns:
        np.ndarray: averaged word vector, shape (vector_size,)
        (all-NaN with a RuntimeWarning when no word is in the vocabulary,
        matching the original behaviour)

    """
    wv = load_wv(wv_url)
    # PERF FIX: the original grew a flat array with np.append per word, which
    # copies the buffer every iteration (quadratic); collect rows in a list.
    vectors = [wv[word] for word in words if word in wv.vocab]
    wvs = np.array(vectors).reshape(-1, wv.vector_size)
    avg_wv = np.mean(wvs, axis=0)
    return avg_wv.reshape((wv.vector_size,))
def infer_avg_wvs(wv_url, sentences):
    """ infer avg word vectors of sentences

    Args:
        wv_url: url to wv
        sentences: sentences, every sentence is a list of words

    Returns:
        np.ndarray: averaged word vectors, shape (len(sentences), vector_size)

    """
    wv = load_wv(wv_url)
    # PERF FIX: the original accumulated every vector with np.append, copying
    # the whole buffer each time (quadratic); build a list of rows instead.
    rows = []
    for sentence in sentences:
        vectors = [wv[word] for word in sentence if word in wv.vocab]
        wvs = np.array(vectors).reshape(-1, wv.vector_size)
        rows.append(np.mean(wvs, axis=0).reshape((wv.vector_size,)))
    return np.array(rows).reshape(len(sentences), -1)
def gen_data_for_clf(wv_url, save_url):
    """Build classifier-ready features: averaged word vectors for the train
    and test word_seg columns plus the train labels, pickled to save_url as
    the tuple (X, y, X_test).

    Args:
        wv_url: path to the saved word-vector file
        save_url: path the (X, y, X_test) tuple is dumped to via joblib
    """
    train_df = load_to_df(TRAIN_URL)
    test_df = load_to_df(TEST_URL)
    # Each document becomes the mean of its in-vocabulary word vectors.
    X = infer_avg_wvs(wv_url, train_df['word_seg'].apply(str.split))
    y = train_df['class'].values
    X_test = infer_avg_wvs(wv_url, test_df['word_seg'].apply(str.split))
    joblib.dump((X, y, X_test), save_url)
def main():
    """Train (or load) the Word2Vec model with the fixed hyper-parameters
    below, then generate the averaged-vector dataset for the classifier.
    Note: wv_url below hard-codes the filename derived from these kwargs."""
    # skip-gram (sg=1) with hierarchical softmax (hs=1), 300-dim vectors.
    kwargs = {
        'size': 300,
        'min_count': 5,
        'window': 5,
        'iter': 5,
        'sg': 1,
        'hs': 1
    }
    model = train_w2v_model(data_url=None, kwargs=kwargs)
    print(len(model.wv.vocab))
    wv_url = from_project_root("embedding_model/models/wv_word_seg_300_5_5_5_1_1.txt")
    save_url = from_project_root("processed_data/vector/avg_wvs_300.pk")
    gen_data_for_clf(wv_url, save_url=save_url)
    pass


if __name__ == '__main__':
    main()
| true |
179eecebacc893e8437f06da307559155cfd5e57 | Python | osak/ICFPC2017 | /src/python/tsuchinoko-viewer/__main__.py | UTF-8 | 3,716 | 2.703125 | 3 | [] | no_license | from argparse import ArgumentParser
import json
import sys
def get_rank(arr):
    """Competition-rank the values of ``arr`` (highest value gets rank 1;
    equal values share the rank of their first occurrence)."""
    descending = sorted(arr, reverse=True)
    first_position = {}
    for position, value in enumerate(descending, start=1):
        first_position.setdefault(value, position)
    return [first_position[value] for value in arr]
def add_meta_data(objs):
    """Annotate benchmark result objects in place and return them.

    Adds per-performance "highest"/"average" (over "scores"), then, for each
    benchmark column, cross-AI "rank", "loss_percentage" (vs the best "total")
    and "ratio" (colour-scale value in [0, 1]).
    """
    # Per-AI stats over the raw score list.
    for obj in objs:
        for perf in obj["performances"]:
            perf["highest"] = max(perf["scores"])
            perf["average"] = sum(perf["scores"]) / len(perf["scores"])
    # Cross-AI stats per benchmark column.  (The original also computed an
    # unused score_min; removed.)
    for i in range(len(objs[0]["performances"])):
        scores = [obj["performances"][i]["total"] for obj in objs]
        score_max = max(scores)  # NOTE(review): a zero max would divide by zero below
        ranks = get_rank(scores)
        for j, obj in enumerate(objs):
            perf = obj["performances"][i]
            perf["rank"] = ranks[j]
            perf["loss_percentage"] = (1.0 - scores[j] / score_max) * 100
            # Linear falloff: full colour at 0% loss, floor at 25% loss or more.
            perf["ratio"] = max(0, (25 - perf["loss_percentage"]) / 25)
    return objs
def print_table(objs, headers):
    """Print the complete HTML report document to stdout: style block, title,
    version line (taken from the first object), and the results table with one
    row per AI."""
    print("<html><head><style>")
    print_style()
    print("</style></head><body>")
    print("<h3>Tsuchinoko Report</h3>")
    # NOTE(review): the <p> tag is never closed.
    print("<p>Benchmark Version: {}".format(objs[0]["version"]))
    print("<table border><thead>")
    print_header(headers)
    print("</thead><tbody>")
    for obj in objs:
        print_row(obj)
    print("</tbody></table>")
    print("</body></html>")
def print_header(headers):
    """Emit the header row: an empty corner cell followed by one <th> per
    benchmark column.  The caller (print_table) opens and closes <thead>."""
    print("<tr>")
    print("<th></th>")
    for column in headers:
        print("<th>{}</th>".format(column))
    # BUG FIX: the original printed "</tr></thead>" here while print_table
    # also closes </thead>, producing a duplicated closing tag.
    print("</tr>")
def print_row(obj):
    """Emit one table row for a single AI: a header cell with name, short
    commit hash and average rank, then one coloured cell per benchmark."""
    print("<tr>")
    print("<th>{}<br/><small>({})</small><br/>".format(obj["ai"], obj["ai_commit"][:7]))
    ranks = [perf["rank"] for perf in obj["performances"]]
    print("<small>Ave Rank: {:.2f}</small>".format(sum(ranks) / len(ranks)))
    print("</th>")
    for perf in obj["performances"]:
        # Cell background encodes the score ratio on a red-yellow-green scale.
        print("<td bgcolor={}><center>".format(calculate_color(perf["ratio"])))
        print("rank: {}<br/>".format(perf["rank"]))
        print("<b>{}</b><br/>".format(perf["average"]))
        print("<small>(-{:.1f}%)</small><br/>".format(perf["loss_percentage"]))
        print("<small>max: {}</small><br/>".format(perf["highest"]))
        print("</center></td>")
    print("</tr>")
def print_style():
    """Emit the inline CSS rules used by the report table."""
    for rule in (
        'table {font-size: 12px; word-wrap:break-word; border-collapse: collapse;}',
        'table, th, tr, td {border: solid black 1px;}',
        'th, td {min-width: 90px; max-width: 90px;}',
    ):
        print(rule)
def calculate_color(ratio):
    """Map ``ratio`` in [0, 1] to a hex colour on a red -> yellow -> green
    gradient (red at 0, yellow at 0.5, green at 1)."""
    red = [222, 102, 65]
    yellow = [242, 229, 92]
    green = [57, 168, 105]
    # Pick the half of the gradient and rescale ratio into [0, 1] within it.
    if ratio < 0.5:
        low, high, t = red, yellow, ratio * 2
    else:
        low, high, t = yellow, green, (ratio - 0.5) * 2
    channels = [format(int(h * t + l * (1 - t)), "02X") for l, h in zip(low, high)]
    return "#{}".format("".join(channels))
def main():
    """Parse --files, load the benchmark JSON reports (all of which must share
    the same version), and print the HTML report table to stdout."""
    parser = ArgumentParser()
    parser.add_argument("--files", type=str, nargs="+", help="Json files")
    args = parser.parse_args()

    version = -1
    objs = []
    for file in args.files:
        # BUG FIX: use a context manager so the handle is closed even if the
        # JSON is malformed (the original closed it only on success).
        with open(file, "r") as f:
            obj = json.loads(f.read())
        if version == -1:
            version = obj["version"]
        if version != obj["version"]:
            print("Use reports with the same version", file = sys.stderr)
            # BUG FIX: bare sys.exit() exited with status 0 on this error path.
            sys.exit(1)
        objs.append(obj)

    headers = [perf["name"] for perf in objs[0]["performances"]]
    models = add_meta_data(objs)
    print_table(models, headers)
| true |
eef723c60bca723588b70f59f09fee9034dec604 | Python | lmmProject/python_01 | /04_对象/02_多态.py | UTF-8 | 781 | 4.75 | 5 | [] | no_license | # 静态语言 vs 动态语言
# For a statically typed language (such as Java), if a function expects an
# Animal argument, the object passed in must be an Animal or one of its
# subclasses — otherwise run() cannot be called.
# For a dynamic language like Python, the argument does not have to be an
# Animal at all: it only needs to provide a run() method.
class Animal(object):
    """Base class for the duck-typing demo: anything with run() will do."""

    def run(self):
        """Report generic animal movement."""
        print('Animal is running...')
class Dog(Animal):
    """Subclass that overrides run() and adds a dog-specific eat()."""

    def run(self):
        print('Dog is running...')

    def eat(self):
        print('Eating meat...')
class Timer(object):
    """Unrelated to Animal, but also provides run() — demonstrating that
    Python cares about the method, not the type."""

    def run(self):
        print('Start...')
class Cat(Timer):
    """Inherits from Timer (not Animal) yet still quacks like one."""

    def run(self):
        print('Cat is running...')
dog = Dog()
cat = Cat()
# BUG FIX: the original wrapped these calls in print(), which also printed the
# methods' None return value after each message.
dog.run()
cat.run()
| true |
cc68da405273606d40660bfc8e0e3e1cf56e87b4 | Python | tinoxn/twitter | /tinox.py | UTF-8 | 2,291 | 2.921875 | 3 | [] | no_license | import streamlit as st
import pickle
from sklearn.feature_extraction.text import CountVectorizer
import preprocessor as p
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
#set up punctuations we want to be replaced
REPLACE_NO_SPACE = re.compile("(\.)|(\;)|(\:)|(\!)|(\')|(\?)|(\,)|(\")|(\|)|(\()|(\))|(\[)|(\])|(\%)|(\$)|(\>)|(\<)|(\{)|(\})")
REPLACE_WITH_SPACE = re.compile("(<br\s/><br\s/?)|(-)|(/)|(:).")
def clean_tweets(df):
    """Clean an iterable of raw tweets: run the tweet-preprocessor cleaner,
    lowercase, strip the punctuation matched by REPLACE_NO_SPACE, and map the
    REPLACE_WITH_SPACE patterns to single spaces.  Returns a list of strings."""
    cleaned = []
    for line in df:
        # clean using tweet_preprocessor, then normalise punctuation
        text = p.clean(line)
        text = REPLACE_NO_SPACE.sub("", text.lower())
        cleaned.append(REPLACE_WITH_SPACE.sub(" ", text))
    return cleaned
# Load the pre-trained sentiment model.
# NOTE(review): duplicated assignment target, and the handle is never closed.
pickle_in = pickle_in = open("moody_sentiment_model.sav", "rb")
model = pickle.load(pickle_in)

# datasets
train = pd.read_csv("SentimentDataset_train.csv")
test = pd.read_csv("SentimentDataset_test.csv")

# Clean the raw tweet text for both splits.
train_tweet = clean_tweets(train["tweet"])
train_tweet = pd.DataFrame(train_tweet)
# append cleaned tweets to the training dataset
train["clean_tweet"] = train_tweet

test_tweet = clean_tweets(test["tweet"])
test_tweet = pd.DataFrame(test_tweet)
test["clean_tweet"] = test_tweet

# Stratified train/test split on the cleaned text.
y = train.label.values
x_train, x_test, y_train, y_test = train_test_split(train.clean_tweet.values, y,
                                                    stratify = y,
                                                    random_state = 1,
                                                    test_size = 0.3,
                                                    shuffle = True)
# initilizing the vectorizer
# NOTE(review): fitting on train + test vocabulary leaks test tokens into the
# feature space — confirm this matches how the pickled model was trained.
vectorizer = CountVectorizer(binary = True, stop_words = "english")
vectorizer.fit(list(x_train) + list(x_test))
def classify_tweet(user_text):
    """Clean and vectorize a single tweet, then return the model's predicted
    label array (uses the module-level vectorizer and model)."""
    cleaned = clean_tweets([user_text])
    features = vectorizer.transform(cleaned)
    return model.predict(features)
# Streamlit UI: a text box plus a button that classifies the entered tweet.
st.title("Welcome Bot-Twitter")
st.header("Enter the tweet text")

user_text = st.text_input("Your tweet")
result = ""
r = ""
if st.button("check up Tweet"):
    result = classify_tweet(user_text)
    # classify_tweet returns a 1-element array; comparing to a 1-element list
    # yields a single-element boolean array, which is truthy when they match.
    # NOTE(review): label 0 -> "Positive" / 1 -> "Negative" is assumed from
    # this mapping — confirm against the training-data labelling.
    if result == [0]:
        r = "Positive"
    elif result == [1]:
        r = "Negative"
    st.success('This tweet is : {}'.format(r))
| true |
d1f276ec42decf7fda3d771109486e1bd9243815 | Python | duncanmmacleod/gwosc | /gwosc/urls.py | UTF-8 | 5,804 | 2.625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# Copyright (C) Cardiff University, 2018-2020
#
# This file is part of GWOSC.
#
# GWOSC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWOSC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWOSC. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for URL handling
"""
import re
from os.path import (basename, splitext)
from .utils import segments_overlap
# LOSC filename re
URL_REGEX = re.compile(
r"\A((.*/)*(?P<obs>[^/]+)-"
r"(?P<ifo>[A-Z][0-9])_(L|GW)OSC_"
r"((?P<tag>[^/]+)_)?"
r"(?P<samp>\d+(KHZ)?)_"
r"[RV](?P<version>\d+)-"
r"(?P<strt>[^/]+)-"
r"(?P<dur>[^/\.]+)\."
r"(?P<ext>[^/]+))\Z"
)
VERSION_REGEX = re.compile(r'[RV]\d+')
def sieve(urllist, segment=None, **match):
    """Yield the URL metadata dicts from ``urllist`` that satisfy ``match``.

    Parameters
    ----------
    urllist : `list` of `dict`
        the ``'strain'`` metadata list, as retrieved from the GWOSC server

    segment : `tuple` of `int`, optional
        a ``[start, stop)`` GPS segment; when given, each URL must overlap it

    **match
        remaining keywords must match **exactly** against the corresponding
        key in each URL's dict; keywords whose value is `None` are ignored

    Yields
    ------
    dict :
        each matching URL dict, in the same order as the input ``urllist``

    Raises
    ------
    TypeError
        if a match keyword is not a key of the URL dicts
    """
    # drop null criteria up front so they never disqualify a URL
    criteria = {key: value for key, value in match.items() if value is not None}
    for meta in urllist:
        try:
            matches = all(meta[key] == want for key, want in criteria.items())
        except KeyError as exc:
            raise TypeError(
                "unrecognised match parameter: {}".format(str(exc))
            )
        if not matches:
            continue
        if segment:  # check overlap with the requested GPS span
            seg_start = meta["GPSstart"]
            candidate = (seg_start, seg_start + meta["duration"])
            if not segments_overlap(segment, candidate):
                continue
        yield meta
def _match_url(
    url,
    detector=None,
    start=None,
    end=None,
    tag=None,
    sample_rate=None,
    version=None,
    duration=None,
    ext=None,
):
    """Match a URL against requested parameters

    Any criterion left as `None` is ignored.  ``sample_rate`` is given in Hz
    and compared against the filename's kHz field (``"4KHZ"`` -> 4096 Hz).

    Returns
    -------
    None
        if the URL doesn't match the request
    tag, version : `str`, `int`
        if the URL matches the request

    Notes
    -----
    A filename that ``URL_REGEX`` cannot parse raises `AttributeError`
    (``.match()`` returns `None`).
    """
    reg = URL_REGEX.match(basename(url)).groupdict()
    # any single failed criterion disqualifies the URL
    if (
        (detector and reg['ifo'] != detector) or
        (tag and reg['tag'] != tag) or
        (version and int(reg['version']) != version) or
        (sample_rate and
         float(reg["samp"].rstrip("KHZ")) * 1024 != sample_rate) or
        (duration and float(reg["dur"]) != duration) or
        (ext and reg["ext"] != ext)
    ):
        return
    # match times
    if end is not None:
        gps = int(reg['strt'])
        if gps >= end:  # too late
            return
    if start is not None:
        gps = int(reg['strt'])
        dur = int(reg['dur'])
        if gps + dur <= start:  # too early
            return
    return reg['tag'], int(reg['version'])
def match(
    urls,
    detector=None,
    start=None,
    end=None,
    tag=None,
    sample_rate=None,
    version=None,
    duration=None,
    ext=None,
):
    """Match LOSC URLs for a given [start, end) interval

    Parameters
    ----------
    urls : `list` of `str`
        List of URL paths

    detector : `str`, optional
        Detector prefix to match, e.g. ``'H1'``

    start : `int`
        GPS start time of match interval

    end : `int`
        GPS end time of match interval

    tag : `str`, optional
        URL tag to match, e.g. ``'CLN'``

    sample_rate : `int`, optional
        Sample rate (Hz) to match

    version : `int` or `str`, optional
        Data release version to match (``'V2'``/``'R2'`` accepted),
        defaults to highest available version

    duration : `float`, optional
        File duration (seconds) to match

    ext : `str`, optional
        File extension to match

    Returns
    -------
    urls : `list` of `str`
        A sub-list of the input, based on matching, if no URLs are matched,
        the return will be empty ``[]``.

    Raises
    ------
    ValueError
        if more than one file tag is discovered and no ``tag`` was requested
    """
    matched = {}
    matched_tags = set()
    # sort URLs by duration, then start time, then ...
    urls = sorted(
        urls, key=lambda u: splitext(basename(u))[0].split('-')[::-1],
    )
    # format version request: strip a leading 'R'/'V' prefix if present
    if VERSION_REGEX.match(str(version)):
        version = version[1:]
    if version is not None:
        version = int(version)
    # loop URLS, bucketing matches by their release version
    for url in urls:
        m = _match_url(
            url,
            detector=detector,
            start=start,
            end=end,
            tag=tag,
            sample_rate=sample_rate,
            version=version,
            duration=duration,
            ext=ext,
        )
        if m is None:
            continue
        mtag, mvers = m
        matched_tags.add(mtag)
        matched.setdefault(mvers, [])
        matched[mvers].append(url)
    # if multiple file tags found, and user didn't specify, error
    if len(matched_tags) > 1:
        tags = ', '.join(map(repr, matched_tags))
        raise ValueError("multiple LOSC URL tags discovered in dataset, "
                         "please select one of: {}".format(tags))
    # extract highest version (max() raises ValueError on an empty dict)
    try:
        return matched[max(matched)]
    except ValueError:  # no matched files
        return []
| true |
11e6c9bbe1b4be478f20a805df604149f5f1e05a | Python | AkshayMukkavilli/Analyzing-the-Significance-of-Structure-in-Amazon-Review-Data-Using-Machine-Learning-Approaches | /src/file_mergers/merger_for_title_only_data.py | UTF-8 | 384 | 2.59375 | 3 | [] | no_license | import pandas as pd
# Merge the helpful-votes columns from the corrected-features CSV into the
# titles-only dataset, then write the combined file back out.
df1 = pd.read_csv(r'../../final_csv_files/FinalTitles_LatestData.csv')
print(df1.shape)
df2 = pd.read_csv(r'../../final_csv_files/OriginalFeatures(Corrected).csv')
print(df2.columns)
# NOTE(review): this copies columns by position, assuming both CSVs list the
# same reviews in the same row order -- confirm before re-running.
df1['Helpful_Votes'] = df2['Helpful_Votes']
df1['Z_Score_HelpfulVotes'] = df2['Z_Score_HelpfulVotes']
print(df1.head())
df1.to_csv(r'../../final_csv_files/TitleOnlyDataLatest.csv')
1691892cc98abba69fd6dbe761c7f6edbde916c0 | Python | kenluck2001/scraper_gevent | /HTTPClass.py | UTF-8 | 4,261 | 3.203125 | 3 | [] | no_license | import time
import requests
from datetime import datetime
import requests # library for HTTP
import json
import numbers
SUCCESS = 200
def dump_args(method, filename='output/log.txt'):
    """Decorator (Python 2) that logs each call's result and elapsed time.

    Non-None results are appended to *filename* with the run time in ms;
    when the caller passes a ``log_time`` dict the elapsed time is stored
    there instead of being printed.
    """
    def echo_func(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        # names of the wrapped function's positional parameters (Py2 API)
        argnames = method.func_code.co_varnames[:method.func_code.co_argcount]
        # write to log here
        # Add time of execution to log
        newResult = '%s %2.2f ms \n' % (result, (te - ts) * 1000)
        with open(filename, 'a') as f:
            try:
                # skip results that stringify to something containing "None"
                if "None" not in newResult:
                    f.write(newResult)
            except Exception as e:
                print("got exception {e}".format(e=e))
        if 'log_time' in kw:
            # caller-supplied dict: record elapsed ms under the given name
            name = kw.get('log_name', method.__name__.upper())
            kw['log_time'][name] = int((te - ts) * 1000)
        else:
            print 'Function name: %s \nTime of Execution: %2.2f ms \nFunction metadata: %s' % \
                (method.func_name, (te - ts) * 1000, ', '.join('%s=%r' % entry for entry in zip(argnames,
                args[:len(argnames)])+[("args", list(args[len(argnames):]))]+[("kwargs", kw)]))
        return result
    return echo_func
class HTTPClass:
    """Small Python 2 HTTP probe: fetches a URL and reports size/status."""
    def getCurrentTime(self):
        """
        obtain current UTC time and date as ("dd/mm/yy", "HH:MM:SS")
        """
        millenium = 2000
        d_date = datetime.utcnow()
        reg_format_date = d_date.strftime("%H:%M:%S")
        # two-digit year: full year minus 2000
        reg_format_date2 = d_date.strftime(
            "%d/%m/") + str(int(d_date.strftime("%Y")) - millenium)
        return (reg_format_date2, reg_format_date)
    @dump_args
    def getContent(self, url, interval):
        """ GET *url* with timeout *interval* and return a one-line summary
        ("date time url - bytes - status"), or None on failure """
        output = None
        try:
            if isinstance(interval, numbers.Number) and type(url) is str:  # check input
                if interval > 0:  # avoid zero interval
                    # make a get request to know status code
                    res = requests.get(url, timeout=interval)
                    resStatus, rescode = self.getResponseStatus(res)
                    output = "{0} {1} {2} - {3} - Bytes {4}".format(
                        self.getCurrentTime()[0], self.getCurrentTime()[1], url, len(res.text), rescode)
                    if rescode != SUCCESS:
                        print resStatus
            else:
                raise Exception(
                    'The provided URL {0} or interval {1} is not provided or valid'.format(url, interval))
        except ValueError:
            print "This Url is not valid: ", url
        except requests.ConnectionError:
            print "DNS failure, refused connection"
        except requests.HTTPError:
            print "Invalid HTTP response"
        except requests.TooManyRedirects:
            print "Exceeds the configured number of maximum redirections"
        return output
    def getResponseStatus(self, res):
        """ Map a requests Response to a (status-text, status-code) pair """
        if isinstance(res, requests.models.Response):
            status = None
            if res.status_code == requests.codes.ok:
                status = "Success"
            if res.status_code == 404:
                # Not Found
                status = "Not Found"
            if res.status_code == 408:
                # Request Timeout
                status = "Request Timeout"
            if res.status_code == 410:
                # Gone no longer in server
                status = "Not ON Server"
            if res.status_code == 503:
                # Website is temporary unavailable for maintenance
                status = "Temporary Unavailable"
            if res.status_code == 505:
                # HTTP version not supported
                status = "HTTP version not supported"
            return status, res.status_code
        raise Exception('Object is not of Requests type: {}'.format(res))
if __name__ == '__main__':
    # Smoke test: probe one well-known site with a 5 second timeout.
    url = "http://www.bbc.com"
    myhttp = HTTPClass()
    try:
        myhttp.getContent(url, interval=5)
    except Exception as e:
        print("got exception {e}".format(e=e))
| true |
618cfadfae855ce77b972b420d59a3dbd97201e5 | Python | sethangavel/machine_learning | /ucsc_ex/decision_tree/decision_tree.py | UTF-8 | 1,714 | 2.53125 | 3 | [] | no_license | from digits_pca import get_training_prinicipal_features_and_labels, get_test_prinicipal_features_and_labels
from utils_stump import build_tree, evaluate_tree, plot_contours
from commons import traverse_tree, log_debug, log
from sklearn.metrics import confusion_matrix
from config import *
import numpy as np
def main_task():
    """Build a decision tree on PCA features, plot its decision regions,
    then evaluate it on the test split and print the accuracy."""
    # Training
    xi, labels = get_training_prinicipal_features_and_labels()
    # remap the raw class labels to the tree's internal -1/+1 encoding
    labels[labels == NEGATIVE_CLASS] = NEGATIVE_CLASS_MAPPED
    labels[labels == POSITIVE_CLASS] = POSITIVE_CLASS_MAPPED
    # last column of x_nd is the label; first NUM_FEATURES are features
    x_nd = np.column_stack((xi, labels))
    root_node = build_tree(x_nd)
    stats_dict = {}
    traverse_tree(root_node, stats_dict)
    log(stats_dict)
    # NOTE(review): np.alen() is deprecated/removed in newer NumPy --
    # len(x_nd) is the modern equivalent here.
    training_target_actual = [0] * np.alen(x_nd)
    for idx in range(0, np.alen(x_nd)):
        training_target_actual[idx] = x_nd[idx][NUM_FEATURES]
    plot_contours(x_nd, training_target_actual, root_node)
    # Testing
    test_xi, test_labels = get_test_prinicipal_features_and_labels()
    test_labels[test_labels == NEGATIVE_CLASS] = NEGATIVE_CLASS_MAPPED
    test_labels[test_labels == POSITIVE_CLASS] = POSITIVE_CLASS_MAPPED
    test_x_nd = np.column_stack((test_xi, test_labels))
    test_target_actual = [0] * np.alen(test_x_nd)
    test_target_predicted = [0] * np.alen(test_x_nd)
    for idx in range(0, np.alen(test_x_nd)):
        test_target_actual[idx] = test_x_nd[idx][NUM_FEATURES]
        test_target_predicted[idx] = evaluate_tree((test_x_nd[idx][:NUM_FEATURES]), root_node)
    plot_contours(test_x_nd, test_target_actual, root_node)
    cm = confusion_matrix(test_target_actual, test_target_predicted)
    # accuracy = (true negatives + true positives) / all samples
    log("Accuracy: ", (cm[0][0] + cm[1][1]) / (np.sum(cm)))
if __name__ == '__main__':
    main_task()
| true |
58fe9e0a18cf9c0206a5b10894d3fe0650df8813 | Python | amritavarshi/guvi | /greatestofthreenos.py | UTF-8 | 126 | 4.09375 | 4 | [] | no_license | x,y,z=input().split()
# x, y and z come from input().split() above and are still strings;
# comparing strings orders them lexicographically ("9" > "10"), so
# convert them to numbers before comparing.
x, y, z = int(x), int(y), int(z)
# max() also handles ties -- the original strict ">" chains printed
# nothing at all when the greatest value appeared more than once.
print(max(x, y, z))
b0d4cc82276bf3efd75ac461ce23f5e8840037b8 | Python | PengfeiLi27/machine-learning | /SVM/SVM.py | UTF-8 | 8,001 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 16 17:43:03 2017
@author: PXL4593
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 16 16:09:25 2017
@author: PXL4593
"""
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_circles
class SVM(object):
    """Binary SVM classifier trained with a simplified SMO solver.

    Labels are expected to be +1/-1.  Supports 'linear', 'poly' (cubic)
    and 'RBF' kernels.  Equation numbers in comments refer to Li Hang,
    "Statistical Learning Methods", chapter 7.
    """
    def __init__(self, kernel='linear', rbf_gamma = 1, C = 1000, epsilon = 0.001):
        self.kernel = kernel
        self.epsilon = epsilon
        # larger gamma -> over fit
        # small gamma -> under fit
        self.gamma = rbf_gamma
        # penalty C
        self.C = C
    def _init_parameters(self, X, Y):
        '''
        initialize training state (multipliers, bias, error cache)
        '''
        self.X = X
        self.Y = Y
        # bias
        self.b = 0.0
        # dimension of feature
        self.n = len(X[0])
        # number of sample
        self.N = len(Y)
        # set all alpha = 0
        self.alpha = [0.0] * self.N
        # calculate error for each sample
        self.E = [self._E_(i) for i in range(self.N)]
        # max iteration
        self.Max_Interation = 50000
    def _satisfy_KKT(self, i):
        '''
        Check the KKT conditions for sample i (within epsilon):
        y_i * g(x_i) >=1 {x_i|a=0}
                      =1 {x_i|0<a<C}
                     <=1 {x_i|a=C}
        '''
        yg = self.Y[i] * self._g_(i)
        if abs(self.alpha[i])<self.epsilon:
            return yg > 1 or yg == 1
        elif abs(self.alpha[i]-self.C)<self.epsilon:
            return yg < 1 or yg == 1
        else:
            return abs(yg-1) < self.epsilon
    def is_stop(self):
        # Training may stop once every sample satisfies the KKT conditions.
        for i in range(self.N):
            satisfy = self._satisfy_KKT(i)
            if not satisfy:
                return False
        return True
    def _select_two_parameters(self):
        '''
        select the working pair (alpha_1, alpha_2) for one SMO step
        '''
        index_list = [i for i in range(self.N)]
        # prefer unbounded samples (0 < alpha < C) as the first multiplier
        i1_list_1 = list(filter(lambda i: self.alpha[i] > 0 and self.alpha[i] < self.C, index_list))
        i1_list_2 = list(set(index_list) - set(i1_list_1))
        i1_list = i1_list_1
        i1_list.extend(i1_list_2)
        for i in i1_list:
            if self._satisfy_KKT(i):
                continue
            E1 = self.E[i]
            max_ = (0, 0)
            # second multiplier: the one maximising |E1 - E2|
            for j in index_list:
                if i == j:
                    continue
                E2 = self.E[j]
                if abs(E1 - E2) > max_[0]:
                    max_ = (abs(E1 - E2), j)
            return i, max_[1]
    def _K_(self, x1, x2):
        '''
        kernel function K(x1, x2)
        '''
        if self.kernel == 'linear':
            return sum([x1[k] * x2[k] for k in range(self.n)])
        if self.kernel == 'poly':
            return (sum([x1[k] * x2[k] for k in range(self.n)])+1)**3
        if self.kernel == 'RBF':
            return np.exp(-self.gamma * sum([(x1[k] - x2[k])**2 for k in range(self.n)]))
    def _g_(self, i):
        '''
        g(x_i) = sumj[a_j*y_j*K(x_j,x_i)]+b
        '''
        result = self.b
        for j in range(self.N):
            result += self.alpha[j] * self.Y[j] * self._K_(self.X[j], self.X[i])
        return result
    def _E_(self, i):
        '''
        E(i) = g(x_i) - y_i
        '''
        return self._g_(i) - self.Y[i]
    def train(self, features, labels):
        """Fit the model with SMO on (features, labels in {-1, +1})."""
        k = 0
        self._init_parameters(features, labels)
        # NOTE(review): this condition loops for Max_Interation steps
        # regardless of is_stop(); "or" looks like it was meant to be
        # "and not self.is_stop()" -- confirm before changing.
        while k < self.Max_Interation or self.is_stop():
            i1, i2 = self._select_two_parameters()
            # feasible box [L, H] for the new alpha2
            if self.Y[i1] != self.Y[i2]:
                L = max(0, self.alpha[i2] - self.alpha[i1])
                H = min(self.C, self.C + self.alpha[i2] - self.alpha[i1])
            else:
                L = max(0, self.alpha[i2] + self.alpha[i1] - self.C)
                H = min(self.C, self.alpha[i2] + self.alpha[i1])
            E1 = self.E[i1]
            E2 = self.E[i2]
            '''
            eta = k11 + k22 - 2 k12
            '''
            eta = self._K_(self.X[i1], self.X[i1]) + self._K_(self.X[i2], self.X[i2]) - 2 * self._K_(self.X[i1], self.X[i2])
            # 7.106: unconstrained optimum for alpha2
            alpha2_new_unc = self.alpha[i2] + self.Y[i2] * (E1 - E2) / eta
            # 7.108: clip alpha2 into [L, H]
            alph2_new = 0
            if alpha2_new_unc > H:
                alph2_new = H
            elif alpha2_new_unc < L:
                alph2_new = L
            else:
                alph2_new = alpha2_new_unc
            # 7.109: alpha1 follows from the equality constraint
            alph1_new = self.alpha[i1] + self.Y[i1] * self.Y[i2] * (self.alpha[i2] - alph2_new)
            # 7.115 7.116: bias update candidates
            b_new = 0
            b1_new = -E1 - self.Y[i1] * self._K_(self.X[i1], self.X[i1]) * (alph1_new - self.alpha[i1]) - self.Y[i2] * self._K_(self.X[i2], self.X[i1]) * (alph2_new - self.alpha[i2]) + self.b
            b2_new = -E2 - self.Y[i1] * self._K_(self.X[i1], self.X[i2]) * (alph1_new - self.alpha[i1]) - self.Y[i2] * self._K_(self.X[i2], self.X[i2]) * (alph2_new - self.alpha[i2]) + self.b
            if alph1_new > 0 and alph1_new < self.C:
                b_new = b1_new
            elif alph2_new > 0 and alph2_new < self.C:
                b_new = b2_new
            else:
                b_new = (b1_new + b2_new) / 2
            # commit the step and refresh the error cache for both samples
            self.alpha[i1] = alph1_new
            self.alpha[i2] = alph2_new
            self.b = b_new
            self.E[i1] = self._E_(i1)
            self.E[i2] = self._E_(i2)
            k+= 1
    def help_predict(self,x_j):
        '''
        f(x) = sign(sum[a*y_i*K(x,x_i)]+b)
        '''
        f = self.b
        for i in range(self.N):
            f += self.alpha[i]*self.Y[i]*self._K_(x_j,self.X[i])
        if f > 0:
            return 1
        else:
            return -1
    def predict(self,X):
        """Return the list of +1/-1 predictions for each sample in X."""
        results = []
        for x in X:
            results.append(self.help_predict(x))
        return results
def scatterplot(x, y, title=''):
    """Scatter-plot 2-D points, colouring them by their +1/-1 label."""
    points = np.asarray(x)
    labels = np.asarray(y)
    positive = labels == 1
    negative = labels == -1
    # blue crosses for the +1 class, red squares for the -1 class
    plt.scatter(points[positive, 0], points[positive, 1],
                c='b', marker='x', label='1')
    plt.scatter(points[negative, 0], points[negative, 1],
                c='r', marker='s', label='-1')
    # frame the axes tightly around the data
    plt.xlim([min(points[:, 0]), max(points[:, 0])])
    plt.ylim([min(points[:, 1]), max(points[:, 1])])
    plt.legend(loc='best')
    plt.tight_layout()
    plt.title(title)
    plt.show()
def generate_xor_data(N=100, seed=1):
    """Create an XOR-labelled Gaussian dataset and split it 67/33.

    Returns (X_train, X_test, y_train, y_test) as plain Python lists;
    labels are +1 when exactly one coordinate is positive, else -1.
    """
    np.random.seed(seed)
    features = np.random.randn(N, 2)
    xor_mask = np.logical_xor(features[:, 0] > 0, features[:, 1] > 0)
    labels = np.where(xor_mask, 1, -1)
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.33, random_state=seed)
    return X_train.tolist(), X_test.tolist(), y_train.tolist(), y_test.tolist()
def generate_circle_data(N=100, seed=1):
    """Create a concentric-circles dataset and split it 67/33.

    Returns (X_train, X_test, y_train, y_test) as plain Python lists;
    the 0-class from make_circles is remapped to -1.
    """
    np.random.seed(seed)
    features, labels = make_circles(N, factor=.1, noise=.1)
    labels[labels == 0] = -1
    # NOTE: the split seed is fixed at 42 (independent of `seed`).
    split = train_test_split(features, labels, test_size=0.33, random_state=42)
    X_train, X_test, y_train, y_test = split
    return X_train.tolist(), X_test.tolist(), y_train.tolist(), y_test.tolist()
if __name__ == "__main__":
# set random seed
seed = np.random.randint(1000)
# create xor data
#X_train, X_test, y_train, y_test = generate_xor_data(200,seed)
# create circle data
X_train, X_test, y_train, y_test = generate_circle_data(200,seed)
svm = SVM(kernel='RBF',rbf_gamma=4, C=1000)
svm.train(X_train, y_train)
test_predict = svm.predict(X_test)
accuracy = accuracy_score(y_test,test_predict)
auc = roc_auc_score(y_test, test_predict)
print ("accuracy", accuracy)
print ("auc", auc)
scatterplot(X_train,y_train,'train data')
scatterplot(X_test,y_test,'test data')
scatterplot(X_test,test_predict,'predict result') | true |
1bd37a47951ba1be936d734a25a010e3de960a4a | Python | liruqi/topcoder | /Library/strings.py | UTF-8 | 674 | 3.0625 | 3 | [] | no_license | # https://www.hackerrank.com/challenges/bigger-is-greater/
# No such impl in Python lib: https://stackoverflow.com/questions/4223349
class strings:
    def next_permutation(w):
        """Return the next lexicographic permutation of *w*, or *w*'s
        characters in ascending order when *w* is already the last
        permutation (the HackerRank problem prints 'no answer' then).

        NOTE(review): defined without ``self`` -- call it as
        ``strings.next_permutation(word)``, not on an instance.
        """
        # stk collects the scanned suffix; pushes keep it non-decreasing
        stk=[]
        n=len(w)
        def nextperm(sc):
            # Find the first suffix character larger than sc, swap sc in,
            # and emit that character followed by the (sorted) remainder.
            i=0
            for x in stk:
                if x > sc:
                    stk[i] = sc
                    return x+''.join(stk)
                i+=1
            return ''.join(stk)+sc
        # scan from the right for the first position that can be increased
        for i in range(n):
            k=n-1-i
            if stk and w[k]<stk[-1]:
                return w[:k] + nextperm(w[k])
            else:
                stk.append(w[k])
        # return 'no answer'
        return ''.join(stk)
| true |
148fcc799d1d87d575c185646a6296ac8c670d9f | Python | TimilsinaBimal/30-Day-Python-Challenge | /day14.py | UTF-8 | 574 | 4.25 | 4 | [] | no_license | # How many ways can four students Ram, Anuj, Deepak and Ravi line up in
# a line, if the order matters?
# Print all the possible Combination.
def all_combination(arr):
    """Return every ordering (permutation) of *arr* as a list of lists.

    An empty input yields ``[]``; a single-element input yields ``[arr]``.
    """
    if not arr:
        return []
    if len(arr) == 1:
        return [arr]
    # pick each element as the head, then permute whatever remains
    return [
        [head] + tail
        for idx, head in enumerate(arr)
        for tail in all_combination(arr[:idx] + arr[idx + 1:])
    ]
students = ["Ram", "Anuj", "Deepak", "Ravi"]
for combination in all_combination(students):
print(combination)
| true |
eee2928cb1be8675f59ed68669659c3db775c717 | Python | etiennedub/pyk4a | /example/devices.py | UTF-8 | 322 | 2.671875 | 3 | [
"MIT"
] | permissive | from pyk4a import PyK4A, connected_device_count
# Enumerate all connected Azure Kinect devices and print their serials.
cnt = connected_device_count()
if not cnt:
    print("No devices available")
    exit()
print(f"Available devices: {cnt}")
for device_id in range(cnt):
    # open each device just long enough to read its serial number
    device = PyK4A(device_id=device_id)
    device.open()
    print(f"{device_id}: {device.serial}")
    device.close()
| true |
931241ff4a20b1c2be86577a442dd4894ae89ce9 | Python | garyForeman/artools | /artools/plotter.py | UTF-8 | 12,002 | 3.265625 | 3 | [
"MIT"
] | permissive | """Contains convenience functions for plotting AR simulation results such as transmission and reflection.
"""
#Filename: plotter.py
#Author: Andrew Nadolski
import os
import pprint
import shutil
import time
import matplotlib.pyplot as plt
import numpy as np
"""
TODO
7/26
* Debug _convert_to_wavelength(). The plot output looks funny....
* write a bandpass drawing function that take upper and lower limits
as input and draws a semi-opaque, colored rectangular region
"""
class Plot:
    """Contains the generic elements needed for an AR simulation plot

    Attributes
    ----------
    bandpasses : list
        A list of bandpasses (tuples), where each element contains a lower and
        upper bound, a color, a name, and an opacity. Bandpasses can be added
        using ``add_bandpass()``.
    data : array
        Defaults to 'None' type until the data to be plotted are loaded.
        Once data are loaded, any operations on the data happen to this instance.
        Any call to ``load_data()`` will overwrite this instance.
    draw_bandpasses : boolean
        If `True`, the contents of ``bandpasses`` is drawn on the plot. If
        `False`, the contents of ``bandpasses`` is ignored when drawing the
        plot. Defaults to `False`.
    frequency_units : string
        The units to plot on the frequency axis, if it exists. Must be one of:
          'Hz',
          'KHz',
          'MHz',
          'GHz',
          'THz'.
    legend : boolean
        If `True`, draws a legend on the plot. Defaults to `False`.
    raw_data : array
        Defaults to 'None' type until the data to be plotted are loaded.
        Once the data are loaded, this copy of the data are kept in the
        'as-loaded' state so they may be reverted to easily. Any call to
        ``load_data()`` will overwrite this copy.
    save_name : string
        The name under which the output plot is saved. Defaults to
        'my_plot_XXXXX.pdf' where `XXXXX` is a time-stamp to avoid overwriting
        previous plots.
    save_path : string
        The path to which the output plot will be saved. Defaults to the current
        working directory
    title : string
        The title of the plot
    type : string
        The type of plot
    wavelength_units : string
        The units to plot on the wavelength axis, if it exists. Must be one of:
          'm',
          'cm',
          'mm',
          'um',
          'micron'.
    xlabel : string
        The x-axis label
    ylabel : string
        The y-axis label
    """
    def __init__(self):
        self.bandpasses = []
        self.data = None
        self.draw_bandpasses = False
        self.draw_legend = False
        self.frequency_units = 'GHz'
        self.raw_data = None
        # timestamped default name avoids clobbering earlier output
        self.save_name = 'my_plot_{t}.pdf'.format(t=time.ctime(time.time()))
        self.save_path = '.'
        self.title = 'Generic plot'
        self.type = 'Generic'
        self.vs_frequency = True
        self.wavelength_units = 'mm'
        self.xlabel = None
        self.ylabel = None
    def __repr__(self):
        return '{type} plot'.format(type=self.type)
    def _convert_to_wavelength(self, frequencies):
        """Converts frequencies to wavelength. Ignores division by zero
        errors and sets results of division by zero to 0.

        Arguments
        ---------
        frequencies : array
            An array of frequencies given in hertz

        Returns
        -------
        wavelengths : array
            An array of wavelengths computed from the input frequency array
        """
        # NOTE(review): assumes `frequencies` is in Hz (c = 3e8 m/s) --
        # confirm the upstream data units before relying on this.
        with np.errstate(divide='ignore', invalid='ignore'):
            wavelengths = np.true_divide(3e8, frequencies)
            wavelengths[np.isinf(wavelengths)] = 0.
        return wavelengths
    def _draw_bandpasses(self):
        """Draws the contents of ``bandpasses`` attribute on the plot
        """
        for bandpass in self.bandpasses:
            # bandpass tuple layout: (low, high, color, label, opacity)
            low = bandpass[0]
            high = bandpass[1]
            color = bandpass[2]
            label = bandpass[3]
            opacity = bandpass[4]
            plt.axvspan(low, high, fc=color, ec='none', alpha=opacity, label=label)
        return
    def _draw_legend(self):
        """Draws a legend on the plot at the position matplotlib deems best
        """
        plt.legend(fontsize='x-small')
        return
    def _make_save_path(self):
        """Assembles the full save path for the output plot

        Returns
        -------
        path : string
            The full path to which the output plot will be saved
        """
        # ensure the output name always carries a .pdf extension
        if self.save_name.endswith('.pdf'):
            path = os.path.join(self.save_path, self.save_name)
        else:
            self.save_name = self.save_name+'.pdf'
            path = os.path.join(self.save_path, self.save_name)
        return path
    def _shape_data(self):
        """Does some basic data manipulation based on plot attributes
        such as preferred units
        """
        freq_units = {'Hz':1, 'KHz':10**3, 'MHz':10**6, 'GHz':10**9, 'THz':10**12}
        wave_units = {'m':1, 'cm':10**-2, 'mm':10**-3, 'um':10**-6, 'micron':10**-6}
        # NOTE(review): this rescales self.data[0] in place, so calling
        # make_plot() twice rescales twice; revert_data() is the escape
        # hatch, but raw_data shares the same object -- confirm intent.
        if self.vs_frequency:
            try:
                self.data[0] = self.data[0]/freq_units[self.frequency_units]
            except:
                raise ValueError('Unrecognized frequency units. See plotter.Plot() docstring for accepted units.')
        else:
            try:
                self.data[0] = self._convert_to_wavelength(self.data[0])
                self.data[0] = self.data[0]/wave_units[self.wavelength_units]
            except:
                raise ValueError('Unrecognized wavelength units. See plotter.Plot() docstring for accepted units.')
        return
    def add_bandpass(self, lower_bound, upper_bound, color=None, label=None, opacity=0.1):
        """Adds a bandpass region to the plot. The region is a shaded rectangle
        spanning the full height of the plot.

        Arguments
        ---------
        lower_bound : float
            The lower edge of the bandpass, given in x-axis units.
        upper_bound : float
            The upper edge of the bandpass, given in x-axis units.
        color : string, optional
            The color of the bandpass region. Can be any color string
            recognized by matplotlib. Defaults to 'None', which means a
            random color will be chosen for the bandpass shading.
        label : string, optional
            The name that will appear in the legend, if a legend is used.
            Defaults to 'None', which means no name will be displayed in
            the legend.
        opacity : float, optional
            The opacity of the shaded region. Must be between 0 and 1, inclusive.
            1 is completely opaque, and 0 is completely transparent.
        """
        bandpass = (lower_bound, upper_bound, color, label, opacity)
        self.bandpasses.append(bandpass)
        return
    def load_data(self, data):
        """Load a new set of data while retaining other plot
        characteristics

        Arguments
        ---------
        data : numpy array
            The data to be plotted. Replaces any existing data in
            the 'data' and 'raw_data' attributes.
        """
        # NOTE(review): both attributes reference the *same* object, so
        # in-place edits to data also alter raw_data -- confirm intent.
        self.data = data
        self.raw_data = data
        return
    def make_plot(self):
        """Draws a plot of the loaded data

        Arguments
        ---------
        data : array
            A 2-element array where the first element is a set of
            frequencies (or wavelengths) and the second elements
            is a set of transmissions (or reflections)
        """
        fig = plt.figure()
        ax = fig.add_subplot(111)
        self.set_xlabel()
        ax.set_title(self.title)
        ax.set_ylabel(self.ylabel)
        ax.set_xlabel(self.xlabel)
        # NOTE(review): y-limits are hard-coded for transmission-style
        # data near 1.0 -- confirm for other plot types.
        ax.set_ylim(0.6,1.025)
        self._shape_data()
        # row 1 holds transmission, row 2 reflection; the generic
        # fallback plots the x-data against itself
        if self.type == 'Transmission':
            ax.plot(self.data[0], self.data[1])
        elif self.type == 'Reflection':
            ax.plot(self.data[0], self.data[2])
        else:
            ax.plot(self.data[0], self.data[0])
        if self.draw_bandpasses:
            self._draw_bandpasses()
        if self.draw_legend:
            self._draw_legend()
        path = self._make_save_path()
        plt.savefig(path, bbox_inches='tight')
    def plot_vs_freq(self):
        """Plot the data vs frequency
        """
        self.vs_frequency = True
        return
    def plot_vs_wavelength(self):
        """Plot the data vs wavelength
        """
        self.vs_frequency = False
        return
    def revert_data(self):
        """Resets the data to its original, 'as-loaded' form
        """
        self.data = self.raw_data
        return
    def set_title(self, title):
        """Set the plot title

        Arguments
        ---------
        title : string
            The title of the plot
        """
        self.title = title
        return
    def set_xlabel(self, xlabel=None):
        """Set the x-axis label

        Arguments
        ---------
        xlabel : string, optional
            The label for the x-axis. Defaults to `None`. If `None`, x-axis
            label is chosen based on the x-axis units
        """
        if xlabel is None:
            if self.vs_frequency:
                self.xlabel = r'$\nu$'+' [{}]'.format(self.frequency_units)
            else:
                self.xlabel = r'$\lambda$'+' [{}]'.format(self.wavelength_units)
        else:
            self.xlabel = xlabel
        return
    def set_ylabel(self, ylabel):
        """Set the y-axis label

        Arguments
        ---------
        ylabel : string
            The label for the y-axis
        """
        self.ylabel = ylabel
        return
    def show_attributes(self):
        """Convenience function to display all the attributes of the plot
        """
        print('The plot attributes are:\n')
        pprint.pprint(vars(self))
        return
    def toggle_bandpasses(self):
        """Toggles the value of ``draw_bandpasses`` attribute between
        `False` and `True`. If set to `False` bandpasses will be ignored. If
        `True`, bandpasses will be drawn on the plot.
        """
        if type(self.draw_bandpasses) == type(True):
            if self.draw_bandpasses:
                self.draw_bandpasses = False
            elif not self.draw_bandpasses:
                self.draw_bandpasses = True
        else:
            raise TypeError("'draw_bandpasses' must be boolean")
        return
    def toggle_legend(self):
        """Toggles the value of ``draw_legend`` attribute between `False` and
        `True`. If set to `False` the legend will be ignored. If `True`,
        the legend will be drawn on the plot.
        """
        if type(self.draw_legend) == type(True):
            if self.draw_legend:
                self.draw_legend = False
            elif not self.draw_legend:
                self.draw_legend = True
        else:
            raise TypeError("'draw_legend' must be boolean")
        return
class ReflectionPlot(Plot):
    """A `Plot` specialized for reflection curves.

    Overrides the generic labels so that `make_plot` draws the reflection
    row of the data against frequency (or wavelength).
    """
    def __init__(self):
        # Start from the generic defaults, then override the labels.
        Plot.__init__(self)
        self.type = 'Reflection'
        self.title = 'Reflection plot'
        self.ylabel = 'Reflection'
class TransmissionPlot(Plot):
    """A `Plot` specialized for transmission curves.

    Overrides the generic labels so that `make_plot` draws the transmission
    row of the data against frequency (or wavelength).
    """
    def __init__(self):
        # Start from the generic defaults, then override the labels.
        Plot.__init__(self)
        self.type = 'Transmission'
        self.title = 'Transmission plot'
        self.ylabel = 'Transmission'
class MCPlot(Plot):
    """A `Plot` specialized for Monte Carlo (MCMC) results.

    Only retitles the generic plot; the y-label is left at the default.
    """
    def __init__(self):
        # Start from the generic defaults, then override the labels.
        Plot.__init__(self)
        self.type = 'MCMC'
        self.title = 'MCMC plot'
| true |
82e0f1f8a99fd3b4f5a514b1653ac8663b6dadc2 | Python | rigogsilva/sqldf | /sqldf/test/test_sqldf.py | UTF-8 | 1,653 | 3.265625 | 3 | [] | no_license | from sqldf import sqldf
# RAW DataFrame
inventory = [{'item': 'Banana', 'quantity': 33}, {'item': 'Apple', 'quantity': 2}]
orders = [{'order_number': 1, 'item': 'Banana', 'quantity': 10}, {'order_number': 2, 'item': 'Apple', 'quantity': 10}]
# To select data from a DataFrame and also register a table in memory do the following:
print('Inventory:')
inventory_pyspark_df = sqldf.sql(
"""
SELECT item,
quantity AS quantity_available
FROM inventory_table
""",
inventory,
table='inventory_table')
inventory_pyspark_df.show()
print('Orders:')
orders_pyspark_df = sqldf.sql(
"""
SELECT order_number,
item,
quantity AS quantity_ordered
FROM order_table
""",
orders,
table='order_table')
orders_pyspark_df.show()
# Since the table has been specified above, the table will be saved in memory.
# The next time you want to select data from the table jut do the following:
# Get inventory below quantity of 10 so we can order more of these items.
print('Items low in quantity:')
inventory_low = sqldf.sql(
"""SELECT item,
quantity AS quantity_low
FROM inventory_table
WHERE quantity < {{ quantity }}
""",
quantity=10)
inventory_low.show()
# Ge the orders that will be able to be fullfiled.
# Note that since we already registered these tables, we don’t need to specify the able again.
# You can specify the table name if you want to use that later in another query.
print('Orders with inventory: ')
orders_with_inventory = sqldf.sql(
"""
SELECT ot.*
FROM inventory_table it
JOIN order_table ot
ON it.item = ot.item
WHERE it.quantity >= ot.quantity
"""
)
orders_with_inventory.show()
| true |
ab55b3c072bee04479f62fa7c604bd2b8ea8afb8 | Python | jaz-programming/python-tutorial-gaming-1 | /pokerdice.py | UTF-8 | 1,551 | 3.546875 | 4 | [] | no_license | #!/usr/bin/python2.7
#pokerdice.py
import random
from itertools import groupby
# Die face values for poker dice: 9, 10, Jack, Queen, King, Ace.
nine = 1
ten = 2
jack = 3
queen = 4
king = 5
ace = 6
# Display name for each face value.
# Bug fix: the original dict literal used `king = "K", ace = "A"`, which is
# a SyntaxError -- dictionary entries must be written as `key: value`.
names = {nine: "9", ten: "10", jack: "J", queen: "Q", king: "K", ace: "A"}
# Running score totals across games.
player_score = 0
computer_score = 0
def start():
    """Entry point (Python 2): greet the player, loop games until the
    player declines another round, then show the final scores."""
    print "Let's play a game of Poker Dice."
    while game():
        pass
    scores()
def game():
    """Play one round of Poker Dice; return True to play another round."""
    print "The computer will help you throw your five dice."
    throws()
    return play_again()
def throws():
roll_number = 5
dice = roll(roll_number)
dice.sort()
for i in range(len(dice)):
print "Dice ", i + 1, ": ", names[dice[i]]
result = hand(dice)
print "You currently have", result
while True:
rerolls = raw_input("How many dice do you want to throw again?")
try:
if rerolls in (1, 2, 3, 4, 5):
break
except ValueError:
pass
print "That wasn't a valid answer. Please enter 1, 2, 3, 4 or 5."
if rerolls == 0:
print "You finish with ", result
else:
roll_number = rerolls
dice_rerolls = roll(roll_number)
dice_changes = range(rerolls)
print "Enter the number of a dice to reroll: "
iterations = 0
while iterations < rerolls:
iterations += 1
while True:
selection = raw_input("")
try:
if selection in (1, 2, 3, 4, 5):
break
except ValueError:
pass
print "That wasn't a valid answer. Please enter 1, 2, 3, 4 or 5."
dice_changes[iterations-1] = selection-1
print "You have changed dice ", selection
iterations = 0
while iterations < rerolls:
iterations += 1
replacement = dice
| true |
acaa43f322bb6bc89477e8f5a69119a42354c3c5 | Python | filipepcampos/pokemon-xml-data | /moves.py | UTF-8 | 1,361 | 2.984375 | 3 | [] | no_license | from config import *
from dict2xml import dict2xml
import requests
def parseSingleMove(data):
    """Fetch and summarise one Pokemon move from its PokeAPI resource.

    ``data`` is a resource reference dict containing a ``'url'`` key.
    Returns ``(move_id, summary_dict)`` where the summary holds accuracy,
    power, pp, flavor-text description and localised name; missing numeric
    stats are normalised to 0.
    """
    url = data['url']
    r = requests.get(url)
    data = r.json()
    moveId = int(data["id"])
    dataDict = {}
    dataDict["accuracy"] = data["accuracy"] if data["accuracy"] != None else 0
    dataDict["power"] = data["power"] if data["power"] != None else 0
    dataDict["pp"] = data["pp"] if data["pp"] != None else 0
    # TODO: This is stupid, there's no text for versions below gold-silver
    ver = VERSION if VERSION not in ['red-blue', 'yellow'] else 'gold-silver'
    # first flavor text matching the configured language and game version
    dataDict["description"] = [i for i in data["flavor_text_entries"] if i["language"]["name"] == LANGUAGE and i["version_group"]["name"] == ver][0]["flavor_text"]
    dataDict["name"] = [i["name"] for i in data["names"] if i["language"]["name"] == LANGUAGE][0]
    return moveId, dataDict
def parseMoves(data):
    """Fetch every move in ``data`` (a list of PokeAPI resource refs) and
    write the collected summaries to ``moves.xml``.

    Keys are prefixed with an underscore ("_<id>") so they form valid XML
    element names.  Progress is printed every 10 moves.
    """
    totalN = len(data)
    print(f"Reading Moves ({totalN} moves in total)")
    N = 0
    dataDict = {}
    for moveData in data:
        i, j = parseSingleMove(moveData)
        dataDict["_" + str(i)] = j
        if(N % 10 == 0):
            print(f"  {N}/{totalN}")
        N += 1
    print("Writing Moves to XML")
    outDict = {}
    outDict["moves"] = dataDict
    xml = dict2xml(outDict)
    with open("moves.xml", "w+") as file:
        file.write(xml)
bc8b555f44a667ceb1d95991a5109dfe514f2417 | Python | NAV-2020/nichipurenko | /Lesson_16_DZ_Nichipurenko_A.V/Lesson_16_DZ_3_Nichipurenko_A.V.py | UTF-8 | 7,126 | 3.546875 | 4 | [] | no_license | """
Создайте программу «Фирма». Нужно хранить информацию о человеке: ФИО, телефон, рабочий email,
название должности, номер кабинета, skype. Требуется
реализовать возможность добавления, удаления, поиска, замены данных. Используйте словарь для хранения
информации.
"""
import pprint
def get_company_employee(company_employee: list) -> list:
return company_employee
def print_result(*args) -> None:
for element in args:
#print(element)
pprint.pprint(element)
input('Press to continue...')
def add_company_employee(surname_name_middle_name: str, telephone: str, emai_l: str,
post: str, room_number: str, skype: str) -> dict:
global COMPANY_EMPLOYEE
company_employee = {
"Surname, name, middle name": surname_name_middle_name,
"Telephone": telephone,
"Email": emai_l,
"Post": post,
"Room number": room_number,
"Skype": skype
}
COMPANY_EMPLOYEE.append(company_employee)
return company_employee
def del_company_employee(surname_name_middle_name: str) -> dict:
global COMPANY_EMPLOYEE
deleted_company_employee = {}
for index, company_employee in enumerate(COMPANY_EMPLOYEE):
if company_employee['Surname, name, middle name'] == surname_name_middle_name:
deleted_company_employee = company_employee
del(COMPANY_EMPLOYEE[index])
return deleted_company_employee
def search_company_employee(surname_name_middle_name: str) -> dict:
global COMPANY_EMPLOYEE
for company_employee in COMPANY_EMPLOYEE:
if company_employee['Surname, name, middle name'] == surname_name_middle_name:
return company_employee
return f"The employee in the list {surname_name_middle_name} does not exist\n"
def update_company_employee(surname_name_middle_name: str) -> dict:
global COMPANY_EMPLOYEE
for index, company_employee in enumerate(COMPANY_EMPLOYEE):
if company_employee['Surname, name, middle name'] == surname_name_middle_name:
surname_name_middle_name = company_employee["Surname, name, middle name"] # фамилие, имя, отчество
telephone = company_employee["Telephone"] # телефон
emai_l = company_employee["Email"] # email
post = company_employee["Post"] # должность
room_number = company_employee["Room number"] # номер кабинета
skype = company_employee["Skype"] # skype
new_surname_name_middle_name = input(f"Enter telephone ({surname_name_middle_name} - default): ")
new_telephone = input(f"Enter telephone ({telephone} - default): ")
new_emai_l = input(f"Enter email ({emai_l} - default): ")
new_post = input(f"Enter post ({post} - default): ")
new_room_number = input(f"Enter room number ({room_number} - default): ")
new_skype = input(f"Enter skype ({skype} - default): ")
if new_surname_name_middle_name:
company_employee["Surname, name, middle name"] = new_surname_name_middle_name
if new_telephone:
company_employee["Telephone"] = new_telephone
if new_emai_l:
company_employee["Email"] = new_emai_l
if new_post:
company_employee["Post"] = new_post
if new_room_number:
company_employee["Room number"] = new_room_number
if new_skype:
company_employee["Skype"] = new_skype
return company_employee
if __name__ == "__main__":
COMPANY_EMPLOYEE_LIST = 'list' # список баскетболистов
ADD_COMPANY_EMPLOYEE = 'add' # добавить
DEL_COMPANY_EMPLOYEE = 'delete' # удалить
UPDATE_COMPANY_EMPLOYEE = 'update' # обновить
SEARCH_COMPANY_EMPLOYEE = 'search' # поиск
EXIT = 'q' # выход
COMPANY_EMPLOYEE = []
print("""
Создайте программу «Фирма». Нужно хранить информацию о
человеке: ФИО, телефон, рабочий email, название должности,
номер кабинета, skype. Требуется реализовать возможность
добавления, удаления, поиска, замены данных. Используйте
словарь для хранения информации.
"""
)
while True:
print(f'''
Choices:
COMPANY_EMPLOYEE_LIST -> {COMPANY_EMPLOYEE_LIST}
ADD_COMPANY_EMPLOYEE -> {ADD_COMPANY_EMPLOYEE}
DEL_COMPANY_EMPLOYEE -> {DEL_COMPANY_EMPLOYEE}
UPDATE_COMPANY_EMPLOYEE -> {UPDATE_COMPANY_EMPLOYEE}
SEARCH_COMPANY_EMPLOYEE -> {SEARCH_COMPANY_EMPLOYEE}
EXIT -> {EXIT}
---------------------
''')
choice = input('Enter choice: ')
if choice == EXIT:
break
elif choice == COMPANY_EMPLOYEE_LIST:
company_employee = get_company_employee(COMPANY_EMPLOYEE)
print_result(company_employee)
elif choice == ADD_COMPANY_EMPLOYEE:
surname_name_middle_name = input('Enter surname, name, middle name: ') # фамилие, имя, отчество
telephone = input('Enter telephone: ') # телефон
emai_l = input('Enter email: ') # email
post = input('Enter post: ') # должность
room_number = input('Enter room number: ') # номер кабинета
skype = input('Enter skype: ') # skype
company_employee = add_company_employee(
surname_name_middle_name = surname_name_middle_name,
telephone = telephone,
emai_l = emai_l,
post = post,
room_number = room_number,
skype = skype
)
print_result(company_employee)
elif choice == DEL_COMPANY_EMPLOYEE:
surname = input("Enter surname, name, middle name: ")
company_employee = del_company_employee(surname_name_middle_name = surname_name_middle_name)
print_result(company_employee)
elif choice == SEARCH_COMPANY_EMPLOYEE:
surname_name_middle_name = input('Enter surname, name, middle name: ')
company_employee = search_company_employee(surname_name_middle_name = surname_name_middle_name)
print_result(company_employee)
elif choice == UPDATE_COMPANY_EMPLOYEE:
surname_name_middle_name = input("Enter surname, name, middle name: ")
company_employee = update_company_employee(surname_name_middle_name = surname_name_middle_name)
if company_employee != None:
print_result(company_employee) | true |
213ae5d280e7a17a06b8388cd98714b4eb37ceee | Python | shikhar-srivastava/Optimizing-deep-neural-networks | /graph code/helper_code/roc_curve.py | UTF-8 | 3,543 | 2.515625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc,roc_auc_score,f1_score,accuracy_score
from scipy import interp
# false_positive_rate
# true_positive_rate
fpredicted_svm = open("C:\Users\MAHE\Desktop\Programs\Python\predicted_smoteSVM.csv")
flables_svm= open("C:\Users\MAHE\Desktop\Programs\Python\labels_smoteSVM.csv")
fpredicted_rf = open("C:\Users\MAHE\Desktop\Programs\Python\predicted_smoteRandomForest.csv")
flables_rf= open("C:\Users\MAHE\Desktop\Programs\Python\labels_smoteRandomForest.csv")
y_test_svm= np.loadtxt(fname = flables_svm, delimiter = ',',dtype='double').astype(int)
y_score_svm= np.loadtxt(fname = fpredicted_svm, delimiter = ',',dtype='double').astype(int)
n_values_svm = np.max(y_test_svm) + 1
y_test_svm=np.eye(n_values_svm)[y_test_svm]
n_values_svm=np.max(y_score_svm)+1
y_score_svm=np.eye(n_values_svm)[y_score_svm]
y_test_rf= np.loadtxt(fname = flables_rf, delimiter = ',',dtype='double').astype(int)
y_score_rf= np.loadtxt(fname = fpredicted_rf, delimiter = ',',dtype='double').astype(int)
n_values_rf = np.max(y_test_rf) + 1
y_test_rf=np.eye(n_values_rf)[y_test_rf]
n_values_rf=np.max(y_score_rf)+1
y_score_rf=np.eye(n_values_rf)[y_score_rf]
print 'y_predicted (Random Forest): ',y_score_rf
print 'y_test: (Random Forest): ',y_test_rf
print 'y_predicted (SVM): ',y_score_svm
print 'y_test: (SVM): ',y_test_svm
n_classes=2
fpr_svm = dict()
tpr_svm = dict()
roc_auc_svm = dict()
fpr_rf = dict()
tpr_rf = dict()
roc_auc_rf = dict()
for i in range(n_classes):
fpr_rf[i], tpr_rf[i], _ = roc_curve(y_test_rf[:, i], y_score_rf[:, i])
roc_auc_rf[i] = auc(fpr_rf[i],tpr_rf[i])
fpr_svm[i], tpr_svm[i], _ = roc_curve(y_test_svm[:, i], y_score_svm[:, i])
roc_auc_svm[i] = auc(fpr_svm[i],tpr_svm[i])
# Compute micro-average ROC curve and ROC area
fpr_rf["micro"], tpr_rf["micro"], _ = roc_curve(y_test_rf.ravel(), y_score_rf.ravel())
roc_auc_rf["micro"] = auc(fpr_rf["micro"], tpr_rf["micro"])
fpr_svm["micro"], tpr_svm["micro"], _ = roc_curve(y_test_svm.ravel(), y_score_svm.ravel())
roc_auc_svm["micro"] = auc(fpr_svm["micro"], tpr_svm["micro"])
# First aggregate all false positive rates
"""all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
"""
# Plot all ROC curves
plt.figure(figsize=(10, 9))
plt.plot(fpr_rf["micro"], tpr_rf["micro"],
label='Random Forest: AUC = {0:0.2f}'
''.format(roc_auc_rf["micro"]),
linewidth=3)
plt.plot(fpr_svm["micro"], tpr_svm["micro"],
label='Support Vector Machine: AUC = {0:0.2f}'
''.format(roc_auc_svm["micro"]),
linewidth=3)
"""plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)"""
# plt.text(0.9,0.5, ('F1 Score: %.2f' % score).lstrip('0'),
# size=15, horizontalalignment='right')
"""for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
"""
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('FPR (1- Specificity)')
plt.ylabel('TPR (Sensitivity)')
# plt.title('ROC Curve (Models on PCA)')
plt.legend(loc="lower right")
plt.show() | true |
de1070a7b109471af788cc34aefbae84c2ff7efb | Python | chiffa/Chiffa_Area51 | /git_auto_update.py | UTF-8 | 1,310 | 2.515625 | 3 | [] | no_license | __author__ = 'Andrei'
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler, FileSystemEventHandler
from datetime import datetime
import subprocess
from time import sleep
class MyEventHandler(FileSystemEventHandler):
def on_any_event(self, event):
if not '~' in event.src_path:
message = "\"%s: %s %s\"" % (datetime.now(), event.event_type, event.src_path)
print message
bash_command = "git commit -m %s" % message
print bash_command
subprocess.Popen('git add . --ignore-removal', cwd=path)
sleep(5)
subprocess.Popen(bash_command, cwd=path)
sleep(5)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = sys.argv[1] if len(sys.argv) > 1 else '.'
# path = 'C:\\Users\\Andrei\\Desktop\\terrible_git\\Myfolder'
event_handler = MyEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join() | true |
436c697eb9c3c6c54284f3427cc6fc679ace0f33 | Python | chenguosen/AspBac | /aspirelibs/MySQLLibs2.py | UTF-8 | 5,136 | 2.640625 | 3 | [] | no_license | '''
Created on 2020年5月19日
@author: xiecs
'''
import pymysql
from dbutils.pooled_db import PooledDB
class PooledMySQL(object):
__pool = None
__conn_params = {}
def __init__(self, connstr):
params = connstr.split(',')
for i in params:
kv = i.split('=')
self.__conn_params[kv[0]] = kv[1]
self.__conn_params['port']=int(self.__conn_params.get('port'))
self.getconn()
def __enter__(self):
self.conn = self.__getconn()
def __getconn(self):
if self.__pool is None:
self.__pool = PooledDB(
creator=pymysql,
mincached=0,
maxcached=3,
maxshared=5,
maxconnections=5,
blocking=True,
maxusage=0,
setsession=None,
host=self.__conn_params.get('host'),
port=self.__conn_params.get('port'),
user=self.__conn_params.get('user'),
passwd=self.__conn_params.get('passwd'),
db=self.__conn_params.get('db'),
charset=self.__conn_params.get('charset')
)
return self.__pool.connection()
def getconn(self):
conn = self.__getconn()
cursor = conn.cursor()
return cursor, conn
# def __exit__(self, exc_type, exc_val, exc_tb):
# self.cursor.close()
# self.conn.close()
class MySQLHelper(object):
'''
classdocs
'''
# __conn = None
# __cursor = None
def __init__(self, connstr):
'''
Constructor
'''
self.db = PooledMySQL(connstr)
# def __new__(cls, *args, **kwargs):
# if not hasattr(cls, 'inst'): # 单例
# cls.inst = super(MySQLHelper, cls).__new__(cls, *args, **kwargs)
# return cls.inst
#
def close(self, cursor, conn):
if cursor is not None:
conn.close()
if conn is not None:
conn.close()
def execute(self, sql, param=None, autoclose=False):
cursor, conn = self.db.getconn()
count = 0
try:
if param:
count = cursor.execute(sql, param)
else:
count = cursor.execute(sql)
conn.commit()
if autoclose:
self.close(cursor, conn)
except Exception as e:
print("error_msg:", e.args)
return cursor, conn, count
def selectone(self, sql, param=None):
try:
cursor, conn, count = self.execute(sql, param)
res = cursor.fetchone()
self.close(cursor, conn)
return res
except Exception as e:
print("error_msg:", e.args)
self.close(cursor, conn)
return count
def select(self, sql, param=None):
try:
cursor, conn, count = self.execute(sql, param)
res = cursor.fetchall()
self.close(cursor, conn)
return res
except Exception as e:
print("error_msg:", e.args)
self.close(cursor, conn)
return count
def insertone(self, sql, param):
try:
cursor, conn, count = self.execute(sql, param)
conn.commit()
self.close(cursor, conn)
return count
except Exception as e:
print(e)
conn.rollback()
self.close(cursor, conn)
return count
def insertmany(self, sql, param):
'''
:param sql:
:param param: 必须是元组或列表[(),()]或((),())
:return:
'''
cursor, conn, count = self.db.getconn()
try:
cursor.executemany(sql, param)
conn.commit()
return count
except Exception as e:
print(e)
conn.rollback()
self.close(cursor, conn)
return count
def delete(self, sql, param=None):
try:
cursor, conn, count = self.execute(sql, param)
self.close(cursor, conn)
return count
except Exception as e:
print(e)
conn.rollback()
self.close(cursor, conn)
return count
def update(self, sql, param=None):
try:
cursor, conn, count = self.execute(sql, param)
conn.commit()
self.close(cursor, conn)
return count
except Exception as e:
print(e)
conn.rollback()
self.close(cursor, conn)
return count
def main():
print("test db")
db=MySQLHelper("host=10.12.3.235,port=3306,user=tstfabric1,passwd=tstfabric1,db=db_tstfabric1,charset=utf8")
rec = db.select("SELECT * FROM t_transaction WHERE tid = '10010258-20200410132322-29998068';")
print(rec)
if __name__ == '__main__':
main()
| true |
4896f0868779256d995fe64be26bdaaaffcb09b5 | Python | Artembbk/articlesReaderTelegramBot | /main.py | UTF-8 | 5,064 | 2.65625 | 3 | [] | no_license | import telebot
from urllib.parse import urlparse
import requests
import validators
from Voicer import MeduzaVoicer
TOKEN = "TOKEN"
outputFile = "audio.opus"
folderId = "folderId"
supportedSites = ["meduza.io"]
OK_RESPONSE_CODE = 200
START_M = """
Привет! Пришли мне ссылку на любую (почти) статью с сайта meduza.io и я верну тебе озвученную версию статьи\n
ВАЖНО!!!\n
Ссылка должна быть ПОЛНОЙ\n
Например такая подойдет:
https://meduza.io/news/2021/07/16/nayden-propavshiy-pod-tomskom-an-28-on-sovershil-zhestkuyu-posadku-passazhiry-zhivy\n
А такая нет:
meduza.io/news/2021/07/16/nayden-propavshiy-pod-tomskom-an-28-on-sovershil-zhestkuyu-posadku-passazhiry-zhivy\n
/help
"""
HELP_M = """
Заходи на сайт meduza.io, выбирай любую (почти) статью и пришли мне ссылку на нее\n
ВАЖНО!!!\n
Ссылка должна быть ПОЛНОЙ\n
Например такая подойдет:
https://meduza.io/news/2021/07/16/nayden-propavshiy-pod-tomskom-an-28-on-sovershil-zhestkuyu-posadku-passazhiry-zhivy\n
А такая нет:
meduza.io/news/2021/07/16/nayden-propavshiy-pod-tomskom-an-28-on-sovershil-zhestkuyu-posadku-passazhiry-zhivy\n
/help"""
IS_NOT_URL_M = (
"Мне нужна полная ссылка, а ты либо прислал не полную, либо вообще что то странное \n"
"/help"
)
RESPONSE_INVALID_M = (
"Вроде как ссылка ок, но почему то сайт по ней не отвечает. "
"Проверь еще раз ссылку или пришли другую. \n"
"/help"
)
IS_NOT_SUPPORTED_SITE_M = "Я только с сайтом meduza.io работаю.\n" "/help"
IS_NOT_SUPPORTED_PAGE_TYPE = (
"Да, это сайт meduza.io, но с таким я не умею работать.\n"
"Либо ты вообще не статью прислал, "
"либо с таким видом статей я еще не научился работать( \n"
"/help"
)
UNEXPECTED_ERROR_M = "Что то пошло не так\n" "/help"
def isUrl(url):
return validators.url(url)
def isValidResponse(url):
return requests.get(url).status_code == OK_RESPONSE_CODE
def isSupportedSite(url):
return urlparse(url)[1] in supportedSites
def getSiteName(url):
return urlparse(url)[1]
class NotSupportedSiteError(Exception):
def __init__(self, url, message="this site is not supported"):
self.url = url
self.message = message
super().__init__(self.message)
class NotValidResponseError(Exception):
def __init__(self, url, message="this site is not responding correctly"):
self.url = url
self.message = message
super().__init__(self.message)
class NotUrlError(Exception):
def __init__(self, url, message="this is not a url"):
self.url = url
self.message = message
super().__init__(self.message)
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=["start"])
def send_welcome(m):
bot.send_message(m.chat.id, START_M)
@bot.message_handler(commands=["help"])
def send_help(m):
bot.send_message(m.chat.id, HELP_M)
@bot.message_handler(content_types=["text"])
def send_voiced_article(m):
print("---------------")
print(m.text)
try:
if not isUrl(m.text):
raise NotUrlError(m.text)
elif not isSupportedSite(m.text):
raise NotSupportedSiteError(m.text)
elif not isValidResponse(m.text):
raise NotValidResponseError(m.text)
else:
if getSiteName(m.text) == "meduza.io":
bot.send_message(
m.chat.id,
"Если статья большая, то процесс может затянуться до 5 минут",
)
meduza_voicer = MeduzaVoicer(outputFile, folderId, m.text)
meduza_voicer()
voice = open("audio.opus", "rb")
bot.send_voice(m.chat.id, voice)
print("OK")
except NotUrlError:
bot.send_message(m.chat.id, IS_NOT_URL_M)
print(IS_NOT_URL_M)
except NotSupportedSiteError:
bot.send_message(m.chat.id, IS_NOT_SUPPORTED_SITE_M)
print(IS_NOT_SUPPORTED_SITE_M)
except NotValidResponseError:
bot.send_message(m.chat.id, RESPONSE_INVALID_M)
print(RESPONSE_INVALID_M)
except MeduzaVoicer.NotSupportedPageTypeError:
bot.send_message(m.chat.id, IS_NOT_SUPPORTED_PAGE_TYPE)
print(IS_NOT_SUPPORTED_PAGE_TYPE)
except Exception as e:
print(e)
bot.send_message(m.chat.id, UNEXPECTED_ERROR_M)
print(UNEXPECTED_ERROR_M)
bot.polling()
| true |
2b3c9cf635e4e0b1d709e60067f249a270a62956 | Python | anilpai/leetcode | /Strings/PossibleStrings.py | UTF-8 | 1,158 | 3.5625 | 4 | [
"MIT"
] | permissive | class Solution(object):
def printAllStringsK(self, s, prefix, n, k):
'''
Permutation of a String : print all possible combinations.
'''
if k == 0:
print(prefix)
return
for i in range(n):
self.printAllStringsK(s, prefix + s[i], n, k-1)
def printUniqueCombinations(self, s, partial=[]):
'''
Print unique combinations.
'''
print(''.join(partial))
for i in range(len(s)):
left = s[i]
right = s[i + 1:]
self.printUniqueCombinations(right, partial + [left])
def permute(self, a, l, r):
if l == r:
print(''.join(a))
else:
for i in range(l, r+1):
a[l], a[i] = a[i], a[l]
self.permute(a, l+1, r)
a[l], a[i] = a[i], a[l]
if __name__=='__main__':
solution = Solution()
s = 'abcd'
k = 3
solution.printAllStringsK(s, '', len(s), k)
solution.printUniqueCombinations(s)
a = list(s)
solution.permute(a, 0, len(a)-1)
s = '+-'
k = 3
solution.printAllStringsK(s, '', len(s), k) | true |
c202756c577073e799a0ffea5e227d002d0c8726 | Python | RYO515/test | /scp_ing_pra/chap4/scp_chap4-22.py | UTF-8 | 360 | 2.890625 | 3 | [] | no_license | import pandas as pd
import folium
df = pd.read_csv("store.csv")
# print(len(df))
# print(df.columns.values)
store = df[["緯度", "経度", "店舗名(日本語)"]].values
m = folium.Map(location=[35.942957, 136.198863], zoom_start=16)
for data in store:
folium.Marker([data[0], data[1]], tooltip=data[2], zoom_start=16).add_to(m)
m.save("store.html")
| true |
5c63d0f5e2ad4aa6f5530662520c2545d71b27e4 | Python | imazerty/TelecomParistech | /INF344 Données du web/TP Philosophie/philosophie/getpage.py | UTF-8 | 2,266 | 3.0625 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Ne pas se soucier de ces imports
import setpath
from bs4 import BeautifulSoup
from json import loads
from urllib.request import urlopen
from urllib.parse import urlencode
from pprint import pprint
from urllib.parse import unquote
from urllib.parse import urldefrag
# Si vous écrivez des fonctions en plus, faites-le ici
# virer "API_"
def getJSON(page):
params = urlencode({
'format': 'json', # TODO: compléter ceci
'action': 'parse', # TODO: compléter ceci
'prop': 'text', # TODO: compléter ceci
'redirects' : "true",
'page': page})
API = "https://fr.wikipedia.org/w/api.php" # TODO: changer ceci
response = urlopen(API + "?" + params)
return response.read().decode('utf-8')
def getRawPage(page):
parsed = loads(getJSON(page))
try:
title = parsed["parse"]["title"] # TODO: remplacer ceci
content = parsed["parse"]["text"]["*"] # TODO: remplacer ceci
return title, content
except KeyError:
# La page demandée n'existe pas
return None, None
def getPage(page):
page = page.replace(" ", "_")
try:
title, json = getRawPage(page)
soup = BeautifulSoup(json, 'html.parser')
except:
return ("", [])
liste_p = soup.find_all("p", recursive=False)
liste_a=[]
for item in liste_p:
item.find_all("a", href=True)
liste_a += [elem for elem in item.find_all("a", href=True)]
new_list = []
for item in liste_a:
try:
if item["href"].split("/")[1]=="wiki":
elemt = unquote(urldefrag(item["href"].split("/")[2])[0]).replace("_", " ")
if elemt not in new_list:
if ":" not in elemt:
if "API" not in elemt:
new_list.append(elemt)
except:
continue
return title, new_list[:10] # TODO: écrire ceci
if __name__ == '__main__':
# Ce code est exécuté lorsque l'on exécute le fichier
print("Ça fonctionne !")
# Voici des idées pour tester vos fonctions :
print(getPage("Geoffrey_Midddller"))
# print(getRawPage("Utilisateur:A3nm/INF344"))
# print(getRawPage("Histoire"))
| true |
687c19e0e86641e76901f956a4bf55ccaa5452a5 | Python | InsomniaGoku/-silentcrusader | /option_model.py | UTF-8 | 28,152 | 2.796875 | 3 | [] | no_license | from math import log, e
# modified from 3rd party source, added some functions, need further improvement.
try:
from scipy.stats import norm
except ImportError:
print('models require scipy to work properly')
def implied_volatility( model, args, CallPrice=None, PutPrice=None, high=500.0, low=0.0 ):
'''Returns the estimated implied volatility'''
target = 10
if CallPrice:
target = CallPrice
if PutPrice:
target = PutPrice
# accuracy
epsilon = 0.005
decimals = 2
for i in range( 10000 ): # To avoid infinite loops
mid = (high + low) / 2
if mid < 0.00001:
mid = 0.00001
if CallPrice:
estimate = eval( model )( args, volatility=mid, performance=True ).CallPrice
if PutPrice:
estimate = eval( model )( args, volatility=mid, performance=True ).PutPrice
if abs( round( estimate, decimals ) - target ) <= epsilon:
break
elif estimate > target:
high = mid
elif estimate < target:
low = mid
return mid
class BS:
'''Black-Scholes
Used for pricing European options on stocks without dividends
b_s([underlyingPrice, strikePrice, interestRate, daysToExpiration], \
volatility=x, CallPrice=y, PutPrice=z)
eg:
c = b_s([1.4565, 1.45, 1, 30], volatility=20)
c.CallPrice # Returns the Call price
c.PutPrice # Returns the Put price
c.CallDelta # Returns the Call delta
c.PutDelta # Returns the Put delta
c.CallDelta2 # Returns the Call dual delta
c.PutDelta2 # Returns the Put dual delta
c.CallTheta # Returns the Call theta
c.PutTheta # Returns the Put theta
c.CallRho # Returns the Call rho
c.PutRho # Returns the Put rho
c.vega # Returns the option vega
c.gamma # Returns the option gamma
c = b_s([1.4565, 1.45, 1, 30], CallPrice=0.0359)
c.impliedVolatility # Returns the implied volatility from the Call price
c = b_s([1.4565, 1.45, 1, 30], PutPrice=0.0306)
c.impliedVolatility # Returns the implied volatility from the Put price
c = b_s([1.4565, 1.45, 1, 30], CallPrice=0.0359, PutPrice=0.0306)
c.PutCallParity # Returns the Put-Call parity
'''
def __init__( self, args, volatility=None, CallPrice=None, PutPrice=None, \
performance=None ):
self.underlyingPrice = float( args[ 0 ] )
self.strikePrice = float( args[ 1 ] )
self.interestRate = float( args[ 2 ] ) / 100
self.daysToExpiration = float( args[ 3 ] ) / 365
for i in [ 'CallPrice', 'PutPrice', 'CallDelta', 'PutDelta', \
'CallDelta2', 'PutDelta2', 'CallTheta', 'PutTheta', \
'CallRho', 'PutRho', 'vega', 'gamma', 'impliedVolatility', \
'PutCallParity','UpdateTime' ]:
self.__dict__[ i ] = None
self.arbitrage_series = []
self.position = 0
self.theo_adjustment = 0
self.theo_adjustment_step = -0.015
self.position_limit = 3
self.maxposition = self.position_limit
self.minposition = -self.position_limit
self.prev_parity = 0
self.Returns = 0
self.cumulative_Returns = 0
if volatility:
self.volatility = float( volatility ) / 100
self._a_ = self.volatility * self.daysToExpiration ** 0.5
self._d1_ = (log( self.underlyingPrice / self.strikePrice ) + \
(self.interestRate + (self.volatility ** 2) / 2) * \
self.daysToExpiration) / self._a_
self._d2_ = self._d1_ - self._a_
if performance:
[ self.CallPrice, self.PutPrice ] = self._price( )
else:
[ self.CallPrice, self.PutPrice ] = self._price( )
[ self.CallDelta, self.PutDelta ] = self._delta( )
[ self.CallDelta2, self.PutDelta2 ] = self._delta2( )
[ self.CallTheta, self.PutTheta ] = self._theta( )
[ self.CallRho, self.PutRho ] = self._rho( )
self.vega = self._vega( )
self.gamma = self._gamma( )
self.exerciceProbability = norm.cdf( self._d2_ )
if CallPrice:
self.CallPrice = round( float( CallPrice ), 6 )
self.impliedVolatility = implied_volatility( \
self.__class__.__name__, args, CallPrice=self.CallPrice )
if PutPrice and not CallPrice:
self.PutPrice = round( float( PutPrice ), 6 )
self.impliedVolatility = implied_volatility( \
self.__class__.__name__, args, PutPrice=self.PutPrice )
if CallPrice and PutPrice:
self.CallPrice = float( CallPrice )
self.PutPrice = float( PutPrice )
self.PutCallParity = self._parity( )
def _price( self ):
'''Returns the option price: [Call price, Put price]'''
if self.volatility == 0 or self.daysToExpiration == 0:
Call = max( 0.0, self.underlyingPrice - self.strikePrice )
Put = max( 0.0, self.strikePrice - self.underlyingPrice )
if self.strikePrice == 0:
raise ZeroDivisionError( 'The strike price cannot be zero' )
else:
Call = self.underlyingPrice * norm.cdf( self._d1_ ) - \
self.strikePrice * e ** (-self.interestRate * \
self.daysToExpiration) * norm.cdf( self._d2_ )
Put = self.strikePrice * e ** (-self.interestRate * \
self.daysToExpiration) * norm.cdf( -self._d2_ ) - \
self.underlyingPrice * norm.cdf( -self._d1_ )
return [ Call, Put ]
def _delta( self ):
'''Returns the option delta: [Call delta, Put delta]'''
if self.volatility == 0 or self.daysToExpiration == 0:
Call = 1.0 if self.underlyingPrice > self.strikePrice else 0.0
Put = -1.0 if self.underlyingPrice < self.strikePrice else 0.0
if self.strikePrice == 0:
raise ZeroDivisionError( 'The strike price cannot be zero' )
else:
Call = norm.cdf( self._d1_ )
Put = -norm.cdf( -self._d1_ )
return [ Call, Put ]
def _delta2( self ):
'''Returns the dual delta: [Call dual delta, Put dual delta]'''
if self.volatility == 0 or self.daysToExpiration == 0:
Call = -1.0 if self.underlyingPrice > self.strikePrice else 0.0
Put = 1.0 if self.underlyingPrice < self.strikePrice else 0.0
if self.strikePrice == 0:
raise ZeroDivisionError( 'The strike price cannot be zero' )
else:
_b_ = e ** -(self.interestRate * self.daysToExpiration)
Call = -norm.cdf( self._d2_ ) * _b_
Put = norm.cdf( -self._d2_ ) * _b_
return [ Call, Put ]
def _vega( self ):
'''Returns the option vega'''
if self.volatility == 0 or self.daysToExpiration == 0:
return 0.0
if self.strikePrice == 0:
raise ZeroDivisionError( 'The strike price cannot be zero' )
else:
return self.underlyingPrice * norm.pdf( self._d1_ ) * \
self.daysToExpiration ** 0.5 / 100
def _theta( self ):
'''Returns the option theta: [Call theta, Put theta]'''
_b_ = e ** -(self.interestRate * self.daysToExpiration)
Call = -self.underlyingPrice * norm.pdf( self._d1_ ) * self.volatility / \
(2 * self.daysToExpiration ** 0.5) - self.interestRate * \
self.strikePrice * _b_ * norm.cdf( self._d2_ )
Put = -self.underlyingPrice * norm.pdf( self._d1_ ) * self.volatility / \
(2 * self.daysToExpiration ** 0.5) + self.interestRate * \
self.strikePrice * _b_ * norm.cdf( -self._d2_ )
return [ Call / 365, Put / 365 ]
def _rho( self ):
'''Returns the option rho: [Call rho, Put rho]'''
_b_ = e ** -(self.interestRate * self.daysToExpiration)
Call = self.strikePrice * self.daysToExpiration * _b_ * \
norm.cdf( self._d2_ ) / 100
Put = -self.strikePrice * self.daysToExpiration * _b_ * \
norm.cdf( -self._d2_ ) / 100
return [ Call, Put ]
def _gamma( self ):
'''Returns the option gamma'''
return norm.pdf( self._d1_ ) / (self.underlyingPrice * self._a_)
def _parity( self ):
'''Put-Call Parity'''
#return self.CallPrice - self.PutPrice - self.underlyingPrice + \
# (self.strikePrice / \
# ((1 + self.interestRate) ** self.daysToExpiration))
#print (self.strikePrice/( -self.CallPrice + self.PutPrice + self.underlyingPrice))
return (self.strikePrice/( -self.CallPrice + self.PutPrice + self.underlyingPrice) - 1)\
* (1/self.daysToExpiration)
def update( self,update_time, update_data ):
'''use to accept parameter update from market'''
#print 'underlying',(update_data['BP1']+update_data['SP1'])/2
#print 'call ',(update_data['CALLBP1']+update_data['CALLSP1'])/2
#print 'put ',(update_data['PUTBP1']+update_data['PUTSP1'])/2
self.__dict__.update( {'underlyingPrice':(update_data['BP1']+update_data['SP1'])/2,
'CallPrice':(update_data['CALLBP1']+update_data['CALLSP1'])/2,
'PutPrice':(update_data['PUTBP1']+update_data['PUTSP1'])/2,
'UpdateTime':update_time}
)
#print self._price()
print self._parity()
self.Returns = self.position*( - self.prev_parity + self._parity())/self.position_limit/365
self.cumulative_Returns += self.Returns
Trade = False
if self._parity() > 0.05+self.position*self.theo_adjustment_step:
if self.position == self.minposition:
Trade = False
else:
self.position = max((self.position - 1),self.minposition)
Trade = True
if self._parity() < -0.05+self.position*self.theo_adjustment_step:
if self.position==self.maxposition:
Trade = False
else:
self.position = min((self.position + 1),self.maxposition)
Trade = True
if Trade==True:
self.prev_parity = self._parity()
self.arbitrage_series.append({'Time':self.UpdateTime,'Rate':self._parity(),'Position':self.position,
'Return':self.Returns,'CumR':self.cumulative_Returns})
self.Returns = 0
Trade = False
class merton:
'''merton
Used for pricing European options on stocks with dividends
merton([underlyingPrice, strikePrice, interestRate, annualDividends, \
daysToExpiration], volatility=x, CallPrice=y, PutPrice=z)
eg:
c = merton([52, 50, 1, 1, 30], volatility=20)
c.CallPrice # Returns the Call price
c.PutPrice # Returns the Put price
c.CallDelta # Returns the Call delta
c.PutDelta # Returns the Put delta
c.CallDelta2 # Returns the Call dual delta
c.PutDelta2 # Returns the Put dual delta
c.CallTheta # Returns the Call theta
c.PutTheta # Returns the Put theta
c.CallRho # Returns the Call rho
c.PutRho # Returns the Put rho
c.vega # Returns the option vega
c.gamma # Returns the option gamma
c = merton([52, 50, 1, 1, 30], CallPrice=0.0359)
c.impliedVolatility # Returns the implied volatility from the Call price
c = merton([52, 50, 1, 1, 30], PutPrice=0.0306)
c.impliedVolatility # Returns the implied volatility from the Put price
c = merton([52, 50, 1, 1, 30], CallPrice=0.0359, PutPrice=0.0306)
c.PutCallParity # Returns the Put-Call parity
'''
def __init__( self, args, volatility=None, CallPrice=None, PutPrice=None, \
performance=None ):
self.underlyingPrice = float( args[ 0 ] )
self.strikePrice = float( args[ 1 ] )
self.interestRate = float( args[ 2 ] ) / 100
self.dividend = float( args[ 3 ] )
self.dividendYield = self.dividend / self.underlyingPrice
self.daysToExpiration = float( args[ 4 ] ) / 365
for i in [ 'CallPrice', 'PutPrice', 'CallDelta', 'PutDelta', \
'CallDelta2', 'PutDelta2', 'CallTheta', 'PutTheta', \
'CallRho', 'PutRho', 'vega', 'gamma', 'impliedVolatility', \
'PutCallParity' ]:
self.__dict__[ i ] = None
if volatility:
self.volatility = float( volatility ) / 100
self._a_ = self.volatility * self.daysToExpiration ** 0.5
self._d1_ = (log( self.underlyingPrice / self.strikePrice ) + \
(self.interestRate - self.dividendYield + \
(self.volatility ** 2) / 2) * self.daysToExpiration) / \
self._a_
self._d2_ = self._d1_ - self._a_
if performance:
[ self.CallPrice, self.PutPrice ] = self._price( )
else:
[ self.CallPrice, self.PutPrice ] = self._price( )
[ self.CallDelta, self.PutDelta ] = self._delta( )
[ self.CallDelta2, self.PutDelta2 ] = self._delta2( )
[ self.CallTheta, self.PutTheta ] = self._theta( )
[ self.CallRho, self.PutRho ] = self._rho( )
self.vega = self._vega( )
self.gamma = self._gamma( )
self.exerciceProbability = norm.cdf( self._d2_ )
if CallPrice:
self.CallPrice = round( float( CallPrice ), 6 )
self.impliedVolatility = implied_volatility( \
self.__class__.__name__, args, self.CallPrice )
if PutPrice and not CallPrice:
self.PutPrice = round( float( PutPrice ), 6 )
self.impliedVolatility = implied_volatility( \
self.__class__.__name__, args, PutPrice=self.PutPrice )
if CallPrice and PutPrice:
self.CallPrice = float( CallPrice )
self.PutPrice = float( PutPrice )
self.PutCallParity = self._parity( )
    def _price( self ):
        '''Returns the option price: [Call price, Put price]'''
        # Zero vol / zero time: intrinsic value.
        # NOTE(review): the next `if/else` is not chained to this one, so
        # when strikePrice != 0 the intrinsic values are always overwritten
        # by the closed-form branch -- an elif was probably intended; verify.
        if self.volatility == 0 or self.daysToExpiration == 0:
            Call = max( 0.0, self.underlyingPrice - self.strikePrice )
            Put = max( 0.0, self.strikePrice - self.underlyingPrice )
        if self.strikePrice == 0:
            raise ZeroDivisionError( 'The strike price cannot be zero' )
        else:
            # Merton closed form with continuous dividend yield.
            Call = self.underlyingPrice * e ** (-self.dividendYield * \
                self.daysToExpiration) * norm.cdf( self._d1_ ) - \
                self.strikePrice * e ** (-self.interestRate * \
                self.daysToExpiration) * norm.cdf( self._d2_ )
            Put = self.strikePrice * e ** (-self.interestRate * \
                self.daysToExpiration) * norm.cdf( -self._d2_ ) - \
                self.underlyingPrice * e ** (-self.dividendYield * \
                self.daysToExpiration) * norm.cdf( -self._d1_ )
        return [ Call, Put ]
    def _delta( self ):
        '''Returns the option delta: [Call delta, Put delta]'''
        # Same non-chained if/else pattern as _price (see note there).
        if self.volatility == 0 or self.daysToExpiration == 0:
            Call = 1.0 if self.underlyingPrice > self.strikePrice else 0.0
            Put = -1.0 if self.underlyingPrice < self.strikePrice else 0.0
        if self.strikePrice == 0:
            raise ZeroDivisionError( 'The strike price cannot be zero' )
        else:
            # _b_ discounts by the dividend yield.
            _b_ = e ** (-self.dividendYield * self.daysToExpiration)
            Call = _b_ * norm.cdf( self._d1_ )
            Put = _b_ * (norm.cdf( self._d1_ ) - 1)
        return [ Call, Put ]
    # Verify
    def _delta2( self ):
        '''Returns the dual delta: [Call dual delta, Put dual delta]'''
        if self.volatility == 0 or self.daysToExpiration == 0:
            Call = -1.0 if self.underlyingPrice > self.strikePrice else 0.0
            Put = 1.0 if self.underlyingPrice < self.strikePrice else 0.0
        if self.strikePrice == 0:
            raise ZeroDivisionError( 'The strike price cannot be zero' )
        else:
            _b_ = e ** -(self.interestRate * self.daysToExpiration)
            Call = -norm.cdf( self._d2_ ) * _b_
            Put = norm.cdf( -self._d2_ ) * _b_
        return [ Call, Put ]
    def _vega( self ):
        '''Returns the option vega'''
        if self.volatility == 0 or self.daysToExpiration == 0:
            return 0.0
        if self.strikePrice == 0:
            raise ZeroDivisionError( 'The strike price cannot be zero' )
        else:
            # Scaled per 1% move in volatility (hence / 100).
            return self.underlyingPrice * e ** (-self.dividendYield * \
                self.daysToExpiration) * norm.pdf( self._d1_ ) * \
                self.daysToExpiration ** 0.5 / 100
    def _theta( self ):
        '''Returns the option theta: [Call theta, Put theta]'''
        _b_ = e ** -(self.interestRate * self.daysToExpiration)
        _d_ = e ** (-self.dividendYield * self.daysToExpiration)
        Call = -self.underlyingPrice * _d_ * norm.pdf( self._d1_ ) * \
            self.volatility / (2 * self.daysToExpiration ** 0.5) + \
            self.dividendYield * self.underlyingPrice * _d_ * \
            norm.cdf( self._d1_ ) - self.interestRate * \
            self.strikePrice * _b_ * norm.cdf( self._d2_ )
        Put = -self.underlyingPrice * _d_ * norm.pdf( self._d1_ ) * \
            self.volatility / (2 * self.daysToExpiration ** 0.5) - \
            self.dividendYield * self.underlyingPrice * _d_ * \
            norm.cdf( -self._d1_ ) + self.interestRate * \
            self.strikePrice * _b_ * norm.cdf( -self._d2_ )
        # Converted from per-year to per-calendar-day.
        return [ Call / 365, Put / 365 ]
    def _rho( self ):
        '''Returns the option rho: [Call rho, Put rho]'''
        _b_ = e ** -(self.interestRate * self.daysToExpiration)
        # Scaled per 1% move in the interest rate.
        Call = self.strikePrice * self.daysToExpiration * _b_ * \
            norm.cdf( self._d2_ ) / 100
        Put = -self.strikePrice * self.daysToExpiration * _b_ * \
            norm.cdf( -self._d2_ ) / 100
        return [ Call, Put ]
    def _gamma( self ):
        '''Returns the option gamma'''
        return e ** (-self.dividendYield * self.daysToExpiration) * \
            norm.pdf( self._d1_ ) / (self.underlyingPrice * self._a_)
    # Verify
    def _parity( self ):
        '''Put-Call Parity'''
        # Uses discrete discounting (1 + r) ** T for the strike leg.
        return self.CallPrice - self.PutPrice - self.underlyingPrice + \
            (self.strikePrice / \
            ((1 + self.interestRate) ** self.daysToExpiration))
    def update( self, update_data ):
        '''use to accept parameter update from market'''
        self.__dict__.update( update_data )
class g_k:
    """Garman-Kohlhagen model for European options on currencies.

    g_k([underlyingPrice, strikePrice, domesticRate, foreignRate,
        daysToExpiration], volatility=x, CallPrice=y, PutPrice=z)

    Rates and volatility are given in percent; daysToExpiration in
    calendar days (converted to years, ACT/365).
    eg:
    c = g_k([1.4565, 1.45, 1, 2, 30], volatility=20)
    c.CallPrice                 # Call price
    c.PutPrice                  # Put price
    c.CallDelta / c.PutDelta    # deltas
    c.CallDelta2 / c.PutDelta2  # dual deltas
    c.CallTheta / c.PutTheta    # thetas
    c.CallRhoD / c.PutRhoD      # domestic rhos
    c.CallRhoF / c.PutRhoF      # foreign rhos
    c.vega                      # option vega
    c.gamma                     # option gamma
    c = g_k([1.4565, 1.45, 1, 2, 30], CallPrice=0.0359)
    c.impliedVolatility         # implied volatility from the Call price
    c = g_k([1.4565, 1.45, 1, 2, 30], PutPrice=0.03)
    c.impliedVolatility         # implied volatility from the Put price
    c = g_k([1.4565, 1.45, 1, 2, 30], CallPrice=0.0359, PutPrice=0.03)
    c.PutCallParity             # the Put-Call parity
    """
    def __init__( self, args, volatility=None, CallPrice=None, PutPrice=None, \
            performance=None ):
        self.underlyingPrice = float( args[ 0 ] )
        self.strikePrice = float( args[ 1 ] )
        # Rates arrive in percent, stored as fractions.
        self.domesticRate = float( args[ 2 ] ) / 100
        self.foreignRate = float( args[ 3 ] ) / 100
        # Stored in years.
        self.daysToExpiration = float( args[ 4 ] ) / 365
        # Pre-seed every published attribute with None.
        # BUG FIX: the list previously named 'CallRhoF' twice and never
        # 'PutRhoF', so c.PutRhoF raised AttributeError in performance mode.
        for i in [ 'CallPrice', 'PutPrice', 'CallDelta', 'PutDelta',
                'CallDelta2', 'PutDelta2', 'CallTheta', 'PutTheta',
                'CallRhoD', 'PutRhoD', 'CallRhoF', 'PutRhoF', 'vega',
                'gamma', 'impliedVolatility', 'PutCallParity' ]:
            self.__dict__[ i ] = None
        if volatility:
            self.volatility = float( volatility ) / 100
            # _a_ = sigma * sqrt(T); _d1_/_d2_ are the usual BSM terms with
            # the foreign rate playing the role of a dividend yield.
            self._a_ = self.volatility * self.daysToExpiration ** 0.5
            self._d1_ = (log( self.underlyingPrice / self.strikePrice ) +
                (self.domesticRate - self.foreignRate +
                (self.volatility ** 2) / 2) * self.daysToExpiration) / self._a_
            self._d2_ = self._d1_ - self._a_
            # Reduces performance overhead when computing implied volatility:
            # only the prices are needed by the solver.
            if performance:
                [ self.CallPrice, self.PutPrice ] = self._price( )
            else:
                [ self.CallPrice, self.PutPrice ] = self._price( )
                [ self.CallDelta, self.PutDelta ] = self._delta( )
                [ self.CallDelta2, self.PutDelta2 ] = self._delta2( )
                [ self.CallTheta, self.PutTheta ] = self._theta( )
                [ self.CallRhoD, self.PutRhoD ] = self._rhod( )
                [ self.CallRhoF, self.PutRhoF ] = self._rhof( )
                self.vega = self._vega( )
                self.gamma = self._gamma( )
                self.exerciceProbability = norm.cdf( self._d2_ )
        if CallPrice:
            self.CallPrice = round( float( CallPrice ), 6 )
            self.impliedVolatility = implied_volatility(
                self.__class__.__name__, args, CallPrice=self.CallPrice )
        if PutPrice and not CallPrice:
            self.PutPrice = round( float( PutPrice ), 6 )
            self.impliedVolatility = implied_volatility(
                self.__class__.__name__, args, PutPrice=self.PutPrice )
        if CallPrice and PutPrice:
            self.CallPrice = float( CallPrice )
            self.PutPrice = float( PutPrice )
            self.PutCallParity = self._parity( )
    def _price( self ):
        '''Returns the option price: [Call price, Put price]'''
        # Degenerate case: intrinsic value (overwritten below when
        # strikePrice != 0, matching the historical control flow).
        if self.volatility == 0 or self.daysToExpiration == 0:
            Call = max( 0.0, self.underlyingPrice - self.strikePrice )
            Put = max( 0.0, self.strikePrice - self.underlyingPrice )
        if self.strikePrice == 0:
            raise ZeroDivisionError( 'The strike price cannot be zero' )
        else:
            Call = e ** (-self.foreignRate * self.daysToExpiration) * \
                self.underlyingPrice * norm.cdf( self._d1_ ) - \
                e ** (-self.domesticRate * self.daysToExpiration) * \
                self.strikePrice * norm.cdf( self._d2_ )
            Put = e ** (-self.domesticRate * self.daysToExpiration) * \
                self.strikePrice * norm.cdf( -self._d2_ ) - \
                e ** (-self.foreignRate * self.daysToExpiration) * \
                self.underlyingPrice * norm.cdf( -self._d1_ )
        return [ Call, Put ]
    def _delta( self ):
        '''Returns the option delta: [Call delta, Put delta]'''
        if self.volatility == 0 or self.daysToExpiration == 0:
            Call = 1.0 if self.underlyingPrice > self.strikePrice else 0.0
            Put = -1.0 if self.underlyingPrice < self.strikePrice else 0.0
        if self.strikePrice == 0:
            raise ZeroDivisionError( 'The strike price cannot be zero' )
        else:
            # Foreign-rate discount factor.
            _b_ = e ** -(self.foreignRate * self.daysToExpiration)
            Call = norm.cdf( self._d1_ ) * _b_
            Put = -norm.cdf( -self._d1_ ) * _b_
        return [ Call, Put ]
    def _delta2( self ):
        '''Returns the dual delta: [Call dual delta, Put dual delta]'''
        if self.volatility == 0 or self.daysToExpiration == 0:
            Call = -1.0 if self.underlyingPrice > self.strikePrice else 0.0
            Put = 1.0 if self.underlyingPrice < self.strikePrice else 0.0
        if self.strikePrice == 0:
            raise ZeroDivisionError( 'The strike price cannot be zero' )
        else:
            _b_ = e ** -(self.domesticRate * self.daysToExpiration)
            Call = -norm.cdf( self._d2_ ) * _b_
            Put = norm.cdf( -self._d2_ ) * _b_
        return [ Call, Put ]
    def _vega( self ):
        '''Returns the option vega'''
        if self.volatility == 0 or self.daysToExpiration == 0:
            return 0.0
        if self.strikePrice == 0:
            raise ZeroDivisionError( 'The strike price cannot be zero' )
        else:
            return self.underlyingPrice * e ** -(self.foreignRate * \
                self.daysToExpiration) * norm.pdf( self._d1_ ) * \
                self.daysToExpiration ** 0.5
    def _theta( self ):
        '''Returns the option theta: [Call theta, Put theta]'''
        _b_ = e ** -(self.foreignRate * self.daysToExpiration)
        Call = -self.underlyingPrice * _b_ * norm.pdf( self._d1_ ) * \
            self.volatility / (2 * self.daysToExpiration ** 0.5) + \
            self.foreignRate * self.underlyingPrice * _b_ * \
            norm.cdf( self._d1_ ) - self.domesticRate * self.strikePrice * \
            _b_ * norm.cdf( self._d2_ )
        Put = -self.underlyingPrice * _b_ * norm.pdf( self._d1_ ) * \
            self.volatility / (2 * self.daysToExpiration ** 0.5) - \
            self.foreignRate * self.underlyingPrice * _b_ * \
            norm.cdf( -self._d1_ ) + self.domesticRate * self.strikePrice * \
            _b_ * norm.cdf( -self._d2_ )
        # Converted from per-year to per-calendar-day.
        return [ Call / 365, Put / 365 ]
    def _rhod( self ):
        '''Returns the option domestic rho: [Call rho, Put rho]'''
        Call = self.strikePrice * self.daysToExpiration * \
            e ** (-self.domesticRate * self.daysToExpiration) * \
            norm.cdf( self._d2_ ) / 100
        Put = -self.strikePrice * self.daysToExpiration * \
            e ** (-self.domesticRate * self.daysToExpiration) * \
            norm.cdf( -self._d2_ ) / 100
        return [ Call, Put ]
    def _rhof( self ):
        '''Returns the option foreign rho: [Call rho, Put rho]'''
        Call = -self.underlyingPrice * self.daysToExpiration * \
            e ** (-self.foreignRate * self.daysToExpiration) * \
            norm.cdf( self._d1_ ) / 100
        Put = self.underlyingPrice * self.daysToExpiration * \
            e ** (-self.foreignRate * self.daysToExpiration) * \
            norm.cdf( -self._d1_ ) / 100
        return [ Call, Put ]
    def _gamma( self ):
        '''Returns the option gamma'''
        return (norm.pdf( self._d1_ ) * e ** -(self.foreignRate * \
            self.daysToExpiration)) / (self.underlyingPrice * self._a_)
    def _parity( self ):
        '''Returns the Put-Call parity'''
        # Discrete discounting of both legs.
        return self.CallPrice - self.PutPrice - (self.underlyingPrice / \
            ((1 + self.foreignRate) ** self.daysToExpiration)) + \
            (self.strikePrice / \
            ((1 + self.domesticRate) ** self.daysToExpiration))
    def update( self, update_data ):
        '''use to accept parameter update from market'''
        self.__dict__.update( update_data )
| true |
a9e0a00ac9b807417ca66cfc972073d44217a4d6 | Python | cc40330tw/Web-App-with-a-DB-backend | /ytfl.py | UTF-8 | 4,734 | 3.109375 | 3 | [] | no_license | #Pass information from Backend of Flask to the frontend of HTML template
from flask import Flask, redirect, url_for, render_template, request, session, flash
from datetime import timedelta
from flask_sqlalchemy import SQLAlchemy
# Application + database setup.
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- load from config/env for anything non-demo.
app.secret_key = "hellothisismysecretkey"
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///users_table.sqlite3' # "users_table" here is the name of the table that you're gonna be referencing
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.permanent_session_lifetime = timedelta(minutes=3) # store our permanent session data for 3 minutes
db = SQLAlchemy(app) # SQLAlchemy makes it easier to save information because we can write all our database stuff in python code rather than writing SQL queries
class users_table(db.Model): # Columns represent pieces of information; rows represent individual items (users).
    _id = db.Column("id",db.Integer, primary_key=True) # the id is generated automatically because it's the primary key
    name = db.Column(db.String(100)) # 100 is the maximum length of the stored string (100 characters)
    email = db.Column(db.String(100)) # String could also be changed to Integer/Float/Boolean
    def __init__(self,name,email): # each user is created from the two values we need: a name and an email
        self.name = name
        self.email = email
#@app.route("/<name>")
#def home(name):
#return "Hello! This is the main page <h1>HELLO<h1>"
#return render_template("index.html", content=name, r=2)
#return render_template("index.html",content=["Tim","Joe","Bill"])
'''@app.route("/<name>")
def user(name):
return f"Hello {name}!"
@app.route("/admin")
def admin():
return redirect(url_for("user", name="Admin!"))'''
'''@app.route("/")
def home():
return render_template("index.html",content="Testing")'''
@app.route("/")
def home():
    """Serve the landing page."""
    landing_page = render_template("index.html")
    return landing_page
@app.route("/view")
def view():
    """List every stored user row."""
    all_rows = users_table.query.all()
    return render_template("view.html", values=all_rows)
@app.route("/login",methods=["POST","GET"])
def login():
    """Log a visitor in by name, creating the user row on first visit."""
    if request.method == "POST":
        session.permanent = True #used to define this specific session as a permanent session which means it's gonna last as long as we define up there
        user = request.form["nm"]
        session["user"] = user
        found_user = users_table.query.filter_by(name=user).first()
        if found_user: # When an user types his name, we'll check if this user already exists. If not, we'll create one.
            session["email"] = found_user.email
        else:
            # First visit: persist a new user row with an empty email.
            usr = users_table(user, "")
            db.session.add(usr) # add this user model to our database
            db.session.commit()
        flash("Login Succesful!")
        #return redirect(url_for("user",usr=user))
        return redirect(url_for("user"))
    else:
        if "user" in session: # means the visitor is already signed in
            flash("Already Logged in!")
            return redirect(url_for("user"))
        return render_template("login.html")
'''@app.route("/<usr>")
def user(usr):
return f"<h1>{usr}</h1>"'''
@app.route("/user",methods=["POST","GET"])
def user():
    """Show the logged-in user's page; on POST, save their email."""
    email = None
    if "user" in session:
        user = session["user"]
        if request.method == "POST":
            email = request.form["email"] # grab that email from the email field
            session["email"] = email # store it in the session
            # Persist the email on the user's database row as well.
            found_user = users_table.query.filter_by(name=user).first()
            found_user.email = email
            db.session.commit() # next time we login this will be saved
            flash("Email was saved!")
        else: # if it's a GET request
            if "email" in session:
                email = session["email"] # get the email from the session
        #return f"<h1>{user}</h1>"
        #return render_template("User.html", user=user)
        return render_template("User.html", email=email)
    else:
        # Not authenticated: bounce back to the login page.
        flash("You are not logged in!")
        return redirect(url_for("login"))
@app.route("/logout")
def logout():
    """Clear the session and send the visitor back to the login page."""
    flash("You have been logged out!", "info")
    # Drop both pieces of session state; missing keys are ignored.
    for key in ("user", "email"):
        session.pop(key, None)
    return redirect(url_for("login"))
@app.route("/WhatisNew")
def WhatisNew():
    """Render the what's-new page."""
    rendered = render_template("new.html")
    return rendered
if __name__ == "__main__":
    # Create the tables declared above if they don't already exist.
    db.create_all() # create the database above if it hasn't already exist in our program
    app.run(debug=True)
| true |
419d96129b00319dbde8cfd451f5ff6ffc79feb6 | Python | pmauduit/osmroutes-1d | /route_analyser.py | UTF-8 | 6,536 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import argparse
import copy
from lib.OsmApi import OsmApi
OSM_API = OsmApi()
# Fetches osm data from the API
def get_osm_data(relation_id):
    """Fetch a route master relation and its stops from the OSM API.

    Returns {'colour': str, 'branches': [[stop name, ...], ...],
    'coords': {stop name: {'lat': ..., 'lon': ...}}}.
    """
    # FIX: dropped the unused (and typo'd) `daugther_relations = []` and the
    # dead `colour = None` initialisation.
    mother_relation = OSM_API.RelationGet(relation_id)
    # Route colour tag, defaulting to black.
    colour = mother_relation['tag'].get('colour', '#000000')
    daughter_relations = [ member['ref'] for member in mother_relation['member']
                           if member['type'] == 'relation'
                           and member.get('ref', None) is not None ]
    branches = []
    coords = {}
    # iterating on daughter relations: each one is a branch of the route
    for daughter in daughter_relations:
        current_daughter = OSM_API.RelationGet(daughter)
        branche = []
        for member in current_daughter['member']:
            # The OSM wiki explicitly states that the stops must be nodes
            if member['role'] == 'stop' and member['type'] == 'node':
                current_stop = OSM_API.NodeGet(member['ref'])
                name_stop = current_stop['tag'].get('name', None)
                if name_stop is not None:
                    branche.append(name_stop)
                    coords[name_stop] = {'lat': current_stop['lat'], 'lon': current_stop['lon'] }
        branches.append(branche)
    return { 'colour': colour, 'branches': branches, 'coords': coords }
# check if a branch is not already in the list
def is_in(branches, elem):
    """Return whether `elem` equals any branch already in `branches`."""
    # List membership uses ==, matching the original explicit scan.
    return elem in branches
# clean the equivalent branches
def clean_branches(branches):
    """Drop branches whose exact reverse was already kept.

    A route stored in both directions is kept only once; same-orientation
    duplicates are preserved, exactly as before.
    """
    new_branches = []
    for branche in branches:
        rev = list(branche)
        rev.reverse()
        # Plain membership replaces the is_in() helper scan (same == test).
        if rev not in new_branches:
            new_branches.append(branche)
    return new_branches
# calculates the position of each stops on a 1D map
def compute_positions(branches):
    """Assign 1-D map coordinates to every stop across all branches.

    Returns {stop name: {'x', 'y', 'ancestors', 'nexts'}} where x/y are
    grid positions and ancestors/nexts count incoming/outgoing links.
    NOTE(review): the first-branch case is detected via `seen == {}`
    rather than `idx == 0` -- confirm that is intentional.
    """
    seen = {}
    ypos = 0
    xpos = 0
    for idx, branch in enumerate(branches):
        # first branch, apply an arbitrary coordinate for each node
        if seen == {}:
            for stop in branch:
                ancestors = 0 if idx == 0 else 1
                nexts = 0 if idx == len(branch) - 1 else 1
                seen[stop] = { 'x': xpos, 'y': ypos, 'ancestors': ancestors, 'nexts' : nexts }
                xpos += 1
        # else try to find a known node
        else:
            unkn_node = []
            known_node = None
            for idxstop, stop in enumerate(branch):
                saved = seen.get(stop)
                # node not found
                if saved is None:
                    # no known node found yet, we can't
                    # currently calculate its position
                    if known_node is None:
                        unkn_node.append(stop)
                    # a known node has been found earlier
                    else:
                        # empty the unkn_node list
                        xpos = known_node['x'] - 1
                        ypos = known_node['y'] if known_node['ancestors'] == 0 else known_node['y'] + 1
                        # increments the nexts of the known_node
                        known_node['ancestors'] += 1
                        nexts = 0 if idxstop == len(branch) - 1 else 1
                        # Place pending unknown stops to the left of the anchor.
                        while len(unkn_node) > 0:
                            popped = unkn_node.pop()
                            ancestors = 0 if len(unkn_node) == 0 else 1
                            seen[popped] = { 'x': xpos, 'y': ypos, 'ancestors' : ancestors, 'nexts': 1 }
                            xpos -= 1
                        # then add the new unknown node
                        xpos = known_node['x'] + 1
                        ypos = known_node['y'] if known_node['nexts'] == 0 else known_node['y'] + 1
                        curr_node = { 'x': xpos, 'y': ypos, 'ancestors': 1, 'nexts': nexts }
                        seen[stop] = curr_node
                        known_node = curr_node
                # node already saved in cache
                else:
                    known_node = saved
                    xpos = saved['x'] - 1
                    saved['nexts'] += 1
                    ypos = saved['y'] if saved['nexts'] == 0 else saved['y'] + 1
                    # then we need to increment the number of nexts for the
                    # known node
                    while len(unkn_node) > 0:
                        popped = unkn_node.pop()
                        ancestors = 0 if len(unkn_node) == 0 else 1
                        seen[popped] = { 'x': xpos, 'y': ypos, 'ancestors': ancestors, 'nexts': 1 }
                        xpos -= 1
    return seen
# normalizes the coordinates of each stops
def normalize_coordinates(stops):
    """Translate all stop coordinates so the minimum x and y become 0.

    Mutates and returns `stops`, a mapping {name: {'x': int, 'y': int, ...}}.
    """
    # FIX: iterate .values() (works on Python 2 and 3, unlike .iteritems())
    # and drop the unused name variable from the old iteritems() loop.
    min_x = 0
    min_y = 0
    # Find the most negative coordinates (stays 0 if none are negative).
    for stop in stops.values():
        if stop['x'] < min_x:
            min_x = stop['x']
        if stop['y'] < min_y:
            min_y = stop['y']
    # Simple translation so everything is non-negative.
    for stop in stops.values():
        stop['x'] -= min_x
        stop['y'] -= min_y
    return stops
# Python 2 script entry point (uses `print` statements below).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--relation", help="fetches the data from the OSM API", type=int)
    parser.add_argument("--input", help="Loads the data from a file")
    parser.add_argument("--output", help="Dumps the result of the script into a file")
    args = parser.parse_args()
    # Fetches OSM data via the API
    if args.relation:
        relation_id = int(args.relation)
        datas = get_osm_data(relation_id)
        if args.output:
            with open(args.output, "w") as outfile:
                json.dump(datas, outfile, indent=4)
        else:
            json.dump(datas, sys.stdout, indent=4)
    # Loads OSM data from a JSON file
    elif args.input:
        json_data = open(args.input).read()
        datas = json.loads(json_data)
        print "%d stops geolocalized" % len(datas['coords'].keys())
        # cleaning branches
        datas['branches'] = clean_branches(datas['branches'])
        # computes position (coordinates for each stops)
        stops = compute_positions(datas['branches'])
        # normalizes positions (simple translation)
        stops = normalize_coordinates(stops)
        if args.output:
            with open(args.output, "w") as outfile:
                json.dump(stops, outfile, indent=4)
        else:
            json.dump(stops, sys.stdout, indent=4)
    # prints the help
    else:
        parser.print_help()
| true |
2ba9b0c96dd5604541dddd880e809a95fa79a0f0 | Python | shraysalvi/Tic-Tac-Toi--cordinates-based- | /tic-tac-toi.py | UTF-8 | 3,216 | 3.5625 | 4 | [] | no_license | string = " "
def PRINT(string):
    """Print the 3x3 board held in the first nine cells of `string`.

    Accepts any indexable sequence of at least 9 single characters
    (string or list); output layout is unchanged: "| a b c |".
    """
    print("---------")
    # One print per row replaces the nine repetitive end="" prints.
    for row in range(0, 9, 3):
        print("|", string[row], string[row + 1], string[row + 2], "|")
    print("---------")
# Show the empty board once before the game loop starts.
PRINT(string)
# `a` is the game-over flag used by the while loop below (1 = finished).
a = 0
# 3x3 grid of cells ("cordinates" [sic]) seeded from the flat string.
cordinates = [[string[0], string[1], string[2]],
[string[3], string[4], string[5]],
[string[6], string[7], string[8]]]
def impossible(check_str): # function to check impossible condition
    """Return 1 if the board state could not occur in a legal game."""
    x_str = check_str.count("X") #x in string
    o_str = check_str.count("O") #o in string
    # Illegal if one side is 2+ moves ahead, or both sides "won"
    # (check_O/check_X return 1, and 1 == True in Python).
    if x_str - o_str <= -2 or x_str - o_str >= 2 or check_O(check_str) == check_X(check_str) == True:
        return 1
    return 0
def check_X(check_str): #function to check X wins
    """Return 1 if "X" holds any of the 8 winning lines, else 0.

    Generalized: works on any indexable 9-cell board (list or string);
    the old slice-vs-list comparison only matched list inputs.
    """
    # Rows, columns, then the two diagonals (indices into the flat board).
    win_lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
                 (0, 3, 6), (1, 4, 7), (2, 5, 8),
                 (0, 4, 8), (2, 4, 6))
    for a, b, c in win_lines:
        if check_str[a] == check_str[b] == check_str[c] == "X":
            return 1
    return 0
def check_O(check_str): # function to check O wins
    """Return 1 if "O" holds any of the 8 winning lines, else 0.

    Generalized: works on any indexable 9-cell board (list or string);
    the old slice-vs-list comparison only matched list inputs.
    """
    # Rows, columns, then the two diagonals (indices into the flat board).
    win_lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
                 (0, 3, 6), (1, 4, 7), (2, 5, 8),
                 (0, 4, 8), (2, 4, 6))
    for a, b, c in win_lines:
        if check_str[a] == check_str[b] == check_str[c] == "O":
            return 1
    return 0
def check_not_finished(check_str): #function to check Game finished or not or even draw
    """Return 1 on a draw (board full, nobody won); otherwise returns None."""
    # One True entry per remaining blank cell.
    l = [True for x in check_str if x == " "]
    if any(l) == False and check_O(check_str) == False and check_X(check_str) == False:
        return 1
# Interactive game loop: players alternate (even `chance` = X, odd = O)
# entering 1-based "row col" coordinates until the game ends (a == 1).
chance = 0
a = 0
while a != 1:
    cord = input("Enter Cordinates: ").split()
    try:
        cord = [ int(_) for _ in cord]
    except ValueError:
        print("You should enter numbers!")
    # NOTE(review): after a ValueError, `cord` still holds strings, so the
    # unpack below proceeds with non-ints (silently skipped by the type
    # check); entering other than exactly 2 values crashes here -- verify.
    i , j = cord
    if type(i) == int and type(j) == int:
        if i <= 3 and j <= 3 and i > 0 and j > 0:
            # Cell is free if it still holds the placeholder.
            if cordinates[i-1][j-1] == "_" or cordinates[i-1][j-1] ==" " :
                if chance % 2 == 0:
                    cordinates[i-1][j-1] = "X"
                else:
                    cordinates[i-1][j-1] = "O"
                # Flatten the grid back to a 9-cell list for the checkers.
                new_string = []
                for i in cordinates:
                    for _ in i :
                        new_string.append(_)
                PRINT(new_string)
                if impossible(new_string):
                    print("Impossible")
                    a = 1
                elif check_X(new_string):
                    print("X wins")
                    a = 1
                elif check_O(new_string):
                    print("O wins")
                    a = 1
                elif check_not_finished(new_string):
                    print("Draw")
                    a = 1
                chance += 1
            else:
                print("This cell is occupied! Choose another one!")
        else:
            print("Coordinates should be from 1 to 3!")
| true |
22824bd497b62fba593acae474db94e1ab1939d8 | Python | RajivMotePro/wot2text | /test/com/rajivmote/wot/test_AsciiNormalizer.py | UTF-8 | 1,695 | 3.171875 | 3 | [] | no_license | import unittest
from com.rajivmote.wot.AsciiNormalizer import AsciiNormalizer
class TestAsciiNormalizer(unittest.TestCase):
    """Check that AsciiNormalizer.to_ascii maps common Unicode
    punctuation and accented characters to ASCII equivalents."""
    def setUp(self):
        # NOTE(review): self.func is never used -- every test calls the
        # static-style AsciiNormalizer.to_ascii directly; consider removing.
        self.func = AsciiNormalizer()
    def test_OpenSingleQuote(self):
        s = 'The so-called \u2018fob\u2019 was on the table.'
        result = AsciiNormalizer.to_ascii(s)
        self.assertEqual("The so-called 'fob' was on the table.", result)
    def test_Apostrophe(self):
        s = 'Tel\u2019aran\u2019rhiod'
        result = AsciiNormalizer.to_ascii(s)
        self.assertEqual("Tel'aran'rhiod", result)
    def test_Elipses(self):
        # Period + non-breaking-space sequences collapse to "...".
        s = 'Something.\xa0.\xa0.\xa0 Strange'
        result = AsciiNormalizer.to_ascii(s)
        self.assertEqual("Something... Strange", result)
    def test_EMDash(self):
        s = "It was the best\u2014and worst\u2014of times."
        result = AsciiNormalizer.to_ascii(s)
        self.assertEqual("It was the best--and worst--of times.", result)
    def test_ENDash(self):
        s = "100\u2013500"
        result = AsciiNormalizer.to_ascii(s)
        self.assertEqual('100-500', result)
    def test_DoubleQuotes(self):
        s = "He blinked. \u201cIt is nothing,\u201d he said."
        result = AsciiNormalizer.to_ascii(s)
        self.assertEqual('He blinked. "It is nothing," he said.', result)
    def test_HorizontalElipsis(self):
        s = 'Something\u2026 Strange'
        result = AsciiNormalizer.to_ascii(s)
        self.assertEqual('Something... Strange', result)
    def test_LowerCwithCedilla(self):
        s = 'soup\xe7on'
        result = AsciiNormalizer.to_ascii(s)
        self.assertEqual("soupcon", result)
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
3e271d490a924b57a9b5e1ba65192e618336d3b3 | Python | wbclark/crhc-cli | /tests/test_help.py | UTF-8 | 3,793 | 2.53125 | 3 | [] | no_license | """
Module responsible for test the help menu content
"""
from help import help_opt
def test_check_main_help_menu():
    """
    Verify the main help menu content
    """
    response = help_opt.help_main_menu()
    content = "\
    CRHC Command Line Tool\n\
    \n\
    Usage: \n\
    crhc [command]\n\
    \n\
    Available Commands:\n\
    inventory To list the Inventory data.\n\
    swatch To list the Subscription data.\n\
    endpoint To list all the available API endpoints on `console.redhat.com`\n\
    get To consume the API endpoint directly.\n\
    login To authenticate using your offline token.\n\
    logout To cleanup the local conf file, removing all the token information.\n\
    token To print the access_token. This can be used with `curl`, for example.\n\
    whoami To show some information regarding to the user who requested the token.\n\
    ts To execute some advanced options / Troubleshooting.\n\
    \n\
    Flags: \n\
    --version, -v This option will present the app version.\n\
    --help, -h This option will present the help.\n\
    "
    assert response == content
def test_check_inventory_help_menu():
    """
    Verify the inventory help menu content
    """
    response = help_opt.help_inventory_menu()
    content = "\
    Usage: \n\
    crhc inventory [command]\n\
    \n\
    Available Commands:\n\
    list List the inventory entries, first 50\n\
    list_all List all the inventory entries\n\
    \n\
    Flags: \n\
    --display_name Please, type the FQDN or Partial Hostname\n\
    --help, -h This option will present the help.\
    "
    assert response == content
def test_check_swatch_help_menu():
    """
    Verify the subscription (swatch) help menu content
    """
    response = help_opt.help_swatch_menu()
    content = "\
    Usage: \n\
    crhc swatch [command]\n\
    \n\
    Available Commands:\n\
    list List the swatch entries, first 100\n\
    list_all List all the swatch entries\n\
    socket_summary Print the socket summary\n\
    Flags: \n\
    --help, -h This option will present the help.\
    "
    assert response == content
def test_check_endpoint_help_menu():
    """
    Verify the endpoint help menu content
    """
    response = help_opt.help_endpoint_menu()
    content = "\
    Usage: \n\
    crhc endpoint [command]\n\
    \n\
    Available Commands:\n\
    list List all the endpoints available\
    "
    assert response == content
def test_check_get_help_menu():
    """
    Verify the get help menu content
    """
    response = help_opt.help_get_menu()
    content = "\
    Usage: \n\
    crhc get [command]\n\
    \n\
    Available Commands:\n\
    get <endpoint API URL HERE> It will retrieve all the available methods\
    "
    assert response == content
def test_check_login_help_menu():
    """
    Verify the login help menu content
    """
    response = help_opt.help_login_menu()
    content = "\
    Usage: \n\
    crhc login [flags]\n\
    \n\
    Flags:\n\
    --token Setting the offline token in order to get access to the content.\n\
    \n\
    Info:\n\
    You can obtain a token at: https://console.redhat.com/openshift/token\n\
    \n\
    The command will be something similar to 'crhc login --token eyJhbGciOiJIUzI1NiIsIn...'\
    "
    assert response == content
def test_check_ts_help_menu():
    """
    Verify the ts (troubleshooting) help menu content
    """
    response = help_opt.help_ts_menu()
    content = "\
    Usage: \n\
    crhc ts [command]\n\
    \n\
    Available Commands:\n\
    dump dump the json files, Inventory and Subscription\n\
    match match the Inventory and Subscription information\n\
    clean cleanup the local 'cache/temporary/dump' files\
    "
    assert response == content
| true |
96e3dcde249591adcbf7b38ecb139e9342e4d4df | Python | Alex-Linhares/sdm | /python/imac27tests.py | UTF-8 | 5,004 | 2.546875 | 3 | [] | no_license | # cd /Users/AL/Dropbox/0. AL Current Work/3. To Submit/Dr K/AL/python/
import sdm
import sdm_utils
from numpy import *
def mem_write_x_at_x(count=10):
    """Write `count` random bitstrings, each stored at its own address."""
    for _ in range(count):
        pattern = sdm.Bitstring()
        sdm.thread_write(pattern, pattern)
def mem_write_x_at_random(count=10):
    """Write `count` random bitstrings, each stored at an unrelated random address."""
    for _ in range(count):
        address = sdm.Bitstring()
        payload = sdm.Bitstring()
        sdm.thread_write(address, payload)
def linhares_fig7_1():
    """Plot the data of table 7.1 (Linhares figure 7.1)."""
    import sdm
    import sdm_utils
    sdm.initialize()
    a = sdm_utils.table_7_1()
    import pylab
    pylab.plot(a)
    pylab.show()
def linhares_critical1():
    """Extend a saved 45k-item memory to 50k, plot the critical-distance
    curve, and save the grown memory back to disk."""
    #cd /Users/AL/Dropbox/0. AL Current Work/3. To Submit/Dr K/AL/python/
    import sdm
    import sdm_utils
    import time
    start=time.clock()
    #sdm.initialize()
    # Resume from a previously saved memory snapshot.
    sdm.initialize_from_file("/Users/AL/Desktop/mem45000_n1000_10000x_at_x.sdm")
    mem_write_x_at_x(5000)
    v = sdm.Bitstring()
    sdm.thread_write(v,v)
    print ("computing distances graph")
    print (time.clock()-start, "seconds")
    a = sdm_utils.critical_distance2(0, 1000, 1, v)
    print (time.clock()-start)
    # NOTE(review): this file mixes Python 2 `print x` statements (below)
    # with parenthesised prints above -- it only runs under Python 2.
    print "saving file"
    sdm.save_to_file("/Users/AL/Desktop/mem50000_n1000_10000x_at_x.sdm")
    import pylab
    pylab.plot(a)
    pylab.show()
def scan_for_distances():
    """Grow the memory by 1000 items at a time (up to 50k) and pickle the
    critical-distance curve after each step (Python 2: cPickle, print stmt)."""
    import time, cPickle;
    sdm.initialize()
    v = sdm.Bitstring()
    for i in range (0,10,1):
        sdm.thread_write(v,v)
    import pylab
    for i in range (1000,51000,1000):
        print 'Computing distances for '+str(i)+' items registered'
        #add 1000 itens to memory
        mem_write_x_at_x(1000)
        a = sdm_utils.critical_distance2(0, 1000, 1, v, read=sdm.thread_read_chada)
        #get new distance values in a
        #save a
        cPickle.dump(a, open (str(i)+'10writes_Chada_Read.cPickle', 'wb'))
        print 'saved '+str(i)+'.cPickle'
    #print 'now lets see..'
    #for i in range (1000,11000,1000):
    #    print (cPickle.load(open(str(i)+'.cPickle','rb')))
#from pylab import *
def TestFig1():
    """Overlay the 50 pickled distance curves on a single pylab plot."""
    import os, cPickle
    #os.chdir ("results/6_iter_readng/1000D/DrK_Read/x_at_x/")
    import pylab
    for i in range (1000,51000,1000):
        a = (cPickle.load(open(str(i)+'_10writes.cPickle','rb')))
        pylab.plot(a)
    pylab.show()
from matplotlib.pylab import *
def Plot_Heatmap (data=[]):
    """Render the critical-distance behavior as a heatmap with a contour.

    `data` is expected to be a 2-D numpy array (rows = thousands of items
    stored, columns = original distance).
    NOTE(review): the mutable default `[]` is unusable anyway (a list has
    no .max()); consider `data=None` + validation.  `plt`/`cm`/contour etc.
    come from the `from matplotlib.pylab import *` at file level -- confirm
    `plt` is actually exported by that star import.
    """
    # Make plot with vertical (default) colorbar
    maxd = int(data.max())
    mind = int(data.min())
    avgd = int ((maxd+mind) / 2);
    print 'minimum value=',mind
    fig = plt.figure()
    ax = fig.add_subplot(111)
    #use aspect=20 when N=1000
    #use aspect=5 when N=256
    cax = ax.imshow(data, cmap=cm.YlGnBu, aspect=5.0, interpolation=None, norm=None, origin='lower')
    ax.set_title('Critical Distance Behavior', fontsize=58)
    ax.grid(True, label='Distance')
    ax.set_xlabel('original distance', fontsize=100)
    ax.set_ylabel("# items previously stored (000's)")
    # Add colorbar, make sure to specify tick locations to match desired ticklabels
    cbar = fig.colorbar(cax, ticks=[mind, avgd, maxd]) #had ZERO here before
    cbar.ax.set_yticklabels([str(mind), str(avgd), str(maxd)])
    cbar.ax.set_ylabel('distance obtained after 20 iteractive-readings', fontsize=24)
    #########CONTOUR DELINEATES THE CRITICAL DISTANCE
    # We are using automatic selection of contour levels;
    # this is usually not such a good idea, because they don't
    # occur on nice boundaries, but we do it here for purposes
    # of illustration.
    CS = contourf(data, 100, levels = [mind,avgd,maxd], alpha=0.1, cmap=cm.YlGnBu, origin='lower')
    # Note that in the following, we explicitly pass in a subset of
    # the contour levels used for the filled contours. Alternatively,
    # We could pass in additional levels to provide extra resolution,
    # or leave out the levels kwarg to use all of the original levels.
    CS2 = contour(CS, levels=[88], colors = 'gray', origin='lower', hold='on', linestyles='dashdot')
    title('Critical Distance Behavior', fontsize=40)
    xlabel('original distance', fontsize=24)
    ylabel("# items previously stored (000's)", fontsize=24)
    # Add the contour line levels to the colorbar
    #cbar.add_lines(CS2)
    show()
from matplotlib.pylab import *
import os, cPickle
def GetDataForPlots(folder='', filenameext='MUST_BE_PROVIDED'):
    """Load the 50 per-checkpoint cPickle result files and return the
    second column of the concatenated rows, reshaped to (50, 256).

    folder      -- optional directory to chdir into first
    filenameext -- suffix of the result files to load
    """
    # BUG FIX: the original wrote p = q = r = s = [] which binds all four
    # names to the *same* list object; only p is needed at this point and
    # the unused s is dropped entirely.
    p = []
    if len(folder) > 0:
        os.chdir(folder)
    for i in range(1, 51):
        S = 'N=256_iter_read=2_' + str(i * 1000) + filenameext + '.cPickle'
        p.append(cPickle.load(open(S, 'rb')))
    q = concatenate(p, axis=0)
    r = q[:, 1]            # keep only the second column of every row
    print(len(r))
    print('& shape (r)=%s' % str(shape(r)))
    r.shape = (50, 256)    # if N=256
    #r.shape=(50,1000)     # use this shape instead when N=1000
    print('r=%s' % str(r))
    return r
def now():
    """Driver: load the x_at_x / 0-writes dataset and plot it as a heatmap.

    The commented-out calls below load alternative datasets (1-write and
    10-writes runs) from their result folders.
    """
    #data=GetDataForPlots("results/6_iter_readng/1000D/DrK_Read/x_at_x/1_write", '')
    #data=GetDataForPlots("results/6_iter_readng/1000D/DrK_Read/x_at_x/10_writes", '_10writes')
    data=GetDataForPlots('','saved items_x_at_x_0_writes_DrK_cubed')
    Plot_Heatmap (data)
| true |
bdd9f2a1a552f26fe150ee46294235070765c75e | Python | cgu2022/NKC---Python-Curriculum | /Problems/Unit 1/Unit1Set1.py | UTF-8 | 2,841 | 4.59375 | 5 | [] | no_license | #######################################################################################
# Unit 1, Set 1 practice problems: variables, printing, casting, and comments.
# Each numbered section below is an exercise prompt; the assignments already
# present (cards, string1, ...) are fixtures for the exercises that follow.
# 1.1
# Make 1 variable storing a string, one storing an integer, one storing a float, and another storing a boolean.
#######################################################################################
# 1.2
# Create a string variable that holds your name!
#######################################################################################
# 1.3
# Make 2 boolean variables that are not equal to each other.
# WAIT HERE
#######################################################################################
# 1.4
# Change cards from 5 to 10 by reassigning the variable.
# Then change it to 20 by adding itself and 10 with reassigning
cards = 5
# WAIT HERE
#######################################################################################
# 1.5
# Below, we have 2 strings: one stores "dog" and the other stores "cat".
# Concatenate them together and store the result in a new variable.
string1 = "dog"
string2 = "cat"
# WAIT HERE
#######################################################################################
# 1.6
# Use a print statement to print whatever you want to the console!
#######################################################################################
# 1.7
# Use a print statement to print the variable print_this to the console.
print_this = "Success! You printed a variable!"
#######################################################################################
# 1.8
# Store what is held in the variable var1 in a new variable called var2
# Then, print out both variables to prove that they are equal.
var1 = 5
#######################################################################################
# 1.9
# Print out the sum of a, b, and c!
a = 1
b = 2
c = 3
#######################################################################################
# 1.10
# Print out 2 different variables on the same line!
# WAIT HERE
#######################################################################################
# 1.11
# Make two int variables and cast them to string variables. Add them together.
#######################################################################################
# 1.12
# Do division with two int variables which don't evenly divide into each other. Cast the
# answer to an int and print the int.
######################################################################################
# 1.13
# Print these two variables together by casting and adding:
# Hint: There are two ways of adding the variables
a = 1
b = "45"
#Wait Here
#######################################################################################
# 1.14
# You are currently reading a comment! Comment out the following code using #
# It will no longer run as code!
# var123 = 10
| true |
a9f843fcd69f791614a79d02356a5c64deacb214 | Python | hanglomo/Jia-s-python | /class6-test1.py | UTF-8 | 623 | 4.78125 | 5 | [] | no_license |
#人的年龄
age=int(input("人的年龄是多少"))
if age>120 or age<0:
print("年龄不符合标准")
else:
print("合法年龄")
#考试成绩
a=int(input("数学考试成绩"))
b=int(input("语文考试成绩"))
if a>=60 or b>=60:
print("考试及格")
else:
print("考试不及格")
#奖励分类
a=int(input("你考了多少分"))
if a>=100:
print("奖励书一个")
elif 80<a<100:
print("奖励本一个")
elif 60<a<80:
print("奖励笔一根")
else:
print("无奖励")
| true |
d039455c17ab14f0f5e58fafa8efcdded43f8ca1 | Python | vvertash/DMD | /queries.py | UTF-8 | 12,899 | 3 | 3 | [] | no_license | import mysql.connector
# import datetime
from datetime import datetime, date, time
from datetime import timedelta
from math import sin, cos, sqrt, atan2, radians
import operator
import math
# Snapshot of "now" taken once at import time; every date computation in the
# query functions below is relative to this single moment.
now = datetime.now()
# NOTE(review): database credentials are hard-coded in source control -- they
# should be moved to environment variables or a config file.
mydb = mysql.connector.connect(
    host="db4free.net",
    user= "vertash",
    password="todoproject",
    database="car_system"
)
# Single shared cursor used by all query*() functions below.
mycursor = mydb.cursor()
# first query
def query1():
    """Return every red Car whose CID starts with 'AN', one row per line."""
    mycursor.execute("SELECT * FROM Car WHERE Color = 'red' AND CID LIKE 'AN%'")
    rows = mycursor.fetchall()
    # Render each fetched row tuple on its own line.
    return "".join(str(row) + "\n" for row in rows)
# second query
def query2(input):
    """For the given date, count how many Charge rows overlap each hour of
    the day and return one "Xh-(X+1)h: count" line per hour.

    input -- date string in the format stored in Charge.Date
    """
    answer = ""
    sql = "SELECT * FROM Charge WHERE Date = %s"
    mycursor.execute(sql, (input,))
    myresult = mycursor.fetchall()
    for i in range(24):
        # Zero-padded HH:00:00 hour boundaries.  "%02d" replaces the manual
        # padding branches of the original (produces identical strings,
        # including "24:00:00" for the last hour).  The unused `ans` list
        # accumulator from the original was removed.
        start = "%02d:00:00" % i
        finish = "%02d:00:00" % (i + 1)
        res = 0
        for j in myresult:
            # A charge overlaps the hour when it starts before the hour
            # ends and ends after the hour starts; lexicographic string
            # comparison is valid because all times are zero-padded.
            if (j[3] <= finish and j[4] >= start):
                res += 1
        answer += (str(i) + "h-" + str(i + 1) + "h: " + str(res) + "\n")
    # returning the result
    return answer
# third query
def query3():
    """For each of the last 7 days, collect which cars were rented during the
    morning (07-10), afternoon (12-14) and evening (17-19) peak windows, and
    report the percentage of the whole fleet used in each window."""
    answer = ""
    sql = "SELECT * FROM Rent ""WHERE (Start_date = %s OR Finish_date = %s) AND Finish_time >= %s AND Start_time <= %s "
    morning = set()
    afternoon = set()
    evening = set()
    for i in range(7):
        # Day i days ago, formatted as stored in the DB (dd-mm-YYYY strings).
        N_days_ago = now - timedelta(days=i)
        N_days_ago = N_days_ago.strftime("%d-%m-%Y")
        mycursor.execute(sql,(N_days_ago,N_days_ago,'07:00:00', '10:00:00'))
        result1 = mycursor.fetchall()
        # NOTE(review): the inner loops reuse the outer loop variable `i`;
        # harmless here (the outer `for` rebinds it) but worth renaming.
        for i in result1:
            morning.add(i[1])   # i[1] is the car id (CID) of the rent row
        mycursor.execute(sql, (N_days_ago, N_days_ago, '12:00:00', '14:00:00'))
        result2 = mycursor.fetchall()
        for i in result2:
            afternoon.add(i[1])
        mycursor.execute(sql, (N_days_ago, N_days_ago, '17:00:00','19:00:00'))
        result3 = mycursor.fetchall()
        for i in result3:
            evening.add(i[1])
    mycursor.execute("SELECT * FROM Car")
    # NOTE(review): `all` shadows the builtin; also, under Python 2 the
    # division below is integer division and would always yield 0 or 100 --
    # this code assumes Python 3 true division.
    all = len(mycursor.fetchall())
    answer += ("Morning: " + str((int)((len(morning)/all)*100)) + "\n")
    answer += ("Afternoon: " + str((int)((len(afternoon)/all)*100)) + "\n")
    answer += ("Evening: " + str((int)((len(evening)/all)*100)) + "\n")
    # returning the result
    return answer
# forth query
def query4():
    """List every rent by user "Danis" that started within the last 31 days,
    appending "Total price: <whole hours x hourly rate>" to each row.

    Assumes Rent columns: [2]=start date, [3]=start time, [4]=finish date,
    [5]=finish time, [6]=hourly price -- matching the original's indexing.
    """
    answer = ""
    for n in range(31):
        q4 = "SELECT * FROM Rent WHERE Username = %s AND Start_date = %s"
        # Day n days ago, formatted the way dates are stored (dd-mm-YYYY).
        N_days_ago = (now - timedelta(days=n)).strftime("%d-%m-%Y")
        mycursor.execute(q4, ("Danis", N_days_ago))
        result = mycursor.fetchall()
        # (The original's `n += 1` inside the for loop was a no-op and has
        # been removed.)
        for i in result:
            start = datetime.strptime(i[2] + " " + i[3], '%d-%m-%Y %H:%M:%S')
            finish = datetime.strptime(i[4] + " " + i[5], '%d-%m-%Y %H:%M:%S')
            # BUG FIX: the original derived whole hours by slicing characters
            # out of str(timedelta), which crashes for rents of 24 hours or
            # more ("1 day, 2:00:00"); compute the hours arithmetically.
            hours = int((finish - start).total_seconds() // 3600)
            for j in i:
                answer += (str(j) + " ")
            answer += ("Total price: " + str(hours * i[6]) + "\n")
    # returning the result
    return answer
# fifth query
def query5(input):
    """Walk every day from *input* (dd-mm-YYYY) up to now and report:
    the haversine distance (km) between each Manage order's pick-up GPS and
    car GPS (one line per order), the average rent duration in seconds, and
    the average distance per day with orders.

    Returns "No orders after this date" when no rents are found.
    """
    answer = ""
    day = datetime.strptime(input, '%d-%m-%Y')
    duration = 0       # summed rent durations, seconds
    counter = 0        # number of rents seen
    distance = 0.0     # summed pick-up -> car distances, km
    day_count = 0      # number of Manage orders seen
    while day < now:
        # BUG FIX: the original advanced by timedelta(days=init) with init
        # growing 0,1,2,... so it skipped ever larger gaps of days; we now
        # advance exactly one day per iteration (see loop tail).
        day_str = day.strftime("%d-%m-%Y")
        mycursor.execute("SELECT * FROM Rent WHERE Start_date = %s", (day_str,))
        for rent in mycursor.fetchall():
            start = datetime.strptime(rent[2] + " " + rent[3], '%d-%m-%Y %H:%M:%S')
            finish = datetime.strptime(rent[4] + " " + rent[5], '%d-%m-%Y %H:%M:%S')
            # BUG FIX: duration was reconstructed by slicing str(timedelta),
            # which mis-read minutes/seconds and broke for spans >= 1 day;
            # total_seconds() is exact.
            duration += int((finish - start).total_seconds())
            counter += 1
        # Haversine distance between order GPS (cols 1,2) and car GPS (5,6).
        mycursor.execute("SELECT * FROM Manage WHERE Order_date = %s", (day_str,))
        for orders in mycursor.fetchall():
            # approximate radius of earth in km
            R = 6373.0
            lat1 = radians(orders[1])
            lon1 = radians(orders[2])
            lat2 = radians(orders[5])
            lon2 = radians(orders[6])
            dlon = lon2 - lon1
            dlat = lat2 - lat1
            a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
            c = 2 * atan2(sqrt(a), sqrt(1 - a))
            distance += R * c
            answer += str(R * c) + "\n"
            day_count += 1
        day = day + timedelta(days=1)
    if counter == 0:
        answer = "No orders after this date"
    else:
        answer += (str(float(duration) / float(counter)) + "\n")
        # Guard: rents may exist on days that have no Manage orders, in which
        # case the original would divide by zero here.
        if day_count > 0:
            answer += str(distance / day_count)
    # returning the result
    return answer
# sixth query
def _count_order_locations(start_time, end_time):
    """Tally CC_Order pick-up (cols 1,2) and destination (cols 3,4)
    coordinates for orders placed in [start_time, end_time];
    returns (pickup_counts, dest_counts) dicts keyed by coordinate pair."""
    mycursor.execute(
        "SELECT * FROM CC_Order WHERE Order_time BETWEEN %s AND %s",
        (start_time, end_time))
    pickup = {}
    dest = {}
    for row in mycursor.fetchall():
        pickup[(row[1], row[2])] = pickup.get((row[1], row[2]), 0) + 1
        dest[(row[3], row[4])] = dest.get((row[3], row[4]), 0) + 1
    return pickup, dest
def query6():
    """Report the most popular pick-up point and destination for each of the
    morning (07-10), afternoon (12-14) and evening (17-19) windows -- one
    coordinate pair per line, pick-up before destination, in that window
    order.  (The original repeated the same counting stanza three times;
    it is factored into _count_order_locations.)"""
    answer = ""
    windows = [('07:00:00', '10:00:00'),
               ('12:00:00', '14:00:00'),
               ('17:00:00', '19:00:00')]
    for start_time, end_time in windows:
        pickup, dest = _count_order_locations(start_time, end_time)
        # max(..., key=count) replaces the original sort-then-take-last; for
        # tied counts the winner among ties is unspecified, as before.
        answer += (str(max(pickup.items(), key=operator.itemgetter(1))[0]) + "\n")
        answer += (str(max(dest.items(), key=operator.itemgetter(1))[0]) + "\n")
    # returning the result
    return answer
# seventh query
def query7():
    """Rank every car by its number of rents over the last 93 days and
    propose the least-used 10% (rounded up) of the fleet for removal."""
    answer = ""
    mycursor.execute("SELECT * FROM Car")
    allcars = mycursor.fetchall()
    # Start every car at zero so never-rented cars still appear (and rank
    # first for removal).
    cars = {}
    for i in allcars:
        cars[i[0]] = 0
    for n in range(93):
        last3 = "SELECT * FROM Rent WHERE Start_date = %s"
        N_days_ago = (now - timedelta(days=n)).strftime("%d-%m-%Y")
        mycursor.execute(last3, (N_days_ago, ))
        for j in mycursor.fetchall():
            cars[j[1]] += 1            # j[1] is the rented car's CID
    sorted_cars = sorted(cars.items(), key=operator.itemgetter(1))
    # BUG FIX: math.ceil returns a float on Python 2 and list slicing
    # requires an int index, so the slice below could raise TypeError;
    # cast explicitly (harmless on Python 3 where ceil returns int).
    to_trash = int(math.ceil(float(len(allcars)) / 10))
    answer += ("All cars with orders: " + str(dict(sorted_cars)) + "\n")
    answer += ("Cars to remove: " + str(dict(sorted_cars[0:to_trash])) + "\n")
    # returning the result
    return answer
# eighth query
def query8(input):
    """For the month following *input* (dd-mm-YYYY), count for each user how
    many Charge rows exist for the (car, date) pairs of that user's rents;
    returns the str() of a {username: charge_count} dict."""
    answer = ""
    input = datetime.strptime(input, '%d-%m-%Y')
    # NOTE(review): `users` and `usr_set` are populated below but never used
    # to build the result -- only `us` feeds the final answer; dead code.
    users = {}
    us = {}
    usr_set = set()
    # cars
    for i in range(31):
        # Each of the 31 days after the input date, as a dd-mm-YYYY string.
        month_later = input + timedelta(days=i)
        month_later = month_later.strftime("%d-%m-%Y")
        sql = "SELECT * FROM Rent WHERE Start_date = %s"
        mycursor.execute(sql,(month_later, ))
        result = mycursor.fetchall()
        for n in result:
            usr_set.add(n[0])          # n[0] is the renting username
        for j in usr_set:
            users[j] = list()
        # Group (car id, start date) pairs per username.
        for n in result:
            if n[0] in us:
                us[n[0]].append((n[1], n[2]))
            else:
                us[n[0]] = list()
                us[n[0]].append((n[1], n[2]))
    ans = {}
    for name in us:
        trips = us[name]
        for trip in trips:
            # Count charges of this car on this rent's start date.
            sql = "SELECT * FROM Charge WHERE CID = %s AND Date = %s"
            mycursor.execute(sql, (trip[0], trip[1], ))
            cnt = 0
            myresult = mycursor.fetchall()
            for ii in myresult:
                cnt += 1
            if name in ans:
                ans[name] += cnt
            else:
                ans[name] = cnt
    # writing the result
    answer = str(ans)
    # returning the result
    return answer
def earlier(s1, s2):
    """Return True iff date string *s1* is strictly before *s2*.

    Both arguments are "dd-mm-yyyy" strings; comparison is by year, then
    month, then day.  Equal dates compare False.
    """
    # Rearrange each string into a (year, month, day) tuple so that plain
    # lexicographic tuple comparison implements the year/month/day cascade.
    key1 = (s1[6:10], s1[3:5], s1[0:2])
    key2 = (s2[6:10], s2[3:5], s2[0:2])
    return key1 < key2
# ninth query
def query9():
    """For every workshop, find the most frequently ordered detail
    (PW_Order column 3) and report roughly how many of it are needed per
    week, averaged since the earliest order date across all workshops."""
    answer = ""
    ans = {}
    first_date = ""
    mycursor.execute("SELECT WID FROM Workshop")
    myresult = mycursor.fetchall()
    for wid1 in myresult:
        wid = wid1[0]
        sql = "SELECT * FROM PW_Order WHERE WID = %s"
        mycursor.execute(sql,(wid, ))
        result = mycursor.fetchall()
        details = {}
        for res in result:
            # Tally this workshop's ordered details by name.
            if res[3] in details:
                details[res[3]] += 1
            else:
                details[res[3]] = 1
            # Track the earliest order date seen across ALL workshops
            # (res[0] is the dd-mm-YYYY order date; earlier() compares them).
            if len(first_date) == 0:
                first_date = res[0]
            else:
                if earlier(res[0], first_date):
                    first_date = res[0]
        d = len(details)
        if (d != 0):
            # Keep this workshop's (detail, count) with the highest count.
            ans[wid] = sorted(details.items(), key=operator.itemgetter(1))[d - 1]
    # NOTE(review): if there are no PW_Order rows at all, first_date is still
    # "" here and strptime raises ValueError.
    first_date = datetime.strptime(first_date, '%d-%m-%Y')
    days = now - first_date
    # NOTE(review): parsing days out of str(timedelta) breaks when the span
    # is under one day (str() then has no "N days," prefix) -- confirm that
    # data always spans at least a day, or use days = (now - first_date).days.
    days = int(str(days).split(" ")[0])
    weeks = math.ceil(days / 7.0)
    for item in ans:
        i = ans[item]
        num = math.ceil((float)(i[1]) / (float)(weeks))
        answer += ("Workshop № " + str(item) + " most often requires " + i[0] +
                   " (about " + str(num) + " every week on average)." + "\n")
    # returning the result
    return answer
# tenth query
def query10():
    """Report the car model with the highest total repair cost and the
    average repair spend per day since the earliest repair date."""
    answer = ""
    mycursor.execute("SELECT * FROM Repair")
    result = mycursor.fetchall()
    car_type = {}
    # Sentinel far-future date so min() picks the first real repair date.
    min_date = "01-01-2100"
    min_date = datetime.strptime(min_date, '%d-%m-%Y')
    for i in result:
        car_type[i[3]] = 0            # i[3] is the car model
    for i in result:
        cur_date = i[5]               # i[5] is the repair date (dd-mm-YYYY)
        cur_date = datetime.strptime(cur_date, '%d-%m-%Y')
        min_date = min(min_date, cur_date)
        car_type[i[3]] += i[4]        # i[4] is the repair cost
    new_date = now - min_date
    # NOTE(review): extracting the day count from str(timedelta) breaks when
    # the span is under one day (int("H:MM:SS") raises, and a 0-day span
    # would divide by zero below) -- consider (now - min_date).days instead.
    new_date = str(new_date)
    new_date = new_date.split(" ")
    # NOTE(review): an empty Repair table makes the [len-1] index fail.
    sorted_models = sorted(car_type.items(), key=operator.itemgetter(1))
    sorted_models = sorted_models[len(car_type) - 1]
    answer += ("The most expensive model: " + str(sorted_models[0]) + "\n")
    answer += ("Average(per day) cost of repairs: " + str(sorted_models[1] / (int)(new_date[0])) + "\n")
    # returning the result
    return answer
| true |
e47bdb0ffcee09099b82a4bcb0212c03359c7e5c | Python | gregneat/T21 | /PythonCurriculum/Python/26. Python Graphics - New Waldo/Waldo.py | UTF-8 | 2,422 | 2.90625 | 3 | [] | no_license | from graphics import *;
from random import *;
class Waldo:
    """A drawable "Waldo" figure for the Zelle graphics library.

    The figure is built from a 15x15 head square anchored at the
    constructor's point, with a hat and pom-pom extending up to 21 units
    above the anchor.  All shapes are positioned relative to that anchor.
    """
    # Class-level colors shared by all Waldo instances.
    skinColor = color_rgb( 255, 194, 166 );
    brownColor = color_rgb( 128, 64, 0 );   # NOTE(review): defined but unused in this class
    def __init__(self,point):
        # `point` is the top-left corner of the head; every shape below is
        # laid out at fixed offsets from (x, y).
        self.point = point;
        x = point.getX();
        y = point.getY();
        self.head = Rectangle(point,Point(x+15,y+15));
        self.head.setFill(self.skinColor);
        hatP = [Point(x,y), Point(x+6,y-12), Point(x+9,y-12), Point(x+15,y)];
        self.hat = Polygon(hatP);
        self.hat.setFill("red");
        # White pom-pom on top of the hat (center y-15, radius 6 -> extends
        # to y-21, which is why contains() uses y-21 as the upper bound).
        self.poof = Circle(Point(x+7, y-15), 6);
        self.poof.setFill("white");
        self.Eyes = Rectangle( Point(x+2, y+3), Point(x+13, y+7));
        self.Eyes.setFill("white");
        self.PupilOne = Rectangle(Point(x+4, y+4), Point(x+6, y+6));
        self.PupilTwo = Rectangle(Point(x+9, y+4), Point(x+11, y+6));
        # Small rectangles at the sides of the eyes (glasses arms).
        self.thing1 = Rectangle(Point(x, y+3), Point(x+2, y+5));
        self.thing2 = Rectangle(Point(x+13, y+3), Point(x+15, y+5));
        self.mouth1 = Line(Point(x+4, y+13), Point(x+9, y+13));
        self.mouth2 = Line(Point(x+9, y+13), Point(x+12, y+12));
    ###########################################################
    def draw(self,canvas):
        """Draw every component shape on the given canvas."""
        self.hat.draw(canvas);
        self.head.draw(canvas);
        self.poof.draw(canvas);
        self.Eyes.draw(canvas);
        self.PupilOne.draw(canvas);
        self.PupilTwo.draw(canvas);
        self.thing1.draw(canvas);
        self.thing2.draw(canvas);
        self.mouth1.draw(canvas);
        self.mouth2.draw(canvas);
    def setFill(self,color):
        """Recolor the hat only (the rest of the figure keeps its colors)."""
        self.hat.setFill(color);
    def move(self,dx,dy):
        """Shift the whole figure (and its recorded anchor) by (dx, dy)."""
        self.point.x = self.point.x + dx;
        self.point.y = self.point.y + dy;
        self.hat.move(dx,dy);
        self.head.move(dx,dy);
        self.poof.move(dx,dy);
        self.Eyes.move(dx,dy);
        self.PupilOne.move(dx,dy);
        self.PupilTwo.move(dx,dy);
        self.thing1.move(dx,dy);
        self.thing2.move(dx,dy);
        self.mouth1.move(dx,dy);
        self.mouth2.move(dx,dy);
    def moveTo(self,x,y):
        """Relocate to (x, y) by undrawing and rebuilding all shapes.

        NOTE(review): the rebuilt shapes are not drawn again -- the caller
        must call draw() afterwards.
        """
        self.undraw();
        self.__init__(Point(x,y));
    def undraw(self):
        """Remove every component shape from its canvas."""
        self.hat.undraw();
        self.head.undraw();
        self.poof.undraw();
        self.Eyes.undraw();
        self.PupilOne.undraw();
        self.PupilTwo.undraw();
        self.thing1.undraw();
        self.thing2.undraw();
        self.mouth1.undraw();
        self.mouth2.undraw();
    def getX(self):
        """Return the anchor's x coordinate."""
        return self.point.x;
    def getY(self):
        """Return the anchor's y coordinate."""
        return self.point.y;
    def contains(self,p):
        """Return True iff point *p* lies inside the figure's bounding box
        (15 wide, from 21 above the anchor down to 15 below it)."""
        leftBound = self.point.x;
        rightBound = self.point.x + 15;
        upBound = self.point.y - 21;
        lowBound = self.point.y + 15;
        if(p.x >= leftBound and p.x <= rightBound and p.y >= upBound and p.y <= lowBound):
            return True;
        else:
            return False;
| true |
5b09781f38fa824873f688fc3756479c210fadd9 | Python | parthenon/TolaActivity | /indicators/tests/test_iptt_targetperiods_report.py | UTF-8 | 14,919 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | """ Functional tests for the iptt report generation view
in the 'targetperiods' view (all indicators on report are same frequency):
these classes test monthly/annual/mid-end indicators generated report ranges, values, sums, and percentages
"""
from datetime import datetime, timedelta
from iptt_sample_data import iptt_utility
from factories.indicators_models import IndicatorFactory, CollectedDataFactory, PeriodicTargetFactory
from indicators.models import Indicator, CollectedData, PeriodicTarget
class TestPeriodicTargetsBase(iptt_utility.TestIPTTTargetPeriodsReportResponseBase):
    """Shared fixture base for the target-period IPTT report tests.

    Subclasses set `indicator_frequency` and implement
    `increment_period(current) -> (next_start, period_end)` to step through
    the program's reporting period one target period at a time.
    """
    def setUp(self):
        self.program = None
        super(TestPeriodicTargetsBase, self).setUp()
        self.indicators = []
    def tearDown(self):
        # Delete child rows before indicators to satisfy FK dependencies.
        CollectedData.objects.all().delete()
        PeriodicTarget.objects.all().delete()
        Indicator.objects.all().delete()
        super(TestPeriodicTargetsBase, self).tearDown()
        if self.program is not None:
            self.program.delete()
        self.indicators = []
    def set_reporting_period(self, start, end):
        """Set the program's reporting period from 'YYYY-MM-DD' strings."""
        self.program.reporting_period_start = datetime.strptime(start, '%Y-%m-%d')
        self.program.reporting_period_end = datetime.strptime(end, '%Y-%m-%d')
        self.program.save()
    def add_indicator(self, targets=None, values=None):
        """Create one indicator at the subclass's frequency and populate its
        periodic targets / collected data (see add_periodic_targets)."""
        indicator = IndicatorFactory(
            target_frequency=self.indicator_frequency,
            program=self.program)
        self.indicators.append(indicator)
        self.add_periodic_targets(indicator, targets=targets, values=values)
    def add_periodic_targets(self, indicator, targets=None, values=None):
        """Walk the reporting period, creating one PeriodicTarget per period
        plus one CollectedData row (achieved defaults to 10 per period)."""
        current = self.program.reporting_period_start
        end = self.program.reporting_period_end
        count = 0
        while current < end:
            (next_start, period_end) = self.increment_period(current)
            target = PeriodicTargetFactory(indicator=indicator, start_date=current, end_date=period_end)
            # Only assign an explicit target when one was supplied for this period.
            if targets is not None and len(targets) > count:
                target.target = targets[count]
                target.save()
            value = 10 if values is None else values[count]
            _ = CollectedDataFactory(indicator=indicator, periodic_target=target, achieved=value,
                                     date_collected=current)
            current = next_start
            count += 1
class TestMonthlyTargetPeriodsIPTTBase(TestPeriodicTargetsBase):
    """Target-period report tests for MONTHLY-frequency indicators: checks
    the number of generated ranges and their target/actual/met values."""
    indicator_frequency = Indicator.MONTHLY
    def increment_period(self, current):
        """Return (start of next month, last day of current month).

        NOTE(review): datetime(year, month, current.day) raises ValueError
        when current.day doesn't exist in the next month (e.g. Jan 31 ->
        Feb 31); the reporting periods used in these tests start on day 1.
        """
        year = current.year if current.month < 12 else current.year + 1
        month = current.month + 1 if current.month < 12 else current.month - 11
        next_start = datetime(year, month, current.day)
        period_end = next_start - timedelta(days=1)
        return (next_start, period_end)
    def test_one_year_range_has_twelve_range_periods(self):
        self.set_reporting_period('2017-02-01', '2018-01-31')
        self.add_indicator()
        # ranges[0] is the life-of-program column; [1:] are the periods.
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 12,
                         self.format_assert_message(
                             "expected 12 ranges for monthly indicators over a year, got {0}".format(
                                 len(ranges))))
    def test_eight_month_range_has_eight_range_periods(self):
        self.set_reporting_period('2018-01-01', '2018-08-30')
        self.add_indicator()
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 8,
                         self.format_assert_message(
                             "expected 8 ranges for monthly indicators over 8 mos, got {0}".format(
                                 len(ranges))))
    def test_four_month_range_reports_targets(self):
        # Spot-checks target, actual, and "met" (actual/target) per period.
        self.set_reporting_period('2017-11-01', '2018-02-28')
        self.add_indicator(targets=[10, 12, 16, 14], values=[20, 11, 12, 13])
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 4,
                         self.format_assert_message(
                             "expected 4 ranges for monthly indicators over 4 mos, got {0}".format(
                                 len(ranges))))
        self.assertEqual(int(ranges[0]['target']), 10,
                         self.format_assert_message(
                             "first monthly indicator {0}\n expected 10 for target, got {1}".format(
                                 ranges[0], ranges[0]['target'])))
        self.assertEqual(int(ranges[1]['actual']), 11,
                         self.format_assert_message(
                             "second monthly indicator {0}\n expected 11 for actual, got {1}".format(
                                 ranges[1], ranges[1]['actual'])))
        self.assertEqual(ranges[2]['met'], "75%",
                         self.format_assert_message(
                             "third monthly indicator {0}\n expected 75% for met (12/16) got {1}".format(
                                 ranges[2], ranges[2]['met'])))
    def test_fifteen_month_range_cumulative_reports_targets(self):
        # Cumulative indicator: actuals accumulate (15/period), targets do not.
        self.set_reporting_period('2016-11-01', '2018-01-31')
        self.add_indicator(targets=[100]*15, values=[15]*15)
        self.indicators[0].is_cumulative = True
        self.indicators[0].save()
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 15,
                         self.format_assert_message(
                             "expected 15 ranges for monthly indicators over 15 mos, got {0}".format(
                                 len(ranges))))
        self.assertEqual(int(ranges[3]['target']), 100,
                         self.format_assert_message(
                             "fourth monthly indicator {0}\n expected 100 for target, got {1}".format(
                                 ranges[3], ranges[3]['target'])))
        self.assertEqual(int(ranges[8]['actual']), 135,
                         self.format_assert_message(
                             "eigth monthly indicator {0}\n expected 135 for actual, got {1}".format(
                                 ranges[8], ranges[8]['actual'])))
        self.assertEqual(ranges[5]['met'], "90%",
                         self.format_assert_message(
                             "sixth monthly indicator {0}\n expected 90% for met (90/100) got {1}".format(
                                 ranges[5], ranges[5]['met'])))
class TestAnnualTargetPeriodsIPTTBase(TestPeriodicTargetsBase):
    """Target-period report tests for ANNUAL-frequency indicators."""
    indicator_frequency = Indicator.ANNUAL
    def increment_period(self, current):
        """Return (same date next year, the day before it) as the next
        period start and current period end.

        NOTE(review): fails for Feb 29 starts (no Feb 29 next year); the
        reporting periods used in these tests avoid that date.
        """
        next_start = datetime(current.year + 1, current.month, current.day)
        period_end = next_start - timedelta(days=1)
        return (next_start, period_end)
    def test_two_year_range_has_two_range_periods(self):
        self.set_reporting_period('2016-02-01', '2018-01-31')
        self.add_indicator()
        # ranges[0] is the life-of-program column; [1:] are the periods.
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 2,
                         self.format_assert_message(
                             "expected 2 ranges for yearly indicators over two years, got {0}".format(
                                 len(ranges))))
    def test_four_and_a_half_year_range_has_five_range_periods(self):
        # A partial final year still produces its own range (4.5y -> 5).
        self.set_reporting_period('2014-06-01', '2018-12-31')
        self.add_indicator()
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 5,
                         self.format_assert_message(
                             "expected 5 ranges for yearly indicators over 4.5 years, got {0}".format(
                                 len(ranges))))
    def test_three_year_range_reports_targets(self):
        # Spot-checks target, actual, and "met" (actual/target) per period.
        self.set_reporting_period('2015-08-01', '2018-07-31')
        self.add_indicator(targets=[1000, 500, 200], values=[800, 500, 300])
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 3,
                         self.format_assert_message(
                             "expected 3 ranges for yearly indicators over 3 yrs, got {0}".format(
                                 len(ranges))))
        self.assertEqual(int(ranges[0]['target']), 1000,
                         self.format_assert_message(
                             "first yearly indicator {0}\n expected 1000 for target, got {1}".format(
                                 ranges[0], ranges[0]['target'])))
        self.assertEqual(int(ranges[1]['actual']), 500,
                         self.format_assert_message(
                             "second yearly indicator {0}\n expected 500 for actual, got {1}".format(
                                 ranges[1], ranges[1]['actual'])))
        self.assertEqual(ranges[2]['met'], "150%",
                         self.format_assert_message(
                             "third yearly indicator {0}\n expected 150% for met (300/200) got {1}".format(
                                 ranges[2], ranges[2]['met'])))
    def test_five_year_range_cumulative_reports_targets(self):
        # Cumulative indicator: actuals accumulate (30/period), targets do not.
        self.set_reporting_period('2015-11-01', '2020-11-30')
        self.add_indicator(targets=[100]*6, values=[30]*6)
        self.indicators[0].is_cumulative = True
        self.indicators[0].save()
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 6,
                         self.format_assert_message(
                             "expected 6 ranges for yearly indicators over 5 yrs 1 month, got {0}".format(
                                 len(ranges))))
        self.assertEqual(int(ranges[3]['target']), 100,
                         self.format_assert_message(
                             "fourth yearly indicator {0}\n expected 100 for target, got {1}".format(
                                 ranges[3], ranges[3]['target'])))
        self.assertEqual(int(ranges[4]['actual']), 150,
                         self.format_assert_message(
                             "fifth yearly indicator {0}\n expected 150 for actual, got {1}".format(
                                 ranges[4], ranges[4]['actual'])))
        self.assertEqual(ranges[1]['met'], "60%",
                         self.format_assert_message(
                             "second yearly indicator {0}\n expected 60% for met (60/100) got {1}".format(
                                 ranges[1], ranges[1]['met'])))
class TestMidEndTargetPeriodsIPTTBase(TestPeriodicTargetsBase):
    """Target-period report tests for MID_END indicators: always exactly two
    periods (midline and endline), so add_periodic_targets is overridden to
    take a 2-tuple of targets and a pair of value lists."""
    indicator_frequency = Indicator.MID_END
    def add_periodic_targets(self, indicator, targets=None, values=None):
        assert targets is None or len(targets) == 2, "targets should be a tuple of two, midline and endline"
        assert values is None or len(values) == 2, "values should be two tuples, midline and endline"
        if targets is None:
            # Bare fixture: one midline and one endline target, each with a
            # single factory-default collected-data row.
            target = PeriodicTargetFactory(indicator=indicator, period=PeriodicTarget.MIDLINE, customsort=0)
            _ = CollectedDataFactory(indicator=indicator, periodic_target=target)
            target = PeriodicTargetFactory(indicator=indicator, period=PeriodicTarget.ENDLINE, customsort=1)
            _ = CollectedDataFactory(indicator=indicator, periodic_target=target)
            return
        # Pair each supplied target with (customsort, MIDLINE/ENDLINE); every
        # entry of values[c] becomes one CollectedData row for that period.
        # NOTE(review): the loop variable `target` first holds the numeric
        # target, then is rebound to the PeriodicTarget object -- works, but
        # the name reuse is confusing.
        for c, (target, (customsort, target_type)) in enumerate(
                zip(targets, [(0, PeriodicTarget.MIDLINE), (1, PeriodicTarget.ENDLINE)])
        ):
            target = PeriodicTargetFactory(indicator=indicator, period=target_type,
                                           target=target, customsort=customsort)
            for v in values[c] if values is not None else [10]:
                _ = CollectedDataFactory(indicator=indicator, periodic_target=target, achieved=v)
    def test_bare_mid_end_has_two_range_periods(self):
        self.set_reporting_period('2016-02-01', '2018-01-31')
        self.add_indicator()
        # ranges[0] is the life-of-program column; [1:] are mid and end.
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 2,
                         self.format_assert_message(
                             "expected 2 ranges for bare mid/end indicators, got {0}".format(
                                 len(ranges))))
    def test_mid_end_reports_targets(self):
        # One collected value per period; checks target, actual, and met.
        self.set_reporting_period('2015-08-01', '2018-07-31')
        self.add_indicator(targets=[1000, 200], values=[[800,], [500,]])
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 2,
                         self.format_assert_message(
                             "expected 2 ranges for mid-end indicators, got {0}".format(
                                 len(ranges))))
        self.assertEqual(int(ranges[0]['target']), 1000,
                         self.format_assert_message(
                             "single mid indicator {0}\n expected 1000 for target, got {1}".format(
                                 ranges[0], ranges[0]['target'])))
        self.assertEqual(int(ranges[1]['actual']), 500,
                         self.format_assert_message(
                             "single end indicator {0}\n expected 500 for actual, got {1}".format(
                                 ranges[1], ranges[1]['actual'])))
        self.assertEqual(ranges[0]['met'], "80%",
                         self.format_assert_message(
                             "single mid indicator {0}\n expected 80% for met (800/1000) got {1}".format(
                                 ranges[0], ranges[0]['met'])))
    def test_mid_end_multiple_indicatorsreports_targets(self):
        # Multiple collected values per period: actuals should sum.
        self.set_reporting_period('2015-08-01', '2018-07-31')
        self.add_indicator(targets=[1600, 1000], values=[[800, 200], [500, 500]])
        ranges = self.get_response().indicators[0]['ranges'][1:]
        self.assertEqual(len(ranges), 2,
                         self.format_assert_message(
                             "expected 2 ranges for mid-end indicators, got {0}".format(
                                 len(ranges))))
        self.assertEqual(int(ranges[0]['target']), 1600,
                         self.format_assert_message(
                             "single mid indicator {0}\n expected 1600 for target, got {1}".format(
                                 ranges[0], ranges[0]['target'])))
        self.assertEqual(int(ranges[1]['actual']), 1000,
                         self.format_assert_message(
                             "single end indicator {0}\n expected 1000 for actual, got {1}".format(
                                 ranges[1], ranges[1]['actual'])))
        self.assertEqual(ranges[0]['met'], "63%",
                         self.format_assert_message(
                             "single mid indicator {0}\n expected 63% for met (1000/1600 rounded) got {1}".format(
                                 ranges[0], ranges[0]['met'])))
| true |
8aa6a5cd147f18c4aa8d00e9d50583e14e15e88e | Python | sohskd/mdp14rpi | /All communication/bt_communication.py | UTF-8 | 2,606 | 2.625 | 3 | [] | no_license | from bluetooth import *
from signalling import *
__author__ = 'Aung Naing Oo'
class BluetoothAPI(object):
    """RFCOMM Bluetooth server wrapper (PyBluez) for talking to the phone.

    NOTE(review): this file is Python 2 only -- see the `except Exception, e:`
    syntax in connect_bluetooth().
    """
    def __init__(self):
        """
        Connect to Galaxy s5 bluetooth
        RFCOMM port: 7
        MAC address: no need
        """
        self.server_socket = None
        self.client_socket = None
        self.bt_is_connected = False
        self.signalObject = SignallingApi()
    def close_bt_socket(self):
        """
        Close socket connections
        """
        if self.client_socket:
            self.client_socket.close()
            print ("Closing client socket")
        if self.server_socket:
            self.server_socket.close()
            print ("Closing server socket")
        self.bt_is_connected = False
    def bt_is_connect(self):
        """
        Check status of Bluetooth connection
        """
        return self.bt_is_connected
    def connect_bluetooth(self):
        """
        Connect to the s5
        """
        # Creating the server socket and bind to port
        btport = 1
        try:
            # Arm the watchdog signal so a hung accept() can time out.
            # NOTE(review): the comment says 5 seconds but 100 is passed --
            # confirm the units signalTime() expects.
            self.signalObject.signalling()
            self.signalObject.signalTime(100) #wait for 5 seconds before timeout
            self.server_socket = BluetoothSocket(RFCOMM)
            self.server_socket.bind(("", btport))
            self.server_socket.listen(1) # Listen for requests
            self.port = self.server_socket.getsockname()[1]
            # Standard Serial Port Profile UUID so the phone can discover us.
            uuid = "00001101-0000-1000-8000-00805f9b34fb"
            advertise_service( self.server_socket, "BluetoothServer",
                               service_id = uuid,
                               service_classes = [ uuid, SERIAL_PORT_CLASS ],
                               profiles = [ SERIAL_PORT_PROFILE ],
                                )
            print ("listening for requests...")
            print ("Waiting for connection on RFCOMM channel %d" % self.port)
            # Accept requests (blocks until the phone connects or the
            # watchdog signal fires).
            self.client_socket, client_address = self.server_socket.accept()
            print ("Accepted connection from ", client_address)
            self.bt_is_connected = True
            self.signalObject.signalTime(0) #disarm the signal
        except Exception, e:
            # Best-effort: log and continue so the caller can retry later.
            print ("Error: %s" %str(e))
            print ("Bluetooth Connection can't be established")
            # self.close_bt_socket()
            pass #let it go through
    def write_to_bt(self,message):
        """
        Write message to s5
        """
        #print "Enter message to send: "
        #message = raw_input()
        try:
            self.client_socket.send(str(message))
            #print "sending: ", message
        except BluetoothError:
            # Peer dropped the link: re-run the accept loop before returning.
            print ("Bluetooth Error. Connection reset by peer")
            self.connect_bluetooth() # Reestablish connection
        #print "quit write()"
    def read_from_bt(self):
        """
        Read incoming message from Nexus
        """
        try:
            msg = self.client_socket.recv(2048)
            #print "Received: %s " % msg
            return msg
        except BluetoothError:
            # Peer dropped the link: reconnect; returns None in that case.
            print ("Bluetooth Error. Connection reset by peer. Trying to connect...")
            self.connect_bluetooth() # Reestablish connection
| true |
def solution(n, build_frame):
    """2020 KAKAO BLIND "pillar and beam" installation simulator.

    Args:
        n: wall size; valid coordinates run 0..n on both axes.
        build_frame: list of [x, y, a, b] commands where a is
            0 (pillar) / 1 (beam) and b is 1 (install) / 0 (delete).
            Commands that would violate the support rules are ignored.

    Returns:
        Surviving structures as [x, y, a] triples, sorted by x, then y,
        then a (pillar before beam).

    Internal representation: the vertical axis is flipped
    (row = n - y, so row + 1 is the cell directly BELOW), and
    board_list[row][x] holds markers 0 (a pillar whose base is at that
    point) and/or 1 (a beam whose left end is at that point).

    Support rules (problem statement): a pillar must stand on the
    ground, on the end of a beam, or on top of another pillar; a beam
    must have at least one end on a pillar top, or both of its ends
    connected to other beams.
    """
    answer = []
    board_list = [[list() for _ in range(n + 1)] for _ in range(n + 1)]
    for x, y, a, b in build_frame:
        y = n - y  # flip so that board row y + 1 is directly below row y
        if a == 0:  # pillar
            if b == 1:  # install
                # Ground, pillar directly below, or a beam end at the base.
                if y == n or 0 in board_list[y + 1][x] \
                        or 1 in board_list[y][x] or 1 in board_list[y][x - 1]:
                    board_list[y][x].append(0)
            else:  # delete: everything resting on this pillar must survive
                is_ok = True
                if 0 in board_list[y - 1][x]:
                    # Pillar stacked on top: it now needs a beam end at its base.
                    if not (1 in board_list[y - 1][x] or 1 in board_list[y - 1][x - 1]):
                        is_ok = False
                if 1 in board_list[y - 1][x]:
                    # Beam starting at this pillar's top: needs beams on both
                    # sides, or a pillar under its right end.
                    if not ((1 in board_list[y - 1][x - 1] and 1 in board_list[y - 1][x + 1])
                            or 0 in board_list[y][x + 1]):
                        is_ok = False
                if 1 in board_list[y - 1][x - 1]:
                    # Beam ending at this pillar's top: needs beams on both
                    # sides, or a pillar under its left end.
                    if not ((1 in board_list[y - 1][x - 2] and 1 in board_list[y - 1][x])
                            or 0 in board_list[y][x - 1]):
                        is_ok = False
                if is_ok:
                    board_list[y][x].remove(0)
        else:  # beam
            if b == 1:  # install
                # Pillar top under either end, or beams attached on both sides.
                if 0 in board_list[y + 1][x] or 0 in board_list[y + 1][x + 1] \
                        or (1 in board_list[y][x - 1] and 1 in board_list[y][x + 1]):
                    board_list[y][x].append(1)
            else:  # delete: everything touching this beam must survive
                # BUGFIX: the original checked pillars in row y (the beam's own
                # row) instead of row y + 1 (directly below), and omitted the
                # pillar-directly-below alternative for pillars standing on a
                # beam end, so some legal deletions were wrongly refused.
                is_ok = True
                if 0 in board_list[y][x]:
                    # Pillar standing on the left end: needs the beam ending
                    # here, or a pillar directly below.
                    if not (1 in board_list[y][x - 1] or 0 in board_list[y + 1][x]):
                        is_ok = False
                if 0 in board_list[y][x + 1]:
                    # Pillar standing on the right end: needs the beam starting
                    # here, or a pillar directly below.
                    if not (1 in board_list[y][x + 1] or 0 in board_list[y + 1][x + 1]):
                        is_ok = False
                if 1 in board_list[y][x - 1]:
                    # Left neighbour beam loses its right-side connection:
                    # it needs a pillar under one of its own ends.
                    if not (0 in board_list[y + 1][x - 1] or 0 in board_list[y + 1][x]):
                        is_ok = False
                if 1 in board_list[y][x + 1]:
                    # Right neighbour beam loses its left-side connection:
                    # it needs a pillar under one of its own ends.
                    if not (0 in board_list[y + 1][x + 1] or 0 in board_list[y + 1][x + 2]):
                        is_ok = False
                if is_ok:
                    board_list[y][x].remove(1)
    # Emit surviving structures; scanning w then _h in ascending order yields
    # the required (x, y, a) sort order directly.  (Removed a stray debug
    # print(w, h) that polluted stdout.)
    for w in range(n + 1):
        for _h in range(n + 1):
            h = n - _h
            cell = board_list[h][w]
            if len(cell) == 1:
                answer.append([w, _h, cell[0]])
            elif len(cell) == 2:
                answer.append([w, _h, 0])
                answer.append([w, _h, 1])
    return answer
# Quick manual check: sample case #1 from the problem statement.
sample_frames = [
    [1, 0, 0, 1], [1, 1, 1, 1], [2, 1, 0, 1], [2, 2, 1, 1],
    [5, 0, 0, 1], [5, 1, 0, 1], [4, 2, 1, 1], [3, 2, 1, 1],
]
print(solution(5, sample_frames))
| true |