text stringlengths 8 6.05M |
|---|
# Generated by Django 2.0.5 on 2018-06-25 18:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a nullable integer weight column to the product model.

    dependencies = [
        ('calculation', '0029_auto_20180621_1142'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='weight',
            # verbose_name is Russian: "weight in kilograms of one unit".
            # NOTE(review): default=0 together with null=True is redundant —
            # existing rows get 0, new rows may still be NULL; confirm intended.
            field=models.PositiveIntegerField(default=0, null=True, verbose_name='вес в килограммах одной единицы'),
        ),
    ]
|
from django.shortcuts import render,redirect
from owner import forms
from owner.models import owner
def Num_to_str(request):
    """Render the number-to-string converter form and handle submissions.

    GET  -> render a blank ``NumtostringForm``.
    POST -> validate; on success save and redirect to 'Numtostrconverter',
            otherwise re-render the form with its validation errors.
    """
    if request.method == "POST":
        form = forms.NumtostringForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect("Numtostrconverter")
        # fall through: invalid form is re-rendered with errors
    else:
        # Covers GET and any other HTTP method; the original returned None
        # (an error in Django) for non-GET/non-POST requests.
        form = forms.NumtostringForm(initial={})
    return render(request, "num_to_str_converter.html", {"form": form})
|
from flask import Flask, request
from flask_restful import Resource, Api
from captura_de_informacoes import getTitulos
# Flask application and its Flask-RESTful API wrapper.
app = Flask(__name__)
api = Api(app)
class G1Titulos(Resource):
    """REST resource serving headline titles scraped from G1 (general news)."""

    def get(self):
        # Scrape the portal front page and wrap the titles in the standard
        # CORS-enabled response envelope.
        headlines = getTitulos("https://g1.globo.com/")
        return geraResponse("Busca bem sucedida", "Resultado encontrado", "Titulos", headlines)
class GETitulos(Resource):
    """REST resource serving headline titles scraped from GE (sports news)."""

    def get(self):
        # Same envelope as G1Titulos, different source portal.
        headlines = getTitulos("https://ge.globo.com/")
        return geraResponse("Busca bem sucedida", "Resultado encontrado", "Titulos", headlines)
def geraResponse(status, mensagem, nome_do_conteudo=False, conteudo=False):
    """Build a Flask response carrying status/message (and optional payload)
    with permissive CORS headers for GET requests."""
    payload = {"status": status, "mensagem": mensagem}
    # Only attach the payload when both a key name and content were supplied.
    if nome_do_conteudo and conteudo:
        payload[nome_do_conteudo] = conteudo
    resp = app.make_response(payload)
    resp.headers['Access-Control-Allow-Origin'] = '*'
    resp.headers['Access-Control-Allow-Methods'] = 'GET'
    resp.headers['Access-Control-Allow-Headers'] = '*'
    return resp
# Route registration: general news and sports headline endpoints.
api.add_resource(G1Titulos, '/noticias/geral')
api.add_resource(GETitulos, '/noticias/esportes')
if __name__ == '__main__':
    app.run()
'''
This is just a SampleServer that we used for testing purposes
while writing the client script.
This version only prints the incomming data for debugging purposes and doesn't do anything 'real' with it.
A version, similar to this, is implemented on our django server
that constantly runs in the background, listening for incoming data on port 10000
and then sends that data the the graphs by using django channels.
'''
from threading import Thread, Lock, active_count
import time
import socket
# Listening TCP socket on all interfaces, port 10000, backlog of 5.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('', 10000)
sock.bind(server_address)
sock.listen(5)
# Serialises print() calls across the client-handler threads.
mutex_print = Lock()
def thread_client_socket(conn, addr):
    """Serve one client: print incoming 16-byte chunks, routed by a text header.

    Headers 's1'/'s2'/'s3' (two characters) tag session-id messages; single
    characters 'x'/'y'/'z' tag graph data. All prints are serialised with the
    module-level ``mutex_print`` lock. The connection is closed when the peer
    sends EOF (empty recv).
    """
    mutex_print.acquire()
    try:
        print(str(addr)+' connected')
    finally:
        mutex_print.release()
    while True:
        try:
            while True:
                data = conn.recv(16)
                mutex_print.acquire()
                try:
                    received_data = str(data.decode())
                    #print(str(addr)+':'+str(received_data)) #Uncomment to print raw data with header
                    # Session headers are two characters, so the payload starts
                    # at index 2 (the original sliced from 1, which repeated the
                    # header's digit in the printed output).
                    if(str(received_data)[0:2] == 's1'):
                        print('Session id_1 = '+str(received_data)[2:])
                    elif(str(received_data)[0:2] == 's2'):
                        print('Session id_2 = '+str(received_data)[2:])
                    elif(str(received_data)[0:2] == 's3'):
                        print('Session id_3 = '+str(received_data)[2:])
                    # Graph headers are one character; payload starts at index 1.
                    elif(str(received_data)[0] == 'x'):
                        print('Graph X: = '+str(received_data)[1:])
                    elif(str(received_data)[0] == 'y'):
                        print('Graph Y: = '+str(received_data)[1:])
                    elif(str(received_data)[0] == 'z'):
                        print('Graph Z: = '+str(received_data)[1:])
                finally:
                    mutex_print.release()
                if not data:
                    # peer closed the connection
                    break
        finally:
            conn.close()
            break
    mutex_print.acquire()
    try:
        print(str(addr)+' disconnected')
    finally:
        mutex_print.release()
# Accept loop: spawn one handler thread per incoming client connection.
while True:
    connection, client_address = sock.accept()#stalling
    thread = Thread(target = thread_client_socket, args = (connection, client_address))
    thread.start()
    mutex_print.acquire()
    try:
        # active_count() includes this main thread
        print('number of threads: '+str(active_count()))
    finally:
        mutex_print.release()
|
# Generated by Django 3.0.3 on 2020-03-03 08:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two many-to-many relations: student.interests -> Subject and
    # teacher.preference -> Lesson.

    dependencies = [
        ('myapp', '0003_lesson_teacher'),
    ]

    operations = [
        migrations.AddField(
            model_name='student',
            name='interests',
            field=models.ManyToManyField(to='myapp.Subject'),
        ),
        migrations.AddField(
            model_name='teacher',
            name='preference',
            field=models.ManyToManyField(to='myapp.Lesson'),
        ),
    ]
|
import ccxt
import ta
import config
import schedule
from ta.volatility import BollingerBands, AverageTrueRange
import pandas as pd
# Authenticated Binance client (credentials come from the local config module).
exchange = ccxt.binance({
    'apiKey':config.API_KEY,
    'secret':config.API_SECRET
})
# markets = exchange.load_markets()
# Fetch the 100 most recent OHLCV candles for BTC/USDT.
# NOTE(review): no timeframe argument is passed — presumably ccxt's default
# applies; confirm against the ccxt version in use.
bars = exchange.fetch_ohlcv('BTC/USDT',limit=100)
# Drop the last (still-forming) candle and label the columns.
df = pd.DataFrame(bars[:-1], columns=['timestamp','open','high','low','close','volume'])
print(df)
# Bollinger Bands computed on the close price.
bb_indicator=BollingerBands(df['close'])
df['upper_band'] = bb_indicator.bollinger_hband()
df['lower_band'] = bb_indicator.bollinger_lband()
df['moving_avg'] = bb_indicator.bollinger_mavg()
# Average True Range as a volatility gauge.
atr_indicator = AverageTrueRange(df['high'],df['low'],df['close'])
df['atr'] = atr_indicator.average_true_range()
# NOTE(review): a bare expression is a no-op in a script (it only echoes in a
# REPL/notebook) — harmless, kept for parity with the notebook origin.
df
|
from numpy import genfromtxt
import numpy as np
from PIL import Image
# Canvas dimensions (the original misnamed the width variable "weight").
width, height = 960, 540
# Start from an all-white RGB canvas. np.full replaces the original pair of
# nested per-pixel Python loops (O(width*height) interpreter iterations).
canvas = np.full((height, width, 3), 255, dtype=np.uint8)

# Each row of DS2.txt holds one point as two integers.
points = genfromtxt('DS2.txt', dtype='int')
# NOTE(review): the original hard-coded 34096 rows; kept for identical
# behaviour — confirm it matches the file length.
rows = points[:34096, 0]
cols = points[:34096, 1]
# Vectorized plot: paint each listed pixel black. The file's first column is
# used as the row index, matching the original canvas[x][y] access.
canvas[rows, cols] = 0

image_out = Image.fromarray(canvas, 'RGB')
image_out.save('Lab№2.jpg')
image_out.show()
from . import views
from django.conf.urls import url
from .views import userprofile,taskstatus
urlpatterns=[
    # NOTE(review): no trailing '$' or '/', so this matches any path that
    # merely starts with 'profile' — presumably intentional; confirm.
    url(r'^profile', userprofile,name='profile'),
    # captures everything after 'statuschanger/' and passes it to the view
    url(r'^statuschanger/(.*)$', taskstatus,name='statuschanger'),
    url(r'^$',views.homepage,name='homepage'),]
|
def filter_lucky(lst):
    """Return only the integers whose decimal representation contains a 7."""
    lucky_numbers = []
    for value in lst:
        if '7' in str(value):
            lucky_numbers.append(value)
    return lucky_numbers


'''
Write a function filterLucky/filter_lucky() that accepts a list of integers and
filters the list to only include the elements that contain the digit 7.
For example,
ghci> filterLucky [1,2,3,4,5,6,7,68,69,70,15,17]
[7,70,17]
Don't worry about bad input, you will always receive a finite list of integers
'''
|
#!/usr/bin/python
import sys
from bot_trust import *
# Input file comes from argv; fall back to the bundled sample.
filename = sys.argv[1] if len(sys.argv) > 1 else "sample.txt"
# One output line per test case, numbered from 1.
for case_number, case in enumerate(get_data(filename), start=1):
    print('Case #{0}: {1}'.format(case_number, process_case(case)))
# The original read `apple num = 1` / `print(apple num)`: a space is not
# allowed inside a Python identifier, so the script failed with a SyntaxError.
apple_num = 1
print(apple_num)
# File: exercise0402.py
# Author: Kaiching Chang
# Date: July, 2014
|
rule count_matrix:
    # Merge the per-sample htseq count files into a single CSV count matrix.
    input:
        expand("outData/htseq/{sample}_CountNum.txt",sample=SAMPLES)
    output:
        "outData/counts/all.csv"
    params:
        units=units
    script:
        "../scripts/count-matrix.py"
def get_deseq2_threads(wildcards=None):
    """Choose a thread count for DESeq2.

    Heuristic from https://twitter.com/mikelove/status/918770188568363008:
    one thread for small sample sets or models with few coefficients,
    otherwise six.
    """
    if wildcards is None:
        few_coeffs = False
    else:
        few_coeffs = len(get_contrast(wildcards)) < 10
    return 1 if len(samples) < 100 or few_coeffs else 6
rule deseq2_init:
    # Build the DESeq2 dataset object (rds) and a normalised count matrix.
    input:
        countTab="outData/counts/all.csv",
        colData=config["diffexp"]["colData"]
    output:
        rds="outData/deseq2/all.rds",
        countMatrix="outData/deseq2/norm_count.matrix.txt"
    conda:
        "../envs/deseq2.yaml"
    log:
        "logs/deseq2/init.log"
    # evaluated once at parse time (no wildcards are available here)
    threads: get_deseq2_threads()
    script:
        "../scripts/deseq2-init.R"
rule heatmap_cor_plot:
    # Sample-correlation heatmap computed from the normalised count matrix.
    input:
        colData=config["diffexp"]["colData"],
        countMatrix="outData/deseq2/norm_count.matrix.txt"
    output:
        heatmap_cor_plot=report("outData/deseq2/heatmap_cor.png")
    conda:
        "../envs/deseq2.yaml"
    log:
        "logs/deseq2/heatmap_cor_plot.log"
    script:
        "../scripts/heatmap_cor_plot.R"
rule heatmap_plot:
    # Expression heatmap rendered from the DESeq2 dataset object.
    input:
        colData=config["diffexp"]["colData"],
        rds="outData/deseq2/all.rds"
    output:
        heatmap_plot=report("outData/deseq2/heatmap.png")
    conda:
        "../envs/deseq2.yaml"
    log:
        "logs/deseq2/heatmap_plot.log"
    script:
        "../scripts/heatmap_plot.R"
def get_contrast(wildcards):
    """Resolve the contrast named by the wildcard from the workflow config."""
    contrasts = config["diffexp"]["contrasts"]
    return contrasts[wildcards.contrast]
rule deseq2:
    # Differential expression per contrast: full gene table, significant gene
    # table and a volcano plot.
    input:
        rds="outData/deseq2/all.rds"
    output:
        all_tab=report("outData/deseq2/{contrast}_all_genes_exprData.txt", "../report/diffexp.rst"),
        sig_tab=report("outData/deseq2/{contrast}_sig_genes_exprData.txt", "../report/diffexp.rst"),
        plot=report("outData/deseq2/{contrast}_volcano_plot.png")
    params:
        contrast=get_contrast
    conda:
        "../envs/deseq2.yaml"
    log:
        "logs/deseq2/{contrast}.diffexp.log"
    # NOTE(review): passed uncalled here (vs. called in deseq2_init) — confirm
    # the Snakemake version in use accepts a callable for `threads`.
    threads: get_deseq2_threads
    script:
        "../scripts/diffexp.R"
|
import os
import random
import time
# default seed, wait in between for different seed
# (presumably the binaries derive their RNG seed from wall-clock time at
# compile time, so the one-second pauses yield distinct seeds — confirm in
# simple.cpp)
os.system('g++ -g -O2 -std=gnu++17 -static simple.cpp -o output/random1.exe')
time.sleep(1)
os.system('g++ -g -O2 -std=gnu++17 -static simple.cpp -o output/random2.exe')
time.sleep(1)
os.system('g++ -g -O2 -std=gnu++17 -static simple.cpp -o output/random3.exe')
# fixed seed: all three binaries share the same compile-time seed macro
os.system('g++ -g -O2 -std=gnu++17 -D __POLY_RANDOM_SEED__=1234567890ull -static simple.cpp -o output/fixed1.exe')
os.system('g++ -g -O2 -std=gnu++17 -D __POLY_RANDOM_SEED__=1234567890ull -static simple.cpp -o output/fixed2.exe')
os.system('g++ -g -O2 -std=gnu++17 -D __POLY_RANDOM_SEED__=1234567890ull -static simple.cpp -o output/fixed3.exe')
# external seed: a fresh random 64-bit seed is injected per binary.
# NOTE(review): randrange(18446744073709551615) draws from [0, 2**64-2] — the
# top value 2**64-1 is excluded; harmless for seeding but worth knowing.
os.system('g++ -g -O2 -std=gnu++17 -D __POLY_RANDOM_SEED__=' + str(random.randrange(18446744073709551615)) + 'ull -static simple.cpp -o output/seeded1.exe')
os.system('g++ -g -O2 -std=gnu++17 -D __POLY_RANDOM_SEED__=' + str(random.randrange(18446744073709551615)) + 'ull -static simple.cpp -o output/seeded2.exe')
os.system('g++ -g -O2 -std=gnu++17 -D __POLY_RANDOM_SEED__=' + str(random.randrange(18446744073709551615)) + 'ull -static simple.cpp -o output/seeded3.exe')
# different types
os.system('g++ -g -O2 -std=gnu++17 -static types.cpp -o output/types.exe')
# -*- coding: utf-8 -*-
class Solution:
    """LeetCode 459: decide whether a string is a smaller pattern repeated."""

    def repeatedSubstringPattern(self, s):
        """Return True iff ``s`` equals some proper prefix repeated >= 2 times."""
        n = len(s)
        # Try every prefix length that divides n; the prefix can be at most
        # half of the string to repeat at least twice.
        return any(n % width == 0 and s[:width] * (n // width) == s
                   for width in range(1, n // 2 + 1))
if __name__ == "__main__":
    # Smoke tests for the repeated-substring check.
    checker = Solution()
    for text, expected in (("abab", True), ("aba", False), ("abcabcabcabc", True)):
        assert checker.repeatedSubstringPattern(text) == expected
|
from appium.webdriver.common.touch_action import TouchAction
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from tools.get_driver import GetDriver
from tools.get_log import GetLog
import allure
log = GetLog.get_log()
class Base:
    """Base page object: wraps common Appium/Selenium operations (find, click,
    input, text, toast, gestures) with logging and Allure reporting."""

    @allure.step(title="初始化驱动对象")
    def __init__(self):
        # Fetch the driver once so the object we log is the one we store
        # (the original called GetDriver.get_driver() three separate times).
        driver = GetDriver.get_driver()
        allure.attach("获取driver对象:", "{}".format(driver))
        log.info("获取driver对象{}".format(driver))
        self.driver = driver

    @allure.step(title="定位元素操作")
    def base_find_element(self, loc, timeout=10, poll=0.5):
        """Wait up to ``timeout`` s (polling every ``poll`` s) for the element at ``loc``."""
        allure.attach("查找的元素:", "{}".format(loc))
        log.info("正在查找元素:{} 超时时间:{} 访问评率:{}".format(loc, timeout, poll))
        return (WebDriverWait(self.driver, timeout=timeout, poll_frequency=poll)
                .until(lambda x: x.find_element(*loc)))

    @allure.step(title="定位一组元素操作")
    def base_find_elements(self, loc, timeout=10, poll=0.5):
        """Like :meth:`base_find_element` but returns the list of all matches."""
        allure.attach("查找的一组元素:", "{}".format(loc))
        log.info("正在查找一组元素:{} 超时时间:{} 访问评率:{}".format(loc, timeout, poll))
        return (WebDriverWait(self.driver, timeout=timeout, poll_frequency=poll)
                .until(lambda x: x.find_elements(*loc)))

    @allure.step(title="定位一组元素点击元素操作")
    def base_click(self, loc, num):
        """Click the ``num``-th element matching ``loc``."""
        allure.attach("点击的元素:", "{}".format(loc))
        log.info("点击元素:{}".format(loc))
        elements = self.base_find_elements(loc)
        elements[num].click()

    @allure.step(title="点击元素操作")
    def base_click_func(self, loc):
        """Click the (single) element matching the locator tuple ``loc``."""
        allure.attach("点击的元素:", "{}".format(loc))
        log.info("点击元素:{}".format(loc))
        element = self.base_find_element(loc)
        element.click()

    @allure.step(title="输入的操作")
    def base_input_func(self, loc, value):
        """Clear the element at ``loc`` and type ``value`` into it."""
        # Fixed: the original's second string had no placeholder (value was
        # dropped) and log.info was given two positional args, which the
        # logging module treats as %-format arguments and fails on.
        allure.attach("向{}元素输入".format(loc), "输入的内容{}".format(value))
        log.info("向{}元素输入 输入的内容{}".format(loc, value))
        element = self.base_find_element(loc)
        element.clear()
        element.send_keys(value)

    @allure.step(title="获取元素文本操作")
    def base_get_text(self, loc):
        """Return the text of the element at ``loc``."""
        allure.attach("获取元素文本", "{}".format(loc))
        log.info("获取元素文本{}".format(loc))
        return self.base_find_element(loc).text

    @allure.step(title="获取toast消息操作")
    def base_get_toast(self, msg):
        """Return the text of a toast whose text contains ``msg``.

        Uses a short timeout/poll because toasts disappear quickly.
        """
        loc = By.XPATH, "//*[contains(@text,'{}')]".format(msg)
        # Fixed: the original nested attach's second argument inside format()
        # and located the element twice with the default (longer) timeout
        # before the real short-timeout lookup below.
        allure.attach("获取{}元素toast消息".format(loc))
        log.info("获取{}元素toast消息".format(loc))
        return self.base_find_element(loc, timeout=5, poll=0.2).text

    @allure.step(title="拖拽操作")
    def base_drag_and_drop(self, loc1, loc2):
        """Drag the element at ``loc1`` onto the element at ``loc2``."""
        start_el = self.base_find_element(loc1)
        end_el = self.base_find_element(loc2)
        self.driver.drag_and_drop(start_el, end_el)

    @allure.step(title="轻敲元素操作")
    def base_tap(self, loc):
        """Tap the element at ``loc``."""
        action = TouchAction(self.driver)
        action.tap(self.base_find_element(loc))
        action.perform()

    @allure.step(title="以坐标轻敲元素操作")
    def base_tap_xy(self, x, y):
        """Tap at the absolute screen coordinates (x, y)."""
        allure.attach("x={}".format(x), "y={}".format(y))
        log.info("应用坐标点击元素, x={}, y={}".format(x, y))
        action = TouchAction(self.driver)
        action.tap(element=None, x=x, y=y)
        action.perform()

    @allure.step(title="以文本点击元素操作")
    def base_click_text(self, text):
        """Click the first element whose @text contains ``text``."""
        loc = By.XPATH, "//*[contains(@text, '{}')]".format(text)
        self.base_click_func(loc)

    @allure.step(title="获取一组以文本点击元素操作")
    def base_texts_click(self, text, num=0):
        """Click the ``num``-th element whose @text contains ``text``."""
        loc = By.XPATH, "//*[contains(@text, '{}')]".format(text)
        # Fixed: the original passed a WebElement into base_click_func, which
        # expects a locator tuple and would crash unpacking it.
        self.base_find_elements(loc)[num].click()

    def base_get_list_text(self, loc):
        """Return the text of every element matching ``loc``."""
        return [element.text for element in self.base_find_elements(loc)]
|
# Twitter
# NOTE(review): the four credential values below look like placeholders —
# confirm they are replaced/injected at deploy time.
CONSUMER_KEY = 'consumer_key'
CONSUMER_SECRET = 'consumer_secret'
ACCESS_TOKEN = 'access_token'
ACCESS_TOKEN_SECRET = 'access_token_secret'
# Twitter's per-tweet character limit and per-tweet photo attachment limit.
MAX_TWI_CHARACTERS = 280
MAX_TWI_PHOTOS = 4
TWI_URL = 'twitter.com/SOME_TWITTER_ACCOUNT'
# RabbitMQ
RABBIT_HOST = 'localhost'
RABBIT_AMQP_PORT = '5672'
RABBIT_LOGIN = 'broadcaster'
RABBIT_PASSWORD = 'broadcaster'
# Full AMQP connection URL assembled from the parts above.
RABBIT_AMQP_ADDRESS = \
    f'amqp://{RABBIT_LOGIN}:{RABBIT_PASSWORD}@{RABBIT_HOST}:{RABBIT_AMQP_PORT}'
BROADCAST_QUEUE = 'broadcast'
# Scratch directory for downloaded/intermediate files.
TEMP_FILES_PATH = '/tmp/temp_files_parkun'
PERSONAL_FOLDER = 'broadcaster'
# VK
VK_APP_ID = 'vk_app_id'
VK_GROUP_ID = 'vk_group_id'
VK_API_TOKEN = 'vk_api_token'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
from bibpdf import database
from bibpdf.file_object import PdfFile, CommentFile, PdfTempFile, BibTempFile
from bibpdf.formatters import simple_format, misc, bibtex_format, file_name_format
path = os.path
class Action(object):
    """A deferred call: stores a callable and its arguments for later execution."""

    def __init__(self, func, arguments=(), optional_arguments=None):
        # Fixed: the original used a mutable default (``list()``); an immutable
        # tuple default avoids accidental cross-instance sharing and is
        # backward-compatible (it is only ever unpacked).
        self.func = func
        self.arguments = arguments
        self.optional_arguments = optional_arguments

    def execute(self):
        """Invoke the stored callable with the stored positional (and optional keyword) arguments."""
        if self.optional_arguments:
            self.func(*self.arguments, **self.optional_arguments)
        else:
            self.func(*self.arguments)
def store_paper(args):
    """Interactively add a bibliography entry (and optional PDF) to the library.

    Reads a BibTeX entry from a BibTempFile, merges command-line keywords,
    resolves citation-id and author-name conflicts via interactive prompts,
    then stores the entry and moves the PDF (if present) into the library.
    """
    # command-line keywords: space-joined, then comma-split into a set
    new_keyword = {x.strip() for x in ' '.join(args.keyword).split(',')} if args.keyword else set()
    from bibpdf.readers import bibtex_read
    bib_file = BibTempFile()
    entry = bibtex_read.read(bib_file.open()).entries[0]
    entry['keyword'] = entry['keyword'] | new_keyword if 'keyword' in entry else new_keyword
    db = database.Database()
    print(simple_format.apply(entry))
    # a PDF may or may not accompany the entry
    try:
        temp_pdf_file = PdfTempFile()
        print('\tFile: {0}'.format(temp_pdf_file.file_name))
    except IOError:
        temp_pdf_file = None
        print('\tFile: None')
    if input('(a)abort, (c)continue?') != 'c':
        print("aborted")
        return
    # deferred DB mutations, executed only after all prompts succeed
    actions = list()
    entry['ID'] = misc.get_id(entry)
    # resolve citation-id collisions
    while entry['ID'] in db:
        old_id = entry['ID']
        old_entry = db[old_id]
        print('citation conflict!')
        print(simple_format.apply(old_entry))
        choice = input('(a)abort, (u)update entry, Input new citation?')
        if choice == 'a':
            print('aborted')
            return
        elif choice == 'u':
            # keep the colliding id: the stored entry gets overwritten below
            break
        else:
            entry['ID'] = choice
    # disambiguate author/editor names against people already in the DB
    for position in ('author', 'editor'):
        if position in entry:
            for idx, person in enumerate(entry[position]):
                # presumably matches on last name — confirm search_author's contract
                person_list = db.search_author(person[0], False)
                first_names = {x[1] for x in person_list}
                if not person_list or any(person[1] == name for name in first_names):
                    continue
                print(("Who's this author? ({0[0]}, {0[1]})".format(person)))
                for idx2, old_person in enumerate(person_list):
                    print(('{0}. {1}, {2}'.format(idx2, old_person[0].title(), old_person[1].title())))
                choice = input("(a)abort, or type 'number,new_name'\n").lower().strip()
                new_name = None
                if ',' in choice:
                    choice, new_name = [a.strip() for a in choice.split(',', maxsplit=2)]
                if choice == 'a':
                    print('aborted')
                    return
                elif choice == 'n':  # use new author
                    # NOTE(review): 'n' is not offered in the prompt text —
                    # confirm this hidden option is intentional.
                    person[1] = new_name if new_name else person[1]
                else:
                    number = int(choice)
                    if new_name:
                        person[1] = new_name
                        actions.append(Action(db.update_person, [person_list[number], person]))
                    else:
                        person[1] = person_list[number][1]
    for action in actions:
        action.execute()
    # attach/move the PDF, handling files already recorded on the entry
    if temp_pdf_file:
        if 'pdf_file' not in entry and entry['ID'] in db and 'pdf_file' in db[entry['ID']]:
            entry['pdf_file'] = db[entry['ID']]['pdf_file']
        if 'pdf_file' not in entry:
            file_name = file_name_format.apply(entry)
            pdf_file = PdfFile(file_name, entry)
            temp_pdf_file.move(pdf_file)
            entry['pdf_file'] = [pdf_file.file_name]
        else:  # add or replace file
            print("pdf_file exists!")
            for idx, file_name in enumerate(entry['pdf_file']):
                print(idx, file_name)
            choice = input("(c)do nothing; (N) replace the Nth file; or put a short word as new file's suffix")
            if choice != 'c':
                try:
                    number = int(choice)
                    file_name = entry['pdf_file'][number]
                    pdf_file = PdfFile(file_name, entry)
                    temp_pdf_file.move(pdf_file)
                except ValueError:
                    # non-numeric input: treat it as a new file-name suffix
                    file_name = file_name_format.apply(entry, choice)
                    pdf_file = PdfFile(file_name, entry)
                    temp_pdf_file.move(pdf_file)
                    entry['pdf_file'].append(pdf_file.file_name)
    db[entry['ID']] = entry
    print('successfully inserted the following entry:')
    print(simple_format.apply(entry))
def search_paper(args):
    """Search the library by author and/or comma-separated keywords and print matches."""
    db = database.Database()
    if args.author:
        if args.keyword:
            keyword = {x.strip() for x in ' '.join(args.keyword).split(',')}
            author_list = db.search_author_keyword(args.author, keyword)
        else:
            author_list = db.search_author(args.author)
        print(misc.author_list(author_list))
    else:
        # NOTE(review): if neither --author nor --keyword was given,
        # ' '.join(None) raises TypeError — confirm callers always pass one.
        keyword = {x.strip() for x in ' '.join(args.keyword).split(',')}
        item_list = db.search_keyword(keyword)
        print(misc.item_list(item_list))
def open_file(args):
    """Open the PDF and/or comment files attached to a library entry.

    Defaults to the PDF when no file-type flags were given. A missing comment
    file is created, opened, and registered on the entry.
    """
    db = database.Database()
    if args.paper_id not in db:
        print("{0} cannot be found in library".format(args.paper_id))
        return
    if not args.files:
        args.files = ['pdf']
    entry = db[args.paper_id]
    for file_type in set(args.files):
        if file_type == 'pdf':
            # NOTE(review): assumes entry['pdf_file'] exists (possibly empty) —
            # confirm the database guarantees the key.
            pdf_files = [PdfFile(file_name) for file_name in entry['pdf_file']]
            if len(pdf_files) == 0:
                print("I don't have pdf file for {0}\n".format(args.paper_id))
            else:
                for file in pdf_files:
                    file.open()
        if file_type == 'comment':
            comment_files = [CommentFile(file_name) for file_name in entry['comment_file']]
            if len(comment_files) == 0:
                # create, register and open a fresh comment file
                new_comment = CommentFile(entry['ID'], entry)
                entry['comment_file'].append(new_comment.file_name)
                new_comment.open()
                db.add_file(entry['ID'], new_comment.file_name, 'comment')
            else:
                for file in comment_files:
                    file.open()
def output(args) -> None:
    """Print selected entries as BibTeX ('bib') or simple strings ('str').

    ``args.source`` may be a Pandoc token file path, the literal 'all', or a
    comma-separated list of paper ids.

    Fixed: the original was annotated ``-> str`` although it only prints and
    returns None, and it leaked the opened token file handle.
    """
    from bibpdf.readers import pandoc_read
    db = database.Database()
    if path.isfile(path.expanduser(args.source)):
        # close the token file promptly (the original never closed it)
        with open(args.source, 'r', encoding='UTF-8') as token_file:
            item_list = [db[item_id] for item_id in pandoc_read.read(token_file)]
    elif args.source.lower() == 'all':
        item_list = list(db.values())
    else:
        item_list = [db[item_id.strip()] for item_id in args.source.split(',')]
    if args.format == 'bib':
        print(bibtex_format.apply(item_list))
    elif args.format == 'str':
        print(simple_format.apply(item_list))
def delete_paper(args):
    """Remove the entry ``args.paper_id`` from the library database."""
    library = database.Database()
    del library[args.paper_id]
    print('{0} has been successfully deleted'.format(args.paper_id))
def modify_keyword(args):
    """Add and/or remove comma-separated keywords on a library entry, then
    print the updated entry."""
    to_add = {x.strip() for x in ' '.join(args.add).split(',')} if args.add else None
    to_delete = {x.strip() for x in ' '.join(args.delete).split(',')} if args.delete else None
    db = database.Database()
    if to_add or to_delete:
        db.update_keyword(args.paper_id, to_add, to_delete)
    entry = db[args.paper_id]
    print(simple_format.apply(entry))
    print('\tKeywords: {0}'.format(', '.join(entry['keyword'])))
def main():
    """Parse the command line and dispatch to the chosen sub-command."""
    parser = argparse.ArgumentParser("bibpdf", description="a tool to manage literature library",
                                     epilog="citation is usually $first_author_last_name$year")
    subparsers = parser.add_subparsers(help='commands')

    # s: search by author and/or keywords
    search_parser = subparsers.add_parser('s', help='search paper')
    search_parser.set_defaults(func=search_paper)
    search_parser.add_argument('-a', '--author')
    search_parser.add_argument('-k', '--keyword', nargs="+")

    # o: open attached pdf/comment files
    open_parser = subparsers.add_parser('o', help='open file')
    open_parser.set_defaults(func=open_file)
    open_parser.add_argument('paper_id')
    open_parser.add_argument('-c', '--comment', dest='files', action='append_const', const='comment')
    open_parser.add_argument('-p', '--pdf', dest='files', action='append_const', const='pdf')

    # a: add a new entry
    add_parser = subparsers.add_parser('a', help='add entry')
    add_parser.set_defaults(func=store_paper)
    # help text fixed: keywords are split on commas, not colons
    add_parser.add_argument('keyword', nargs="*", help='give a list of keyword separated by commas')

    # d: delete an entry (original reused the name add_parser here)
    delete_parser = subparsers.add_parser('d', help='delete entry')
    delete_parser.set_defaults(func=delete_paper)
    delete_parser.add_argument('paper_id')

    # u: print entries as bibtex or plain strings
    output_parser = subparsers.add_parser('u', help='output information')
    output_parser.set_defaults(func=output)
    output_parser.add_argument('source', help="supply a list of paper ids or find Pandoc token file "
                                              "to extract a minimal reference list")
    output_format = output_parser.add_mutually_exclusive_group(required=True)
    output_format.add_argument('-b', '--bibtex', dest="format", action='store_const', const='bib',
                               help='output bibtex file')
    output_format.add_argument('-s', '--string', dest="format", action='store_const', const='str',
                               help='output a simple string')

    # k: add/remove keywords on an entry
    key_parser = subparsers.add_parser('k', help='manipulate keywords')
    key_parser.set_defaults(func=modify_keyword)
    key_parser.add_argument('paper_id')
    key_parser.add_argument('-a', '--add', nargs="+", help='keywords to add, separate by comma')
    key_parser.add_argument('-d', '--delete', nargs="+", help='keywords to delete, separate by comma')

    args = parser.parse_args()
    # Fixed: the original wrapped args.func(args) in `except AttributeError`,
    # which also silently masked genuine AttributeErrors raised *inside* the
    # dispatched command. Test for the missing attribute explicitly instead.
    if hasattr(args, 'func'):
        args.func(args)
    else:
        parser.print_help()


if __name__ == '__main__':
    main()
|
import numpy as np
def onehot(labels):
    """Encode integer labels as a one-hot matrix of shape (len(labels), max(labels)+1)."""
    sample_count = len(labels)
    class_count = max(labels) + 1
    encoded = np.zeros((sample_count, class_count))
    # set one cell per row: row i gets a 1 in column labels[i]
    encoded[np.arange(sample_count), labels] = 1
    return encoded
if __name__ == '__main__':
    # Quick demo of the encoder.
    demo_labels = [1, 3, 2, 0, 6, 4]
    print(onehot(demo_labels))
import numpy as np
import pandas as pd
import neworder as no
from math import sqrt
import pytest
def test_errors() -> None:
    # Exercise the error paths of no.df.transition with invalid inputs.
    df = pd.read_csv("./test/df.csv")
    # base model for MC engine
    model = no.Model(no.NoTimeline(), no.MonteCarlo.deterministic_identical_stream)
    cats = np.array(range(4))
    # identity matrix means no transitions
    trans = np.identity(len(cats))
    # invalid transition matrices
    with pytest.raises(ValueError):
        # non-square matrix
        no.df.transition(model, cats, np.ones((1, 2)), df, "DC2101EW_C_ETHPUK11")
    with pytest.raises(ValueError):
        # square but wrong size (1x1 vs 4 categories)
        no.df.transition(model, cats, np.ones((1, 1)), df, "DC2101EW_C_ETHPUK11")
    with pytest.raises(ValueError):
        # presumably rejected because rows no longer sum to 1 — confirm
        no.df.transition(model, cats, trans + 0.1, df, "DC2101EW_C_ETHPUK11")
    # category data MUST be 64bit integer. This will almost certainly be the default on linux/OSX (LP64) but maybe not on windows (LLP64)
    df["DC2101EW_C_ETHPUK11"]= df["DC2101EW_C_ETHPUK11"].astype(np.int32)
    with pytest.raises(TypeError):
        no.df.transition(model, cats, trans, df, "DC2101EW_C_ETHPUK11")
def test_basic() -> None:
    # test unique index generation
    idx = no.df.unique_index(100)
    # first call: ids start at this process' MPI rank, strided by world size
    assert np.array_equal(idx, np.arange(no.mpi.rank(), 100 * no.mpi.size(), step=no.mpi.size()))
    idx = no.df.unique_index(100)
    # second call continues where the first left off
    assert np.array_equal(idx, np.arange(100 * no.mpi.size() + no.mpi.rank(), 200 * no.mpi.size(), step=no.mpi.size()))
    N = 100000
    # base model for MC engine
    model = no.Model(no.NoTimeline(), no.MonteCarlo.deterministic_identical_stream)
    c = [1,2,3]
    df = pd.DataFrame({"category": [1] * N})
    # no transitions, check no changes
    t = np.identity(3)
    no.df.transition(model, c, t, df, "category")
    assert df.category.value_counts()[1] == N
    # all 1 -> 2
    t[0,0] = 0.0
    t[0,1] = 1.0
    no.df.transition(model, c, t, df, "category")
    assert 1 not in df.category.value_counts()
    assert df.category.value_counts()[2] == N
    # 2 -> 1 or 3
    t = np.array([
        [1.0, 0.0, 0.0],
        [0.5, 0.0, 0.5],
        [0.0, 0.0, 1.0],
    ])
    no.df.transition(model, c, t, df, "category")
    assert 2 not in df.category.value_counts()
    # each target should hold roughly N/2, within ~sqrt(N) sampling noise
    for i in [1,3]:
        assert df.category.value_counts()[i] > N/2 - sqrt(N) and df.category.value_counts()[i] < N/2 + sqrt(N)
    # spread evenly
    t = np.ones((3,3)) / 3
    no.df.transition(model, c, t, df, "category")
    for i in c:
        assert df.category.value_counts()[i] > N/3 - sqrt(N) and df.category.value_counts()[i] < N/3 + sqrt(N)
    # all -> 1
    t = np.array([
        [1.0, 0.0, 0.0],
        [1.0, 0.0, 0.0],
        [1.0, 0.0, 0.0],
    ])
    no.df.transition(model, c, t, df, "category")
    assert df.category.value_counts()[1] == N
def test(base_model: no.Model) -> None:
    # Transition behaviour on census fixture data (base_model supplied by fixture).
    df = pd.read_csv("./test/df.csv")
    cats = np.array(range(4))
    # identity matrix means no transitions
    trans = np.identity(len(cats))
    no.df.transition(base_model, cats, trans, df, "DC2101EW_C_ETHPUK11")
    # NOTE(review): asserts the column is uniformly 2 after a no-op transition,
    # i.e. the fixture must already contain only 2s — confirm test/df.csv.
    assert len(df["DC2101EW_C_ETHPUK11"].unique()) == 1 and df["DC2101EW_C_ETHPUK11"].unique()[0] == 2
    # NOTE transition matrix interpreted as being COLUMN MAJOR due to pandas DataFrame storing data in column-major order
    # force 2->3
    trans[2, 2] = 0.0
    trans[2, 3] = 1.0
    no.df.transition(base_model, cats, trans, df, "DC2101EW_C_ETHPUK11")
    no.log(df["DC2101EW_C_ETHPUK11"].unique())
    assert len(df["DC2101EW_C_ETHPUK11"].unique()) == 1 and df["DC2101EW_C_ETHPUK11"].unique()[0] == 3
    # ~half of 3->0
    trans[3, 0] = 0.5
    trans[3, 3] = 0.5
    no.df.transition(base_model, cats, trans, df, "DC2101EW_C_ETHPUK11")
    assert np.array_equal(np.sort(df["DC2101EW_C_ETHPUK11"].unique()), np.array([0, 3]))
|
import numpy
from DiscreteEnvironment import DiscreteEnvironment
class HerbEnvironment(object):
    # Discretised planning environment for the HERB robot: wraps the active
    # DOF limits in a DiscreteEnvironment grid and sets up the OpenRAVE scene.

    def __init__(self, herb, resolution):
        self.robot = herb.robot
        self.lower_limits, self.upper_limits = self.robot.GetActiveDOFLimits()
        self.discrete_env = DiscreteEnvironment(resolution, self.lower_limits, self.upper_limits)

        # account for the fact that snapping to the middle of the grid cell may put us over our
        # upper limit
        upper_coord = [x - 1 for x in self.discrete_env.num_cells]
        upper_config = self.discrete_env.GridCoordToConfiguration(upper_coord)
        for idx in range(len(upper_config)):
            self.discrete_env.num_cells[idx] -= 1

        # add a table and move the robot into place
        table = self.robot.GetEnv().ReadKinBodyXMLFile('models/objects/table.kinbody.xml')
        self.robot.GetEnv().Add(table)
        table_pose = numpy.array([[ 0, 0, -1, 0.7],
                                  [-1, 0,  0,   0],
                                  [ 0, 1,  0,   0],
                                  [ 0, 0,  0,   1]])
        table.SetTransform(table_pose)

        # set the camera
        camera_pose = numpy.array([[ 0.3259757 ,  0.31990565, -0.88960678,  2.84039211],
                                   [ 0.94516159, -0.0901412 ,  0.31391738, -0.87847549],
                                   [ 0.02023372, -0.9431516 , -0.33174637,  1.61502194],
                                   [ 0.        ,  0.        ,  0.        ,  1.        ]])
        self.robot.GetEnv().GetViewer().SetCamera(camera_pose)

    def GetSuccessors(self, node_id):
        """Return the node ids neighbouring ``node_id`` (stub)."""
        successors = []
        # TODO: Here you will implement a function that looks
        # up the configuration associated with the particular node_id
        # and return a list of node_ids that represent the neighboring
        # nodes
        return successors

    def ComputeDistance(self, start_id, end_id):
        """Return the distance between the two nodes' configurations (stub)."""
        dist = 0
        # TODO: Here you will implement a function that
        # computes the distance between the configurations given
        # by the two node ids
        return dist

    def ComputeHeuristicCost(self, start_id, goal_id):
        """Return the heuristic cost between the two nodes' configurations (stub)."""
        cost = 0
        # TODO: Here you will implement a function that
        # computes the heuristic cost between the configurations
        # given by the two node ids
        return cost
|
import numpy
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize, StandardScaler, LabelEncoder
import keras
import sys
import numpy as np
import scipy
import scipy.io
from keras.utils import to_categorical
import yaml
def read_data(filename):
    """Read prepared, normalised CSV data and reshape it for an LSTM model.

    The CSV is expected to hold 36 feature columns (indices 1..36) followed by
    the one-hot class columns (37..). A test-sized split, drawn according to
    ``settings.yaml``, is returned as (X, Y) with X shaped (samples, 1, 36).

    Fixed: the original buried its docstrings mid-function as bare string
    statements and wrapped yaml.safe_load in a try/except that only re-raised
    the same exception.
    """
    with open('settings.yaml', 'r') as fh:
        settings = dict(yaml.safe_load(fh))
    data = np.array(pd.read_csv(filename))
    features = data[:, 1:37]
    targets = data[:, 37:]
    # keep only the "test" share of the split, exactly as the original did
    _, X, _, Y = train_test_split(features, targets, test_size=settings['test_size'])
    # add a timesteps axis of length 1 for the LSTM input: (samples, 1, features)
    X = X.reshape(X.shape[0], 1, X.shape[1])
    return X, Y
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os
import sys
#spark_home = os.environ.get('SPARK_HOME', None)
#if not spark_home:
# raise ValueError('SPARK_HOME environment variable is not set')
#sys.path.insert(0, os.path.join(spark_home, 'python'))
#sys.path.insert(0, os.path.join(spark_home, 'python/lib/py4j-0.9-src.zip'))
#execfile(os.path.join(spark_home, 'python/pyspark/shell.py'))
import numpy as np
import math
import csv
from pyspark import SparkContext
#Parsing the Rating File
def Rating(line):
    """Parse one ratings CSV line into (userID, trackID, rating).

    Returns None when the line does not have 6 fields or the numeric fields
    fail to parse.
    """
    fields = line.replace("\n", "").split(",")
    if len(fields) != 6:
        return None
    try:
        # columns 3-5 hold user id, track id and rating
        return int(fields[3]), int(fields[4]), int(fields[5])
    except ValueError:
        return None
#Parsing the Track file
def TrackName(line):
    """Parse one tracks CSV line into (trackID, trackName).

    Returns None when the line does not have 6 fields or the id fails to parse.
    """
    fields = line.replace("\n", "").split(",")
    if len(fields) != 6:
        return None
    try:
        # column 4 is the track id, column 1 the track name
        return int(fields[4]), fields[1]
    except ValueError:
        return None
def Calculate_MeanRating(userRatingGroup):
    """Return (userID, mean rating) for one grouped user; mean is 0.0 when
    the user has no ratings."""
    user_id, ratings = userRatingGroup[0], userRatingGroup[1]
    if not ratings:
        return (user_id, 0.0)
    total = 0.0
    for pair in ratings:
        total += float(pair[1])
    return (user_id, 1.0 * total / len(ratings))
def UserAvg_broadcast(sContext, UTrain_RDD):
    """Broadcast a {userID: mean rating} dict built from the training RDD."""
    averages = {}
    for user, avgscore in UTrain_RDD.map(lambda x: Calculate_MeanRating(x)).collect():
        averages[user] = avgscore
    return sContext.broadcast(averages)
def UserTrackRatings(userRatingGroup):
    """Split one user's grouped (track, rating) pairs into parallel lists:
    (userID, (track list, rating list))."""
    user_id = userRatingGroup[0]
    pairs = userRatingGroup[1]
    tracks = [pair[0] for pair in pairs]
    ratings = [pair[1] for pair in pairs]
    return (user_id, (tracks, ratings))
def UserTrackRatings_broadcast(SContext, TrainRDD):
    """Broadcast a {userID: (track list, rating list)} dict from the training RDD."""
    track_map = {}
    for user, tupleList in TrainRDD.map(lambda x: UserTrackRatings(x)).collect():
        track_map[user] = tupleList
    return (SContext.broadcast(track_map))
def ConstructRating(tuple1, tuple2):
    """Pair up the ratings two users gave to the same tracks.

    Input: two (userID, [(trackID, rating), ...]) tuples.
    Output: ((user1, user2), [(rating1, rating2), ...]) over common tracks.
    """
    user_a, user_b = tuple1[0], tuple2[0]
    # sort both rating lists by track id for a linear merge
    tracks_a = sorted(tuple1[1])
    tracks_b = sorted(tuple2[1])
    shared_ratings = []
    ia, ib = 0, 0
    # classic sorted-merge intersection on track ids
    while ia < len(tracks_a) and ib < len(tracks_b):
        if tracks_a[ia][0] < tracks_b[ib][0]:
            ia += 1
        elif tracks_a[ia][0] > tracks_b[ib][0]:
            ib += 1
        else:
            shared_ratings.append((tracks_a[ia][1], tracks_b[ib][1]))
            ia += 1
            ib += 1
    return ((user_a, user_b), shared_ratings)
# --------------------------------------Cosine_Similarity-------------------------------------------#
# This function calculates the cosine similarity for two user pairs.
# Input : Output of ConstructRating function - ((user1, user2), ratingpair)
# Output : (userIDs, (cosine similarity, count of common tracks))
def Cosine_Similarity(tup):
    """Cosine similarity over a user pair's common ratings.

    Input:  ((user1, user2), [(rating1, rating2), ...])
    Output: ((user1, user2), (cosine, count of common tracks))
    """
    dot = 0.0
    norm_a, norm_b = 0.0, 0.0
    shared = 0
    for pair in tup[1]:
        dot += pair[0] * pair[1]
        norm_a += (pair[0]) ** 2
        norm_b += (pair[1]) ** 2
        shared += 1
    denom = math.sqrt(norm_a) * math.sqrt(norm_b)
    # zero denominator (no common ratings / all-zero vectors) -> similarity 0
    similarity = (dot / denom) if denom else 0.0
    return (tup[0], (similarity, shared))
# --------------------------------------User_GroupBy-------------------------------------------#
# This function groups the records by userID.
# Input : Output of Cosine_Similarity function - (tup[0], (cosine, count))
# Output : (user1,(all the users, corresponding cos_simi, corresponding common tracks match count))
def User_GroupBy(record):
    """Emit one record per user of a pair so results can be grouped by userID.

    Input:  ((user1, user2), (cosine, count))
    Output: [(user1, (user2, cosine, count)), (user2, (user1, cosine, count))]
    """
    (first, second), (cosine, count) = record
    return [(first, (second, cosine, count)),
            (second, (first, cosine, count))]
# --------------------------------------SimilarUser_pull-------------------------------------------#
# Input : it takes the userID, cosine cos_simi and the number of neighbors as input
# Output : returns the corresponding number of neighbors.
def SimilarUser_pull(user, records, k = 200):
    """Keep the user's top-k neighbours by similarity, dropping weak overlaps.

    records: iterable of (otherUser, cosine, commonCount) tuples.
    Only neighbours sharing at least 10 rated tracks are retained.
    """
    # Rank by cosine similarity, best first.
    ranked = sorted(records, key=lambda rec: rec[1], reverse=True)
    # Drop neighbours whose common-track count is too small to trust.
    strong = [rec for rec in ranked if rec[2] > 9]
    return (user, strong[:k])
# --------------------------------------UserNeighbourBroadcast-------------------------------------------#
# This function will broadcast the userNeighborRDD value
def UserNeighbourBroadcast(sContext, neighbor):
userNeighborList = neighbor.collect()
userNeighbor = {}
for user, simrecords in userNeighborList:
userNeighbor[user] = simrecords #making a dicionary of user and corresponding neighbourlist
neighbourBroadcast = sContext.broadcast(userNeighbor)
return neighbourBroadcast
# --------------------------------------CalculatingError-------------------------------------------#
# Taking in actual and predicted RDDs as input and calculating RMSE and MSE.
def CalculatingError(predictedRDD, actualRDD):
    """Compute the RMSE between predicted and actual ratings.

    Both RDDs hold (user, track, rating) records; they are joined on the
    (user, track) key and the root mean squared difference is returned.
    """
    # Re-key both sides as ((user, track), rating) so they can be joined.
    keyed_predicted = predictedRDD.map(lambda rec: ((rec[0], rec[1]), rec[2]))
    keyed_actual = actualRDD.map(lambda rec: ((rec[0], rec[1]), rec[2]))
    paired = keyed_predicted.join(keyed_actual)
    # Per-record squared error of predicted vs actual.
    squared = paired.map(lambda x: (x[1][0] - x[1][1]) * (x[1][0] - x[1][1]))
    total = squared.reduce(lambda left, right: left + right)
    return math.sqrt(float(total) / squared.count())
# --------------------------------------Prediction-------------------------------------------#
# this function predicts the rating.
# Input - the validationRDD, the neighbor dict whic has the user cosine similarity and corresponding count and Ids, average rating of each user and the number of neighbors
def Prediction(tup, neighborDict, userTrackDict, avgDict, topK):
    """Predict one user's rating for one track from its top-K neighbours.

    Mean-centred weighted average: the user's own average rating plus the
    similarity-weighted deviations of neighbours' ratings from their own
    averages.

    Parameters
    ----------
    tup : (userID, trackID) pair to predict.
    neighborDict : {userID: [(neighbourID, cosine, commonCount), ...]}
    userTrackDict : {userID: ([trackIDs], [ratings])}
    avgDict : {userID: average rating}
    topK : number of neighbour records to consult.

    Returns (userID, trackID, predicted rating); falls back to the user's
    average rating when no consulted neighbour has rated the track.
    """
    user, track = tup[0], tup[1]
    user_avg = avgDict.get(user, 0.0)
    sim_sum = 0.0       # sum of |cosine| over contributing neighbours
    weighted_sum = 0.0  # sum of cosine * (neighbour rating - neighbour avg)
    # Only the first topK neighbour records are consulted (same as the
    # original counter-and-break loop).
    for record in (neighborDict.get(user) or [])[:topK]:
        tracks_ratings = userTrackDict.get(record[0])
        if tracks_ratings is None:
            continue
        # Cleaned up: the original kept a redundant index = -1 sentinel and
        # an unreachable `if index != -1` check; plain EAFP suffices.
        try:
            idx = tracks_ratings[0].index(track)
        except ValueError:
            # This neighbour never rated the track; it contributes nothing.
            continue
        neighbour_avg = avgDict.get(record[0], 0.0)
        sim_sum += abs(record[1])
        weighted_sum += (tracks_ratings[1][idx] - neighbour_avg) * record[1]
    # Guard against zero similarity mass (no usable neighbours).
    predicted = (user_avg + weighted_sum / sim_sum) if sim_sum else user_avg
    return (user, track, predicted)
from collections import defaultdict
# --------------------------------------Neighborhood_size-------------------------------------------#
# this function is used to invoke the previous error calculation function and depending on the max number of neighbors and step size,
# it iterates and finds the corresponding error for all those number of pairs.
def Neighborhood_size(predicted_RDD, validate_RDD, userNeighborDict, UserTrackDict, UserRatingAverage_Dict, K_Range):
    """RMSE of the predictor for each neighbourhood size in K_Range.

    Returns a list of error values aligned with K_Range.  (Idiom fix: the
    original filled a pre-sized list through a manual `err` counter.)
    """
    errors = []
    for k in K_Range:
        # Re-predict with k neighbours; cache because CalculatingError
        # triggers more than one action on the RDD.
        predicted = predicted_RDD.map(
            lambda x: Prediction(x, userNeighborDict, UserTrackDict, UserRatingAverage_Dict, k)).cache()
        errors.append(CalculatingError(predicted, validate_RDD))
    return errors
# --------------------------------------Final_recommend-------------------------------------------#
def Final_recommend(user, neighbors, userTrackDict, k = 200, n = 5):
    """Recommend the top-n tracks for a user from its k nearest neighbours.

    Every track rated by a neighbour is scored with the similarity-weighted
    average of the neighbours' ratings; returns (user, [(trackID, score), ...])
    best-first.
    """
    similarity_mass = defaultdict(float)
    weighted_scores = defaultdict(float)
    for neighbour_id, similarity, _ in neighbors[:k]:
        rated = userTrackDict.get(neighbour_id)
        if not rated:
            continue
        tracks, ratings = rated
        for idx, track_id in enumerate(tracks):
            similarity_mass[track_id] += similarity
            weighted_scores[track_id] += similarity * ratings[idx]
    # Normalise each track's weighted sum by its accumulated similarity.
    scored = [(track_id, 1.0 * weighted / similarity_mass[track_id])
              for (track_id, weighted) in weighted_scores.items()]
    scored.sort(key=lambda item: item[1], reverse=True)
    return (user, scored[:n])
def BroadcastTrackListDictBroadcast(sContext, movRDD):
    """Broadcast a {trackID: trackName} dictionary built from the track RDD.

    Bug fix: the original broadcast through the module-global ``sc`` instead
    of the ``sContext`` parameter, which breaks any caller passing a
    different (or the only in-scope) SparkContext.
    """
    names = {}
    for (trackID, pname) in movRDD.collect():
        names[trackID] = pname
    return (sContext.broadcast(names))
def TrackNames(user, records, namedictionary):
    """Map a user's recommended (trackID, score) records to track names."""
    return (user, [namedictionary[rec[0]] for rec in records])
if __name__ == "__main__":
    # Usage: spark-submit knn.py <ratings csv> <userID to recommend for>
    if len(sys.argv) != 3:
        # Bug fix: the original used Python 2 syntax (`print >> sys.stderr`),
        # a SyntaxError under Python 3, and a copy-pasted "linreg" usage line.
        print("Usage: knn <datafile> <userID>", file=sys.stderr)
        exit(-1)
    sc = SparkContext(appName="KNN")
    # Read the ratings file and drop the header row.
    input_file = sc.textFile(sys.argv[1])
    file_header = input_file.first()
    input_file = input_file.filter(lambda x: x != file_header)
    # Parse (user, track, rating) records and (track, name) records,
    # discarding rows the parsers reject.
    dataRDD = input_file.map(Rating).cache().filter(lambda x: x is not None)
    trackRDD = input_file.map(TrackName).cache().filter(lambda x: x is not None)
    # 70/30 train/test split.
    training_RDD, testing_RDD = dataRDD.randomSplit([7, 3])
    PredictionRDD = testing_RDD.map(lambda x: (x[0], x[1]))  # drop the target rating
    TrainUserRating_RDD = training_RDD.map(lambda x: (x[0], (x[1], x[2]))).groupByKey().cache().mapValues(list)
    UserRatingAverage = UserAvg_broadcast(sc, TrainUserRating_RDD)
    UserTrackRatingList = UserTrackRatings_broadcast(sc, TrainUserRating_RDD)
    # All user pairs; keep each unordered pair exactly once.
    cartesianUser_RDD = TrainUserRating_RDD.cartesian(TrainUserRating_RDD)
    UserPairs = cartesianUser_RDD.filter(lambda x: x[0] < x[1])
    # Cosine similarity per pair, then group neighbours per user.
    UserPairActual = UserPairs.map(lambda x: ConstructRating(x[0], x[1]))
    SimiliarUserRDD = UserPairActual.map(lambda x: Cosine_Similarity(x))
    SimiliarUserGroupRDD = SimiliarUserRDD.flatMap(lambda x: User_GroupBy(x)).groupByKey()
    UserNeighborhood_RDD = SimiliarUserGroupRDD.map(lambda x: SimilarUser_pull(x[0], x[1], 2))
    UserNeighborhood_BC = UserNeighbourBroadcast(sc, UserNeighborhood_RDD)
    # Evaluate RMSE over a range of neighbourhood sizes (start, stop, step).
    K_Range = range(10, 130, 10)
    ErrorValue = [Neighborhood_size(PredictionRDD, testing_RDD, UserNeighborhood_BC.value,
                                    UserTrackRatingList.value, UserRatingAverage.value, K_Range)]
    print('Error values are %s' % ErrorValue)
    # (Removed a no-op `UserNeighborhood_RDD.map(...).mapValues(list)` whose
    # result was discarded.)
    RecommendedTracksForUser = UserNeighborhood_RDD.map(lambda x: Final_recommend(x[0], x[1], UserTrackRatingList.value))
    TrackNameDictionary = BroadcastTrackListDictBroadcast(sc, trackRDD)
    RecommendationForUser = RecommendedTracksForUser.map(lambda x: TrackNames(x[0], x[1], TrackNameDictionary.value))
    position = int(sys.argv[2])
    tracks = RecommendationForUser.filter(lambda x: x[0] == position).collect()
    print('For user %s recommended track is \"%s\"' % (position, tracks))
    sc.stop()
# <codecell>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
# Soft-delete one nvl_polygon row by id.
# $1 = user id (a value of 0 matches any user), $2 = polygon id.
delete_nvl_polygon_element_query = """
UPDATE public.nvl_polygon AS npg SET deleted = TRUE,
active = FALSE WHERE ($1::BIGINT = 0 OR npg.user_id = $1::BIGINT) AND npg.id = $2::BIGINT RETURNING *;
"""
# Earlier soft-delete variant of the by-location query, kept for reference:
# delete_nvl_polygon_element_by_location_id_query = """
# UPDATE public.nvl_polygon AS npg SET deleted = TRUE,
# active = FALSE WHERE ($1::BIGINT = 0 OR npg.user_id = $1::BIGINT) AND npg.location_id = $2::BIGINT RETURNING *;
# """
# Delete all of a user's nvl_polygon rows for one location.
# NOTE(review): unlike the query above this DELETEs rows outright rather than
# soft-deleting — confirm the switch away from soft deletion was intentional.
delete_nvl_polygon_element_by_location_id_query = """
DELETE FROM public.nvl_polygon AS npg
WHERE ($1::BIGINT = 0 OR npg.user_id = $1::BIGINT) AND npg.location_id = $2::BIGINT RETURNING *;
"""
#!usr/bin/env python3
import os
import random as rd
import numpy as np
import gdal
import matplotlib.pyplot as plt
def random_npp():
    """CREATE A RANDOM NPP GLOBAL MAP... AS AN np.array - 0.5°resolution """
    # Land/sea mask; cells where the mask is truthy get the -9999.0 nodata value.
    mask = np.load('mask3.npy')[0]
    rnpp = np.zeros(shape=(360,720),dtype=np.float32)
    for j in range(rnpp.shape[0]):
        for i in range(rnpp.shape[1]):
            if mask[j][i]:
                rnpp[j][i] = -9999.0
            else:
                # High-latitude rows (j <= 60 or j >= 270) draw the smallest values.
                # NOTE(review): rd.randrange(0,1) always returns 0, so this branch
                # reduces to rd.random()*rd.random() — confirm that is intended.
                if j <= 60 or j >= 270:
                    rnpp[j][i] = rd.random()*(rd.random() + rd.randrange(0,1))
                # NOTE(review): after the branch above, 60 < j < 270 always holds
                # here, so this `or` condition is always true — it acts as an else.
                elif j > 60 or j < 270:
                    if j < 140 or j > 210 :
                        rnpp[j][i] = rd.random() * (rd.random() + rd.randrange(0,2))
                    else:
                        # Middle rows (140-210) draw from the widest range.
                        rnpp[j][i] = rd.random() * (rd.random() + rd.randrange(0,3))
    return rnpp
# Values intended to come from a lognormal distribution.
# (Earlier linspace-based candidate ranges, kept for reference:)
#g1 = np.linspace(1.6, 7.1, 10)
#vcmax = np.linspace(3.e-5,25e-5,10)
#jmax = np.linspace(1e-4,3e-4,10)
#tleaf = np.linspace(1,100,50)/12 # years
#twood = np.linspace(0,80,90)
#troot = np.linspace(1,100,50)/12
# The combined allocation values must sum to 100%.
#aleaf = np.linspace(25,90,33)
#aroot = np.linspace(25,90,33)
#awood = np.linspace(0,90,45)
# Physiological trait candidate values.
g1 = np.linspace(1.6, 7.1, 10)
vcmax = np.linspace(3.e-5,25e-5,10)
jmax = np.linspace(1e-4,3e-4,10)
# Tissue turnover times (leaf/root in years, converted from months).
tleaf = np.arange(1,100,12)/12 # years
twood = np.arange(1,80,5)
troot = np.arange(1,100,12)/12
# Allocation percentages (leaf/wood/root), in steps of 5%.
aleaf = np.arange(20,81,5)
aroot = np.arange(20,81,5)
awood = np.arange(20,81,5)
pls_list = []
colnames_a = ['aleaf','awood','aroot']
# Grass allocations: no wood; leaf + root fractions must sum to 100%.
# NOTE(review): the four grass comprehensions enumerate the same 13
# allocations in different orders, and the six woody ones the same 45 — the
# concatenations below therefore contain duplicates, which is presumably why
# a dedup pass follows; confirm.
pls_grass1 = [[a/100,0.0,c/100] for a in aleaf for c in aroot if abs(a + 0.0 + c) == 100.]
pls_grass2 = [[c/100,0.0,a/100] for a in aleaf for c in aroot if abs(c + 0.0 + a) == 100.]
pls_grass3 = [[a/100,0.0,c/100] for c in aroot for a in aleaf if abs(a + 0.0 + c) == 100.]
pls_grass4 = [[c/100,0.0,a/100] for c in aroot for a in aleaf if abs(c + 0.0 + a) == 100.]
# Woody allocations: leaf + wood + root sum to 100% with wood >= 20%.
pls_woody1 = [[a/100,b/100,c/100] for a in aleaf for b in awood for c in aroot if ((a + b + c) == 100.) and (b > 19)]
pls_woody2 = [[a/100,b/100,c/100] for c in aroot for b in awood for a in aleaf if ((c + b + a) == 100.) and (b > 19)]
pls_woody3 = [[a/100,b/100,c/100] for b in awood for c in aroot for a in aleaf if ((c + b + a) == 100.) and (b > 19)]
pls_woody4 = [[a/100,b/100,c/100] for a in aleaf for c in aroot for b in awood if ((c + b + a) == 100.) and (b > 19)]
pls_woody5 = [[a/100,b/100,c/100] for c in aroot for b in awood for a in aleaf if ((c + b + a) == 100.) and (b > 19)]
pls_woody6 = [[a/100,b/100,c/100] for b in awood for a in aleaf for c in aroot if ((c + b + a) == 100.) and (b > 19)]
plsa =(pls_grass1 + pls_grass2 + pls_grass3 + pls_grass4 + pls_woody1 +\
 pls_woody2 +pls_woody3+pls_woody4+pls_woody5+pls_woody6)
plsa_wood =(pls_woody1 + pls_woody2 + pls_woody3 + pls_woody4 + pls_woody5 + pls_woody6)
plsa_grass =(pls_grass1 + pls_grass2 + pls_grass3 + pls_grass4)
# CREATING ALLOCATION COMBINATIONS
def _dedup(seq):
    """Remove duplicate entries from a list in place (same pop/insert walk as
    the original four copy-pasted loops, so the resulting order is identical)."""
    for _ in range(len(seq)):
        item = seq.pop()
        if item not in seq:
            seq.insert(0, item)
_dedup(plsa_grass)
_dedup(plsa_wood)
# CREATING TURNOVER COMBINATIONS
colnames_t = ['tleaf','twood','troot']
turnover_wood = [[a,b,c] for a in tleaf for b in twood for c in troot]
turnover_grass = [[a,0.0,c] for a in tleaf for c in troot]
turnover = turnover_grass + turnover_wood
_dedup(turnover_grass)
_dedup(turnover_wood)
# CREATING PHYSIOLOGICAL COMBINATIONS
colenames_p = ['g1','vcmax','jmax']
phys = [[a,b,c] for a in g1 for b in vcmax for c in jmax]
# Joining turnover + (g1, vcmax, jmax) combinations yields more than
# 1,300,000 possibilities; sample 10000 rows uniformly at random.
# Bug fix: np.random.random_integers is deprecated/removed in modern NumPy
# and its upper bound is INCLUSIVE, so sec_hand_arr_*[shape[0]] could raise
# IndexError; np.random.randint has the correct exclusive upper bound.
sec_hand_wood = [a + b for a in turnover_wood for b in phys]
sec_hand_grass = [a + b for a in turnover_grass for b in phys]
sec_hand_arr_grass = np.array(sec_hand_grass)
sec_hand_arr_wood = np.array(sec_hand_wood)
plss_wood = sec_hand_arr_wood[np.random.randint(0, sec_hand_arr_wood.shape[0], 10000)][:]
plss_grass = sec_hand_arr_grass[np.random.randint(0, sec_hand_arr_grass.shape[0], 10000)][:]
# NOTE(review): plss_wood/plss_grass are never used below — the loops draw
# directly from sec_hand_arr_*; confirm which sampling was intended.
pls_list = []
for alloc_pls in plsa_grass:
    for _ in range(10):
        # NOTE(review): randint(0, 10000) only samples the first 10000 rows
        # of sec_hand_arr_grass — TODO confirm this subsetting is deliberate.
        plst = alloc_pls + list(sec_hand_arr_grass[np.random.randint(0,10000)][:])
        pls_list.append(plst)
for alloc_pls in plsa_wood:
    for _ in range(10):
        plst = alloc_pls + list(sec_hand_arr_wood[np.random.randint(0,10000)][:])
        pls_list.append(plst)
# Write the table transposed: one column per PLS, one row per trait.
out_arr = np.array(pls_list).T
np.savetxt('pls_580.txt', out_arr, fmt='%.12f')
|
/Users/samnayrouz/anaconda3/lib/python3.6/_dummy_thread.py |
import os
import sys
import tmdbsimple as tmdb
import urllib.request
def get_image(moviePoster, movieTitle):
    """Download every size variant of a movie's TMDB poster into ./imagePosters.

    moviePoster is the TMDB poster path ('N/A' when the movie has none);
    movieTitle (spaces stripped) is used to build the local file names.
    """
    # Nothing to fetch when the movie has no poster.
    if moviePoster == 'N/A':
        return
    # Create imagePosters directory if not present.
    os.makedirs("./imagePosters", exist_ok=True)
    base_url = 'https://image.tmdb.org/t/p/'
    sizes = ['w92', 'w154', 'w185', 'w300_and_h450_bestv2', 'w342', 'w500', 'w780']  # 'original']
    for size in sizes:
        image_url = base_url + size + moviePoster
        print(size, 'poster image:', image_url)
        local_name = movieTitle.replace(" ", "") + '_' + size + '.jpg'
        target = os.path.join('./imagePosters', local_name)
        # Skip files already fetched on a previous run.
        if not os.path.isfile(target):
            # COMMENT ME OUT TO NOT DOWNLOAD EVERYTHING
            urllib.request.urlretrieve(image_url, target)
    print('')
class Movie:
    """A movie record parsed from one dictionary row.

    Bug fix: the original defined ``__init__`` three times; Python keeps only
    the last definition, so the no-argument and keyword constructors were
    silently dead code (and one of them used mutable default arguments).
    Only the dict-based constructor — the behaviour the class actually had —
    is kept.
    """

    def __init__(self, dictionary):
        """Populate the record from a mapping with Title/ID/... keys.

        'ViewedBy' and 'Genres' are comma-separated strings and are split
        into lists.
        """
        self.title = dictionary['Title']
        self.ID = dictionary['ID']
        self.viewers = dictionary['ViewedBy'].split(', ')
        self.runtime = dictionary['Runtime']
        self.genres = dictionary['Genres'].split(', ')
        self.release_date = dictionary['ReleaseDate']
        self.vote = dictionary['Vote']
        self.overview = dictionary['Overview']
        self.poster_path = dictionary['Poster']

    def __repr__(self):
        # Debug-friendly representation (new, backward-compatible).
        return f"Movie(title={self.title!r}, ID={self.ID!r})"
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def get_nodules_pixel_coords(batch):
    """Return a DataFrame of nodule centres and diameters in pixel coordinates.

    Columns: numeric_ix (patient position), coordZ/coordY/coordX (centre in
    pixels) and diameter_pixels (mean of the per-axis pixel diameters).

    Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``int`` is the documented replacement.
    """
    nodules_dict = dict()
    nodules_dict.update(numeric_ix=batch.nodules.patient_pos)
    # World -> pixel: subtract the origin and divide by the voxel spacing.
    pixel_zyx = np.rint((batch.nodules.nodule_center - batch.nodules.origin)
                        / batch.nodules.spacing).astype(int)
    nodules_dict.update({'coord' + letter: pixel_zyx[:, i]
                         for i, letter in enumerate(['Z', 'Y', 'X'])})
    nodules_dict.update({'diameter_pixels': (np.rint(batch.nodules.nodule_size / batch.nodules.spacing)
                                             .mean(axis=1).astype(int))})
    pixel_nodules_df = pd.DataFrame.from_dict(nodules_dict).loc[:, ('numeric_ix', 'coordZ', 'coordY',
                                                                    'coordX', 'diameter_pixels')]
    return pixel_nodules_df
def num_of_cancerous_pixels(batch, max_num=10):
    """Tabulate the number of cancerous (mask) pixels for up to max_num scans.

    Returns a one-row DataFrame with one 'Scan i' column per reported scan.
    """
    n_report = min(max_num, len(batch))
    # Per-scan pixel counts, summed over each scan's mask volume.
    counts = {'Scan ' + str(i): int(np.sum(batch.get(i, 'masks')))
              for i in range(n_report)}
    table = {'Number of cancerous pixels: ': counts}
    # Enforce column ordering Scan 0..Scan n-1.
    return pd.DataFrame.from_dict(table, orient='index').loc[:, ['Scan ' + str(i) for i in range(n_report)]]
def show_slices(batches, scan_indices, ns_slice, grid=True, **kwargs):
    """ Plot slice with number n_slice from scan with index given by scan_index from batch

    Each argument may be a scalar or a list/tuple; scalars are broadcast to
    the common number of panels.  kwargs: 'components' ('images' by default)
    and 'clims' (colour limits, default (-1200, 300)).
    """
    # Font settings for captions and axis labels.
    font_caption = {'family': 'serif',
                    'color': 'darkred',
                    'weight': 'normal',
                    'size': 18}
    font = {'family': 'serif',
            'color': 'darkred',
            'weight': 'normal',
            'size': 15}
    # fetch some arguments, make iterables out of args
    def iterize(arg):
        return arg if isinstance(arg, (list, tuple)) else (arg, )
    components = kwargs.get('components', 'images')
    batches, scan_indices, ns_slice, components = [iterize(arg) for arg in (batches, scan_indices,
                                                                            ns_slice, components)]
    # Colour limits; (-1200, 300) presumably targets CT Hounsfield units — TODO confirm.
    clims = kwargs.get('clims', (-1200, 300))
    clims = clims if isinstance(clims[0], (tuple, list)) else (clims, )
    # lengthen args: broadcast every argument list to the panel count.
    n_boxes = max(len(arg) for arg in (batches, scan_indices, ns_slice, clims))
    def lengthen(arg):
        return arg if len(arg) == n_boxes else arg * n_boxes
    batches, scan_indices, ns_slice, clims, components = [lengthen(arg) for arg in (batches, scan_indices, ns_slice,
                                                                                    clims, components)]
    # plot slices, one panel per (batch, scan, slice) triple
    _, axes = plt.subplots(1, n_boxes, squeeze=False, figsize=(10, 4 * n_boxes))
    zipped = zip(range(n_boxes), batches, scan_indices, ns_slice, clims, components)
    for i, batch, scan_index, n_slice, clim, component in zipped:
        slc = batch.get(scan_index, component)[n_slice]
        axes[0][i].imshow(slc, cmap=plt.cm.gray, clim=clim)
        axes[0][i].set_xlabel('Shape: {}'.format(slc.shape[1]), fontdict=font)
        axes[0][i].set_ylabel('Shape: {}'.format(slc.shape[0]), fontdict=font)
        title = 'Scan' if component == 'images' else 'Mask'
        axes[0][i].set_title('{} #{}, slice #{} \n \n'.format(title, scan_index, n_slice), fontdict=font_caption)
        axes[0][i].text(0.2, -0.25, 'Total slices: {}'.format(len(batch.get(scan_index, component))),
                        fontdict=font_caption, transform=axes[0][i].transAxes)
        # set inverse-spacing grid
        # NOTE(review): assumes spacing is ordered (z, y, x) so [1:] keeps the
        # in-plane axes — confirm against the batch class.
        if grid:
            inv_spacing = 1 / batch.get(scan_index, 'spacing').reshape(-1)[1:]
            step_mult = 50
            xticks = np.arange(0, slc.shape[0], step_mult * inv_spacing[0])
            yticks = np.arange(0, slc.shape[1], step_mult * inv_spacing[1])
            axes[0][i].set_xticks(xticks, minor=True)
            axes[0][i].set_yticks(yticks, minor=True)
            axes[0][i].set_xticks([], minor=False)
            axes[0][i].set_yticks([], minor=False)
            axes[0][i].grid(color='r', linewidth=1.5, alpha=0.5, which='minor')
    plt.show()
|
from battle.battleeffect.RegularAttack import RegularAttack
from battle.battleeffect.EffectType import EffectType
from battle.round.RoundAction import RoundAction
from ui.UI import UI
import random
# this represents a generic fighter of any kind.
class Fighter:
    """A generic combatant with HP, core stats, and a spell list."""
    def __init__(self, name, hp, strength, defense, agility, magic):
        self.name = name
        self.current_hp = hp  # current hit points; capped at max_hp by healing
        self.max_hp = hp
        self.strength = strength
        self.defense = defense
        self.agility = agility
        self.magic = magic
        self.spells = []  # learned spells; starts empty
    # for now, just create a normal attack
    def create_round_action(self, target_fighter):
        """Build this fighter's action for the round (always a regular attack)."""
        action = RegularAttack(self, None)
        # target = target_fighter
        return RoundAction(action, target_fighter)
    #TODO: clean this up
    def receive_battle_effect(self, battle_effect):
        """Apply an incoming effect: physical/magical damage or healing."""
        if battle_effect.effect_type == EffectType.physical:
            # Physical damage is reduced by this fighter's defense.
            damage = battle_effect.calculate_power() - self.defense
            if damage > 0:
                UI().show_text("\t" + self.name + " takes " + str(damage) + " damage!!!")
                self.current_hp -= damage
            self.faint()
            # TODO: apply physical strategy
        elif battle_effect.effect_type == EffectType.magical:
            damage = battle_effect.calculate_power() # TODO: magical damage? Resistance?
            if damage > 0:
                UI().show_text("\t" + self.name + " takes " + str(damage) + " magical damage!!!")
                self.current_hp -= damage
            # NOTE(review): unlike the physical branch, faint() is never called
            # here, so a magical KO is not announced or clamped — confirm intent.
            return
        elif battle_effect.effect_type == EffectType.healing:
            healing = battle_effect.calculate_power()
            if healing > 0:
                UI().show_text("\t" + self.name + " recovers " + str(healing) + " HP!")
                self.current_hp += healing
                # Healing never raises HP above the maximum.
                if self.current_hp > self.max_hp:
                    self.current_hp = self.max_hp
            return
    def faint(self):
        """Clamp HP at zero and announce the KO once HP drops to or below zero."""
        if self.current_hp <= 0:
            self.current_hp = 0
            UI().show_text("\t" + self.name + " has fainted!")
    def get_priority_strategy(self):
        """Turn priority: agility plus a random bonus of 0-50% of agility."""
        return self.agility + (self.agility / 10 * random.choice(range(0, 6)))
|
from multiprocessing import cpu_count
from os.path import isfile
import shutil
import itertools
from unittest import mock
import distributed
import pytest
from aospy import Var, Proj
from aospy.automate import (
_user_verify,
_MODELS_STR,
_RUNS_STR,
_VARIABLES_STR,
_REGIONS_STR,
_compute_or_skip_on_error,
_get_all_objs_of_type,
_get_attr_by_tag,
_merge_dicts,
_n_workers_for_local_cluster,
_permuted_dicts_of_specs,
_prune_invalid_time_reductions,
AospyException,
CalcSuite,
submit_mult_calcs,
)
from .data.objects import examples as lib
from .data.objects.examples import (
example_proj,
example_model,
example_run,
var_not_time_defined,
condensation_rain,
convection_rain,
precip,
ps,
sphum,
globe,
sahel,
bk,
p,
dp,
)
# Fixtures exposing the example object library and its contents.
@pytest.fixture
def obj_lib():
    return lib
@pytest.fixture
def all_vars():
    return [condensation_rain, convection_rain, precip, ps, sphum]
@pytest.fixture
def all_projects():
    return [example_proj]
@pytest.fixture
def all_models():
    return [example_model]
@pytest.fixture
def all_runs():
    return [example_run]
@pytest.fixture
def all_regions():
    return [globe, sahel]
# _get_attr_by_tag maps a tag ('all'/'default') to an object's attribute list.
@pytest.mark.parametrize(
    ('obj', 'tag', 'attr_name', 'expected'),
    [(example_proj, 'all', _MODELS_STR, [example_model]),
     (example_proj, 'default', _MODELS_STR, []),
     (example_model, 'all', _RUNS_STR, [example_run]),
     (example_model, 'default', _RUNS_STR, [])])
def test_get_attr_by_tag(obj, tag, attr_name, expected):
    actual = _get_attr_by_tag(obj, tag, attr_name)
    assert actual == expected
def test_get_attr_by_tag_invalid():
    # An unrecognized tag raises KeyError.
    with pytest.raises(KeyError):
        _get_attr_by_tag(example_proj, 'alll', _MODELS_STR)
@pytest.fixture
def calcsuite_specs():
    """Aux specs after being processed by CalcSuite."""
    return {
        'time_offset': [None],
        'date_range': ['default'],
        'intvl_in': ['monthly'],
        'region': [{globe, sahel}],
        'dtype_out_time': [['av', 'reg.av']],
        'dtype_in_vert': [False],
        'dtype_in_time': ['ts'],
        'var': [condensation_rain, convection_rain],
        'intvl_out': ['ann'],
        'dtype_out_vert': [None]
    }
def test_permuted_dict_of_specs(calcsuite_specs):
    # Two variables in the specs yield exactly two permuted dicts,
    # identical except for the 'var' entry.
    actual = _permuted_dicts_of_specs(calcsuite_specs)
    expected = [
        {'time_offset': None,
         'date_range': 'default',
         'intvl_in': 'monthly',
         'region': {globe, sahel},
         'dtype_out_time': ['av', 'reg.av'],
         'dtype_in_vert': False,
         'dtype_in_time': 'ts',
         'var': condensation_rain,
         'intvl_out': 'ann',
         'dtype_out_vert': None},
        {'time_offset': None,
         'date_range': 'default',
         'intvl_in': 'monthly',
         'region': {globe, sahel},
         'dtype_out_time': ['av', 'reg.av'],
         'dtype_in_vert': False,
         'dtype_in_time': 'ts',
         'var': convection_rain,
         'intvl_out': 'ann',
         'dtype_out_vert': None}
    ]
    assert actual == expected
def test_merge_dicts():
    """_merge_dicts combines mappings left-to-right, later values winning."""
    base = dict(a=1)
    extra = {'b': 3, 43: False}
    third = dict(c=['abc'])
    # Disjoint keys simply union.
    assert _merge_dicts(base, extra, third) == {'a': 1, 'b': 3, 'c': ['abc'], 43: False}
    # A later dict overrides an earlier value for key 'c'.
    assert _merge_dicts(base, extra, third, dict(c=None)) == {'a': 1, 'b': 3, 'c': None, 43: False}
def test_user_verify():
    # 'YES' passes verification; anything else raises AospyException.
    with mock.patch('builtins.input', return_value='YES'):
        _user_verify()
    with pytest.raises(AospyException):
        with mock.patch('builtins.input', return_value='no'):
            _user_verify()
@pytest.mark.parametrize(
    ('type_', 'expected'),
    [(Var, [var_not_time_defined, condensation_rain, convection_rain,
            precip, ps, sphum, bk, p, dp]),
     (Proj, [example_proj])])
def test_get_all_objs_of_type(obj_lib, type_, expected):
    # Membership, not order, is what matters here.
    actual = _get_all_objs_of_type(type_, obj_lib)
    assert set(expected) == set(actual)
@pytest.fixture
def calcsuite_init_specs():
    # Raw keyword specs as handed to CalcSuite / submit_mult_calcs.
    return dict(
        library=lib,
        projects=[example_proj],
        models=[example_model],
        runs=[example_run],
        variables=[condensation_rain, convection_rain],
        regions='all',
        date_ranges='default',
        output_time_intervals=['ann'],
        output_time_regional_reductions=['av', 'reg.av'],
        output_vertical_reductions=[None],
        input_time_intervals=['monthly'],
        input_time_datatypes=['ts'],
        input_time_offsets=[None],
        input_vertical_datatypes=[False],
    )
@pytest.fixture
def calcsuite_init_specs_single_calc(calcsuite_init_specs):
    # Narrow the specs down to a single calculation.
    specs = calcsuite_init_specs.copy()
    specs['variables'] = [condensation_rain]
    specs['regions'] = [None]
    specs['output_time_regional_reductions'] = ['av']
    yield specs
    # Teardown procedure
    for direc in [example_proj.direc_out, example_proj.tar_direc_out]:
        shutil.rmtree(direc, ignore_errors=True)
@pytest.fixture
def calcsuite_init_specs_two_calcs(calcsuite_init_specs):
    # Same as above but with two variables -> two calculations.
    specs = calcsuite_init_specs.copy()
    specs['variables'] = [condensation_rain, convection_rain]
    specs['regions'] = [None]
    specs['output_time_regional_reductions'] = ['av']
    yield specs
    # Teardown procedure
    for direc in [example_proj.direc_out, example_proj.tar_direc_out]:
        shutil.rmtree(direc, ignore_errors=True)
@pytest.fixture
def calc(calcsuite_init_specs_single_calc):
    return CalcSuite(calcsuite_init_specs_single_calc).create_calcs()[0]
def test_compute_or_skip_on_error(calc, caplog):
    # A healthy calc computes and is returned unchanged.
    result = _compute_or_skip_on_error(calc, dict(write_to_tar=False))
    assert result is calc
    # A broken calc is skipped (None returned) and the skip is logged.
    calc.start_date = 'dummy'
    result = _compute_or_skip_on_error(calc, dict(write_to_tar=False))
    log_record = caplog.record_tuples[-1][-1]
    assert log_record.startswith("Skipping aospy calculation")
    assert result is None
@pytest.fixture
def external_client():
    # Explicitly specify we want only 4 workers so that when running on
    # continuous integration we don't request too many.
    cluster = distributed.LocalCluster(n_workers=4)
    client = distributed.Client(cluster)
    yield client
    client.close()
    cluster.close()
def assert_calc_files_exist(calcs, write_to_tar, dtypes_out_time):
    """Check that expected calcs were written to files"""
    for calc in calcs:
        # Every requested time reduction must have produced an output file.
        for reduction in dtypes_out_time:
            assert isfile(calc.path_out[reduction])
        # The tarball exists exactly when write_to_tar was requested.
        tar_exists = isfile(calc.path_tar_out)
        assert tar_exists if write_to_tar else not tar_exists
@pytest.mark.filterwarnings('ignore:Using or importing the ABCs from')
@pytest.mark.parametrize(
    ('exec_options'),
    [dict(parallelize=True, write_to_tar=False),
     dict(parallelize=True, write_to_tar=True)])
def test_submit_mult_calcs_external_client(calcsuite_init_specs_single_calc,
                                           external_client, exec_options):
    # Run the suite on a user-supplied distributed client.
    exec_options.update(client=external_client)
    calcs = submit_mult_calcs(calcsuite_init_specs_single_calc, exec_options)
    write_to_tar = exec_options.pop('write_to_tar', True)
    assert_calc_files_exist(
        calcs, write_to_tar,
        calcsuite_init_specs_single_calc['output_time_regional_reductions'])
@pytest.mark.parametrize(
    ('exec_options'),
    [dict(parallelize=False, write_to_tar=False),
     dict(parallelize=True, write_to_tar=False),
     dict(parallelize=False, write_to_tar=True),
     dict(parallelize=True, write_to_tar=True),
     None])
def test_submit_mult_calcs(calcsuite_init_specs_single_calc, exec_options):
    calcs = submit_mult_calcs(calcsuite_init_specs_single_calc, exec_options)
    # exec_options=None exercises the defaults (write_to_tar defaults True).
    if exec_options is None:
        write_to_tar = True
    else:
        write_to_tar = exec_options.pop('write_to_tar', True)
    assert_calc_files_exist(
        calcs, write_to_tar,
        calcsuite_init_specs_single_calc['output_time_regional_reductions'])
def test_submit_mult_calcs_no_calcs(calcsuite_init_specs):
    # An empty spec axis means no calculations can be generated.
    specs = calcsuite_init_specs.copy()
    specs['input_vertical_datatypes'] = []
    with pytest.raises(AospyException):
        submit_mult_calcs(specs)
@pytest.mark.parametrize(
    ('exec_options'),
    [dict(parallelize=True, write_to_tar=False),
     dict(parallelize=True, write_to_tar=True)])
def test_submit_two_calcs_external_client(calcsuite_init_specs_two_calcs,
                                          external_client, exec_options):
    # Same as the single-calc external-client test, with two variables.
    exec_options.update(client=external_client)
    calcs = submit_mult_calcs(calcsuite_init_specs_two_calcs, exec_options)
    write_to_tar = exec_options.pop('write_to_tar', True)
    assert_calc_files_exist(
        calcs, write_to_tar,
        calcsuite_init_specs_two_calcs['output_time_regional_reductions'])
@pytest.mark.parametrize(
    ('exec_options'),
    [dict(parallelize=False, write_to_tar=False),
     dict(parallelize=True, write_to_tar=False),
     dict(parallelize=False, write_to_tar=True),
     dict(parallelize=True, write_to_tar=True),
     None])
def test_submit_two_calcs(calcsuite_init_specs_two_calcs, exec_options):
    calcs = submit_mult_calcs(calcsuite_init_specs_two_calcs, exec_options)
    if exec_options is None:
        write_to_tar = True
    else:
        write_to_tar = exec_options.pop('write_to_tar', True)
    assert_calc_files_exist(
        calcs, write_to_tar,
        calcsuite_init_specs_two_calcs['output_time_regional_reductions'])
def test_n_workers_for_local_cluster(calcsuite_init_specs_two_calcs):
    # The worker count is capped by both CPU count and the number of calcs.
    calcs = CalcSuite(calcsuite_init_specs_two_calcs).create_calcs()
    expected = min(cpu_count(), len(calcs))
    result = _n_workers_for_local_cluster(calcs)
    assert result == expected
@pytest.fixture
def calc_suite(calcsuite_init_specs):
    return CalcSuite(calcsuite_init_specs)
class TestCalcSuite(object):
    """Unit tests for CalcSuite's spec-processing helpers."""
    def test_init(self, calc_suite, calcsuite_init_specs, obj_lib):
        assert calc_suite._specs_in == calcsuite_init_specs
        assert calc_suite._obj_lib == obj_lib
    def test_permute_core_specs(self, calc_suite):
        expected = [dict(proj=example_proj, model=example_model,
                         run=example_run)]
        actual = calc_suite._permute_core_specs()
        assert expected == actual
    # TODO: cases w/ multiple projs and/or models and/or runs, with
    # different default children for each
    def test_get_regions(self, calc_suite, all_regions):
        assert calc_suite._get_regions()[0] == set(all_regions)
    # TODO: case w/ not all regions
    # TODO: case w/ Region objects in 'regions' sub-module
    def test_get_variables(self, calc_suite, all_vars):
        assert not hasattr(calc_suite, 'variables')
        assert calc_suite._get_variables() == {condensation_rain,
                                               convection_rain}
    # TODO: case w/ Var objects in 'variables' sub-module
    # TODO: case w/ 'all'
    def test_get_aux_specs(self, calc_suite, all_regions):
        # Aux specs pass through unchanged except variables, regions, date
        # ranges, and time reductions, which get normalized.
        spec_names = [name for name in calc_suite._AUX_SPEC_NAMES
                      if name not in [_VARIABLES_STR, _REGIONS_STR]]
        expected = {name: calc_suite._specs_in[name] for name in spec_names}
        expected[_VARIABLES_STR] = {condensation_rain, convection_rain}
        expected[_REGIONS_STR] = [{globe, sahel}]
        expected['date_ranges'] = ['default']
        expected['output_time_regional_reductions'] = [['av', 'reg.av']]
        actual = calc_suite._get_aux_specs()
        assert actual == expected
    def test_permute_aux_specs(self, calc_suite, calcsuite_specs):
        expected = _permuted_dicts_of_specs(calcsuite_specs)
        actual = calc_suite._permute_aux_specs()
        # Order is not guaranteed, so compare as unordered collections.
        assert len(actual) == len(expected)
        for act in actual:
            assert act in expected
@pytest.mark.parametrize('var', [var_not_time_defined, condensation_rain])
def test_prune_invalid_time_reductions(var):
    # Every subset/ordering of time reductions should survive for a
    # time-defined variable and be pruned to [] otherwise.
    time_options = ['av', 'std', 'ts', 'reg.av', 'reg.std', 'reg.ts']
    spec = {
        'var': var,
        'dtype_out_time': None
    }
    assert _prune_invalid_time_reductions(spec) is None
    for i in range(1, len(time_options) + 1):
        for time_option in list(itertools.permutations(time_options, i)):
            spec['dtype_out_time'] = time_option
            if spec['var'].def_time:
                assert _prune_invalid_time_reductions(spec) == time_option
            else:
                assert _prune_invalid_time_reductions(spec) == []
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Columns needed for the "pilot warned vs. precipitation" cost breakdown.
col_list = ["WildlifeSpecies", "CostTotal", "PhaseOfFlight", "PilotWarned", "Precipitation"]
# Raw string avoids the un-escaped backslash of the old hard-coded path
# (the previous unused `fil` variable has been dropped).
flight = pd.read_csv(r"test_p\wildlife.csv", usecols=col_list)


def _parse_cost(value):
    """Convert a 'CostTotal' cell such as '1,234' to an int."""
    return int(str(value).replace(",", ""))


# Counters and cost accumulators for the four (warned, precipitation) buckets.
Y1 = 0   # warned, precipitation
Y2 = 0   # warned, no precipitation
N1 = 0   # not warned, precipitation
N2 = 0   # not warned, no precipitation
Ysum = 0
Nsum = 0
Ysum2 = 0
Nsum2 = 0
weather = ["Fog", "Rain", "Snow"]

# Iterate the three columns in lockstep instead of indexing by position.
for warned, precip, cost in zip(flight["PilotWarned"],
                                flight["Precipitation"],
                                flight["CostTotal"]):
    if str(warned) == "Y":
        if str(precip) in weather:
            Y1 += 1
            Ysum += _parse_cost(cost)
        else:
            Y2 += 1
            Ysum2 += _parse_cost(cost)
    elif str(warned) == "N":
        if str(precip) in weather:
            N1 += 1
            Nsum += _parse_cost(cost)
        else:
            N2 += 1
            Nsum2 += _parse_cost(cost)

print("Предупрежден", Y1, "пилот и осадки = ", Ysum, "\nПредупрежден", Y2,
      " и нет осадков = ", Ysum2, "\n\nНе предупрежден", N1, " и осадки = ",
      Nsum, "\nНе предупрежден", N2, "пилот и нет осадков", Nsum2)
# y=0
# n=0
# Ysum=0
# Nsum=0
# for i in range (len(flight["PilotWarned"])):
# if str(flight["PilotWarned"][i]) == "Y":
# y+=1
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# Ysum+=num
# elif str(flight["PilotWarned"][i]) == "N":
# n+=1
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# Nsum+=num
# print (y , Ysum, n , Nsum)
# Take_off=0
# Climb=0
# Approach=0
# En_Route=0
# Landing_Roll=0
# Descent=0
# for i in range (len(flight["PhaseOfFlight"])):
# if flight["PhaseOfFlight"][i] == "Descent":
# if flight["WildlifeSpecies"][i] in mammal or flight["WildlifeSpecies"][i] in reptile:
# Descent+=1
# elif flight["PhaseOfFlight"][i] == "Take-off run":
# if flight["WildlifeSpecies"][i] in mammal or flight["WildlifeSpecies"][i] in reptile:
# Take_off+=1
# elif flight["PhaseOfFlight"][i] == "Climb":
# if flight["WildlifeSpecies"][i] in mammal or flight["WildlifeSpecies"][i] in reptile:
# Climb+=1
# elif flight["PhaseOfFlight"][i] == "Approach":
# if flight["WildlifeSpecies"][i] in mammal or flight["WildlifeSpecies"][i] in reptile:
# Approach+=1
# elif flight["PhaseOfFlight"][i] == "En Route":
# if flight["WildlifeSpecies"][i] in mammal or flight["WildlifeSpecies"][i] in reptile:
# En_Route+=1
# elif flight["PhaseOfFlight"][i] == "Landing Roll":
# if flight["WildlifeSpecies"][i] in mammal or flight["WildlifeSpecies"][i] in reptile:
# Landing_Roll+=1
# print (Take_off , Climb, Approach, En_Route, Landing_Roll , Descent)
# un=0
# maU=0
# n=0
# nU=0
# k=0
# for i in range (len(flight["WildlifeSpecies"])):
# if "Unknown bird" in str(flight["WildlifeSpecies"][i]) :
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# maU+=num
# un+=1
# elif str(flight["WildlifeSpecies"][i]) != 'nan' :
# n+=1
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# nU+=1
# else :
# k+=1
# print (un ,maU, n, nU , k)
# R=0
# NR=0
# MSK=0
# for i in range (len(flight["SpeedKnots"])):
# if str(flight["AltitudeFeet"][i]) != 'nan' and str(flight["CostRepair"][i]) != "NaN" and str(flight["SpeedKnots"][i]) != 'nan':
# R+=1
# num=(flight["CostRepair"][i])
# num = num.replace(",", "")
# num =int (num)
# if num >MSK:
# MSK=num
# else:
# NR+=1
# print (R)
# print (NR)
# print (MSK)
# TD=0
# MTD=0
# SK=0
# MSK=0
# AF=0
# MAF=0
# S=0
# MS=0
# PF=0
# MPF=0
# MA=0
# MMA=0
# for i in range (len(flight["TimeOfDay"])):
# if str(flight["TimeOfDay"][i]) != 'nan' and str(flight["IndicatedDamage"][i]) == "Caused damage":
# TD+=1
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# if num >MTD:
# MTD=num
# for i in range (len(flight["SpeedKnots"])):
# if str(flight["SpeedKnots"][i]) != 'nan' and str(flight["IndicatedDamage"][i]) == "Caused damage":
# SK+=1
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# if num >MSK:
# MSK=num
# for i in range (len(flight["AltitudeFeet"])):
# if str(flight["AltitudeFeet"][i]) != 'nan' and str(flight["IndicatedDamage"][i]) == "Caused damage":
# AF+=1
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# print (MAF ,"MAF")
# if num >MAF:
# MAF=num
# for i in range (len(flight["Sky"])):
# if str(flight["Sky"][i]) != 'nan' and str(flight["IndicatedDamage"][i]) == "Caused damage" and str(flight["Sky"][i]) != 'No Cloud':
# S+=1
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# print (MS , "MS")
# if num >MS:
# MS=num
# for i in range (len(flight["PhaseOfFlight"])):
# if str(flight["PhaseOfFlight"][i]) != 'nan' and str(flight["IndicatedDamage"][i]) == "Caused damage":
# PF+=1
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# print (MPF ," MPF")
# if num >MPF:
# MPF=num
# for i in range (len(flight["MilesFromAirport"])):
# if str(flight["MilesFromAirport"][i]) != 'nan' and str(flight["IndicatedDamage"][i]) == "Caused damage":
# MA+=1
# num=(flight["CostTotal"][i])
# num = num.replace(",", "")
# num =int (num)
# if num >MMA:
# MMA=num
# print ( TD , MTD , SK , MSK , AF , MAF, S, MS , PF,MPF, MA, MMA)
# N=0
# W=0
# C=0
# Sn=0
# Warn=0
# NotWarn=0
# Na=0
# N1=0
# N2=0
# N3=0
# RF1=0
# RF2=0
# RF3=0
# SN1=0
# SN2=0
# SN3=0
# EL=0
#money=0.0
#print (flight["Precipitation"])
#for i in range (len(flight["CostTotal"])):
# if flight["CostTotal"][i] !="0" and str(flight["CostTotal"][i]) != 'nan':
# num=(flight["CostTotal"][i])
# print(type(num))
# num = num.replace(",", "")
# print(num)
# num =float (num)
# money+=num
# print (money)
#if flight["Precipitation"][i] != "NaN" or flight["Precipitation"][i] != "nan" or flight["Precipitation"][i] != nan and (flight["IndicatedDamage"][i]=="Caused damage"):
#flights.append(flight["Precipitation"][i])
# if (flight["IndicatedDamage"][i]=="Caused damage"):
# # if flight["PilotWarned"][i]=="Y":
# # Warn+=1
# # elif flight["PilotWarned"][i]=="N":
# # NotWarn+=1
# # else:
# # Na+=1
# # print(Warn) # 1422
# # print (NotWarn) # 2728
# # print(Na) # 3391
# if flight["Precipitation"][i] == "NaN" or flight["Precipitation"][i] == "None":
# if flight["PilotWarned"][i]=="Y":
# N1+=1
# elif flight["PilotWarned"][i]=="N":
# N2+=1
# else:
# N3+=1
# elif flight["Precipitation"][i] == ('Rain'):
# if flight["PilotWarned"][i]=="Y":
# RF1+=1
# elif flight["PilotWarned"][i]=="N":
# RF2+=1
# else:
# RF3+=1
# elif flight["Precipitation"][i] == ('Fog'):
# if flight["PilotWarned"][i]=="Y":
# RF1+=1
# elif flight["PilotWarned"][i]=="N":
# RF2+=1
# else:
# RF3+=1
# elif flight["Precipitation"][i] == ('Snow'):
# if flight["PilotWarned"][i]=="Y":
# SN1+=1
# elif flight["PilotWarned"][i]=="N":
# SN2+=1
# else:
# SN3+=1
# else:
# EL+=1
# print ("No Precipitation , but pilot was warned", N1 , "\nNo Precipitation, but pilot wasn't warned", N2, "\nNo Precipitation " , N3, "\nNext")
# print ("Precipitation , but pilot was warned", RF1 , "\nPrecipitation, but pilot wasn't warned", RF2, "\nOther" , RF3, "\nNext")
# print ("Snow , but pilot was warned", SN1 , "\Snow, but pilot wasn't warned", SN2, "\nOther" , SN3, "\nNext")
# print ("There is a damaged ship, but there was No Precipitation ", EL)
# print (N+C) #7061
# print (W) # 449
# print (Sn) # 31
# print (flights)
# for i in range (len(flights)):
# if fligths[i]
#Читаем данные по рейсам
# flights = pd.read_csv('flights.csv',
# parse_dates=['time_hour'], # разобрать дату
# dtype={'carrier' : 'str' # код перевозчика - текст
# })
# #Читаем список авиакомпаний
# airlines = pd.read_csv('airlines.csv',
# dtype={'carrier':'str'}) # код перевозчика - текст
# airlines.rename({'name':'airline'}, axis='columns', inplace=True)
|
# https://www.roblox.com/?v=rc&rbx_source=3&rbx_medium=cpa&rbx_campaign=1820
# roblox
'''
Adsmain
roblox
Auto
'''
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
import random
def name_get_random(submit):
    """Generate a random account username.

    *submit* is accepted for API parity with earlier variants but is not
    used by the current single-strategy implementation.
    """
    return name_get.gen_one_word_digit(lowercase=False, digitmax=100000)
def web_submit(submit,chrome_driver,debug=0):
    """Fill in and submit the Roblox signup form via *chrome_driver*.

    submit: plan dict from the DB; reads submit['Site'] (overridden when
    debug == 1), submit['Auto']['gender'] and submit['ID'].
    debug: 1 forces the hard-coded campaign URL.
    Marks the plan as status 2 in the DB after clicking signup, then
    closes and quits the driver.
    """
    # test
    # Excel_10054 = 'Data2000'
    Excel_tag = 'Auto'
    if debug == 1:
        site = 'https://www.roblox.com/?v=rc&rbx_source=3&rbx_medium=cpa&rbx_campaign=1820'
        submit['Site'] = site
    chrome_driver.get(submit['Site'])
    # chrome_driver.maximize_window()
    # chrome_driver.refresh()
    # click
    # sleep(2000)
    sleep(2)
    print('Loading finished')
    # mm: pick a random birth month from the dropdown
    # index_ = random.randint(2,10)
    # js = '$("#MonthDropdown > option:nth-child('+str(index_)+')").attr("selected","selected")'
    # chrome_driver.execute_script(js)
    # sleep(2)
    # chrome_driver.find_element_by_xpath('//*[@id="MonthDropdown"]').click()
    num = random.randint(0,10)
    element = chrome_driver.find_element_by_xpath('//*[@id="MonthDropdown"]')
    s1 = Select(element)
    print(len(s1.options))
    options = s1.options
    # Poll for up to 60s until the dropdown has been populated.
    for i in range(60):
        if len(options) <= 1:
            sleep(1)
        else:
            break
    # Clear any pre-selected option so the later select_by_index sticks.
    for option in options:
        print(option.text)
        sc = option.get_attribute("selected")
        if sc == 'true':
            chrome_driver.execute_script('arguments[0].removeAttribute(arguments[1])',option, 'selected')
            sc = option.get_attribute("selected")
            print(sc)
        # option.removeAttribute('selected')
        # print(sc)
        print('================')
    # js="$('#MonthDropdown > option:nth-child(1)').removeAttr('selected')"
    # chrome_driver.execute_script(js)
    s1.select_by_index(num)
    # dd: pick a random day of the month
    # index_ = random.randint(2,22)
    js="$('#DayDropdown > option:nth-child(1)').removeAttr('selected')"
    chrome_driver.execute_script(js)
    # js = '$("#DayDropdown > option:nth-child('+str(index_)+')").attr("selected","selected")'
    # chrome_driver.execute_script(js)
    # sleep(2)
    # chrome_driver.find_element_by_xpath('//*[@id="DayDropdown"]').click()
    num = random.randint(0,22)
    element = chrome_driver.find_element_by_xpath('//*[@id="DayDropdown"]')
    s1 = Select(element)
    print(len(s1.options))
    for option in s1.options:
        print(option.text)
        # print(option.value)
    s1.select_by_index(num)
    # return
    # year: pick a random birth year between 1985 and 2005
    js="$('#YearDropdown > option:nth-child(1)').removeAttr('selected')"
    chrome_driver.execute_script(js)
    # js = '$("#YearDropdown > option:nth-child('+str(index_)+')").attr("selected","selected")'
    # chrome_driver.execute_script(js)
    # sleep(2)
    # chrome_driver.find_element_by_xpath('//*[@id="YearDropdown"]').click()
    year = random.randint(1985,2005)
    s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="YearDropdown"]'))
    print(len(s1.options))
    for option in s1.options:
        print(option.text)
        print(option.value)
    s1.select_by_value(str(year))
    # sleep(3000)
    # username: generated by the Submit_handle helpers
    username = Submit_handle.get_name_real()
    chrome_driver.find_element_by_xpath('//*[@id="signup-username"]').send_keys(username)
    # pwd
    pwd = Submit_handle.get_pwd_real()
    chrome_driver.find_element_by_xpath('//*[@id="signup-password"]').send_keys(pwd)
    # gender: click the button matching the plan's gender field
    if submit[Excel_tag]['gender']== 'Female':
        chrome_driver.find_element_by_xpath('//*[@id="FemaleButton"]/div').click()
    else:
        chrome_driver.find_element_by_xpath('//*[@id="MaleButton"]/div').click()
    # signup
    chrome_driver.find_element_by_xpath('//*[@id="signup-button"]').click()
    # Record the attempt (status 2) for this plan ID, wait out the redirect,
    # then tear down the browser session.
    db.update_plan_status(2,submit['ID'])
    sleep(30)
    chrome_driver.close()
    chrome_driver.quit()
def test():
    """Read one pending plan from the DB and run the signup flow in debug mode."""
    # The old code assigned Mission_list = ['10000'] and immediately
    # overwrote it; the dead assignment is removed and the duplicated
    # '10066' literal is factored into one name.
    mission_id = '10066'
    Mission_list = [mission_id]
    Excel_name = ['Auto', '']
    Email_list = ['hotmail.com', 'outlook.com', 'yahoo.com', 'aol.com', 'gmail.com']
    submit = db.read_one_excel(Mission_list, Excel_name, Email_list)
    submit['Mission_Id'] = mission_id
    chrome_driver = Chrome_driver.get_chrome(submit)
    web_submit(submit, chrome_driver, 1)
def test1():
    """Print a random 0/1 draw (quick smoke test for gender selection)."""
    print(random.randint(0, 1))
# Entry point: run the end-to-end signup test when executed directly.
if __name__=='__main__':
    test()
"""
Removes silent parts from songs.
"""
__author__ = 'David Flury'
__email__ = "david@flury.email"
import os
import glob
import argparse
import multiprocessing
from pydub import AudioSegment
from joblib import Parallel, delayed
from pydub.silence import split_on_silence
# Only .wav inputs are processed; files whose name already contains the
# suffix are skipped so previously generated outputs are not re-processed.
audio_extensions = ['.wav']
suffix = 'unsilenced'
def remove_silence(file, length=5000, max_chunks=5):
    """Strip silent passages from a wav file and export a shortened copy.

    file: path to the input wav (assumed 44.1 kHz, stereo, 16-bit).
    length: maximum duration of the exported audio in milliseconds.
    max_chunks: number of leading non-silent chunks to keep (was a
        hard-coded 5; now a backward-compatible parameter).
    Writes '<basename>_unsilenced.wav' next to the input file.
    """
    sound = AudioSegment.from_file(file, format='wav', frame_rate=44100, channels=2, sample_width=2)
    # Anything quieter than -30 dBFS for at least 1s counts as silence.
    chunks = split_on_silence(sound, min_silence_len=1000, silence_thresh=-30)
    combined = AudioSegment.empty()
    for chunk in chunks[:max_chunks]:
        combined += chunk
    # Use the basename so we don't produce double extensions like
    # 'song.wav_unsilenced.wav' as the old code did.
    base = os.path.splitext(file)[0]
    result_file = '%s_%s.wav' % (base, suffix)
    chopped = combined[:length]
    chopped.export(result_file, format='wav')
    print('Removed silence from file: %s' % result_file)
if __name__ == '__main__':
    # Parse CLI options: working directory and parallel job count.
    parser = argparse.ArgumentParser(description='Removes silent parts from songs.')
    parser.add_argument('--path', default='C:\\temp\\unmix.io\\real-test\\', type=str, help='Working path')
    parser.add_argument('--job_count', default=int(multiprocessing.cpu_count() / 2), type=int, help='Maximum number of concurrently running jobs')
    args = parser.parse_args()
    print('Arguments:', str(args))
    print('Load all music files...')
    # Collect every audio file under the path, skipping already-processed ones.
    files = [
        candidate
        for candidate in glob.iglob(args.path + '**/*', recursive=True)
        if os.path.splitext(candidate)[1].lower() in audio_extensions
        and suffix not in os.path.basename(candidate)
    ]
    print('Found %d music files' % len(files))
    print('Remove silence from files with maximum %d jobs...' % args.job_count)
    Parallel(n_jobs=args.job_count)(delayed(remove_silence)(f) for f in files)
    print('Finished processing')
|
class Node(object):
    """A single node of a doubly linked list."""

    def __init__(self, ele):
        # Payload plus links to neighbouring nodes (None at the list ends).
        self.ele = ele
        self.prev = None
        self.next = None
class DoubleLinkList(object):
    """Double link list.

    ``__head`` is None when the list is empty; nodes are linked through
    their ``prev``/``next`` attributes.
    """

    def __init__(self, node=None):
        self.__head = node

    def is_empty(self):
        """Return True if double link list is empty, or False if not."""
        return self.__head is None

    def length(self):
        """Return the length of double link list."""
        cur = self.__head
        count = 0
        while cur is not None:
            count += 1
            cur = cur.next
        return count

    def travel(self):
        """Ergodic and print the double link list."""
        cur = self.__head
        while cur is not None:
            print(cur.ele, end=' ')
            cur = cur.next

    def add(self, item):
        """Add the item in the start of double link list."""
        node = Node(item)
        node.next = self.__head
        # Bug fix: the old code unconditionally did `node.next.prev = node`,
        # which raised AttributeError when the list was empty (next is None).
        if self.__head is not None:
            self.__head.prev = node
        self.__head = node

    def append(self, item):
        """Append the item in the end of double link list."""
        node = Node(item)
        if self.is_empty():
            self.__head = node
        else:
            cur = self.__head
            while cur.next is not None:
                cur = cur.next
            cur.next = node
            node.prev = cur

    def insert(self, pos, item):
        """Insert the item to the appointed position of double link list."""
        # Bug fix: pos == 0 previously fell into the generic branch and
        # inserted at index 1; treat it as a head insertion instead.
        if pos <= 0:
            self.add(item)
        elif pos > self.length() - 1:
            self.append(item)
        else:
            # Walk to the node just before the insertion point.
            prior = self.__head
            count = 0
            while count < pos - 1:
                count += 1
                prior = prior.next
            node = Node(item)
            node.next = prior.next
            node.prev = prior
            prior.next.prev = node
            prior.next = node

    def remove(self, item):
        """Remove the first node holding *item*; no-op when absent."""
        cur = self.__head
        while cur is not None:
            if cur.ele == item:
                if cur == self.__head:  # the first node of the list
                    self.__head = cur.next
                    if cur.next:  # the first node but not the only node
                        cur.next.prev = None
                else:
                    cur.prev.next = cur.next
                    if cur.next:  # not the last node
                        cur.next.prev = cur.prev
                return
            cur = cur.next

    def search(self, item):
        """Return True if item is in the double link list, or False if not."""
        cur = self.__head
        while cur is not None:
            if cur.ele == item:
                return True
            cur = cur.next
        return False
if __name__ == "__main__":
    # Exercise every list operation and print the list after each step.
    dll = DoubleLinkList()
    print('is dll empty?', dll.is_empty())
    print('length of sll: ', dll.length())
    dll.append(1)
    print('is dll empty?', dll.is_empty())
    print('length of sll: ', dll.length())
    for value in (2, 3, 4, 5, 6):
        dll.append(value)
    dll.add(10)
    dll.insert(-2, 100)
    dll.insert(5, 200)
    dll.insert(10, 300)
    dll.travel()
    print('')
    # Remove elements one by one (some of them absent) and show the result.
    for value in (0, 6, 5, 4, 3, 8, 10, 2, 1, 300, 200, 100):
        dll.remove(value)
        dll.travel()
        print('')
    print('is dll empty?', dll.is_empty())
    print('length of dll: ', dll.length())
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# ADC121C_MQ2
# This code is designed to work with the ADC121C_I2CGAS_MQ2 I2C Mini Module available from ControlEverything.com.
# https://shop.controleverything.com/products/propane-butane-methane-alcohol-gas-sensor
import smbus
import time
import math
# Get I2C bus
bus = smbus.SMBus(1)
# MQ-2 measurement parameters: load resistance (kOhm), averaging sample
# count, and the clean-air Rs/Ro ratio (presumably the MQ-2 datasheet
# value -- TODO confirm).
Measure_RL = 5.0
MQ_Sample_Time = 5
Measure_RoInCleanAir = 9.83
# I2C address of the device
ADC121C_MQ2_DEFAULT_ADDRESS = 0x50
# ADC121C_MQ2 Register Map
ADC121C_MQ2_REG_CONVERSION = 0x00 # Conversion Result Register
ADC121C_MQ2_REG_ALERT_STATUS = 0x01 # Alert Status Register
ADC121C_MQ2_REG_CONFIG = 0x02 # Configuration Register
ADC121C_MQ2_REG_LOW_LIMIT = 0x03 # Alert Low Limit Register
ADC121C_MQ2_REG_HIGH_LIMIT = 0x04 # Alert High Limit Register
ADC121C_MQ2_REG_HYSTERESIS = 0x05 # Alert Hysteresis Register
ADC121C_MQ2_REG_LOWCONV = 0x06 # Lowest Conversion Register
ADC121C_MQ2_REG_HIGHCONV = 0x07 # Highest Conversion Register
# ADC121C_MQ2 Configuration Register
ADC121C_MQ2_CONFIG_CYCLE_TIME_DIS = 0x00 # Automatic Conversion Mode Disabled, 0 ksps
ADC121C_MQ2_CONFIG_CYCLE_TIME_32 = 0x20 # Tconvert x 32, 27 ksps
ADC121C_MQ2_CONFIG_CYCLE_TIME_64 = 0x40 # Tconvert x 64, 13.5 ksps
ADC121C_MQ2_CONFIG_CYCLE_TIME_128 = 0x60 # Tconvert x 128, 6.7 ksps
ADC121C_MQ2_CONFIG_CYCLE_TIME_256 = 0x80 # Tconvert x 256, 3.4 ksps
ADC121C_MQ2_CONFIG_CYCLE_TIME_512 = 0xA0 # Tconvert x 512, 1.7 ksps
ADC121C_MQ2_CONFIG_CYCLE_TIME_1024 = 0xC0 # Tconvert x 1024, 0.9 ksps
ADC121C_MQ2_CONFIG_CYCLE_TIME_2048 = 0xE0 # Tconvert x 2048, 0.4 ksps
ADC121C_MQ2_CONFIG_ALERT_HOLD_CLEAR = 0x00 # Alerts will self-clear
ADC121C_MQ2_CONFIG_ALERT_FLAG_NOCLEAR = 0x10 # Alerts will not self-clear
ADC121C_MQ2_CONFIG_ALERT_FLAG_DIS = 0x00 # Disables alert status bit in the Conversion Result register
ADC121C_MQ2_CONFIG_ALERT_FLAG_EN = 0x08 # Enables alert status bit in the Conversion Result register
ADC121C_MQ2_CONFIG_ALERT_PIN_DIS = 0x00 # Disables the ALERT output pin
ADC121C_MQ2_CONFIG_ALERT_PIN_EN = 0x04 # Enables the ALERT output pin
ADC121C_MQ2_CONFIG_POLARITY_LOW = 0x00 # Sets the ALERT pin to active low
ADC121C_MQ2_CONFIG_POLARITY_HIGH = 0x01 # Sets the ALERT pin to active high
class ADC121C_MQ2():
    # Python 2 driver for the ADC121C ADC reading an MQ-2 gas sensor over I2C.
    def data_config(self):
        """Select the Configuration Register data from the given provided values"""
        DATA_CONFIG = (ADC121C_MQ2_CONFIG_CYCLE_TIME_32 | ADC121C_MQ2_CONFIG_ALERT_HOLD_CLEAR | ADC121C_MQ2_CONFIG_ALERT_FLAG_DIS)
        bus.write_byte_data(ADC121C_MQ2_DEFAULT_ADDRESS, ADC121C_MQ2_REG_CONFIG, DATA_CONFIG)
        time.sleep(0.1)
        """Read data back from ADC121C_MQ2_REG_CONVERSION(0x00), 2 bytes
        raw_adc MSB, raw_adc LSB"""
        data = bus.read_i2c_block_data(ADC121C_MQ2_DEFAULT_ADDRESS, ADC121C_MQ2_REG_CONVERSION, 2)
        # Convert the data to 12-bits
        self.raw_adc = (data[0] & 0x0F) * 256.0 + data[1]
    def measure_rsAir(self):
        """Calculate the sensor resistance in clean air from raw_adc"""
        # vrl: sensor output voltage assuming a 5 V supply over the 12-bit range.
        vrl = self.raw_adc * (5.0 / 4096.0)
        self.rsAir = ((5.0 - vrl) / vrl) * Measure_RL
    def measure_Ro(self):
        """Calculate Rs/Ro ratio from the resistance Rs & Ro"""
        # NOTE(review): every iteration adds the same cached self.rsAir, so the
        # average equals rsAir; re-sampling per iteration was probably intended.
        Measure_Ro = 0.0
        for i in range(0, MQ_Sample_Time):
            Measure_Ro += self.rsAir
            time.sleep(0.1)
        Measure_Ro = Measure_Ro / MQ_Sample_Time
        Measure_Ro = Measure_Ro / Measure_RoInCleanAir
        return Measure_Ro
    def measure_Rs(self):
        # Average the (cached) sensor resistance over MQ_Sample_Time samples.
        Measure_Rs = 0.0
        for i in range(0, MQ_Sample_Time):
            Measure_Rs += self.rsAir
            time.sleep(0.1)
        Measure_Rs = Measure_Rs / MQ_Sample_Time
        return Measure_Rs
    def measure_ratio(self):
        # Rs/Ro ratio used by the ppm conversion formulas below.
        self.ratio = self.measure_Rs() / self.measure_Ro()
        print "Ratio = %.3f "%self.ratio
    def calculate_ppm_LPG(self):
        """Calculate the final concentration value"""
        # a, b: log-log curve coefficients for LPG -- presumably fitted from the
        # MQ-2 datasheet curves; TODO confirm.
        # NOTE(review): mixes natural exp with a base-10 log; verify the formula.
        a = -0.57
        b = 2.30
        ppm = math.exp(((math.log(self.ratio, 10)) - b) / a)
        return {'lpg' : ppm}
    def calculate_ppm_CH4(self):
        """Calculate the final concentration value"""
        # Curve coefficients for methane (same caveats as calculate_ppm_LPG).
        a = -0.37
        b = 2.30
        ppm = math.exp(((math.log(self.ratio, 10)) - b) / a)
        return {'ch4' : ppm}
    def calculate_ppm_H2(self):
        """Calculate the final concentration value"""
        # Curve coefficients for hydrogen (same caveats as calculate_ppm_LPG).
        a = -0.47
        b = 2.30
        ppm = math.exp(((math.log(self.ratio, 10)) - b) / a)
        return {'h2' : ppm}
# Example usage: continuously sample the sensor and print gas concentrations.
from ADC121C_MQ2 import ADC121C_MQ2
adc121c_mq2 = ADC121C_MQ2()
while True :
    # Configure the ADC and take a fresh raw reading, then derive Rs in air.
    adc121c_mq2.data_config()
    adc121c_mq2.measure_rsAir()
    # NOTE(review): these two return values are discarded; measure_ratio()
    # recomputes both internally.
    adc121c_mq2.measure_Rs()
    adc121c_mq2.measure_Ro()
    adc121c_mq2.measure_ratio()
    data1 = adc121c_mq2.calculate_ppm_LPG()
    data2 = adc121c_mq2.calculate_ppm_CH4()
    data3 = adc121c_mq2.calculate_ppm_H2()
    print "LPG Concentration : %.3f ppm" %(data1['lpg'])
    print "Methane Concentration : %.3f ppm" %(data2['ch4'])
    print "Hydrogen Concentration : %.3f ppm" %(data3['h2'])
    print " ********************************* "
    time.sleep(1)
|
from eda import Eda
|
from __future__ import absolute_import
import os
from .version import __VERSION__ as __version__
#from .marshaltools import *
from .surveyfields import SurveyFields, ZTFFields
from .BaseTable import BaseTable
from .MarshalLightcurve import MarshalLightcurve
from .ProgramList import ProgramList
from .filters import load_filters
# Register the survey filter bandpasses once at import time so downstream
# code can use them immediately.
load_filters()
# Location of this module on disk (kept for the example-data paths below).
here = __file__
# basedir = os.path.split(here)[0]
# example_data = os.path.join(basedir, 'example_data')
|
import os
import numpy as np
import torch
import torch.utils.data
from PIL import Image, ImageOps
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from engine import train_one_epoch, evaluate
import utils
import transforms as T
# Dataset location and a module-level sample counter that
# PennFudanDataset.__init__ fills in below.
path = '/home/yonga/keremWorkSpace/CancerCellsCounterWithMaskR-Cnn/Dataset'
datasetSize= 0
class PennFudanDataset(torch.utils.data.Dataset):
    # Dataset wrapper: loads image/mask pairs and applies optional transforms.
    def __init__(self, root, transforms=None):
        """root: dataset directory with 'images' and 'masks' subfolders;
        transforms: optional Compose pipeline (from transforms.py) that
        augments each (img, target) sample."""
        global datasetSize
        self.root = root  # root: path to the dataset images
        self.transforms = transforms  # Compose object used to modify the samples
        self.imgs = list(sorted(os.listdir(os.path.join(root, "images"))))  # sorted names of all original images
        self.masks = list(sorted(os.listdir(os.path.join(root, "masks"))))  # sorted names of all masks
        """
        #Burada Elimde olan Gpu ile alabildiğim maksimum dataset boyutu 444 orjinal resim olduğu için geriye kalanları listeden siliyorum.
        theValue = 100
        del self.imgs[theValue:]
        del self.masks[theValue:]
        """
        datasetSize = len(self.imgs)
    def __getitem__(self, idx):
        # Called per sample; idx ranges over the dataset length.
        # Resolve the paths of the image and its mask.
        img_path = os.path.join(self.root, "images", self.imgs[idx])
        mask_path = os.path.join(self.root, "masks", self.masks[idx])
        img = Image.open(img_path).convert("RGB")  # open with PIL and convert to RGB
        # The mask is NOT converted to RGB: each distinct grayscale pixel
        # value corresponds to a separate blob instance.
        mask = Image.open(mask_path)
        mask = np.array(mask)
        # convert the mask to a numpy array
        obj_ids = np.unique(mask)  # distinct pixel values, sorted ascending;
        # e.g. 3 blobs plus background yields [0, 1, 2, 3]
        obj_ids = obj_ids[1:]  # drop the first id (background)
        masks = mask == obj_ids[:, None, None]  # per-blob boolean masks: True where that blob's pixels are
        # compute bounding-box coordinates for every blob:
        num_objs = len(obj_ids)  # number of blobs
        boxes = []
        for i in range(num_objs):
            pos = np.where(masks[i])  # (row-array, col-array) of blob i's True pixels
            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])
            # min/max x and y form the diagonal corners of the bounding box
            boxes.append([xmin, ymin, xmax, ymax])  # collect this blob's box
        boxes = torch.as_tensor(boxes, dtype=torch.float32)  # boxes as a float32 tensor
        # there is only one class
        labels = torch.ones((num_objs,), dtype=torch.int64)  # one label (value 1) per blob;
        # a single foreground class is enough, background is implicit
        masks = torch.as_tensor(masks, dtype=torch.uint8)  # masks as a tensor
        image_id = torch.tensor([idx])  # keep idx as the image id
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])  # bounding-box areas
        #print('ALAN ================== ' ,area)
        # suppose all instances are not crowd
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)  # one zero per blob
        # gather all fields into the target dict expected by Mask R-CNN
        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["masks"] = masks
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd
        # apply the transform pipeline when one was supplied
        if self.transforms is not None:
            img, target = self.transforms(img, target)  # augment img and target together
        return img, target
    def __len__(self):
        return len(self.imgs)  # number of images in the dataset
def get_instance_segmentation_model(num_classes):
    """Return a COCO-pretrained Mask R-CNN with its heads resized.

    num_classes: number of output classes (2 here: background + blob).
    """
    # Download the maskrcnn_resnet50_fpn model pretrained on COCO.
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    # Swap the box-classification head for one sized to num_classes.
    box_in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(box_in_features, num_classes)
    # Swap the mask-prediction head likewise (256 hidden channels).
    mask_in_features = model.roi_heads.mask_predictor.conv5_mask.in_channels
    model.roi_heads.mask_predictor = MaskRCNNPredictor(mask_in_features, 256, num_classes)
    return model
def get_transform(train):
    """Build the transform pipeline handed to PennFudanDataset.

    Always converts PIL images to tensors; training additionally flips
    images horizontally at random to diversify the dataset.
    """
    ops = [T.ToTensor()]
    if train:
        ops.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(ops)
# Build the datasets: the training copy gets the augmenting transforms,
# the evaluation copy does not (same images; split into subsets below).
dataset = PennFudanDataset(path, get_transform(train=True))
dataset_test = PennFudanDataset(path, get_transform(train=False))
# Reserve 30% of the samples for testing.
datasetTestSize = int((datasetSize*30)/100)
print('Dataset Test Boyutu : ', datasetTestSize)
print('Dataset Eğitim Boyutu : ', datasetSize-datasetTestSize)
# Shuffle deterministically and carve out the train/test subsets:
# the last datasetTestSize shuffled indices become the test split.
torch.manual_seed(1)
indices = torch.randperm(len(dataset)).tolist()
dataset = torch.utils.data.Subset(dataset, indices[:-datasetTestSize])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-datasetTestSize:])
# Data loaders for training and validation.
data_loader = torch.utils.data.DataLoader(
    dataset, batch_size=3, shuffle=True, num_workers=0,
    collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
    dataset_test, batch_size=1, shuffle=False, num_workers=0,
    collate_fn=utils.collate_fn)
# num_workers spreads loading across worker processes; with a single GPU
# the author keeps it at 0.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#device = torch.device('cpu')
# Run on the GPU (cuda) when available, otherwise fall back to the CPU.
num_classes = 2
# Two classes: one for background, one for the blob label.
# Fetch the pretrained model.
model = get_instance_segmentation_model(num_classes)
# Move the model to the selected device.
model.to(device)
# Optimizer built over the trainable parameters only.
params = [p for p in model.parameters() if p.requires_grad]
#optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
optimizer =torch.optim.Adamax(params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
# and a learning rate scheduler which decreases the learning rate by
# 10x every 3 epochs
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                               step_size=3,
                                               gamma=0.1)
# Number of training epochs.
num_epochs = 1000
# Training loop.
for epoch in range(num_epochs):
    # Train for one epoch, printing progress every 100 iterations.
    train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=100)
    # Advance the learning-rate schedule.
    lr_scheduler.step()
    # Evaluate on the held-out test subset.
    evaluate(model, data_loader_test, device=device)
    # Checkpoint the weights every 10 epochs.
    if epoch % 10 ==0:
        filePath = '/home/yonga/keremWorkSpace/CancerCellsCounterWithMaskR-Cnn/TrainAndPrediction//maskRCNN_model_'+str(epoch)+'.h5'
        torch.save(model.state_dict(), filePath)
# Save the final model so it can be reloaded later.
torch.save(model, "/home/yonga/keremWorkSpace/CancerCellsCounterWithMaskR-Cnn/TrainAndPrediction/modelv1.h5")
"""
#burada kaydettiğimiz modeli kullanmak için onu dışarıdan çağırıyoruz
savedmodel = torch.load("/home/yonga/keremWorkSpace/BalıkSayma/FishCounterWithMaskRCNN/model.h5")
#modeli evalation moduna alıyoruz burada device belirtmemiz gerekmedi bunu araştırmam gerekiyor
#kodumuz sorunsuz bir şekilde gpu 'da çalışıyor sanırım önceden modeli gpuda çalışacak şekilde-
#kaydettiğimiz için.
savedmodel.eval()
import torchvision.transforms as trans #torchvision'un transforms.py dosyasını çekiyoruz
#Burada transforms'un ne yapacağını ona söylüyoruz
loader = trans.Compose([trans.ToTensor()])#loader resmi PIL image'den tensora çeviriyor
unloader = trans.ToPILImage() # unloader ise resmi tensordan PIL image'e çeviriyor
#Bu method image'i PIL image tipinde açar ve bunu tensor'a çevirir
def image_loader(image_name):
image = Image.open(image_name)
image = loader(image)
return image
#resmimizin yolu
p = "/home/yonga/keremWorkSpace/BalıkSayma/FishCounterWithMaskRCNN/Basler_raL2048-48gm__22248034__20181106_144201677_0114.tiff"
tahminResmi = image_loader(p)
torch.cuda.empty_cache()#gpu'nun cache'ini temizler, yer açar
import time
start = time.process_time()
with torch.no_grad():
prediction = savedmodel([tahminResmi.to(device)])
print(time.process_time() - start)
print(prediction[0]['masks'].shape)
""" |
import datetime as dt
import numpy as np
import pandas as pd
from scipy.interpolate import CubicSpline
### cubic spline
# x : pd.Series (datetime index, assumed UTC)
# -> pd.Series resampled onto a 1-second grid
def spline(x):
    """Resample *x* onto a one-second grid via cubic-spline interpolation."""
    stamps = np.array([d.replace(tzinfo=dt.timezone.utc).timestamp()
                       for d in x.index.to_pydatetime()])
    interpolator = CubicSpline(stamps, x)
    grid = np.arange(int(np.floor(stamps.min())), int(np.ceil(stamps.max())))
    return pd.Series(interpolator(grid), index=pd.to_datetime(grid, unit='s'))
### straight lines
# x : pd.Series (datetime index, assumed UTC)
# -> pd.Series resampled onto a 1-second grid
def straight(x):
    """Resample *x* onto a one-second grid via linear interpolation."""
    stamps = np.array([d.replace(tzinfo=dt.timezone.utc).timestamp()
                       for d in x.index.to_pydatetime()])
    grid = np.arange(int(np.floor(stamps.min())), int(np.ceil(stamps.max())))
    values = np.interp(grid, stamps, x)
    return pd.Series(values, index=pd.to_datetime(grid, unit='s'))
|
import unittest
import teradata
import pyodbc
from config.db import (
db_teradata_prod,
db_teradata_prod_1
)
from .db import session_scope
from mmvizutil.db.query import (
Query,
db_query_df,
db_query_list
)
from mmvizutil.df.chart import (
df_box_melt
)
from mmvizutil.db.teradata import (
query_box
)
from mmvizutil.alchemy.db import (
db_alchemy_query,
AlchemyOperation
)
def db_query_goal_prod_type():
    """Build the query that returns the distinct goal product types."""
    q = Query()
    q.value = "select distinct goal_prod_typ from PROD_STND_CRCOG_VW.SALES_GOAL_VW"
    return q
def db_query_agent_years():
    """Build the query that returns advisor year counts (aliased num_1)."""
    q = Query()
    q.value = "select advisor_year_count num_1 from PROD_STND_CRCOG_VW.VIZ_NFF_DASHB_VW"
    return q
# Shared UdaExec session factory used by every test case below (logging disabled).
udaExec = teradata.UdaExec(appName="TeradataTest", configureLogging=False, logConsole=False)
class TestTeradataQuery(unittest.TestCase):
    """Smoke tests that run live queries against the Teradata warehouse."""

    def test_query(self):
        """Plain pyodbc round trip: fetch the server timestamp."""
        sql = "select current_timestamp"
        with pyodbc.connect(**db_teradata_prod_1) as conn:
            cur = conn.cursor()
            for record in cur.execute(sql):
                print(record)

    def test_query_list(self):
        """db_query_list over a UdaExec ODBC session."""
        with udaExec.connect(method="odbc", **db_teradata_prod) as conn:
            rows = db_query_list(conn, db_query_goal_prod_type())
            print(rows)

    def test_query_box_df(self):
        """Box-plot query result melted into a long-format DataFrame."""
        with udaExec.connect(method="odbc", **db_teradata_prod) as conn:
            melted = df_box_melt(db_query_df(conn, query_box(db_query_agent_years())))
            print(melted)

    def test_alchemy_query(self):
        """SQLAlchemy session variant of the timestamp query."""
        sql = "select current_timestamp"
        with session_scope() as session:
            row = db_alchemy_query(session, sql, {}, AlchemyOperation.FIRST)
            print(row)
|
import numpy as np
import scipy.io
import tensorflow as tf
from logger import logger
from constants import VGG19_LAYERS
class VGG(object):
    """Interface to a pre-trained VGG-19 network stored as a MATLAB file.

    Extracts convolution kernels/biases from the .mat data and rebuilds the
    feature-extraction layers as TensorFlow ops.
    """
    def __init__(self, trained, pooling):
        # trained: path to the MatConvNet-style .mat file; pooling: 'avg' or 'max'.
        logger.info('Loading pre-trained network data......')
        self.network = scipy.io.loadmat(trained)
        self.layers, self.mean_pixel = self.init_net()
        self.pooling = pooling
    def init_net(self):
        """Return (flat layer-parameter array, per-channel mean pixel)."""
        mean_mat = self.network['normalization'][0][0][0]  # shape: (224, 224, 3)
        mean_pixel = np.mean(mean_mat, axis=(0, 1))  # length: 3 (per-channel mean)
        layers = self.network['layers'].reshape(-1)  # length: 43
        return layers, mean_pixel
    def load_net(self, input_image):
        """Build the VGG layers on top of *input_image*.

        Returns a dict mapping each VGG19_LAYERS name to that layer's tensor.
        """
        # construct layers using parameters
        logger.info('Parsing layers......')
        parsed_net = {}
        current_image = input_image
        for layer_name, input_layer in zip(VGG19_LAYERS, self.layers):
            # The first four characters encode the layer kind ('conv'/'relu'/'pool').
            layer_kind = layer_name[:4]
            if layer_kind == 'conv':
                current_image = self._get_conv_layer(current_image, input_layer)
            elif layer_kind == 'relu':
                current_image = self._get_relu_layer(current_image)
            elif layer_kind == 'pool':
                current_image = self._get_pool_layer(current_image)
            parsed_net[layer_name] = current_image
        return parsed_net
    def _get_conv_layer(self, input_image, input_layer):
        """Create a conv2d + bias layer from the stored weights."""
        # get kernel and bias from the nested .mat cell structure
        kernels, bias = input_layer[0][0][0][0]
        # Swap the first two axes — presumably MatConvNet's (width, height,
        # in, out) ordering vs TF's (height, width, in, out); TODO confirm.
        kernels = np.transpose(kernels, (1, 0, 2, 3))
        bias = bias.reshape(-1)
        # formulate conv layer
        conv = tf.nn.conv2d(input_image, tf.constant(kernels), strides=(1, 1, 1, 1), padding='SAME')
        layer = tf.nn.bias_add(conv, bias)
        return layer
    def _get_relu_layer(self, input_image):
        """Element-wise ReLU activation."""
        return tf.nn.relu(input_image)
    def _get_pool_layer(self, input_image):
        """2x2 stride-2 pooling; kind selected by self.pooling ('avg' or 'max')."""
        if self.pooling == 'avg':
            layer = tf.nn.avg_pool(input_image, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME')
        elif self.pooling == 'max':
            layer = tf.nn.max_pool(input_image, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME')
        return layer
|
# Day 15: Linked List - https://www.hackerrank.com/challenges/30-linked-list
class Node:
    """One element of a singly linked list."""
    def __init__(self, data):
        # Payload and pointer to the following node (None marks the tail).
        self.data = data
        self.next = None
class Solution:
    """Singly-linked-list helpers (HackerRank 30 Days, Day 15)."""
    def display(self, head):
        """Print every node's data, space-separated, starting at *head*."""
        node = head
        while node:
            print(node.data, end=' ')
            node = node.next
    def insert(self, head, data):
        """Append *data* at the tail; return the (possibly new) head node."""
        if not head:
            return Node(data)
        head.next = self.insert(head.next, data)
        return head
'''mylist= Solution()
T=int(input())
head=None
for i in range(T):
data=int(input())
head=mylist.insert(head,data)
mylist.display(head)'''
|
from django.db import models
class Product(models.Model):
    """Catalogue product record."""
    # Short display name.
    title = models.CharField(max_length=128)
    # Optional long-form description.
    description = models.TextField(null=True, blank=True)
    # Set once when the row is created.
    timestamp = models.DateTimeField(auto_now_add=True)
    # Optional manual publication date (not auto-managed).
    publish = models.DateTimeField(
        auto_now_add=False,
        auto_now=False,
        null=True,
        blank=True
    )
    def get_absolute_url(self):
        """Return the product's URL path.

        NOTE(review): the path is relative ('products/<id>', no leading
        slash) — confirm callers expect that.
        """
        return f'products/{self.id}'
    @property
    def elastic_score(self):
        # NOTE(review): hard-coded constant — presumably a stub for a real
        # search-relevance score; verify before relying on it.
        return 0.95
|
# coding:utf-8
# Test Intersection Manager with UDP
# Get vehicle proposal, return the result
# Starting of installing the collision detect algorithm
import sys
from datetime import datetime
import socket
import struct
import json
sys.path.append('Users/better/PycharmProjects/GUI_Qt5/Intersection')
import funcs
import math
import copy
# Preparation as a UDP server listening on localhost:6789.
server_address = ('localhost', 6789)
max_size = 4096
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind(server_address)
STOP_CHAT = True
# Load the per-vehicle response template from disk.
f = open('IM_00.json', 'r')
sendData = json.load(f)
f.close()
# Initialise the intersection occupancy grid: 10x10-unit cells covering
# x,y in [270, 320], all initially free (True).
grid = {}
intersec_grid = []
check_grid = []
t_ahead = 35
for i in range(270, 330, 10):
    for j in range(270, 330, 10):
        grid[(i, j)] = True
# One independent grid copy per future time step the IM will predict.
for i in range(t_ahead):
    intersec_grid.append(copy.deepcopy(grid))
print(intersec_grid)
# Per-vehicle rotation angles, indexed by vehicle id.
veh_num = 10
r = []
for i in range(veh_num):
    r.append(0)
# Per-vehicle Bezier-curve parameter and bounding-square corner coordinates.
beze_t = []
up_left_x = []
up_left_y = []
down_left_x = []
down_left_y = []
up_right_x = []
up_right_y = []
down_right_x = []
down_right_y = []
for i in range(veh_num):
    beze_t.append(0)
    up_left_x.append(0)
    up_left_y.append(0)
    down_left_x.append(0)
    down_left_y.append(0)
    up_right_x.append(0)
    up_right_y.append(0)
    down_right_x.append(0)
    down_right_y.append(0)
# Current simulation time step.
time_step = 0
# Debug dumps of two grid slices (printed twice in the original).
print(intersec_grid[1])
print(intersec_grid[2])
print(intersec_grid[1])
print(intersec_grid[2])
def sendResult():
    """Serve vehicle proposals over UDP forever (while STOP_CHAT is True).

    Receives a JSON proposal, runs the collision check, writes result 1
    (accepted) or 0 (rejected) into sendData and echoes it back to the client.
    """
    while STOP_CHAT:
        check = 0  # NOTE(review): never used afterwards
        print('starting the server at', datetime.now())
        print('waiting for a client to call.')
        data, client = server.recvfrom(max_size)
        data = data.decode('utf-8')
        recData = json.loads(data)
        print(recData)
        #print(recData["arrival_time"])
        # Unpack the proposal fields.
        veh_id = recData["Veh_id"]
        current = tuple(recData["position"])
        origin = tuple(recData["origin"])
        destination = tuple(recData["destination"])
        speed = recData["speed"]
        current_time = recData["current_time"]
        # Reserve grid cells along the predicted trajectory; accept on success.
        if light_veh_pattern1(veh_id, current, origin, destination, speed, current_time):
            sendData[recData["Veh_id"]]["result"] = 1
        else:
            sendData[recData["Veh_id"]]["result"] = 0
        # if recData["arrival_time"] < 5:
        #     sendData[recData["Veh_id"]]["result"] = 1
        print(sendData)
        # Send the JSON reply back to the requesting client.
        mes = bytes(json.dumps(sendData[recData["Veh_id"]]), encoding='utf-8')
        server.sendto(mes, client)
    server.close()
# vehicles travel from W_1 to S_6
# origin and destination is a pattern of (x,y)
def light_veh_pattern1(veh_num, current, origin, destination, speed, current_time):
    """Simulate a W_1 -> S_6 crossing and reserve grid cells along the way.

    veh_num      -- vehicle index into the module-level per-vehicle arrays
                    (NOTE(review): shadows the global `veh_num` count)
    current      -- current (x, y) position
    origin       -- (x, y) where the vehicle enters the intersection
    destination  -- (x, y) exit point
    speed        -- distance advanced per time step
    current_time -- caller's simulation time

    Returns False as soon as a needed cell is already taken by another
    vehicle; returns True when the whole trajectory could be reserved.
    Mutates the module-level intersec_grid / r / beze_t state.
    """
    new_position = current
    time = 0
    check_grid = []
    return_time = 0  # NOTE(review): never used
    # Initiate intersection grid
    # NOTE(review): `time_step` is module-level and never advanced (the
    # assignment below is commented out), and the reset writes into the
    # template `grid`, not intersec_grid[i] — looks unfinished; confirm.
    if current_time > time_step:
        #time_step = current_time
        for i in range(t_ahead):
            for i in range(270, 330, 10):
                for j in range(270, 330, 10):
                    grid[(i, j)] = True
    # Advance step by step until the vehicle leaves the intersection.
    while new_position[1] <= destination[1]:
        # Straight approach phase: vehicle not fully inside the intersection yet.
        if new_position[0] + speed < origin[0]:
            if not intersec_grid[time][(270, 270)]:
                break
            else:
                new_position = (new_position[0] + speed, new_position[1])
                intersec_grid[time][(270, 270)] = False
        else:
            # Calculate rotation angle for the turning phase.
            if (((new_position[1] - 270 + speed) / 60) * 90 > 15):
                r[veh_num] = ((new_position[1] - 270 + 3) / 60) * 90
            else:
                r[veh_num] = 0
            # Calculate the trajectory with a quadratic Bezier curve.
            x = pow(1 - (beze_t[veh_num] / 60), 2) * 270 + 2 * (beze_t[veh_num] / 60) * (
                    1 - beze_t[veh_num] / 60) * 330 + pow(
                beze_t[veh_num] / 60, 2) * 330
            y = pow(1 - (beze_t[veh_num] / 60), 2) * 273 + 2 * (beze_t[veh_num] / 60) * (
                    1 - beze_t[veh_num] / 60) * 273 + pow(
                beze_t[veh_num] / 60, 2) * 330
            beze_t[veh_num] += 2
            new_position = (x, y)
            # Compute the vehicle's rotated bounding-square corners.
            up_left_x[veh_num] = funcs.coordinate_up_left_x(new_position[0], r[veh_num])
            up_left_y[veh_num] = funcs.coordinate_up_left_y(new_position[1])
            down_left_x[veh_num] = funcs.coordinate_down_left_x(new_position[0], r[veh_num])
            down_left_y[veh_num] = funcs.coordinate_down_left_y(new_position[1], r[veh_num])
            up_right_x[veh_num] = funcs.coordinate_up_right_x(new_position[0], r[veh_num])
            up_right_y[veh_num] = funcs.coordinate_up_right_y(new_position[1])
            down_right_x[veh_num] = funcs.coordinate_down_right_x(new_position[0], r[veh_num])
            down_right_y[veh_num] = funcs.coordinate_down_right_y(new_position[1], r[veh_num])
            # Up-left corner: fail if its cell is taken, else reserve it.
            if (up_left_x[veh_num] // 10 * 10, up_left_y[veh_num] // 10 * 10) in intersec_grid[time]:
                if intersec_grid[time][(up_left_x[veh_num] // 10 * 10, up_left_y[veh_num] // 10 * 10)] == False:
                    return False
                else:
                    intersec_grid[time][(up_left_x[veh_num] // 10 * 10, up_left_y[veh_num] // 10 * 10)] = False
                    check_grid.append((up_left_x[veh_num] // 10 * 10, up_left_y[veh_num] // 10 * 10))
            # Up-right corner: a cell this vehicle reserved itself (check_grid)
            # this step is not a conflict.
            if ((up_right_x[veh_num]) // 10 * 10, up_right_y[veh_num] // 10 * 10) in intersec_grid[time]:
                if intersec_grid[time][((up_right_x[veh_num]) // 10 * 10, up_right_y[veh_num] // 10 * 10)] == False:
                    if ((up_right_x[veh_num]) // 10 * 10, up_right_y[veh_num] // 10 * 10) not in check_grid:
                        return False
                    else:
                        intersec_grid[time][((up_right_x[veh_num]) // 10 * 10, up_right_y[veh_num] // 10 * 10)] = False
                        check_grid.append(((up_right_x[veh_num]) // 10 * 10, up_right_y[veh_num] // 10 * 10))
                else:
                    intersec_grid[time][((up_right_x[veh_num]) // 10 * 10, up_right_y[veh_num] // 10 * 10)] = False
                    check_grid.append(((up_right_x[veh_num]) // 10 * 10, up_right_y[veh_num] // 10 * 10))
            # Down-left corner, same rule.
            if (down_left_x[veh_num] // 10 * 10, (down_left_y[veh_num]) // 10 * 10) in intersec_grid[time]:
                if intersec_grid[time][(down_left_x[veh_num] // 10 * 10, (down_left_y[veh_num]) // 10 * 10)] == False:
                    if (down_left_x[veh_num] // 10 * 10, (down_left_y[veh_num]) // 10 * 10) not in check_grid:
                        return False
                    else:
                        intersec_grid[time][(down_left_x[veh_num] // 10 * 10, (down_left_y[veh_num]) // 10 * 10)] = False
                        check_grid.append((down_left_x[veh_num] // 10 * 10, (down_left_y[veh_num]) // 10 * 10))
                else:
                    intersec_grid[time][(down_left_x[veh_num] // 10 * 10, (down_left_y[veh_num]) // 10 * 10)] = False
                    check_grid.append((down_left_x[veh_num] // 10 * 10, (down_left_y[veh_num]) // 10 * 10))
            # Down-right corner, same rule (not added to check_grid).
            if ((down_right_x[veh_num]) // 10 * 10, (down_right_y[veh_num]) // 10 * 10) in intersec_grid[time]:
                if intersec_grid[time][((down_right_x[veh_num]) // 10 * 10, (down_right_y[veh_num]) // 10 * 10)] == False:
                    if ((down_right_x[veh_num]) // 10 * 10, (down_right_y[veh_num]) // 10 * 10) not in check_grid:
                        return False
                    else:
                        intersec_grid[time][((down_right_x[veh_num]) // 10 * 10, (down_right_y[veh_num]) // 10 * 10)] = False
                else:
                    intersec_grid[time][((down_right_x[veh_num]) // 10 * 10, (down_right_y[veh_num]) // 10 * 10)] = False
        # Next time step: fresh per-step self-reservation list.
        check_grid = []
        time += 1
    print(time)
    if time == 35:
        for i in range(t_ahead):
            print(intersec_grid[i])
    return True
def test_collision():
    """Placeholder collision test; the real call is commented out below."""
    print()
#print(light_veh_pattern1(1, (262, 273), (270, 273), (330, 330), 2, 0))
# Start the blocking UDP request/response loop.
sendResult()
import math
# Evaluate z = sqrt((3x+2)^2 - 24x) / (3*sqrt(x) - 2/sqrt(x)) for an integer x.
print ("x=")
x=int(input())
# NOTE(review): raises for x <= 0 (sqrt of non-positive / division by zero);
# input is assumed to be a positive integer.
z=math.sqrt((3*x+2)*(3*x+2)-24*x)/(3*math.sqrt(x)-2/math.sqrt(x))
print("z=", z)
|
# Read an integer and print its tens and ones digits.
A=int(input("A= "))
hundred=int(A/100)  # NOTE(review): computed but never printed — confirm intent
tens=int(A/10%10)
ones=int(A%10)
print(tens)
print(ones)
|
from Pyskell.Language.PyskellTypeSystem import *
from inspect import isclass
from collections import defaultdict
def ct(obj):
    """Shorthand: the printable inferred type of *obj* (via type_of)."""
    inferred = type_of(obj)
    return str(inferred)
__magic_methods__ = ["__{}__".format(s) for s in {
"len", "getitem", "setitem", "delitem", "iter", "reversed", "contains",
"missing", "delattr", "call", "enter", "exit", "eq", "ne", "gt", "lt",
"ge", "le", "pos", "neg", "abs", "invert", "round", "floor", "ceil",
"trunc", "add", "sub", "mul", "div", "truediv", "floordiv", "mod",
"divmod", "pow", "lshift", "rshift", "or", "and", "xor", "radd", "rsub",
"rmul", "rdiv", "rtruediv", "rfloordiv", "rmod", "rdivmod", "rpow",
"rlshift", "rrshift", "ror", "rand", "rxor", "isub", "imul", "ifloordiv",
"idiv", "imod", "idivmod", "irpow", "ilshift", "irshift", "ior", "iand",
"ixor", "nonzero"}]
def replace_magic_methods(some_class, fn):
for attr in __magic_methods__:
setattr(some_class, attr, fn)
return
class Syntax(object):
def __init__(self, error_message):
self.__syntax_error_message = error_message
self.invalid_syntax = SyntaxError(self.__syntax_error_message)
replace_magic_methods(Syntax, lambda x, *a: x.__raise())
def __raise(self):
raise self.invalid_syntax
class Instance(Syntax):
    """Syntax node binding a type-class instance declaration to a class."""
    def __init__(self, type_class, some_class):
        super(Instance, self).__init__("Instance Error")
        if not isclass(type_class) or not issubclass(type_class, TypeClass):
            raise TypeError("{} is not a type-class".format(type_class))
        self.type_class = type_class
        self.cls = some_class
    def where(self, **kwargs):
        """Supply the member implementations for this instance."""
        self.type_class.make_instance(self.cls, **kwargs)
class TS(Syntax):
    """Type Signature decorator: wraps a plain function into a TypedFunction."""
    def __init__(self, sig):
        super(TS, self).__init__("Syntax Error in Type Signature")
        if not isinstance(sig, Signature):
            raise SyntaxError("Signature expected in TS() found {}"
                              .format(sig))
        if len(sig.signature.args) < 2:
            raise SyntaxError("Type Signature Argument Not Enough")
        self.signature = sig.signature
    def __call__(self, fn):
        """Attach the signature to *fn*, producing a TypedFunction."""
        arg_types = type_sig_build(self.signature)
        return TypedFunction(fn, arg_types, make_func_type(arg_types))
class Signature(Syntax):
    """Accumulates argument types of a signature via the >> operator."""
    def __init__(self, args, constraints):
        super(Signature, self).__init__("Syntax Error in Type Signature")
        self.signature = TypeSignature(constraints, args)
    def __rshift__(self, other):
        """Extend the signature with one more argument/return type."""
        nxt = other.signature if isinstance(other, Signature) else other
        return Signature(self.signature.args + (nxt,),
                         self.signature.constraints)
    def __rpow__(self, other):
        """fn ** sig applies the finished signature to *other*."""
        return TS(self)(other)
class Constraints(Syntax):
    """Collects type-class constraints, keyed by type-variable name."""
    def __init__(self, constraints=()):
        super(Constraints, self).__init__("Syntax Error in Type Signature")
        self.constraints = defaultdict(list)
        if len(constraints) > 0:
            # Either a tuple of (TypeClass, "var") pairs or a single pair.
            if isinstance(constraints[0], tuple):
                for pair in constraints:
                    self.__add_tc_constraints(pair)
            else:
                self.__add_tc_constraints(constraints)
    def __add_tc_constraints(self, pair):
        """Validate one (type-class, type-variable) pair and record it."""
        if len(pair) != 2 or not isinstance(pair, tuple):
            raise SyntaxError("Invalid Type-class Constraint: {}"
                              .format(str(pair)))
        if not isinstance(pair[1], str):
            raise SyntaxError("{} is not type variable".format(pair[1]))
        if not (isclass(pair[0]) and issubclass(pair[0], TypeClass)):
            raise SyntaxError("{} is not a type-class".format(pair[0]))
        self.constraints[pair[1]].append(pair[0])
        return
    def __getitem__(self, item):
        """C[(TypeClass, "a"), ...] builds a fresh constraint collection."""
        return Constraints(item)
    def __div__(self, other):
        """constraints / first_type starts a constrained Signature."""
        return Signature((), self.constraints) >> other
    def __truediv__(self, other):
        return self.__div__(other)
# Ready-made empty constraint collection; `C[...]` builds constrained signatures.
C = Constraints()
# Alias for the Python-function type constructor.
py_func = PythonFunctionType
class SyntaxUndefined(Undefined):
    pass
# Any operation on the syntactic `undefined` yields a fresh Undefined value.
replace_magic_methods(SyntaxUndefined, lambda *x: Undefined())
undefined = SyntaxUndefined()
def t(type_constructor, *parameters):
    """Build a higher-kinded type signature for *type_constructor*.

    For ADT constructors the number of supplied *parameters* must match the
    constructor's declared type parameters.
    """
    if issubclass(type_constructor, ADT) and isclass(type_constructor) and \
            len(type_constructor.__parameters__) != len(parameters):
        raise TypeError("Incorrect number of type parameter {}"
                        .format(type_constructor.__name__))
    resolved = [p.signature if isinstance(p, Signature) else p
                for p in parameters]
    return TypeSignatureHigherKind(type_constructor, resolved)
def typify_py_func(fn, high=None):
    """Create the most general type signature for a plain Python function.

    fn   -- the function to type: one fresh type variable ("a0", "a1", ...)
            per argument, plus one for the return value
    high -- optional wrapper applied to the return-type variable (e.g. a
            higher-kinded constructor)
    """
    if not is_py_func_type(fn):
        raise TypeError("Provided not Python Function Type")
    # BUG FIX: `fn.func_code` is Python-2-only (removed in Python 3);
    # `fn.__code__` is the portable spelling (available since 2.6).
    type_name_list = ["a" + str(i) for i in range(fn.__code__.co_argcount + 1)]
    if high is not None:
        type_name_list[-1] = high(type_name_list[-1])
    return TS(Signature(type_name_list, []))
|
class Client:
    """A person or company that books conference reservations."""
    def __init__(self, _id, name, phone, isCompany):
        self.id, self.name = _id, name
        self.phone, self.isCompany = phone, isCompany
class DiscountThreshold:
    """Early-bird discount valid for one conference within a date window."""
    def __init__(self, _id, confID, startDate, endDate, discount):
        self.id, self.confID = _id, confID
        self.startDate, self.endDate = startDate, endDate
        self.discount = discount
class Conference:
    """A conference with its base price, student discount and date range."""
    def __init__(self, _id, name, price, studentDiscount, startDate, endDate):
        self.id, self.name = _id, name
        self.price, self.studentDiscount = price, studentDiscount
        self.startDate, self.endDate = startDate, endDate
class ConferenceDay:
    """One day of a conference; freePlaces starts at the full limit."""
    def __init__(self, _id, confID, date, limit):
        self.id, self.confID, self.date = _id, confID, date
        self.limit = limit
        # All seats are free until admissions are registered.
        self.freePlaces = limit
class Workshop:
    """A priced workshop held during one conference day."""
    def __init__(self, _id, dayID, name, start, end, limit, price):
        self.id, self.dayID, self.name = _id, dayID, name
        self.start, self.end = start, end
        self.limit, self.price = limit, price
        # All seats are free until reservations are made.
        self.freePlaces = limit
class ConferenceReservation:
    """A client's reservation for a conference; amount due starts at zero."""
    def __init__(self, _id, confID, clientID, registrationDate):
        self.id, self.confID = _id, confID
        self.clientID, self.registrationDate = clientID, registrationDate
        self.toPay = 0
class DayReservation:
    """Per-day slice of a conference reservation with participant counts."""
    def __init__(self, _id, dayID, reservationID, participantsNumber, studentParticipantsNumber, toPay):
        self.id, self.dayID, self.reservationID = _id, dayID, reservationID
        self.participantsNumber = participantsNumber
        self.studentParticipantsNumber = studentParticipantsNumber
        self.toPay = toPay
class Participant:
    """A named attendee with a contact e-mail address."""
    def __init__(self, _id, firstName, lastName, EMailAddress):
        self.id = _id
        self.firstName, self.lastName = firstName, lastName
        self.EMailAddress = EMailAddress
class DayAdmission:
    """Links a participant to a day reservation, noting student status."""
    def __init__(self, _id, participantID, dayReservationID, isStudent):
        self.id, self.participantID = _id, participantID
        self.dayReservationID, self.isStudent = dayReservationID, isStudent
class WorkshopReservation:
    """Group booking for a workshop; initially nobody is enrolled yet."""
    def __init__(self, _id, workshopID, dayReservationID, participantsNumber):
        self.id, self.workshopID = _id, workshopID
        self.dayReservationID = dayReservationID
        self.participantsNumber = participantsNumber
        # Seats reserved but not yet assigned to named participants.
        self.notEnrolled = participantsNumber
class WorkshopAdmission:
    """Assigns one day admission to one workshop reservation."""
    def __init__(self, dayAdmissionID, workshopReservationID):
        self.dayAdmissionID = dayAdmissionID
        self.workshopReservationID = workshopReservationID
class Payment:
    """A dated payment made toward a conference reservation."""
    def __init__(self, _id, conferenceReservationID, amount, date):
        self.id = _id
        self.conferenceReservationID = conferenceReservationID
        self.amount, self.date = amount, date
|
l=['k','a','b','a','l','i']
def check(st):
    """Return 1 when every character of *st* passes the count test against
    the global letter list ``l``, else 0.

    NOTE(review): the comparison direction (``l.count(ch) > chars.count(ch)``
    fails) mirrors the original code exactly, although it looks inverted
    relative to the usual "can *st* be built from ``l``" check — preserved.
    """
    chars = list(st)
    for ch in chars:
        if ch not in l or l.count(ch) > chars.count(ch):
            return 0
    return 1
# Read n words from stdin and print how many of them pass check().
n=int(input())
l1=[]
c=0
for i in range(n):
    s=input()
    l1.append(s)
for i in l1:
    r=check(i)
    if(r==1):
        c+=1
print(c)
|
from rest_framework import permissions
class UserPermissions(permissions.BasePermission):
    """Allow read access to anyone; writes only to the account owner."""
    def has_object_permission(self, request, view, obj):
        # SAFE_METHODS = GET/HEAD/OPTIONS.
        # NOTE(review): request.method is always an HTTP verb, so it can never
        # equal 'CREATE' — that clause is dead code; 'POST' was probably
        # intended. Confirm before changing behavior.
        if request.method in permissions.SAFE_METHODS or request.method == 'CREATE':
            return True
        return obj == request.user
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 18 15:52:56 2021
@author: ftan1
"""
import numpy as np
import pydicom as pyd
import datetime
import os
import glob
import sys
import sigpy_e.cfl as cfl
if __name__ == '__main__':
    # set image dir from the command line
    image_dir = sys.argv[1]
    image_file = image_dir + '/MRI_Raw_imoco'
    # load the reconstructed image (magnitude only)
    im = np.abs(cfl.read_cfl(image_file))
    #im = np.transpose(im[::-1,::-1,:], axes=[2,0,1])
    # flip and reorder axes to match the DICOM orientation
    im = np.transpose(im[:,::-1,::-1], axes=[0,2,1])
    # exam number, series number taken from an existing DICOM in the directory
    dcm_ls = glob.glob(image_dir + '/*.dcm')
    # load original dicom to mimic its metadata
    series_mimic_dir = dcm_ls[0]
    ds = pyd.dcmread(series_mimic_dir)
    # parse exam number, series number out of the 'Exam#Series#Image#.dcm' name
    dcm_file = os.path.basename(series_mimic_dir)
    exam_number, series_mimic, _ = dcm_file.replace('Exam', '').replace('Series', '_').replace('Image','_').replace('.dcm','').split('_')
    exam_number = int(exam_number)
    series_write = int(series_mimic) + 10 # adding 10, this should ensure no overlap with other series numbers for Philips numbering which uses series number (in order acquired) *100
    # modified time
    dt = datetime.datetime.now()
    #ds.FileModTime = dt.strftime('%Y%m%d%H%M%S.%f') + '-0800' # PST
    ds.SeriesDescription = "3D UTE iMoCo"
    # Update SliceLocation information
    series_mimic_slices = np.double(ds.Columns) # assume recon is isotropic
    SliceLocation_center = ds.SliceLocation - (series_mimic_slices-1)/2*ds.SpacingBetweenSlices
    ImagePosition_zcenter = ds.ImagePositionPatient[2] + (series_mimic_slices-1)/2*ds.SpacingBetweenSlices
    im_shape = np.shape(im)
    ds.Columns, ds.Rows= im_shape[-2], im_shape[-1]
    spatial_resolution = ds.SliceThickness
    ds.SpacingBetweenSlices = spatial_resolution
    ds.PixelSpacing = [spatial_resolution, spatial_resolution]
    ds.SliceThickness = spatial_resolution
    ds.ReconstructionDiameter = spatial_resolution*im_shape[-1]
    SliceLocation_original = ds.SliceLocation
    ImagePositionPatient_original = ds.ImagePositionPatient
    series_write_dir = image_dir #+ str(series_write)
    try:
        os.mkdir(series_write_dir)
    except OSError as error:
        # directory already exists — fine
        # print(error)
        pass
    # scale magnitude into the 12-bit unsigned range
    im = np.abs(im) / np.amax(np.abs(im)) * 4095 #65535
    im = im.astype(np.uint16)
    # Window and level for the image
    ds.WindowCenter = int(np.amax(im)/2)
    ds.WindowWidth = int(np.amax(im))
    # fresh dicom series UID for the new series
    ds.SeriesInstanceUID = pyd.uid.generate_uid()
    # not currently accounting for oblique slices...
    for z in range(im_shape[0]):
        ds.InstanceNumber = z+1;
        ds.SeriesNumber = series_write
        ds.SOPInstanceUID = pyd.uid.generate_uid()
        ds.file_meta.MediaStorageSOPInstanceUID = ds.SOPInstanceUID # SOPInstanceUID should == MediaStorageSOPInstanceUID
        Filename = '{:s}/E{:d}S{:d}I{:d}.DCM'.format(series_write_dir, exam_number, series_write, z+1)
        ds.SliceLocation = SliceLocation_original + (im_shape[0]/2 - (z+1)) * spatial_resolution;
        ds.ImagePositionPatient = pyd.multival.MultiValue(float, [float(ImagePositionPatient_original[0]), float(ImagePositionPatient_original[1]), ImagePositionPatient_original[2] - (im_shape[0]/2 - (z+1)) * spatial_resolution])
        b = im[z,:,:].astype('<u2')
        # BUG FIX: ndarray.tostring() was deprecated in NumPy 1.19 and removed
        # in NumPy 2.0; tobytes() returns the identical byte string.
        ds.PixelData = b.T.tobytes()
        #ds.is_little_endian = False
        ds.save_as(Filename)
|
import numpy as np
import matplotlib.pyplot as plt
# Plot an arbitrary function, e.g. f(x) = cos(x) + cos(2x)
resol=1000
x=np.linspace(-5,5,resol)
f=lambda _x: np.cos(_x)+np.cos(_x*2)
plt.plot(x,f(x))
#-----------------------------------------------
# "Drop" a ball at a random point (x, f(x)) of that curve
bolita=np.random.rand(1)*10-5
plt.plot(bolita,f(bolita),'o',c='red')
#------------------------------------------------
h=10e-4 # h is the x delta used for the numerical derivative
lr=0.02 # learning rate: how "big" each downhill step along the curve is
ya_casi=10e-4 # threshold: slope close enough to zero to stop iterating
for i in range(500):
    deriv=(f(bolita+h)-f(bolita))/h # forward-difference derivative of f at "bolita",
    # i.e. the slope of the tangent line through that point of the curve
    bolita-=deriv*lr # take a step (deriv*lr) toward decreasing slope
    plt.plot(bolita,f(bolita),'.',c='blue')
    if deriv**2< ya_casi**2: # stop once the slope is as close to zero as "ya_casi"
        print(f'la bolita bajó hasta la posicion {f(bolita)} en {i} pasos')
        break
plt.show()
|
import turtle
# Window and pen setup.
canvas = turtle.Screen()
canvas.bgcolor("lightgreen")
leo = turtle.Turtle()
leo.shape("arrow")
leo.color("pink")
leo.pensize(5)
def draw_square(size):
    """Draw a single square with side length *size* using the global pen."""
    for _ in range(4):
        leo.forward(size)
        leo.left(90)
def draw_squares(number, size):
    """
    Draw a horizontal row of squares.

    :param number: number of squares
    :param size: side length of each square
    :return: nothing
    """
    for _ in range(number):
        draw_square(size)
        # Hop two side-lengths to the right without drawing, leaving a stamp.
        leo.penup()
        leo.forward(2 * size)
        leo.pendown()
        leo.stamp()
# Draw five squares of side 20, then wait for a click to close the window.
draw_squares(5,20)
canvas.exitonclick()
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 09:18:57 2018
@author: Administrator
"""
'''
rf:0.12836
lasso:
adboost: 0.41471
gbdt:0.13519
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#加载数据
train = pd.read_csv('./input/train.csv',index_col=0)
test = pd.read_csv('./input/test.csv',index_col=0)
#查看y的分布情况
y = train.pop('SalePrice')
log_y = np.log1p(y)
#画图显示
fig= plt.figure(figsize=(8,5))
ax1 = plt.subplot2grid([1,2],[0,0]) #不做log处理
ax1.hist(y)
ax2 = plt.subplot2grid([1,2],[0,1]) #做log处理
ax2.hist(log_y)
#合并train和test
data = pd.concat([train,test],axis=0)
data['MSSubClass'] = data['MSSubClass'].astype(str) #这个特征转字符型
#one_hot编码
data_dummy = pd.get_dummies(data)
#有部分缺失值
data_dummy.isnull().sum().sort_values(ascending=False).head(15)
data_mean = data_dummy.mean()
data_dummy.fillna(data_mean,inplace=True) #均值填充
#标准化那些数据
data_dummy.loc[:,data.columns !='object'] = (data_dummy.loc[:,data.columns !='object'] - data_dummy.loc[:,data.columns !='object'].mean())/data_dummy.loc[:,data.columns !='object'].std()
#划分出train_data,test_date
train_data = data_dummy.loc[train.index]
test_data = data_dummy.loc[test.index]
#建立模型
#========================================
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
#1/lr
lasso = Lasso()
model_lasso = GridSearchCV(lasso,param_grid={'alpha':np.logspace(-3,2,100)},cv=5)
model_lasso.fit(train_data,log_y)
print('最好的参数:',model_lasso.best_estimator_)
print('得分是:',model_lasso.best_score_)
lasso_y = np.expm1(model_lasso.predict(test_data))
'''
最好的参数: Lasso(alpha=0.001, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
得分是: 0.8768867547599901
'''
#2/RF
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor()
model_rf= GridSearchCV(rf,param_grid={'n_estimators':[30,60,90,100],'max_depth':np.arange(3,10)},cv=5)
model_rf.fit(train_data,log_y)
print('最好的参数:',model_rf.best_estimator_)
print('得分是:',model_rf.best_score_)
lr_y = np.expm1(model_rf.predict(test_data))
'''
最好的参数: RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=9,
max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=90, n_jobs=1,
oob_score=False, random_state=None, verbose=0, warm_start=False)
得分是: 0.8683524221007549
'''
#3/boosting
from sklearn.ensemble import AdaBoostRegressor
adboost = AdaBoostRegressor(base_estimator = lasso)
model_adboost = GridSearchCV(adboost,param_grid={'learning_rate':np.logspace(-3,-1,20),'n_estimators':[20,40,60,80,100]})
model_adboost.fit(train_data,log_y)
print('最好的参数:',model_adboost.best_estimator_)
print('得分是:',model_adboost.best_score_)
adboost_y = np.expm1(model_adboost.predict(test_data))
'''
最好的参数: AdaBoostRegressor(base_estimator=Lasso(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False),
learning_rate=0.1, loss='linear', n_estimators=100,
random_state=None)
得分是: 0.017396793490224997
'''
#4/gbdt
from sklearn.ensemble import GradientBoostingRegressor
gbdt = GradientBoostingRegressor()
model_gbdt= GridSearchCV(gbdt,param_grid={'learning_rate':np.logspace(-3,-1,20),'n_estimators':[20,40,60,80,100]})
model_gbdt.fit(train_data,log_y)
print('最好的参数:',model_gbdt.best_estimator_)
print('得分是:',model_gbdt.best_score_)
gbdt_y = np.expm1(model_gbdt.predict(test_data))
'''
最好的参数: GradientBoostingRegressor(alpha=0.9, criterion='friedman_mse', init=None,
learning_rate=0.1, loss='ls', max_depth=3, max_features=None,
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
n_estimators=100, presort='auto', random_state=None,
subsample=1.0, verbose=0, warm_start=False)
得分是: 0.8942227440070227
'''
#5/xgboost
from xgboost import XGBRegressor
xgb = XGBRegressor()
model_xgb = GridSearchCV(xgb,param_grid=({'n_estimators':[20,40,60,80,100],'learning_rate':np.logspace(-3,-1,20)}))
model_xgb.fit(train_data,log_y)
print('最好的参数:',model_xgb.best_estimator_)
print('得分是:',model_xgb.best_score_)
xgb_y = model_xgb.predict(test_data)
'''
最好的参数: XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=100,
n_jobs=1, nthread=None, objective='reg:linear', random_state=0,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
silent=True, subsample=1)
得分是: 0.8885704897402684
'''
#输出预测结果
output_lasso = pd.DataFrame({'Id':test_data.index,'SalePrice':lasso_y})
output_lasso.to_csv('lasso.csv',index=None)
output_rf = pd.DataFrame({'Id':test_data.index,'SalePrice':lr_y})
output_rf.to_csv('lr.csv',index=None)
output_adaboost = pd.DataFrame({'Id':test_data.index,'SalePrice':adboost_y})
output_adaboost.to_csv('adboost_y.csv',index=None)
output_gbdt = pd.DataFrame({'Id':test_data.index,'SalePrice':gbdt_y})
output_gbdt.to_csv('gbdt_y.csv',index=None)
output_xgb = pd.DataFrame({'Id':test_data.index,'SalePrice':xgb_y})
output_xgb.to_csv('xgb_y.csv',index=None)
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''
ds9.py
Created by Carlos J. Diaz on 2013-12.
Busco las estrellas; alineo las imagenes y las combino
'''
#Importo lo necesario para el desarrollo del programa
from pyraf import iraf
import os, string, sys
from function2 import sex,sex2cat,sex2catb
#Y las funciones del iraf
# Load the IRAF task packages used by the alignment/combination steps below.
iraf.images()
iraf.imcoords()
iraf.immatch()
iraf.noao()
iraf.imred()
iraf.ccdred()
iraf.imutil()
##################### ALGUNAS FUNCIONES ... ###################
def file_len(fname):
    """Return the number of newline-terminated lines in *fname* (as `wc -l`)."""
    #return os.system('wc -l '+fname,stdout=a)
    import subprocess
    # SECURITY FIX: pass an argument list instead of a shell-interpreted
    # string — same output, but immune to shell injection and to whitespace
    # in *fname*.
    proc = subprocess.Popen(['wc', '-l', fname], stdout=subprocess.PIPE)
    (out, err) = proc.communicate()
    return int(out.split()[0])
def plusoff(x):
    """Copy catalog *x* to 'p'+x, skipping its first 16 lines and adding a
    +39 pixel offset to both coordinates.

    Fixes a resource leak in the original: both file handles were opened and
    never closed.  The output file is still opened in append mode ('a'), as
    before, and the '%6.5s' formatting of the shifted values is unchanged.
    """
    with open(x, 'r') as src, open('p' + x, 'a') as dst:
        for w, linea in enumerate(src):
            if w < 16:
                # The first 16 lines are header material and are dropped.
                continue
            a = linea.split()
            dst.write(('%6.5s %6.5s' % (float(a[0])+39, float(a[1])+39)) + '\n')
#################### START OF DS9.PY ############################
# Remove products of any previous run.
os.system('rm csbf*')
os.system('rm FINAL*.fits')
os.system('rm aFINAL*.fits')
os.system('rm *.obj.*')
os.system('rm *.cat*')
os.system('rm cFINAL*.fits')
# DIV.PY has already split the images into groups (cycles of positions).
# One combination is produced for each group.
o=open('lista_group','r')
qr=open('Errorfind.log','w')
for grupo in o:
    print grupo
    grupo=string.strip(grupo)
    gruposq=string.strip(grupo)+'sq'
    shiftgrupo=grupo+'shiftsq'
    f=open(gruposq,'r')
    lista_grupo=[];c=0;lis_total=[]
    # Partition the group file into per-cube lists: consecutive names that
    # share the same prefix (all but the last 9 characters) form one cube.
    for imagen in f:
        if c==0:
            c=1
            imagen0=imagen
            lista_grupo.append(string.strip(imagen))
        else:
            if imagen[:-9] == imagen0[:-9]:
                lista_grupo.append(string.strip(imagen))
            else:
                c=0
                lis_total.append(lista_grupo)
                lista_grupo=[]
                lista_grupo.append(string.strip(imagen))
    lis_total.append(lista_grupo)
    '''
    #########################################################################################
    ############## ALINEAMIENTO Y COMBINACION DENTRO DEL MISMO CUBO DE IMAGENES
    #########################################################################################
    '''
    # lis_total holds all images grouped by cube: [[cube1],[cube2],...]
    for cubo in lis_total:
        lista_grupo=cubo
        lista1=', '.join(lista_grupo)
        lista2='c'+', c'.join(lista_grupo)
        imagen0=lista_grupo[0]
        imagen0=string.strip(imagen0)
        sex(imagen0)
        sex2catb(imagen0) #This function cleans the SExtractor catalog so imalign can read it
        ###########################################################################################
        # From the study of the telescope's unguided ("drift") motion we can
        # extrapolate a first approximation of the drift and use it as a second
        # contribution to the total shift, getting a better recentring.  We do
        # a mini-alignment of the first images:
        # imcentroid looks for the same stars in images 1,2,3... up to the -3rd.
        lista_grupo0=lista_grupo[0:-2];
        lista_grupo0=', '.join(lista_grupo0)
        coo0x=[]
        coo0y=[]
        ppp=1
        print imagen0[:-5]+'.obj.1'
        # If the image has only one detected star it cannot be combined: the
        # low signal-to-noise ratio makes it impossible to find that star in
        # the remaining images.
        if file_len(imagen0[:-5]+'.obj.1') < 2:
            print '################### Grupo '+grupo+' sin combinar'
            ppp=0
            break
        qq=open(imagen0[:-5]+'.obj.1','r')
        for cat in qq:
            cat=cat.split()
            coo0x.append(float(cat[0]))
            coo0y.append(float(cat[1]))
        # Recentre each star with IMCNTR.
        coo2=iraf.imcntr(input=lista_grupo0,x_init=coo0x[0],y_init = coo0y[0],cboxsize = '15',Stdout=1)
        #coo2 is a list of "image: x: ... y: ..." strings
        restax=[]
        restay=[]
        for centro in coo2:
            x=float(centro.split(':')[1][:-1])
            y=float(centro.split(':')[2])
            restax.append(-x+coo0x[0]) #We compute relative shifts, so subtract these positions from image 1.
            restay.append(-y+coo0y[0])
        restax[0]=0.
        restay[0]=0.
        r=range(len(restax))
        # Fit a straight line with numpy.polyfit and use it to extrapolate the
        # theoretical shift:
        from numpy import polyfit
        zx = polyfit(r, restax, 1)
        zy = polyfit(r, restay, 1)
        # Build the shifts and save them to a file.
        rr=open(imagen0[:-5]+'.shft','w')
        for i in range(len(lista_grupo)):
            # NOTE(review): the y shift reuses zx[1] as intercept; zy[1] was
            # probably intended -- confirm against the original data.
            rr.write(('%8.5s %8.5s' % (zx[0]*i+zx[1],zy[0]*i+zx[1]))+'\n')
            #print zx[0]*i+zx[1],zy[0]*i+zx[1]
        rr.close()
        #######################################################################################
        iraf.imalign(input=lista1,referenc=imagen0,coords=imagen0[:-5]+'.obj.1',output=lista2,shifts=imagen0[:-5]+'.shft',boxsize='13',bigbox='17',negativ='no',backgro='INDEF',lower='INDEF',upper='INDEF',niterat='12',toleran='1',maxshif= 'INDEF',shiftim='yes',interp_='linear',boundar='constant',constan='0.',trimima='yes',verbose='no',mode='h')
        iraf.imcombine(input=lista2,output='FINAL_'+imagen0[:-12]+'.fits',combine="median",masktype="none",outtype="real",scale="none",project="no",reject="none",weight="none",logfile = "")
        print 'Image combined'
        #break
    if ppp==0:
        print '##############################################################################'
        print '##############################################################################'
        qr.write(grupo+'\n')
        continue #If the image has a single star, skip to the next group,
        # because it would be very hard to combine.
    # We now have the per-cube images named FINAL_<name>; next they are
    # combined with each other, using the offsets computed from the
    # coordinates between them.
    '''
    #########################################################################################
    ############## ALINEAMIENTO Y COMBINACION DE CADA CUBO DE IMAGENES
    #########################################################################################
    '''
    os.system('more '+grupo+'shifts')
    tt=open(grupo,'r')
    file0=tt.readlines()
    tt.close()
    image00='FINAL_sbf'+string.strip(file0[0])+'s'
    mk=open(grupo+'shifts','r')
    line_shift=mk.readlines()
    mk.close()
    sex(image00)
    sex2cat(image00)
    print image00[:-5]+'.obj.1'
    grupos=grupo+'s'
    lista10=[]
    for cada in file0:
        lista10.append(string.strip(cada))
    # Combined images of the cube:
    lista1='FINAL_sbf'+'s, FINAL_sbf'.join(lista10)+'s'
    # Combined AND aligned images of the cube:
    lista2='cFINAL_sbf'+'s, cFINAL_sbf'.join(lista10)+'s'
    #### METHOD TO COMPUTE THE OFFSETS BETWEEN THE IMAGES #####
    print image00[:-5]+'.cat'
    # The catalog of the first image is:
    catalog=image00[:-5]+'.cat'
    # If no star was found in the image (a catalog with only the 7 header
    # lines), move on to the next group of images to combine.
    if file_len(catalog) ==7:
        print 'IMAGEN SIN ESTRELLA'
        # Record it in Errorfind.log
        continue
    cc=open(catalog,'r')
    posx=[]
    posy=[]
    flux=[]
    for star in cc:
        if star[0] != '#':
            star=star.split()
            x=float(star[1])
            y=float(star[2])
            # Keep only stars well inside the frame (20..230 px).
            if x > 20. and x < 230. and y > 20. and y < 230.:
                posx.append(float(star[1]))
                posy.append(float(star[2]))
                flux.append(float(star[4]))
    cc.close()
    print flux
    if len(flux)==0:
        # No stars in the region we searched.
        continue
    # Bubble-sort the arrays by decreasing flux so alignment uses the
    # brightest stars of the image.
    band=int(0);
    while band==0:
        band=1
        for k in range(0,len(flux)-1):
            if flux[k]<flux[k+1]:
                aux=flux[k+1]
                flux[k+1]=flux[k]
                flux[k]=aux
                aux2=posx[k+1]
                posx[k+1]=posx[k]
                posx[k]=aux2
                aux3=posy[k+1]
                posy[k+1]=posy[k]
                posy[k]=aux3
                band=0;
    lista_todas=lista1.split(',')
    # Relative positions
    from numpy import *
    flux=array(flux);posx=array(posx);posy=array(posy);
    flux0=flux
    posx0=posx
    posy0=posy
    flux=flux/flux[0]
    posx=posx-posx[0]
    posy=posy-posy[0]
    # flux0/posx0/posy0 refer to the stars of the first image, which are
    # compared against the remaining images below.
    os.system('rm *shifts2*')
    vv=open(grupo+'shifts2','a')
    xx=0.0
    yy=0.0
    vv.write(('%8.5s %8.5s' % (xx,yy))+'\n')
    vv.close()
    index=1
    for ima in lista_todas[1:]:
        #print where(lista_todas == ima)
        ima=string.strip(ima)
        sex(ima)
        # The catalog of this image is:
        catalog2=ima[:-5]+'.cat'
        if file_len(catalog2)==7:
            # No star found in this image: skip to the next group.
            print grupo+' sin combinar'
            break #The break stops the for; alignment will then fail and the
            # group will be reported as not combined.
        cc2=open(catalog2,'r')
        posx2=[]; posy2=[]; flux2=[];
        for star in cc2:
            if star[0] != '#':
                star=star.split()
                x=float(star[1])
                y=float(star[2])
                if x > 30. and x < 220. and y > 30. and y < 220.:
                    posx2.append(float(star[1]))
                    posy2.append(float(star[2]))
                    flux2.append(float(star[4]))
        if len(flux2)==0:
            # No stars in the region we searched.
            continue
        # Again bubble-sort by flux before searching for matching stars.
        band=int(0);
        while band==0:
            band=1
            for k in range(0,len(flux2)-1):
                if flux2[k]<flux2[k+1]:
                    aux=flux2[k+1]
                    flux2[k+1]=flux2[k]
                    flux2[k]=aux
                    aux2=posx2[k+1]
                    posx2[k+1]=posx2[k]
                    posx2[k]=aux2
                    aux3=posy2[k+1]
                    posy2[k+1]=posy2[k]
                    posy2[k]=aux3
                    band=0;
        #print flux2,posx2,posy2
        ### COMPARISON ALGORITHM ###
        #
        # Look for star pairs with the same relative separation and a similar
        # flux ratio.
        flux2=array(flux2);posx2=array(posx2);posy2=array(posy2);
        aa=0;
        contar=range(len(flux2))
        flux3=[]
        posx3=[]
        posy3=[]
        for i in contar[1:]:
            flux3=flux2/flux2[i]
            posx3=posx2-posx2[i]
            posy3=posy2-posy2[i]
            if aa==10:
                # aa==10 flags that a match (or fallback) was already written.
                break
            for j in range(len(flux0)):
                flux=flux0/flux0[j]
                posx=posx0-posx0[j]
                posy=posy0-posy0[j]
                limpos=6.
                if len(posx)==1:
                    ## OFFSET FOR AN IMAGE WITH A SINGLE STAR:
                    coo2=iraf.imcntr(input=ima,x_init=posx0[0],y_init = posy0[0],cboxsize = '60',Stdout=1)
                    # NOTE(review): 'centro' below is left over from an
                    # earlier loop; this probably meant to parse coo2 --
                    # confirm before relying on this branch.
                    x=float(centro.split(':')[1][:-1])
                    y=float(centro.split(':')[2])
                    vx=-x+posx0[0]
                    vy=-y+posy0[0]
                    vv=open(grupo+'shifts2','a')
                    vv.write(('%8.5s %8.5s' % (vx,vy))+'\n')
                    vv.close()
                    aa=10
                    break
                else:
                    donde=where(( posx3<posx[1] + limpos) & ( posx3>posx[1] - limpos) & (posy3<posy[1] + limpos) & ( posy3>posy[1] - limpos))
                    if len(donde[0])!=0:
                        limflux=flux[1]/2.
                        if abs(flux3[donde[0]][0]-flux[1])<limflux:
                            if flux[1] != 1.:
                                print posx0[1]-posx2[donde[0]],posy0[1]-posy2[donde[0]],ima,flux3[donde[0]],flux[1]
                                x=(posx0[1]-posx2[donde[0]])[0]
                                y=(posy0[1]-posy2[donde[0]])[0]
                                vv=open(grupo+'shifts2','a')
                                vv.write(('%8.5s %8.5s' % (x,y))+'\n')
                                vv.close()
                                aa=10
                                break
        if aa==0:
            # No similarity was found in this image; fall back to the shift
            # we already had from the coordinates.
            vv=open(grupo+'shifts2','a')
            vv.write(line_shift[index])
            vv.close()
        index=index+1
    os.system('more '+grupo+'shifts2')
    try:
        iraf.imalign(input=lista1,referenc=image00,coords=image00[:-5]+'.obj.1',output=lista2,shifts=grupo+'shifts2',boxsize='21',bigbox='23',negativ='no',backgro='INDEF',lower='INDEF',upper='INDEF',niterat='11',toleran='0',maxshif= 'INDEF',shiftim='yes',interp_='linear',boundar='constant',constan='0.',trimima='yes',verbose='yes',mode='h')
        iraf.imcombine(input=lista2,output='aFINAL_'+grupo+'.fits',combine="median",masktype="none",outtype="real",scale="none",project="no",reject="none",weight="none",logfile = "")
        print 'aFINAL_'+grupo+'.fits'
    except:
        print 'Grupo'+grupo+'sin combinar #~~~~~~~~~~~~~~~#'
        qr.write(grupo+'\n')
    #break
    print '##############################################################################'
print '...'
qr.close()
|
from django.contrib import admin
import messaging.models
# Register the messaging models with the default admin site, in the same
# order as before: MessageTemplate, Event, Message.
for _model in (
    messaging.models.MessageTemplate,
    messaging.models.Event,
    messaging.models.Message,
):
    admin.site.register(_model)
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from os import path
import sys
from d3m import container
from d3m.metadata import base as metadata_base
from common_primitives import dataset_to_dataframe, dataframe_image_reader
from distil.primitives.image_transfer import ImageTransferPrimitive
import utils as test_utils
class ImageTransferPrimitveTestCase(unittest.TestCase):
    """Tests for ImageTransferPrimitive.

    The dataset-loading and volume-construction steps were duplicated
    verbatim in both tests; they are factored into private helpers.  The
    class name keeps the original "Primitve" spelling so name-based test
    selection keeps working.
    """

    _dataset_path = path.abspath(path.join(path.dirname(__file__), "image_dataset_1"))

    def _load_images(self):
        """Load the test dataset and return it as a dataframe of decoded images."""
        dataset = test_utils.load_dataset(self._dataset_path)
        dataframe_hyperparams_class = (
            dataset_to_dataframe.DatasetToDataFramePrimitive.metadata.get_hyperparams()
        )
        dataframe_primitive = dataset_to_dataframe.DatasetToDataFramePrimitive(
            hyperparams=dataframe_hyperparams_class.defaults().replace(
                {"dataframe_resource": "0"}
            )
        )
        dataframe = dataframe_primitive.produce(inputs=dataset).value
        image_hyperparams_class = (
            dataframe_image_reader.DataFrameImageReaderPrimitive.metadata.get_hyperparams()
        )
        image_primitive = dataframe_image_reader.DataFrameImageReaderPrimitive(
            hyperparams=image_hyperparams_class.defaults().replace(
                {"return_result": "replace"}
            )
        )
        return image_primitive.produce(inputs=dataframe).value

    @staticmethod
    def _static_volumes():
        """Map the primitive's declared volume key to its file under D3MSTATICDIR."""
        primitive_volumes = ImageTransferPrimitive.metadata.get_volumes()
        return {
            primitive_volumes[0]["key"]: os.getenv("D3MSTATICDIR")
            + "/"
            + primitive_volumes[0]["file_digest"]
        }

    def test_basic(self):
        """The primitive yields a 5x512 feature matrix when told which column holds filenames."""
        images = self._load_images()
        image_transfer_hyperparams = ImageTransferPrimitive.metadata.get_hyperparams()
        image_transfer_primitive = ImageTransferPrimitive(
            hyperparams=image_transfer_hyperparams.defaults().replace(
                {"filename_col": 0}
            ),
            volumes=self._static_volumes(),
        )
        result = image_transfer_primitive.produce(inputs=images).value
        self.assertEqual(result.shape[0], 5)
        self.assertEqual(result.shape[1], 512)

    def test_no_hyperparam(self):
        """Without filename_col, the primitive locates the image column via its semantic type."""
        images = self._load_images()
        images.metadata = images.metadata.add_semantic_type(
            (
                metadata_base.ALL_ELEMENTS,
                images.metadata.get_column_index_from_column_name("filename"),
            ),
            "http://schema.org/ImageObject",
        )
        image_transfer_hyperparams = ImageTransferPrimitive.metadata.get_hyperparams()
        image_transfer_primitive = ImageTransferPrimitive(
            hyperparams=image_transfer_hyperparams.defaults(),
            volumes=self._static_volumes(),
        )
        result = image_transfer_primitive.produce(inputs=images).value
        self.assertEqual(result.shape[0], 5)
        self.assertEqual(result.shape[1], 512)
# Allow running this test module directly with the stdlib runner.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/python
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import cv2
from sklearn.cluster import DBSCAN
from extra_functions import cluster_gen
import pcl
import numpy as np
import matplotlib.cm as cm
import data_image_2
import itertools
from sklearn.cluster import MeanShift, estimate_bandwidth
# get pcd file
data_image_2.plot()
# Load Point Cloud file
cloud = pcl.load_XYZRGB('./test_rgb.pcd')
# Voxel Grid Downsampling filter
################################
# Create a VoxelGrid filter object for our input point cloud
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
# Note: this (1) means 1mx1mx1m is a poor choice of leaf size
# Experiment and find the appropriate size!
#LEAF_SIZE = 0.01
# NOTE(review): 45 is enormous if units are metres (see note above) --
# confirm the point cloud's units before trusting this value.
LEAF_SIZE =45
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
filename = './pcd_out/voxel_downsampled.pcd'
pcl.save(cloud_filtered, filename)
# PassThrough filter
################################
# Create a PassThrough filter object.
passthrough = cloud_filtered.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0
axis_max = 100
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the resultant point cloud.
cloud_filtered = passthrough.filter()
filename = './pcd_out/pass_through_filtered.pcd'
pcl.save(cloud_filtered, filename)
# RANSAC plane segmentation
################################
# Create the segmentation object
seg = cloud_filtered.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
# Max distance for a point to be considered fitting the model
# Experiment with different values for max_distance
# for segmenting the table
max_distance = 0.01
seg.set_distance_threshold(max_distance)
# Call the segment function to obtain set of inlier indices and model coefficients
inliers, coefficients = seg.segment()
# Extract outliers
# Save pcd for tabletop objects
################################
extracted_outliers = cloud_filtered.extract(inliers, negative=True)
e=np.asarray(extracted_outliers)
#print e[:,:-1]
filename = './pcd_out/extracted_outliers.pcd'
pcl.save(extracted_outliers, filename)
# Generate some clusters!
# Drop the last (color) column; cluster on the spatial coordinates only.
data = e[:,:-1]
print data
bandwidth = estimate_bandwidth(data, quantile=0.05, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(data)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
# #############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
fig = plt.figure()
ax =Axes3D(fig)
colors = itertools.cycle(["r", "b", "g","c","y","m"])
#colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    my_members = labels == k
    #print my_members
    cluster_center = cluster_centers[k]
    #ax.scatter(X[my_members, 0], X[my_members, 1], col + '.')
    #ax.scatter(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
    #markeredgecolor='k', markersize=14)
    ax.scatter(data[my_members, 0], data[my_members, 1],color=col,s=1)
    ax.scatter(cluster_center[0], cluster_center[1],color='black',s=10)
# NOTE(review): cluster_center[0] is a scalar coordinate, so len() on it
# raises TypeError; len(cluster_centers) was probably intended -- confirm.
print len(cluster_center[0])
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
import os
import cv2
import numpy as np
def bigavif(p):
    """Return True when the AVIF in 0/af/ is barely smaller (by < 100 bytes)
    than its 0/pq/ counterpart; False when the AVIF does not exist."""
    avif_path = '0/af/' + p
    if not os.path.isfile(avif_path):
        return False
    size_gain = os.path.getsize(avif_path) - os.path.getsize('0/pq/' + p)
    return size_gain < 100
# Pass 1: normalize alpha maps.  Files in alph/ are named <name>.<hexscale>.<ext>;
# each is rescaled by its embedded hex factor, reduced to 8 bits, and saved
# back as <name>.png before the original is deleted.
fyo=os.listdir('alph')
fyo.sort()
fyo=fyo[:-1]
for alphna in fyo:
    zet = alphna.split('.')
    if len(zet) !=3:
        # Not a 3-part name: assume normalization is done past this point.
        break
    al=cv2.imread('alph/'+alphna, cv2.IMREAD_GRAYSCALE).astype(np.uint)
    al=np.right_shift(al*int(zet[1],16),12).astype(np.uint8)
    cv2.imwrite('alph/'+zet[0]+'.png', al)
    os.remove('alph/'+alphna)
# Pass 2: for each image in 0/ not yet converted, zero out nearly-transparent
# pixels (alpha < 4, taken from a side-channel map in alph/ when present,
# otherwise from the image's own 4th channel), write the cleaned image to
# tmpklean.png and hand it to the matching zavif batch script.
fyo=os.listdir('0')
fyo.sort()
fyo=fyo[:-2]
emptyz=np.zeros(4, dtype=np.uint8)
for p in fyo:
    if os.path.isfile('0/af/'+p) or p.endswith('.png.png'):
        continue
    ymg=cv2.imread('0/'+p, cv2.IMREAD_UNCHANGED)
    h, w, chan = ymg.shape
    altalph='alph/'+p
    if os.path.isfile(altalph):
        amap=cv2.imread(altalph, cv2.IMREAD_UNCHANGED)
        # NOTE(review): this per-pixel loop could be vectorized as
        # ymg[amap < 4] = 0 if amap is 2-D -- confirm shapes first.
        for y in range(h):
            for x in range(w):
                if amap[y][x] < 4:
                    ymg[y][x]=emptyz
        cv2.imwrite('tmpklean.png', ymg)
        os.system('zavif2.bat '+p)
    else:
        for y in range(h):
            for x in range(w):
                if ymg[y][x][3] < 4:
                    ymg[y][x]=emptyz
        cv2.imwrite('tmpklean.png', ymg)
        os.system('zavif1.bat '+p)
    # If the AVIF barely improved on the pq version, drop the pq copy.
    if bigavif(p):
        os.remove('0/pq/'+p)
from string import Template
# 1
# Templates save typing and reduce code length.
# 2
# If you want to use the delimiter character $ literally inside a template,
# escape it by doubling it: $$.
# 3
'''To attach extra text directly after a substitution, wrap the name in braces,
like "the ${place}yard is far away from here";
with place="ship" its output will be "the shipyard is far away from here".
'''
def main():
    """Demonstrate string.Template substitution over a small shopping cart."""
    # Build the cart as a list of dicts; Template pulls $-names from dict keys.
    cart=[]
    cart.append(dict(item="coke",price=2,qty=1))
    cart.append(dict(item="cake",price=8,qty=1))
    cart.append(dict(item="joke",price=1,qty=1))
    print cart
    t = Template("$qty x $item = $price")
    total=0
    for data in cart:
        print t.substitute(data)
        # NOTE: sums unit prices only; qty is never multiplied in.
        total += data["price"]
    print "total = " ,total

if __name__ == "__main__":
    main()
|
#testing of the simulated annealing approach to the problem
import copy
import random
import math
import numpy as np
class Node:
    """A graph vertex identified by `name`, holding a set of (neighbor, cost) edges."""
    def __init__(self, name):
        self.name = name
        self.edges = set()
    def addEdge(self, toNode, cost):
        # Edges are (target node, edge cost) tuples; duplicates collapse in the set.
        self.edges.add((toNode, cost))
class GraphPartition:
def __init__(self, nodes):
#split the lists in half
self.state = (nodes[:len(nodes)/2], nodes[len(nodes)/2:])
def successor(self):
newState = self
#get random node from both sides
randNode1 = random.choice(self.state[0])
randNode2 = random.choice(self.state[1])
#remove randomly selected node, add to other side, and return new state
newState.state[0].remove(randNode1)
newState.state[1].append(randNode2)
newState.state[0].append(randNode1)
newState.state[1].remove(randNode2)
return newState
def cost(self):
total = 0
for node in self.state[0]:
for e in node.edges:
if e[0] in self.state[1]:
total += e[1]
return total
#create max nodes
nodes = []
for i in range(1000):
    nodes.append(Node(i))
#create max edges (random, symmetric, weight 1..10)
for i in range(100):
    fromNode = random.choice(nodes)
    toNode = random.choice(nodes)
    cost = random.randint(1,10)
    fromNode.addEdge(toNode,cost)
    toNode.addEdge(fromNode,cost)
#create starting graph object
g = GraphPartition(nodes)
T = 200
current_cost = g.cost()
global_min = g
global_min_cost = current_cost
#try to find better solutions via simulated annealing
for t in range(1,T):
    s = g.successor()
    new_cost = s.cost()
    if new_cost < current_cost:
        g = s
        current_cost = new_cost
    else:
        delta = new_cost - current_cost
        # NOTE(review): delta >= 0 here, so exp((delta*T)/t) >= 1 and every
        # worse move is accepted; standard annealing accepts with
        # exp(-delta/temperature) for a decreasing temperature -- confirm
        # the intended schedule.
        prob = np.exp((delta*T)/t)
        rand = random.uniform(0,1)
        if rand < prob:
            g = s
            current_cost = new_cost
    if current_cost < global_min_cost:
        global_min = g
        global_min_cost = current_cost
    print current_cost
print "GLOBAL MIN : " + str(global_min_cost)
print "------------------1----------------------"
print sum([len(node.edges) for node in global_min.state[0]])
print "------------------2----------------------"
print sum([len(node.edges) for node in global_min.state[1]])
|
from typing import Mapping
from .base import api_function, BaseFunction
from ..request import Request
# Public names exported by this module.
__all__ = (
    'System',
)
class System(BaseFunction):
    """
    Provides the function interface for the API endpoint's system information.
    """

    @api_function
    @classmethod
    async def get_versions(cls) -> Mapping[str, str]:
        """Fetch the API root and return the full version mapping as-is."""
        rqst = Request('GET', '/')
        async with rqst.fetch() as resp:
            return await resp.json()

    @api_function
    @classmethod
    async def get_manager_version(cls) -> str:
        """Fetch the API root and return only the 'manager' version field."""
        rqst = Request('GET', '/')
        async with rqst.fetch() as resp:
            ret = await resp.json()
            return ret['manager']

    @api_function
    @classmethod
    async def get_api_version(cls) -> str:
        """Fetch the API root and return only the 'version' field."""
        rqst = Request('GET', '/')
        async with rqst.fetch() as resp:
            ret = await resp.json()
            return ret['version']
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import torch_geometric as pyg
import os.path as osp
import torch
import torch.nn.functional as F
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from pandas import DataFrame
from torch_geometric.data import DataLoader
from torch_geometric.datasets import PPI
from torch_geometric.nn import GCNConv
# In[2]:
# path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'PPI')
# Resolve the PPI dataset directory relative to the current working directory
# (the __file__-based variant above is kept for script use).
path = osp.join(osp.abspath(''), '..', 'data', 'PPI')
train_dataset = PPI(path, split='train')
validation_dataset = PPI(path, split='val')
test_dataset = PPI(path, split='test')
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
# batch_size is measured in whole graphs, not nodes.
validation_loader = DataLoader(validation_dataset, batch_size=2, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=2, shuffle= False)
# In[3]:
# Module-level training flag (always True in this script).
intrain = True
class testNet(torch.nn.Module):
    """Two-layer GCN for PPI node classification (num_features -> 256 -> num_classes)."""

    def __init__(self):
        super(testNet, self).__init__()
        self.conv1 = GCNConv(train_dataset.num_features, 256)
        self.conv2 = GCNConv(256, train_dataset.num_classes)

    def forward(self, x, edge_index):
        x = self.conv1(x, edge_index)
        x = F.leaky_relu(x)
        # Fix: gate dropout on the module's own training flag rather than the
        # module-level `intrain` global, which was always True and therefore
        # kept dropout active even after model.eval().
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        x = F.log_softmax(x, dim=1)
        return x
# In[11]:
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
model = testNet().to(device)
# NOTE(review): BCEWithLogitsLoss applies a sigmoid internally, but the model
# already outputs log_softmax values -- confirm this combination is intended.
loss_op = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
# In[12]:
def train():
    """Train for 10 epochs per batch while profiling GCN layer timings.

    For every training graph batch, runs 10 optimization steps and
    accumulates the per-layer aggregate/mapping times (custom
    ``aggregateTime``/``mappingTime`` attributes exposed by the GCNConv
    layers in this setup).  Returns (mean train loss, profile DataFrame).
    """
    model.train()
    totalTrainLoss = 0.0
    gcn_PPI_profile = DataFrame(columns=['node_num', 'edge_num', 'max_degree',
                                         'conv1_in_channels', 'conv1_out_channels',
                                         'conv2_in_channels', 'conv2_out_channels',
                                         'conv1_agg_time', 'conv1_map_time',
                                         'conv2_agg_time', 'conv2_map_time'
                                         ]
                                )
    for batch in train_loader:
        batch = batch.to(device)
        totalAggTime = 0.0
        totalMapTime = 0.0
        totalAggTime_2 = 0.0
        totalMapTime_2 = 0.0
        for epoch in range(1, 11):
            x, edge_index = batch.x, batch.edge_index
            num_graphs = batch.num_graphs
            optimizer.zero_grad()
            pred = model(x, edge_index)
            loss = loss_op(pred, batch.y)
            totalTrainLoss += loss.item() * num_graphs
            totalAggTime += model.conv1.aggregateTime
            totalMapTime += model.conv1.mappingTime
            totalAggTime_2 += model.conv2.aggregateTime
            totalMapTime_2 += model.conv2.mappingTime
            loss.backward()
            optimizer.step()
        # log = 'batch_node_num:{:d}, batch_edge_num:{:d}, Loss:{:f}, Agg_time:{:f}, Map_time:{:f}'
        # print(log.format(
        #     batch.x.shape[0],
        #     batch.edge_index[0].shape[0],
        #     loss.item() * num_graphs,
        #     model.conv1.aggregateTime * 1e6,
        #     model.conv1.mappingTime * 1e6
        #     )
        # )
        # One profiling row per batch: timings averaged over the 10 epochs,
        # converted to microseconds.
        result = DataFrame([[batch.x.shape[0], batch.edge_index[0].shape[0],
                             np.max(np.bincount(batch.edge_index[0].cpu())),
                             model.conv1.in_channels, model.conv1.out_channels,
                             model.conv2.in_channels, model.conv2.out_channels,
                             totalAggTime / 10 * 1e6, totalMapTime / 10 * 1e6,
                             totalAggTime_2 / 10 * 1e6, totalMapTime_2 / 10 * 1e6
                             ]],
                           columns=['node_num', 'edge_num', 'max_degree',
                                    'conv1_in_channels', 'conv1_out_channels',
                                    'conv2_in_channels', 'conv2_out_channels',
                                    'conv1_agg_time', 'conv1_map_time',
                                    'conv2_agg_time', 'conv2_map_time'
                                    ]
                           )
        # NOTE: DataFrame.append is deprecated in modern pandas (use pd.concat).
        gcn_PPI_profile = gcn_PPI_profile.append(result)
    return totalTrainLoss/len(train_loader.dataset), gcn_PPI_profile
# In[13]:
# gcn_PPI_profile = DataFrame(columns=['node_num',
# 'edge_num',
# 'in_channels',
# 'out_channels',
# 'agg_time',
# 'map_time'
# ]
# )
# for epoch in range(1, 11):
# _, epoch_gcn_PPI_profile = train()
# gcn_PPI_profile = gcn_PPI_profile.append(epoch_gcn_PPI_profile)
# Run one profiled training pass and persist the per-batch timings.
_, gcn_PPI_profile = train()
gcn_PPI_profile.to_csv("./gcn_PPI_profile.csv")
# In[14]:
# CoraProfile = pd.read_excel("../gcn_Cora_profile.xlsx")
# CiteSeerProfile = pd.read_excel("../gcn_CiteSeer_profile.xlsx")
# PubMedProfile = pd.read_excel("../gcn_PubMed_profile.xlsx")
PPIProfile = pd.read_csv("./gcn_PPI_profile.csv")
# In[15]:
# Theoretical work estimates: mapping ~ n*d*d', aggregation ~ |E|*d'.
PPIProfile['conv1_mapx'] = PPIProfile['node_num'] * PPIProfile['conv1_in_channels'] * PPIProfile['conv1_out_channels']
PPIProfile['conv1_aggx'] = PPIProfile['edge_num'] * PPIProfile['conv1_out_channels']
PPIProfile['conv2_mapx'] = PPIProfile['node_num'] * PPIProfile['conv2_in_channels'] * PPIProfile['conv2_out_channels']
PPIProfile['conv2_aggx'] = PPIProfile['edge_num'] * PPIProfile['conv2_out_channels']
# In[17]:
# plt.plot(CoraProfile.epoch, CoraProfile.agg_time, color = "red", linestyle = "--",
# label = "Cora_agg_time n_v_aver = 7.8, d = 1443")
# plt.plot(CoraProfile.epoch, CoraProfile.map_time, color = "red", linestyle = "-",
# label = "Cora_map_time n_v_aver = 7.8, d = 1443")
# plt.plot(CiteSeerProfile.epoch, CiteSeerProfile.agg_time, color = "green", linestyle = "--",
# label = "CiteSeer_agg_time n_v_aver = 5.5, d = 3703")
# plt.plot(CiteSeerProfile.epoch, CiteSeerProfile.map_time, color = "green", linestyle = "-",
# label = "CiteSeer_map_time n_v_aver = 5.5, d = 3703")
# plt.plot(PubMedProfile.epoch, PubMedProfile.agg_time, color = "blue", linestyle = "--",
# label = "PubMed_agg_time n_v_aver = 9, d = 500")
# plt.plot(PubMedProfile.epoch, PubMedProfile.map_time, color = "blue", linestyle = "-",
# label = "PubMed_map_time n_v_aver = 9, d = 500")
# 2x3 grid: measured times vs. the work estimates (and max degree) per layer.
gs = matplotlib.gridspec.GridSpec(2,3)
fig = plt.figure(figsize=(15,10))
conv1_map_plot = fig.add_subplot(gs[0])
conv1_map_plot.set_title("conv1_map_time")
conv1_map_plot.scatter(PPIProfile['conv1_mapx'], PPIProfile['conv1_map_time'], color = 'red')
conv1_map_plot.set_xlabel("n * d * d\'")
conv1_map_plot.set_ylabel("us")
conv1_map_plot.set_ylim(0, 300000)
conv1_agg_plot = fig.add_subplot(gs[1])
conv1_agg_plot.set_title("conv1_agg_time")
conv1_agg_plot.scatter(PPIProfile['conv1_aggx'], PPIProfile['conv1_agg_time'], color='red')
conv1_agg_plot.set_xlabel("E * d\'")
conv1_agg_plot.set_ylabel("us")
conv1_agg_plot.set_ylim(0, 300000)
conv1_md_agg_plot = fig.add_subplot(gs[2])
conv1_md_agg_plot.set_title("conv1_agg_time")
conv1_md_agg_plot.scatter(PPIProfile['max_degree'], PPIProfile['conv1_agg_time'], color='blue')
conv1_md_agg_plot.set_xlabel("max_degree")
conv1_md_agg_plot.set_ylabel("us")
conv1_md_agg_plot.set_ylim(0, 300000)
conv2_map_plot = fig.add_subplot(gs[3])
conv2_map_plot.set_title("conv2_map_time")
conv2_map_plot.scatter(PPIProfile['conv2_mapx'], PPIProfile['conv2_map_time'], color = 'green')
conv2_map_plot.set_xlabel("n * d * d\'")
conv2_map_plot.set_ylabel("us")
conv2_map_plot.set_ylim(0, 300000)
conv2_agg_plot = fig.add_subplot(gs[4])
conv2_agg_plot.set_title("conv2_agg_time")
conv2_agg_plot.scatter(PPIProfile['conv2_aggx'], PPIProfile['conv2_agg_time'], color='green')
conv2_agg_plot.set_xlabel("E * d\'")
conv2_agg_plot.set_ylabel("us")
conv2_agg_plot.set_ylim(0, 300000)
conv2_md_agg_plot = fig.add_subplot(gs[5])
conv2_md_agg_plot.set_title("conv2_agg_time")
conv2_md_agg_plot.scatter(PPIProfile['max_degree'], PPIProfile['conv2_agg_time'], color='blue')
conv2_md_agg_plot.set_xlabel("max_degree")
conv2_md_agg_plot.set_ylabel("us")
conv2_md_agg_plot.set_ylim(0, 300000)
# plt.ylabel("us")
plt.legend(loc='upper right')
plt.savefig("../ppi_plot_cpu.png")
# In[10]:
np.max(np.bincount(train_dataset.data.edge_index[0]))
|
# Author:ambiguoustexture
# Date: 2020-02-05
# Split hightemp.txt into consecutive chunks of N lines, writing each chunk
# to hightemp_split_XX.txt (numbered from 01).
file = 'hightemp.txt'
n = int(input('N: '))
with open(file) as text:
    all_lines = text.readlines()
    for part, start in enumerate(range(0, len(all_lines), n), 1):
        chunk = all_lines[start:start + n]
        with open('hightemp_split_{:02d}.txt'.format(part), 'w') as out:
            out.writelines(chunk)
|
from graph_db.access.cursor import Cursor
from graph_db.engine.api import EngineAPI
from graph_db.engine.graph_engine import GraphEngine
class GraphDB:
    """Thin facade over a GraphEngine built from a configuration file."""

    def __init__(self, config_path: str):
        self.config_path = config_path
        self.graph_engine: EngineAPI = GraphEngine(config_path)

    def cursor(self):
        """Create a new cursor bound to the underlying engine."""
        return Cursor(self.graph_engine)

    def get_engine(self):
        """Expose the underlying engine object directly."""
        return self.graph_engine

    def get_graph(self):
        """Return the graph managed by the engine."""
        return self.graph_engine.get_graph()

    def get_stats(self):
        """Return the engine's statistics."""
        return self.graph_engine.get_stats()

    def close(self):
        """Shut down the underlying engine."""
        self.graph_engine.close()
def connect(config_path: str) -> "GraphDB":
    """Open and return a GraphDB backed by the configuration at *config_path*."""
    return GraphDB(config_path)
|
# NOTE(review): this table appears to be auto-generated troop-ID constants in
# the Mount & Blade module-system style — each `trp_*` name maps to its index
# in the game's troop registry, so the numbering must stay dense and in order.
# Do not hand-edit values; regenerate the file instead.
trp_player = 0
trp_multiplayer_profile_troop_male = 1
trp_multiplayer_profile_troop_female = 2
trp_temp_troop = 3
trp_find_item_cheat = 4
trp_random_town_sequence = 5
trp_tournament_participants = 6
trp_tutorial_maceman = 7
trp_tutorial_archer = 8
trp_tutorial_swordsman = 9
trp_novice_fighter = 10
trp_regular_fighter = 11
trp_veteran_fighter = 12
trp_champion_fighter = 13
trp_arena_training_fighter_1 = 14
trp_arena_training_fighter_2 = 15
trp_arena_training_fighter_3 = 16
trp_arena_training_fighter_4 = 17
trp_arena_training_fighter_5 = 18
trp_arena_training_fighter_6 = 19
trp_arena_training_fighter_7 = 20
trp_arena_training_fighter_8 = 21
trp_arena_training_fighter_9 = 22
trp_arena_training_fighter_10 = 23
trp_cattle = 24
trp_farmer = 25
trp_townsman = 26
trp_watchman = 27
trp_caravan_guard = 28
trp_mercenary_swordsman = 29
trp_hired_blade = 30
trp_mercenary_crossbowman = 31
trp_mercenary_horseman = 32
trp_mercenary_cavalry = 33
trp_orebro_sharpshooter = 34
trp_mounted_crossbowmerc = 35
trp_mercenaries_end = 36
trp_townguard_01 = 37
trp_orebro_knightslayer = 38
trp_swadian_recruit = 39
trp_swadian_militia = 40
trp_swadian_footman = 41
trp_swadian_infantry = 42
trp_swadian_sergeant = 43
trp_swadian_skirmisher = 44
trp_swadian_crossbowman = 45
trp_swadian_sharpshooter = 46
trp_swadian_man_at_arms = 47
trp_swadian_knight = 48
trp_swadian_messenger = 49
trp_swadian_deserter = 50
trp_swadian_prison_guard = 51
trp_swadian_castle_guard = 52
trp_kalmar_knight = 53
trp_vaegir_recruit = 54
trp_vaegir_footman = 55
trp_vaegir_skirmisher = 56
trp_vaegir_archer = 57
trp_vaegir_marksman = 58
trp_vaegir_veteran = 59
trp_vaegir_infantry = 60
trp_vaegir_guard = 61
trp_vaegir_horseman = 62
trp_vaegir_knight = 63
trp_vaegir_messenger = 64
trp_vaegir_deserter = 65
trp_vaegir_prison_guard = 66
trp_vaegir_castle_guard = 67
trp_khergit_tribesman = 68
trp_khergit_skirmisher = 69
trp_khergit_horseman = 70
trp_khergit_horse_archer = 71
trp_khergit_veteran_horse_archer = 72
trp_khergit_lancer = 73
trp_khergit_messenger = 74
trp_khergit_deserter = 75
trp_khergit_prison_guard = 76
trp_khergit_castle_guard = 77
trp_nord_recruit = 78
trp_nord_footman = 79
trp_nord_trained_footman = 80
trp_nord_warrior = 81
trp_nord_veteran = 82
trp_nord_champion = 83
trp_swed_champion_1 = 84
trp_swed_champion_2 = 85
trp_nord_huntsman = 86
trp_nord_archer = 87
trp_nord_veteran_archer = 88
trp_nord_messenger = 89
trp_nord_deserter = 90
trp_nord_prison_guard = 91
trp_nord_castle_guard = 92
trp_swed_trained_spearman = 93
trp_swed_veteran_spearman = 94
trp_swed_sergeant = 95
trp_swed_spear_sergeant = 96
trp_swed_spear_man_at_arms = 97
trp_rhodok_tribesman = 98
trp_rhodok_spearman = 99
trp_rhodok_trained_spearman = 100
trp_rhodok_veteran_spearman = 101
trp_rhodok_sergeant = 102
trp_rhodok_crossbowman = 103
trp_rhodok_trained_crossbowman = 104
trp_rhodok_veteran_crossbowman = 105
trp_rhodok_sharpshooter = 106
trp_rhodok_messenger = 107
trp_rhodok_deserter = 108
trp_rhodok_prison_guard = 109
trp_rhodok_castle_guard = 110
trp_sarranid_recruit = 111
trp_sarranid_footman = 112
trp_sarranid_veteran_footman = 113
trp_sarranid_infantry = 114
trp_sarranid_guard = 115
trp_sarranid_skirmisher = 116
trp_sarranid_archer = 117
trp_sarranid_master_archer = 118
trp_sarranid_horseman = 119
trp_sarranid_mamluke = 120
trp_sarranid_messenger = 121
trp_sarranid_deserter = 122
trp_sarranid_prison_guard = 123
trp_sarranid_castle_guard = 124
trp_butterlord = 125
trp_looter = 126
trp_bandit = 127
trp_brigand = 128
trp_mountain_bandit = 129
trp_forest_bandit = 130
trp_sea_raider = 131
trp_steppe_bandit = 132
trp_taiga_bandit = 133
trp_desert_bandit = 134
trp_black_khergit_horseman = 135
trp_manhunter = 136
trp_slave_driver = 137
trp_slave_hunter = 138
trp_slave_crusher = 139
trp_slaver_chief = 140
trp_follower_woman = 141
trp_hunter_woman = 142
trp_fighter_woman = 143
trp_sword_sister = 144
trp_refugee = 145
trp_peasant_woman = 146
trp_caravan_master = 147
trp_kidnapped_girl = 148
trp_town_walker_1 = 149
trp_town_walker_2 = 150
trp_khergit_townsman = 151
trp_khergit_townswoman = 152
trp_sarranid_townsman = 153
trp_sarranid_townswoman = 154
trp_village_walker_1 = 155
trp_village_walker_2 = 156
trp_spy_walker_1 = 157
trp_spy_walker_2 = 158
trp_tournament_master = 159
trp_trainer = 160
trp_constable_hareck = 161
trp_ramun_the_slave_trader = 162
trp_guide = 163
trp_xerina = 164
trp_dranton = 165
trp_kradus = 166
trp_tutorial_trainer = 167
trp_tutorial_student_1 = 168
trp_tutorial_student_2 = 169
trp_tutorial_student_3 = 170
trp_tutorial_student_4 = 171
trp_galeas = 172
trp_farmer_from_bandit_village = 173
trp_trainer_1 = 174
trp_trainer_2 = 175
trp_trainer_3 = 176
trp_trainer_4 = 177
trp_trainer_5 = 178
trp_ransom_broker_1 = 179
trp_ransom_broker_2 = 180
trp_ransom_broker_3 = 181
trp_ransom_broker_4 = 182
trp_ransom_broker_5 = 183
trp_ransom_broker_6 = 184
trp_ransom_broker_7 = 185
trp_ransom_broker_8 = 186
trp_ransom_broker_9 = 187
trp_ransom_broker_10 = 188
trp_tavern_traveler_1 = 189
trp_tavern_traveler_2 = 190
trp_tavern_traveler_3 = 191
trp_tavern_traveler_4 = 192
trp_tavern_traveler_5 = 193
trp_tavern_traveler_6 = 194
trp_tavern_traveler_7 = 195
trp_tavern_traveler_8 = 196
trp_tavern_traveler_9 = 197
trp_tavern_traveler_10 = 198
trp_tavern_bookseller_1 = 199
trp_tavern_bookseller_2 = 200
trp_tavern_bookseller_3 = 201
trp_tavern_minstrel_1 = 202
trp_tavern_minstrel_2 = 203
trp_tavern_minstrel_3 = 204
trp_tavern_minstrel_4 = 205
trp_tavern_minstrel_5 = 206
trp_musican_male = 207
trp_musican_female = 208
trp_musicans_end = 209
trp_kingdom_heroes_including_player_begin = 210
trp_npc1 = 211
trp_npc2 = 212
trp_npc3 = 213
trp_npc4 = 214
trp_npc5 = 215
trp_npc6 = 216
trp_npc7 = 217
trp_npc8 = 218
trp_npc9 = 219
trp_npc10 = 220
trp_npc11 = 221
trp_npc12 = 222
trp_npc13 = 223
trp_npc14 = 224
trp_npc15 = 225
trp_npc16 = 226
trp_npc17 = 227
trp_kingdom_1_lord = 228
trp_kingdom_2_lord = 229
trp_kingdom_3_lord = 230
trp_kingdom_4_lord = 231
trp_kingdom_5_lord = 232
trp_kingdom_6_lord = 233
trp_knight_1_1 = 234
trp_knight_1_2 = 235
trp_knight_1_3 = 236
trp_knight_1_4 = 237
trp_knight_1_5 = 238
trp_knight_1_6 = 239
trp_knight_1_7 = 240
trp_knight_1_8 = 241
trp_knight_1_9 = 242
trp_knight_1_10 = 243
trp_knight_1_11 = 244
trp_knight_1_12 = 245
trp_knight_1_13 = 246
trp_knight_1_14 = 247
trp_knight_1_15 = 248
trp_knight_1_16 = 249
trp_knight_1_17 = 250
trp_knight_1_18 = 251
trp_knight_1_19 = 252
trp_knight_1_20 = 253
trp_knight_2_1 = 254
trp_knight_2_2 = 255
trp_knight_2_3 = 256
trp_knight_2_4 = 257
trp_knight_2_5 = 258
trp_knight_2_6 = 259
trp_knight_2_7 = 260
trp_knight_2_8 = 261
trp_knight_2_9 = 262
trp_knight_2_10 = 263
trp_knight_2_11 = 264
trp_knight_2_12 = 265
trp_knight_2_13 = 266
trp_knight_2_14 = 267
trp_knight_2_15 = 268
trp_knight_2_16 = 269
trp_knight_2_17 = 270
trp_knight_2_18 = 271
trp_knight_2_19 = 272
trp_knight_2_20 = 273
trp_knight_3_1 = 274
trp_knight_3_2 = 275
trp_knight_3_3 = 276
trp_knight_3_4 = 277
trp_knight_3_5 = 278
trp_knight_3_6 = 279
trp_knight_3_7 = 280
trp_knight_3_8 = 281
trp_knight_3_9 = 282
trp_knight_3_10 = 283
trp_knight_3_11 = 284
trp_knight_3_12 = 285
trp_knight_3_13 = 286
trp_knight_3_14 = 287
trp_knight_3_15 = 288
trp_knight_3_16 = 289
trp_knight_3_17 = 290
trp_knight_3_18 = 291
trp_knight_3_19 = 292
trp_knight_3_20 = 293
trp_knight_4_1 = 294
trp_knight_4_2 = 295
trp_knight_4_3 = 296
trp_knight_4_4 = 297
trp_knight_4_5 = 298
trp_knight_4_6 = 299
trp_knight_4_7 = 300
trp_knight_4_8 = 301
trp_knight_4_9 = 302
trp_knight_4_10 = 303
trp_knight_4_11 = 304
trp_knight_4_12 = 305
trp_knight_4_13 = 306
trp_knight_4_14 = 307
trp_knight_4_15 = 308
trp_knight_4_16 = 309
trp_knight_4_17 = 310
trp_knight_4_18 = 311
trp_knight_4_19 = 312
trp_knight_4_20 = 313
trp_knight_5_1 = 314
trp_knight_5_2 = 315
trp_knight_5_3 = 316
trp_knight_5_4 = 317
trp_knight_5_5 = 318
trp_knight_5_6 = 319
trp_knight_5_7 = 320
trp_knight_5_8 = 321
trp_knight_5_9 = 322
trp_knight_5_10 = 323
trp_knight_5_11 = 324
trp_knight_5_12 = 325
trp_knight_5_13 = 326
trp_knight_5_14 = 327
trp_knight_5_15 = 328
trp_knight_5_16 = 329
trp_knight_5_17 = 330
trp_knight_5_18 = 331
trp_knight_5_19 = 332
trp_knight_5_20 = 333
trp_knight_6_1 = 334
trp_knight_6_2 = 335
trp_knight_6_3 = 336
trp_knight_6_4 = 337
trp_knight_6_5 = 338
trp_knight_6_6 = 339
trp_knight_6_7 = 340
trp_knight_6_8 = 341
trp_knight_6_9 = 342
trp_knight_6_10 = 343
trp_knight_6_11 = 344
trp_knight_6_12 = 345
trp_knight_6_13 = 346
trp_knight_6_14 = 347
trp_knight_6_15 = 348
trp_knight_6_16 = 349
trp_knight_6_17 = 350
trp_knight_6_18 = 351
trp_knight_6_19 = 352
trp_knight_6_20 = 353
trp_kingdom_1_pretender = 354
trp_kingdom_2_pretender = 355
trp_kingdom_3_pretender = 356
trp_kingdom_4_pretender = 357
trp_kingdom_5_pretender = 358
trp_kingdom_6_pretender = 359
trp_knight_1_1_wife = 360
trp_kingdom_1_lady_1 = 361
trp_kingdom_1_lady_2 = 362
trp_knight_1_lady_3 = 363
trp_knight_1_lady_4 = 364
trp_kingdom_l_lady_5 = 365
trp_kingdom_1_lady_6 = 366
trp_kingdom_1_lady_7 = 367
trp_kingdom_1_lady_8 = 368
trp_kingdom_1_lady_9 = 369
trp_kingdom_1_lady_10 = 370
trp_kingdom_1_lady_11 = 371
trp_kingdom_1_lady_12 = 372
trp_kingdom_l_lady_13 = 373
trp_kingdom_1_lady_14 = 374
trp_kingdom_1_lady_15 = 375
trp_kingdom_1_lady_16 = 376
trp_kingdom_1_lady_17 = 377
trp_kingdom_1_lady_18 = 378
trp_kingdom_1_lady_19 = 379
trp_kingdom_1_lady_20 = 380
trp_kingdom_2_lady_1 = 381
trp_kingdom_2_lady_2 = 382
trp_kingdom_2_lady_3 = 383
trp_kingdom_2_lady_4 = 384
trp_kingdom_2_lady_5 = 385
trp_kingdom_2_lady_6 = 386
trp_kingdom_2_lady_7 = 387
trp_kingdom_2_lady_8 = 388
trp_kingdom_2_lady_9 = 389
trp_kingdom_2_lady_10 = 390
trp_kingdom_2_lady_11 = 391
trp_kingdom_2_lady_12 = 392
trp_kingdom_2_lady_13 = 393
trp_kingdom_2_lady_14 = 394
trp_kingdom_2_lady_15 = 395
trp_kingdom_2_lady_16 = 396
trp_kingdom_2_lady_17 = 397
trp_kingdom_2_lady_18 = 398
trp_kingdom_2_lady_19 = 399
trp_kingdom_2_lady_20 = 400
trp_kingdom_3_lady_1 = 401
trp_kingdom_3_lady_2 = 402
trp_kingdom_3_lady_3 = 403
trp_kingdom_3_lady_4 = 404
trp_kingdom_3_lady_5 = 405
trp_kingdom_3_lady_6 = 406
trp_kingdom_3_lady_7 = 407
trp_kingdom_3_lady_8 = 408
trp_kingdom_3_lady_9 = 409
trp_kingdom_3_lady_10 = 410
trp_kingdom_3_lady_11 = 411
trp_kingdom_3_lady_12 = 412
trp_kingdom_3_lady_13 = 413
trp_kingdom_3_lady_14 = 414
trp_kingdom_3_lady_15 = 415
trp_kingdom_3_lady_16 = 416
trp_kingdom_3_lady_17 = 417
trp_kingdom_3_lady_18 = 418
trp_kingdom_3_lady_19 = 419
trp_kingdom_3_lady_20 = 420
trp_kingdom_4_lady_1 = 421
trp_kingdom_4_lady_2 = 422
trp_kingdom_4_lady_3 = 423
trp_kingdom_4_lady_4 = 424
trp_kingdom_4_lady_5 = 425
trp_kingdom_4_lady_6 = 426
trp_kingdom_4_lady_7 = 427
trp_knight_4_2b_daughter_1 = 428
trp_kingdom_4_lady_9 = 429
trp_knight_4_2c_wife_1 = 430
trp_kingdom_4_lady_11 = 431
trp_knight_4_2c_daughter = 432
trp_knight_4_1b_wife = 433
trp_kingdom_4_lady_14 = 434
trp_knight_4_1b_daughter = 435
trp_knight_4_2b_daughter_2 = 436
trp_kingdom_4_lady_17 = 437
trp_knight_4_2c_wife_2 = 438
trp_knight_4_1c_daughter = 439
trp_kingdom_4_lady_20 = 440
trp_kingdom_5_lady_1 = 441
trp_kingdom_5_lady_2 = 442
trp_kingdom_5_lady_3 = 443
trp_kingdom_5_lady_4 = 444
trp_kingdom_5_5_wife = 445
trp_kingdom_5_2b_wife_1 = 446
trp_kingdom_5_1c_daughter_1 = 447
trp_kingdom_5_2c_daughter_1 = 448
trp_kingdom_5_1c_wife_1 = 449
trp_kingdom_5_2c_wife_1 = 450
trp_kingdom_5_1c_daughter_2 = 451
trp_kingdom_5_2c_daughter_2 = 452
trp_kingdom_5_1b_wife = 453
trp_kingdom_5_2b_wife_2 = 454
trp_kingdom_5_1c_daughter_3 = 455
trp_kingdom_5_lady_16 = 456
trp_kingdom_5_1c_wife_2 = 457
trp_kingdom_5_2c_wife_2 = 458
trp_kingdom_5_1c_daughter_4 = 459
trp_kingdom_5_lady_20 = 460
trp_kingdom_6_lady_1 = 461
trp_kingdom_6_lady_2 = 462
trp_kingdom_6_lady_3 = 463
trp_kingdom_6_lady_4 = 464
trp_kingdom_6_lady_5 = 465
trp_kingdom_6_lady_6 = 466
trp_kingdom_6_lady_7 = 467
trp_kingdom_6_lady_8 = 468
trp_kingdom_6_lady_9 = 469
trp_kingdom_6_lady_10 = 470
trp_kingdom_6_lady_11 = 471
trp_kingdom_6_lady_12 = 472
trp_kingdom_6_lady_13 = 473
trp_kingdom_6_lady_14 = 474
trp_kingdom_6_lady_15 = 475
trp_kingdom_6_lady_16 = 476
trp_kingdom_6_lady_17 = 477
trp_kingdom_6_lady_18 = 478
trp_kingdom_6_lady_19 = 479
trp_kingdom_6_lady_20 = 480
trp_heroes_end = 481
trp_town_1_seneschal = 482
trp_town_2_seneschal = 483
trp_town_3_seneschal = 484
trp_town_4_seneschal = 485
trp_town_5_seneschal = 486
trp_town_6_seneschal = 487
trp_town_7_seneschal = 488
trp_town_8_seneschal = 489
trp_town_9_seneschal = 490
trp_town_10_seneschal = 491
trp_town_11_seneschal = 492
trp_town_12_seneschal = 493
trp_town_13_seneschal = 494
trp_town_14_seneschal = 495
trp_town_15_seneschal = 496
trp_town_16_seneschal = 497
trp_town_17_seneschal = 498
trp_town_18_seneschal = 499
trp_town_19_seneschal = 500
trp_town_20_seneschal = 501
trp_town_21_seneschal = 502
trp_town_22_seneschal = 503
trp_castle_1_seneschal = 504
trp_castle_2_seneschal = 505
trp_castle_3_seneschal = 506
trp_castle_4_seneschal = 507
trp_castle_5_seneschal = 508
trp_castle_6_seneschal = 509
trp_castle_7_seneschal = 510
trp_castle_8_seneschal = 511
trp_castle_9_seneschal = 512
trp_castle_10_seneschal = 513
trp_castle_11_seneschal = 514
trp_castle_12_seneschal = 515
trp_castle_13_seneschal = 516
trp_castle_14_seneschal = 517
trp_castle_15_seneschal = 518
trp_castle_16_seneschal = 519
trp_castle_17_seneschal = 520
trp_castle_18_seneschal = 521
trp_castle_19_seneschal = 522
trp_castle_20_seneschal = 523
trp_castle_21_seneschal = 524
trp_castle_22_seneschal = 525
trp_castle_23_seneschal = 526
trp_castle_24_seneschal = 527
trp_castle_25_seneschal = 528
trp_castle_26_seneschal = 529
trp_castle_27_seneschal = 530
trp_castle_28_seneschal = 531
trp_castle_29_seneschal = 532
trp_castle_30_seneschal = 533
trp_castle_31_seneschal = 534
trp_castle_32_seneschal = 535
trp_castle_33_seneschal = 536
trp_castle_34_seneschal = 537
trp_castle_35_seneschal = 538
trp_castle_36_seneschal = 539
trp_castle_37_seneschal = 540
trp_castle_38_seneschal = 541
trp_castle_39_seneschal = 542
trp_castle_40_seneschal = 543
trp_castle_41_seneschal = 544
trp_castle_42_seneschal = 545
trp_castle_43_seneschal = 546
trp_castle_44_seneschal = 547
trp_castle_45_seneschal = 548
trp_castle_46_seneschal = 549
trp_castle_47_seneschal = 550
trp_castle_48_seneschal = 551
trp_town_1_arena_master = 552
trp_town_2_arena_master = 553
trp_town_3_arena_master = 554
trp_town_4_arena_master = 555
trp_town_5_arena_master = 556
trp_town_6_arena_master = 557
trp_town_7_arena_master = 558
trp_town_8_arena_master = 559
trp_town_9_arena_master = 560
trp_town_10_arena_master = 561
trp_town_11_arena_master = 562
trp_town_12_arena_master = 563
trp_town_13_arena_master = 564
trp_town_14_arena_master = 565
trp_town_15_arena_master = 566
trp_town_16_arena_master = 567
trp_town_17_arena_master = 568
trp_town_18_arena_master = 569
trp_town_19_arena_master = 570
trp_town_20_arena_master = 571
trp_town_21_arena_master = 572
trp_town_22_arena_master = 573
trp_town_1_armorer = 574
trp_town_2_armorer = 575
trp_town_3_armorer = 576
trp_town_4_armorer = 577
trp_town_5_armorer = 578
trp_town_6_armorer = 579
trp_town_7_armorer = 580
trp_town_8_armorer = 581
trp_town_9_armorer = 582
trp_town_10_armorer = 583
trp_town_11_armorer = 584
trp_town_12_armorer = 585
trp_town_13_armorer = 586
trp_town_14_armorer = 587
trp_town_15_armorer = 588
trp_town_16_armorer = 589
trp_town_17_armorer = 590
trp_town_18_armorer = 591
trp_town_19_armorer = 592
trp_town_20_armorer = 593
trp_town_21_armorer = 594
trp_town_22_armorer = 595
trp_town_1_weaponsmith = 596
trp_town_2_weaponsmith = 597
trp_town_3_weaponsmith = 598
trp_town_4_weaponsmith = 599
trp_town_5_weaponsmith = 600
trp_town_6_weaponsmith = 601
trp_town_7_weaponsmith = 602
trp_town_8_weaponsmith = 603
trp_town_9_weaponsmith = 604
trp_town_10_weaponsmith = 605
trp_town_11_weaponsmith = 606
trp_town_12_weaponsmith = 607
trp_town_13_weaponsmith = 608
trp_town_14_weaponsmith = 609
trp_town_15_weaponsmith = 610
trp_town_16_weaponsmith = 611
trp_town_17_weaponsmith = 612
trp_town_18_weaponsmith = 613
trp_town_19_weaponsmith = 614
trp_town_20_weaponsmith = 615
trp_town_21_weaponsmith = 616
trp_town_22_weaponsmith = 617
trp_town_1_tavernkeeper = 618
trp_town_2_tavernkeeper = 619
trp_town_3_tavernkeeper = 620
trp_town_4_tavernkeeper = 621
trp_town_5_tavernkeeper = 622
trp_town_6_tavernkeeper = 623
trp_town_7_tavernkeeper = 624
trp_town_8_tavernkeeper = 625
trp_town_9_tavernkeeper = 626
trp_town_10_tavernkeeper = 627
trp_town_11_tavernkeeper = 628
trp_town_12_tavernkeeper = 629
trp_town_13_tavernkeeper = 630
trp_town_14_tavernkeeper = 631
trp_town_15_tavernkeeper = 632
trp_town_16_tavernkeeper = 633
trp_town_17_tavernkeeper = 634
trp_town_18_tavernkeeper = 635
trp_town_19_tavernkeeper = 636
trp_town_20_tavernkeeper = 637
trp_town_21_tavernkeeper = 638
trp_town_22_tavernkeeper = 639
trp_town_1_merchant = 640
trp_town_2_merchant = 641
trp_town_3_merchant = 642
trp_town_4_merchant = 643
trp_town_5_merchant = 644
trp_town_6_merchant = 645
trp_town_7_merchant = 646
trp_town_8_merchant = 647
trp_town_9_merchant = 648
trp_town_10_merchant = 649
trp_town_11_merchant = 650
trp_town_12_merchant = 651
trp_town_13_merchant = 652
trp_town_14_merchant = 653
trp_town_15_merchant = 654
trp_town_16_merchant = 655
trp_town_17_merchant = 656
trp_town_18_merchant = 657
trp_town_19_merchant = 658
trp_town_20_merchant = 659
trp_town_21_merchant = 660
trp_town_22_merchant = 661
trp_salt_mine_merchant = 662
trp_town_1_horse_merchant = 663
trp_town_2_horse_merchant = 664
trp_town_3_horse_merchant = 665
trp_town_4_horse_merchant = 666
trp_town_5_horse_merchant = 667
trp_town_6_horse_merchant = 668
trp_town_7_horse_merchant = 669
trp_town_8_horse_merchant = 670
trp_town_9_horse_merchant = 671
trp_town_10_horse_merchant = 672
trp_town_11_horse_merchant = 673
trp_town_12_horse_merchant = 674
trp_town_13_horse_merchant = 675
trp_town_14_horse_merchant = 676
trp_town_15_horse_merchant = 677
trp_town_16_horse_merchant = 678
trp_town_17_horse_merchant = 679
trp_town_18_horse_merchant = 680
trp_town_19_horse_merchant = 681
trp_town_20_horse_merchant = 682
trp_town_21_horse_merchant = 683
trp_town_22_horse_merchant = 684
trp_town_1_mayor = 685
trp_town_2_mayor = 686
trp_town_3_mayor = 687
trp_town_4_mayor = 688
trp_town_5_mayor = 689
trp_town_6_mayor = 690
trp_town_7_mayor = 691
trp_town_8_mayor = 692
trp_town_9_mayor = 693
trp_town_10_mayor = 694
trp_town_11_mayor = 695
trp_town_12_mayor = 696
trp_town_13_mayor = 697
trp_town_14_mayor = 698
trp_town_15_mayor = 699
trp_town_16_mayor = 700
trp_town_17_mayor = 701
trp_town_18_mayor = 702
trp_town_19_mayor = 703
trp_town_20_mayor = 704
trp_town_21_mayor = 705
trp_town_22_mayor = 706
trp_village_1_elder = 707
trp_village_2_elder = 708
trp_village_3_elder = 709
trp_village_4_elder = 710
trp_village_5_elder = 711
trp_village_6_elder = 712
trp_village_7_elder = 713
trp_village_8_elder = 714
trp_village_9_elder = 715
trp_village_10_elder = 716
trp_village_11_elder = 717
trp_village_12_elder = 718
trp_village_13_elder = 719
trp_village_14_elder = 720
trp_village_15_elder = 721
trp_village_16_elder = 722
trp_village_17_elder = 723
trp_village_18_elder = 724
trp_village_19_elder = 725
trp_village_20_elder = 726
trp_village_21_elder = 727
trp_village_22_elder = 728
trp_village_23_elder = 729
trp_village_24_elder = 730
trp_village_25_elder = 731
trp_village_26_elder = 732
trp_village_27_elder = 733
trp_village_28_elder = 734
trp_village_29_elder = 735
trp_village_30_elder = 736
trp_village_31_elder = 737
trp_village_32_elder = 738
trp_village_33_elder = 739
trp_village_34_elder = 740
trp_village_35_elder = 741
trp_village_36_elder = 742
trp_village_37_elder = 743
trp_village_38_elder = 744
trp_village_39_elder = 745
trp_village_40_elder = 746
trp_village_41_elder = 747
trp_village_42_elder = 748
trp_village_43_elder = 749
trp_village_44_elder = 750
trp_village_45_elder = 751
trp_village_46_elder = 752
trp_village_47_elder = 753
trp_village_48_elder = 754
trp_village_49_elder = 755
trp_village_50_elder = 756
trp_village_51_elder = 757
trp_village_52_elder = 758
trp_village_53_elder = 759
trp_village_54_elder = 760
trp_village_55_elder = 761
trp_village_56_elder = 762
trp_village_57_elder = 763
trp_village_58_elder = 764
trp_village_59_elder = 765
trp_village_60_elder = 766
trp_village_61_elder = 767
trp_village_62_elder = 768
trp_village_63_elder = 769
trp_village_64_elder = 770
trp_village_65_elder = 771
trp_village_66_elder = 772
trp_village_67_elder = 773
trp_village_68_elder = 774
trp_village_69_elder = 775
trp_village_70_elder = 776
trp_village_71_elder = 777
trp_village_72_elder = 778
trp_village_73_elder = 779
trp_village_74_elder = 780
trp_village_75_elder = 781
trp_village_76_elder = 782
trp_village_77_elder = 783
trp_village_78_elder = 784
trp_village_79_elder = 785
trp_village_80_elder = 786
trp_village_81_elder = 787
trp_village_82_elder = 788
trp_village_83_elder = 789
trp_village_84_elder = 790
trp_village_85_elder = 791
trp_village_86_elder = 792
trp_village_87_elder = 793
trp_village_88_elder = 794
trp_village_89_elder = 795
trp_village_90_elder = 796
trp_village_91_elder = 797
trp_village_92_elder = 798
trp_village_93_elder = 799
trp_village_94_elder = 800
trp_village_95_elder = 801
trp_village_96_elder = 802
trp_village_97_elder = 803
trp_village_98_elder = 804
trp_village_99_elder = 805
trp_village_100_elder = 806
trp_village_101_elder = 807
trp_village_102_elder = 808
trp_village_103_elder = 809
trp_village_104_elder = 810
trp_village_105_elder = 811
trp_village_106_elder = 812
trp_village_107_elder = 813
trp_village_108_elder = 814
trp_village_109_elder = 815
trp_village_110_elder = 816
trp_merchants_end = 817
trp_town_1_master_craftsman = 818
trp_town_2_master_craftsman = 819
trp_town_3_master_craftsman = 820
trp_town_4_master_craftsman = 821
trp_town_5_master_craftsman = 822
trp_town_6_master_craftsman = 823
trp_town_7_master_craftsman = 824
trp_town_8_master_craftsman = 825
trp_town_9_master_craftsman = 826
trp_town_10_master_craftsman = 827
trp_town_11_master_craftsman = 828
trp_town_12_master_craftsman = 829
trp_town_13_master_craftsman = 830
trp_town_14_master_craftsman = 831
trp_town_15_master_craftsman = 832
trp_town_16_master_craftsman = 833
trp_town_17_master_craftsman = 834
trp_town_18_master_craftsman = 835
trp_town_19_master_craftsman = 836
trp_town_20_master_craftsman = 837
trp_town_21_master_craftsman = 838
trp_town_22_master_craftsman = 839
trp_zendar_chest = 840
trp_tutorial_chest_1 = 841
trp_tutorial_chest_2 = 842
trp_bonus_chest_1 = 843
trp_bonus_chest_2 = 844
trp_bonus_chest_3 = 845
trp_household_possessions = 846
trp_temp_array_a = 847
trp_temp_array_b = 848
trp_temp_array_c = 849
trp_stack_selection_amounts = 850
trp_stack_selection_ids = 851
trp_notification_menu_types = 852
trp_notification_menu_var1 = 853
trp_notification_menu_var2 = 854
trp_banner_background_color_array = 855
trp_multiplayer_data = 856
trp_local_merchant = 857
trp_tax_rebel = 858
trp_trainee_peasant = 859
trp_fugitive = 860
trp_belligerent_drunk = 861
trp_hired_assassin = 862
trp_fight_promoter = 863
trp_spy = 864
trp_spy_partner = 865
trp_nurse_for_lady = 866
trp_temporary_minister = 867
trp_quick_battle_6_player = 868
trp_swadian_crossbowman_multiplayer_ai = 869
trp_swadian_infantry_multiplayer_ai = 870
trp_swadian_man_at_arms_multiplayer_ai = 871
trp_vaegir_archer_multiplayer_ai = 872
trp_vaegir_spearman_multiplayer_ai = 873
trp_vaegir_horseman_multiplayer_ai = 874
trp_khergit_dismounted_lancer_multiplayer_ai = 875
trp_khergit_veteran_horse_archer_multiplayer_ai = 876
trp_khergit_lancer_multiplayer_ai = 877
trp_nord_veteran_multiplayer_ai = 878
trp_nord_scout_multiplayer_ai = 879
trp_nord_archer_multiplayer_ai = 880
trp_rhodok_veteran_crossbowman_multiplayer_ai = 881
trp_rhodok_veteran_spearman_multiplayer_ai = 882
trp_rhodok_scout_multiplayer_ai = 883
trp_sarranid_infantry_multiplayer_ai = 884
trp_sarranid_archer_multiplayer_ai = 885
trp_sarranid_horseman_multiplayer_ai = 886
trp_swadian_crossbowman_multiplayer = 887
trp_swadian_infantry_multiplayer = 888
trp_swadian_man_at_arms_multiplayer = 889
trp_vaegir_archer_multiplayer = 890
trp_vaegir_spearman_multiplayer = 891
trp_vaegir_horseman_multiplayer = 892
trp_khergit_veteran_horse_archer_multiplayer = 893
trp_khergit_infantry_multiplayer = 894
trp_khergit_lancer_multiplayer = 895
trp_nord_archer_multiplayer = 896
trp_nord_veteran_multiplayer = 897
trp_nord_scout_multiplayer = 898
trp_rhodok_veteran_crossbowman_multiplayer = 899
trp_rhodok_sergeant_multiplayer = 900
trp_rhodok_horseman_multiplayer = 901
trp_sarranid_archer_multiplayer = 902
trp_sarranid_footman_multiplayer = 903
trp_sarranid_mamluke_multiplayer = 904
trp_multiplayer_end = 905
trp_log_array_entry_type = 906
trp_log_array_entry_time = 907
trp_log_array_actor = 908
trp_log_array_center_object = 909
trp_log_array_center_object_lord = 910
trp_log_array_center_object_faction = 911
trp_log_array_troop_object = 912
trp_log_array_troop_object_faction = 913
trp_log_array_faction_object = 914
trp_quick_battle_troop_1 = 915
trp_quick_battle_troop_2 = 916
trp_quick_battle_troop_3 = 917
trp_quick_battle_troop_4 = 918
trp_quick_battle_troop_5 = 919
trp_quick_battle_troop_6 = 920
trp_quick_battle_troop_7 = 921
trp_quick_battle_troop_8 = 922
trp_quick_battle_troop_9 = 923
trp_quick_battle_troop_10 = 924
trp_quick_battle_troop_11 = 925
trp_quick_battle_troops_end = 926
trp_tutorial_fighter_1 = 927
trp_tutorial_fighter_2 = 928
trp_tutorial_fighter_3 = 929
trp_tutorial_fighter_4 = 930
trp_tutorial_archer_1 = 931
trp_tutorial_master_archer = 932
trp_tutorial_rider_1 = 933
trp_tutorial_rider_2 = 934
trp_tutorial_master_horseman = 935
trp_swadian_merchant = 936
trp_vaegir_merchant = 937
trp_khergit_merchant = 938
trp_nord_merchant = 939
trp_rhodok_merchant = 940
trp_sarranid_merchant = 941
trp_startup_merchants_end = 942
trp_sea_raider_leader = 943
trp_looter_leader = 944
trp_bandit_leaders_end = 945
trp_relative_of_merchant = 946
trp_relative_of_merchants_end = 947
|
from BowlingGame import BowlingGame
def main():
    """
    Hi CardFlight devs! Thanks for reviewing my code. I didn't want to go over the 6 hour mark too much,
    so I stopped before implementing much of the scoring system, but the input logic and display is here
    I tried to cover as many cases as I could with the input. Hopefully they're as you expect
    X, "strike", or 10 will result in a strike (strings are set to lower case, so strike/STRIKE/sTrIkE are accepted)
    /, "spare", or two frames that add up to 10 (unless the first frame is a miss) will result in a spare
    -, or 0, or "miss" will result in a miss
    """
    print("\nWelcome to Backend Bowling!\n")
    print("| Frame |  1  |  2  |  3  |  4  |  5  |  6  |  7  |  8  |  9  |  10  |")
    print("|-------|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|\n")
    game = BowlingGame()
    # Idiomatic truthiness test instead of the `== False` comparison (PEP 8 E712).
    while not game.gameOver:
        game.getInput()
        game.getScoreSheet()
    print("Game over")


if __name__ == "__main__":
    main()
import discord
from random import randint
bot = discord.Client()
# Command prefix expected in front of every command, e.g. "l?help".
prefix = "l?"
@bot.event
async def on_connect():
    """Log the bot's identity once the gateway connection is established."""
    print("Connected. Logged in as {}".format(bot.user))
@bot.event
async def on_ready():
    """Advertise the help command via the bot's 'playing' status."""
    status = discord.Game(name=prefix + "help")
    await bot.change_presence(activity=status)
    print("Ready")
@bot.event
async def on_message(message):
    """Dispatch prefixed commands from human users in guild text channels.

    Messages from bots, direct messages, and messages that don't start with
    the prefix are ignored; unknown commands are silently dropped.
    """
    if not message.author.bot and (message.guild is not None and message.content.startswith(prefix)):
        # Everything after the first word is the argument list.
        arg = message.content.split(" ")[1:]
        # Command name: first word, lowercased, with the prefix stripped.
        cmd = message.content.lower().split(" ")[0][len(prefix):]
        if cmd == "hi":
            await message.channel.send("Hello, my name is Labut.")
        elif cmd == "blah":
            await message.channel.send("blah *blah* **blah** ***blah***")
        elif cmd == "macandcheese":
            await message.channel.send(f"Here, have some Mac and Cheese. *gives mac and cheese to <@{message.author.id}>*")
        elif cmd == "fastfood":
            await message.channel.send(f"Here, have some Fastfood. *gives hamburger and fries to <@{message.author.id}>*")
        elif cmd == "randomnumber":
            await message.channel.send(str(randint(0, 1000)))
        elif cmd == "help":
            await message.channel.send("**Labut help**\nl?help - shows this\nl?hi - says hello to you\nl?blah - does the blah blah blah thing\nl?macandcheese - gives you mac and cheese\nl?fastfood - gives you fastfood\nl?kill <victim> - kill the specified victim\nl?succ - ***s u c c***\nl?dicklength - shows the length of your dick\nl?credits - credits")
        elif cmd == "kill":
            # Fixed: the original wrapped both the argument lookup and the send
            # in a bare `except:`, which swallowed every error (including
            # Discord API failures and KeyboardInterrupt). Check for the
            # missing argument explicitly instead.
            if arg:
                victim = arg[0]
                await message.channel.send(f"**Loud screams of {victim}**")
            else:
                await message.channel.send("You need to specify what do you want to kill!")
        elif cmd == "succ":
            dicks = randint(0, 25)
            await message.channel.send(f"You have succ'd {dicks} dicks.")
        elif cmd == "dicklength":
            length = randint(-10, 20)
            await message.channel.send(f"Your dick is {length}cm long.")
        elif cmd == "credits":
            await message.channel.send("Original creator: <@490169798244958208>\nRewritten by: <@396699211946655745>")
# NOTE(review): "token" is a placeholder — supply the real bot token, ideally
# read from an environment variable rather than committed to source.
bot.run("token")
|
import os
import pandas as pd
from osmo_camera.calibration.temperature import (
temperature_given_digital_count_calibrated,
)
def process_temperature_log(
    experiment_dir,
    local_sync_directory_path,
    temperature_log_filename="temperature.csv",
):
    """Load an experiment's temperature log and add a calibrated Celsius column.

    Reads ``<local_sync_directory_path>/<experiment_dir>/<temperature_log_filename>``
    as CSV (parsing ``capture_timestamp`` as datetimes) and derives a
    ``temperature_c`` column from ``digital_count`` via the calibration function.

    Returns the resulting pandas DataFrame.
    """
    log_path = os.path.join(
        local_sync_directory_path, experiment_dir, temperature_log_filename
    )
    frame = pd.read_csv(log_path, parse_dates=["capture_timestamp"])
    frame["temperature_c"] = frame["digital_count"].apply(
        temperature_given_digital_count_calibrated
    )
    return frame
|
from django.views.generic import CreateView, UpdateView, DetailView
from django.urls import reverse
from examples.forms import ExampleForm
from examples.models import Example
class ExampleFormViewMixin(object):
    """Shared form configuration for the Example create/update views."""
    form_class = ExampleForm
    template_name = 'form.html'

    def get_success_url(self):
        """Redirect to the detail page of the object just saved."""
        return reverse('examples:detail', kwargs={'pk': self.object.pk})
class ExampleCreateView(ExampleFormViewMixin, CreateView):
    """Create an Example using the shared form configuration."""
    pass


example_create_view = ExampleCreateView.as_view()
class ExampleUpdateView(ExampleFormViewMixin, UpdateView):
    """Edit an existing Example using the shared form configuration."""
    model = Example


example_update_view = ExampleUpdateView.as_view()
class ExampleDetailView(DetailView):
    """Render a single Example along with its audit history."""
    model = Example
    template_name = 'detail.html'

    def get_context_data(self, **kwargs):
        """Add the object's audit trail to the template context."""
        context = super().get_context_data(**kwargs)
        # `audit_records` is presumably a reverse relation holding audit
        # entries for this object — confirm against the Example model.
        context['history'] = self.object.audit_records.all()
        return context


example_detail_view = ExampleDetailView.as_view()
|
import numpy as np
from numba import njit
from game.othello import Othello, array_to_bits
import game.bitboard as bitop
import random
@njit
def simulate(o: Othello):
    """Play one game of uniformly random legal moves starting from *o*.

    Returns the result for the side counted in ``o.array[0]``:
    1 for a win, 0.5 for a draw, 0 for a loss.
    """
    # Parity of plies played so far (0 = starting side to move).
    turn = 0
    while not o.terminated():
        moves = bitop.pack(o.my_moves())
        if moves == 0:
            # No legal move for the side to move: forced pass.
            o = o.make_move_pass()
        else:
            # Rejection-sample a random set bit of the 64-bit move mask.
            while True:
                index = random.randrange(64)
                if (1 << index) & moves:
                    row, col = divmod(index, 8)
                    o = o.make_move(row, col)
                    break
        turn ^= 1
    if turn:
        # Odd number of plies: pass once more so the final board is viewed
        # from the starting side's perspective again.
        # NOTE(review): inferred — confirm against Othello.make_move_pass.
        o = o.make_move_pass()
    p0 = np.sum(o.array[0])
    p1 = np.sum(o.array[1])
    print(o.to_string())  # NOTE(review): debug output left in the playout loop?
    if p0 > p1:
        return 1
    elif p0 == p1:
        return 0.5
    else:
        return 0
@njit
def test(N):
    """Run N random playouts from a fixed opening position and print the total.

    The position is the standard four-disc Othello start plus two obstacle
    squares at (2,2) and (5,5).
    """
    # Board planes: arr[0]/arr[1] are the two players' discs, arr[2] obstacles
    # — inferred from array_to_bits returning (my, opp, obs); confirm.
    arr = np.zeros((3, 8, 8), dtype=np.uint64)
    arr[0][3][3] = 1
    arr[0][4][4] = 1
    arr[1][3][4] = 1
    arr[1][4][3] = 1
    arr[2][2][2] = 1
    arr[2][5][5] = 1
    my, opp, obs = array_to_bits(arr)
    pts = 0
    for _ in range(N):
        pts += simulate(Othello(my, opp, obs))
    print(pts)
if __name__ == '__main__':
    # Smoke test: run a single random playout.
    test(1)
|
from unittest.mock import Mock
import pytest
from game import Game
from model.components.skill import SkillComponent
from model.config import config
from model.helper_functions.item_callbacks import restore_skill_points
class TestRestoreSkillPoints:
    """Unit tests for the restore_skill_points item callback."""

    @pytest.fixture()
    def skill_component(self):
        # Register a fresh 50-point skill component on the player singleton.
        component = SkillComponent(Mock(), 50)
        Game.instance.skill_system.set(Game.instance.player, component)
        yield component

    def test_restore_skill_points_restores_skill_points(self, skill_component):
        # Potion restores 15 points onto a 30/50 pool -> 45.
        config.data.item.skillPointPotion.restores = 15
        skill_component.max_skill_points = 50
        skill_component.skill_points = 30
        restore_skill_points()
        assert skill_component.skill_points == 30 + 15

    def test_restore_skill_points_doesnt_overflow(self, skill_component):
        # 40 + 15 would exceed the 50-point cap; expect clamping to the max.
        skill_component.max_skill_points = 50
        config.data.item.skillPointPotion.restores = 15
        skill_component.skill_points = 40
        restore_skill_points()
        assert skill_component.skill_points == skill_component.max_skill_points

    def test_restore_skill_points_doesnt_restore_if_full(self, skill_component):
        # Already at the cap; the callback must not push points past it.
        skill_component.max_skill_points = skill_component.skill_points = 50
        restore_skill_points()
        assert skill_component.skill_points == skill_component.max_skill_points
|
from flask_login import LoginManager, AnonymousUserMixin
class MyAnonymousUser(AnonymousUserMixin):
    # Anonymous users report a fixed role so role checks never hit None.
    def get_role(self):
        return 'AnonymousRole'
# Shared Flask-Login manager: strong session protection, redirect
# unauthenticated users to the auth blueprint's login view, and use the
# role-aware anonymous user class above.
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
login_manager.anonymous_user = MyAnonymousUser
|
from typing import Dict, List, Any
import torch
import numpy as np
from torch.nn import Linear, Dropout, functional as F
from torch.nn import CrossEntropyLoss
from pytorch_pretrained_bert.modeling import BertModel, BertOnlyMLMHead
from allennlp.nn.util import get_text_field_mask
from allennlp.nn.util import sequence_cross_entropy_with_logits
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from allennlp.nn.util import viterbi_decode
from allennlp.training.util import rescale_gradients
from babybertsrl import configs
class MTBert(torch.nn.Module):
    """
    Multi-task BERT.
    It has a head for MLM and another head for SRL, and can be trained jointly on both tasks
    """

    def __init__(self,
                 id2tag_wp_srl: Dict[int, str],
                 id2tag_wp_mlm: Dict[int, str],
                 bert_model: BertModel,
                 embedding_dropout: float = 0.0,
                 ) -> None:
        super().__init__()
        self.bert_model = bert_model
        # vocab for heads
        self.id2tag_wp_srl = id2tag_wp_srl
        self.id2tag_wp_mlm = id2tag_wp_mlm
        # Allen NLP vocab gives same word indices as word-piece tokenizer
        # because indices are obtained from word-piece tokenizer during conversion to instances
        # make one projection layer for each task
        self.head_srl = Linear(self.bert_model.config.hidden_size, len(self.id2tag_wp_srl))
        # MLM head is tied to the input word embeddings.
        self.head_mlm = BertOnlyMLMHead(self.bert_model.config, self.bert_model.embeddings.word_embeddings.weight)
        self.embedding_dropout = Dropout(p=embedding_dropout)
        self.xe = CrossEntropyLoss(ignore_index=configs.Training.ignored_index)  # ignore tags with index=ignore_index

    def forward(self,
                task: str,
                tokens: Dict[str, torch.Tensor],
                indicator: torch.Tensor,  # indicates either masked word, or predicate
                metadata: List[Dict[str, Any]],
                tags: torch.LongTensor = None,
                ) -> Dict[str, torch.Tensor]:
        """
        Parameters
        ----------
        task: string indicating which projection layer to use: either "srl" or "mlm"
        tokens : Dict[str, torch.LongTensor], required
            The output of ``TextField.as_array()``, which should typically be passed directly to a
            ``TextFieldEmbedder``. For this model, this must be a `SingleIdTokenIndexer` which
            indexes wordpieces from the BERT vocabulary.
        indicator: torch.LongTensor, required.
            An integer ``SequenceFeatureField`` representation of the position of the masked token or predicate
            in the sentence. This should have shape (batch_size, num_tokens) and importantly, can be
            all zeros, in the case that the sentence has no mask. # TODO so is this required even for MLM?
        tags : torch.LongTensor, optional (default = None)
            A torch tensor representing the sequence of integer gold class labels
            of shape ``(batch_size, num_tokens)``
        metadata : ``List[Dict[str, Any]]``, optional, (default = None)
            metadata contains the original words in the sentence, the masked word or predicate,
            and start offsets for converting wordpieces back to a sequence of words.
        Returns
        -------
        An output dictionary consisting of:
        logits : torch.FloatTensor
            A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
            unnormalised log probabilities of the tag classes.
        loss : torch.FloatTensor, optional
            A scalar loss to be optimised.
        """
        loss = None
        # move to GPU
        # NOTE(review): unconditionally requires CUDA; fails on CPU-only hosts.
        tokens['tokens'] = tokens['tokens'].cuda()
        indicator = indicator.cuda()
        if tags is not None:
            tags = tags.cuda()
        # get BERT contextualized embeddings
        attention_mask = get_text_field_mask(tokens)
        bert_embeddings, _ = self.bert_model(input_ids=tokens['tokens'],
                                             token_type_ids=indicator,
                                             attention_mask=attention_mask,
                                             output_all_encoded_layers=False)
        embedded_text_input = self.embedding_dropout(bert_embeddings)
        batch_size, sequence_length, _ = embedded_text_input.size()
        # use correct head for task
        if task == 'mlm':
            logits = self.head_mlm(bert_embeddings)  # projects to vector of size bert_config.vocab_size
            if tags is not None:
                loss = self.xe(logits.view(-1, self.bert_model.config.vocab_size), tags.view(-1))
        elif task == 'srl':
            logits = self.head_srl(embedded_text_input)
            if tags is not None:
                loss = sequence_cross_entropy_with_logits(logits, tags, attention_mask)
        else:
            raise AttributeError('Invalid arg to "task"')
        output_dict = {
            'tokens': tokens['tokens'],        # for decoding MLM tags
            'loss': loss,
            "logits": logits,
            "attention_mask": attention_mask,  # for decoding BIO SRL tags
            'start_offsets': [],               # for decoding BIO SRL tags
            'in': [],                          # for decoding MLM tags
            'gold_tags': [],                   # for computing f1 score
        }
        # add additional info for decoding
        for d in metadata:
            output_dict['start_offsets'].append(d['start_offsets'])
            output_dict['in'].append(d['in'])
            output_dict['gold_tags'].append(d['gold_tags'])
        return output_dict

    def decode_mlm(self,
                   output_dict: Dict[str, Any],
                   ) -> List[List[str]]:
        """
        :returns original sequence with [MASK] replaced with highest scoring word-piece.
        No viterbi or handling word-piece sequences, because task is MLM, not SRL.
        """
        logits = output_dict['logits'].detach().cpu().numpy()
        tokens = output_dict['tokens'].detach().cpu().numpy()  # integer array with shape [batch size, seq length]
        res = []
        num_sequences = len(logits)
        assert num_sequences == len(output_dict['tokens'])
        for seq_id in range(num_sequences):
            # get predicted wp
            wp_id = np.where(tokens[seq_id] == configs.Data.mask_vocab_id)
            assert len(wp_id) == 1
            logits_for_masked_wp = logits[seq_id][wp_id]  # shape is now [vocab_size]
            # np.asscalar was deprecated and removed in NumPy >= 1.23;
            # int(...) is the supported way to extract the scalar index.
            tag_wp_id = int(np.argmax(logits_for_masked_wp))
            tag_wp = self.id2tag_wp_mlm[tag_wp_id]
            # fill in input sequence
            mlm_in = output_dict['in'][seq_id]
            filled_in_sequence = mlm_in.copy()
            filled_in_sequence[mlm_in.index('[MASK]')] = tag_wp
            res.append(filled_in_sequence)
        return res  # sequence with predicted word-piece, one per sequence in batch

    def decode_srl(self,
                   output_dict: Dict[str, Any],
                   ) -> List[List[str]]:
        """
        for each sequence in batch:
        1) get max likelihood tags
        2) convert back from wordpieces
        Do NOT use decoding constraints - transition matrix has zeros only
        we are interested in learning dynamics, not best performance.
        Note: decoding is performed on word-pieces, and word-pieces are then converted to whole words
        """
        # get probabilities
        logits = output_dict['logits']
        reshaped_logits = logits.view(-1, len(self.id2tag_wp_srl))  # collapse time steps and batches
        class_probabilities = F.softmax(reshaped_logits, dim=-1).view([logits.shape[0],
                                                                       logits.shape[1],
                                                                       len(self.id2tag_wp_srl)])
        attention_mask = get_lengths_from_binary_sequence_mask(output_dict['attention_mask']).data.tolist()
        # ph: transition matrices contain only ones (and no -inf, which would signal illegal transition)
        transition_matrix = torch.zeros([len(self.id2tag_wp_srl), len(self.id2tag_wp_srl)])
        # loop over each sequence in batch
        res = []
        for seq_id in range(logits.shape[0]):
            # get max likelihood tags, considering only un-padded positions
            length = attention_mask[seq_id]
            tag_wp_probabilities = class_probabilities[seq_id].detach().cpu()[:length]
            ml_tag_wp_ids, _ = viterbi_decode(tag_wp_probabilities, transition_matrix)  # ml = max likelihood
            ml_tags_wp = [self.id2tag_wp_srl[tag_id] for tag_id in ml_tag_wp_ids]
            # convert back from wordpieces
            ml_tags = [ml_tags_wp[i] for i in output_dict['start_offsets'][seq_id]]  # specific to BIO SRL tags
            res.append(ml_tags)
        return res  # list of max likelihood tags

    def train_on_batch(self, task, batch, optimizer):
        """One optimization step on *batch* for the given task; returns the loss."""
        # forward + loss
        optimizer.zero_grad()
        output_dict = self(task, **batch)  # input is dict[str, tensor]
        loss = output_dict['loss']
        if torch.isnan(loss):
            raise ValueError("nan loss encountered")
        # backward + update
        loss.backward()
        rescale_gradients(self, grad_norm=1.0)
        optimizer.step()
        return loss
#this program is used to find the number
# Binary-search "guess the number" driver: n yes/no questions can
# distinguish 2**n values, hence the 1..2**n range.
from util import utility
import math
try:
    noOfTimes = int(input("How much time you want to ask the question:"))
    low = 0
    high = int(math.pow(2, noOfTimes))
    print("Think a number between(", low+1, ")to(", high, ")in range")
    # utility.question presumably runs the interactive bisection -- confirm.
    print(utility.question(low, high))
except ValueError:
    # Raised by int() on non-numeric input.
    print("ENTER THE INT VALUES")
# 10/30/17
# Number Cycler 1-100
# Print the numbers 1..100 repeatedly, forever (interrupt to stop).
# The original also kept a counter `x` that was incremented but never
# read anywhere; the dead variable is removed.
while True:
    for counter in range(1, 101):
        print(counter)
# Done
|
from django.db import models
from residents.models import Community, Area
from smart_selects.db_fields import ChainedForeignKey
class IPCamera(models.Model):
    # One IP camera endpoint belonging to a community/area; `type` encodes
    # the camera's role at the gate.
    class Meta:
        verbose_name_plural = "IP Camera Settings"
    # (stored code, human-readable label) choices for `type`.
    STATUS = (
        ('EF', 'Entry Front Camera'),
        ('EB', 'Entry Back Camera'),
        ('IC', 'IC Camera'),
        ('XF', 'Exit Front Camera'),
        ('XB', 'Exit Back Camera'),
        ('FC', 'Face Camera')
    )
    # Camera stream/control URL.
    url = models.CharField(max_length=255)
    type = models.CharField(max_length=2, choices=STATUS, default='EF')
    community = models.ForeignKey(Community,on_delete=models.CASCADE)
    # django-smart-selects: the admin's area dropdown is narrowed to areas
    # of the selected community.
    area = ChainedForeignKey(Area,chained_field="community",
            chained_model_field="community",
            show_all=False,
            auto_choose=False,
            sort=True)
class Boomgate(models.Model):
    # A boom gate controller endpoint, mirroring IPCamera's structure.
    class Meta:
        verbose_name_plural = "Boomgate Settings"
    # (stored code, human-readable label) choices for `type`.
    STATUS = (
        ('E', 'Entry Boomgate'),
        ('X', 'Exit Boomgate'),
    )
    # Gate control URL.
    url = models.CharField(max_length=255)
    type = models.CharField(max_length=2, choices=STATUS, default='E')
    community = models.ForeignKey(Community,on_delete=models.CASCADE)
    # django-smart-selects: area choices depend on the selected community.
    area = ChainedForeignKey(Area,chained_field="community",
            chained_model_field="community",
            show_all=False,
            auto_choose=False,
            sort=True)
from os import error, path
import sys
from typing import Set
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
sys.path.append(path.dirname(path.dirname(
path.abspath(path.dirname(__file__)))))
from cctpy import *
from work.draw和cuda对比.A04run import create_gantry_beamline,run
def beamline_phase_ellipse_multi_delta(bl: Beamline, particle_number: int,
                                       dps: List[float], describles: List[str] = None):
    """Track and plot x/y phase-space ellipses of *bl* for several momentum offsets.

    bl              beamline to track
    particle_number number of particles per ellipse
    dps             momentum offsets (dp/p) to plot
    describles      matplotlib format strings, one per entry of dps

    Raises ValueError when fewer format strings than offsets are supplied.
    """
    # The original declared `describles: str = [...]` -- a mutable default
    # argument (shared between calls) mis-annotated as str. Default is now
    # created per call; passing an explicit list behaves exactly as before.
    if describles is None:
        describles = ['r-', 'y-', 'b-', 'k-', 'g-', 'c-', 'm-']
    if len(dps) > len(describles):
        raise ValueError(
            f'describles(size={len(describles)}) 长度应大于 dps(size={len(dps)})')
    xs = []
    ys = []
    for dp in dps:
        x, y = bl.track_phase_ellipse(
            x_sigma_mm=3.5, xp_sigma_mrad=7.5,
            y_sigma_mm=3.5, yp_sigma_mrad=7.5,
            delta=dp, particle_number=particle_number,
            kinetic_MeV=215, concurrency_level=16,
            footstep=100*MM
        )
        xs.append(x)
        ys.append(y)
    # Left panel: horizontal (x, x') phase space.
    plt.subplot(121)
    for i in range(len(dps)):
        plt.plot(*P2.extract(xs[i]), describles[i])
    plt.xlabel(xlabel='x/mm')
    plt.ylabel(ylabel='xp/mr')
    plt.title(label='x-plane')
    plt.legend(['dp'+str(int(dp*100)) for dp in dps])
    plt.axis("equal")
    # Right panel: vertical (y, y') phase space.
    plt.subplot(122)
    for i in range(len(dps)):
        plt.plot(*P2.extract(ys[i]), describles[i])
    plt.xlabel(xlabel='y/mm')
    plt.ylabel(ylabel='yp/mr')
    plt.title(label='y-plane')
    plt.legend(['dp'+str(int(dp*100)) for dp in dps])
    plt.axis("equal")
    plt.show()
if __name__ == '__main__':
    BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()
    # Optimized gantry parameters (magnet currents/geometry presumably --
    # semantics defined by create_gantry_beamline; confirm there).
    param = [
        -9.637097934233304630e-02,
        1.117754653599041248e+01,
        2.232343668400407921e+01,
        3.898027532185977151e+01,
        -1.018410794537818583e+04,
        2.300000000000000000e+01,
        2.300000000000000000e+01,
        1.174200552539830245e+00,
        5.925234508968078018e-01,
        5.886982145475472272e-01,
        2.723161025187265105e-01,
        1.715738297020128755e-01,
    ]
    bl = create_gantry_beamline(param)
    # print(bl.get_length())
    # beamline_phase_ellipse_multi_delta(
    #     bl, 8, [-0.05]
    # )
    # run() expects a 2-D array of parameter sets (one row per candidate).
    run(numpy.array([param]))
    # Plot3.plot_beamline(bl,
    #                     describes=['r-', 'r-', 'r-', 'b-', 'b-', 'g-', 'g-', 'b-', 'b-', 'b-', 'r-', 'r-', 'b-', 'b-',
    #                                'g-', 'g-', 'b-', 'b-'])
    # track = bl.track_ideal_particle(
    #     kinetic_MeV=215,
    #     s=0,
    #     footstep=1 * MM
    # )
    # Plot3.plot_p3s(track, describe='k-')
    # Plot3.show()
from datetime import datetime
from django.db import models
from rest_hooks.models import Hook
class Note(models.Model):
    """A simple titled note; hooked into django-rest-hooks via the import above."""
    title = models.CharField(max_length=140)
    # Pass the *callable*, not datetime.now(): calling it in the class body
    # evaluates once at import time, freezing the default timestamp for the
    # lifetime of the process.
    updated_at = models.DateTimeField(default=datetime.now)
    content = models.TextField()

    def __unicode__(self):
        return self.title

    def save(self, **kwargs):
        # Forward keyword arguments (force_insert, using, update_fields, ...)
        # to Django's save; the original silently discarded them.
        return super(Note, self).save(**kwargs)
# Monkey patching Hooks to always be associated
# with User pk=1 cause we want it to be free-for-all
# This is bad, mkayyy
# NOTE(review): relies on fields[3] being Hook's user FK -- a positional
# assumption that silently breaks if rest_hooks reorders its fields.
Hook._meta.fields[3].default = 1
from django.db import models
class Collection(models.Model):
    # A museum collection; IDs are externally assigned strings.
    colID = models.CharField(max_length = 100, primary_key=True)
    colName = models.CharField(max_length = 100)
    # colExh / colMods presumably hold serialized lists of exhibit and
    # module IDs -- confirm against the code that writes them.
    colExh = models.TextField(blank = True)
    colMods = models.TextField(blank = True)
class Exhibit(models.Model):
    # A single exhibit with display image and description.
    exhID = models.CharField(max_length = 100, primary_key=True)
    exhName = models.CharField(max_length = 100)
    exhIMG = models.TextField()
    exhDesc = models.TextField()
    # Presumably a serialized list of module IDs -- confirm with writers.
    exhMods = models.TextField(blank = True)
class Module(models.Model):
    # An interactive module attached to exhibits/collections.
    modID = models.CharField(max_length = 100, primary_key=True)
    modName = models.CharField(max_length = 100)
    modType = models.CharField(max_length = 100)
    # Presumably a serialized list of question IDs -- confirm with writers.
    modQuestions = models.TextField()
class Question(models.Model):
    # A question belonging to a module; queExtras carries type-specific
    # payload (format not visible here).
    queID = models.CharField(max_length = 100, primary_key=True)
    queTitle = models.CharField(max_length = 100)
    queType = models.CharField(max_length = 100)
    queExtras = models.TextField(blank = True)
import sys
import io
from pathlib import Path
import requests
import numpy as np
from astropy.table import Table, join
from astropy.io import fits
import astropy.units as u
import astropy.coordinates as coord
from astroquery.vizier import Vizier
# IRSA Simple Image Access endpoint for the AllWISE atlas.
SIA_URL = 'https://irsa.ipac.caltech.edu/SIA'
# Query template; POS is overwritten per source in the loop below, so the
# $RA/$DEC placeholders are never actually sent.
sia_params = {
    'COLLECTION': 'wise_allwise',
    'RESPONSEFORMAT': 'VOTABLE',
    'FORMAT': 'image/fits',
    'POS': 'circle $RA $DEC 0.0',
}
Vizier.ROW_LIMIT = -1  # disable VizieR's default row cap
# Download VizieR catalog J/A+A/618/A110 and join its two sub-tables into
# one source table, ordered by position.
catalogs = Vizier.get_catalogs("J/A+A/618/A110")
source_table = join(catalogs[0], catalogs[1])
source_table.sort(keys=["RAJ2000", "DEJ2000"])
# Restrict to only bow shock sources
m = (source_table["MClass"] == "bs") | (source_table["MClass"] == "bsna")
source_table = source_table[m]
# Output directory for the downloaded FITS cutouts.
OUTPUT_IMAGE_DIR = Path('OB/BSC-WISE')
OUTPUT_IMAGE_DIR.mkdir(exist_ok=True)
BASE_IMAGE_SIZE_ARCMIN = 8.0
def skycoord_from_table_row(data):
    """Build a SkyCoord from a row's RAJ2000 (hours) / DEJ2000 (degrees) columns."""
    position = f'{data["RAJ2000"]} {data["DEJ2000"]}'
    return coord.SkyCoord(position, unit=(u.hourangle, u.deg))
# Optional 1-based start/end source indices from the command line.
# Narrowed from bare `except:` (which also swallowed SystemExit and
# KeyboardInterrupt) to the two failures that can actually occur here.
try:
    k1 = int(sys.argv[1])
except (IndexError, ValueError):
    k1 = 1          # no (or non-numeric) argument: start at the first source
try:
    k2 = int(sys.argv[2])
except (IndexError, ValueError):
    k2 = None       # no end index: process through to the end of the table
# Loop over all sources in the table (1-based slice selected via argv)
for source_data in source_table[k1-1:k2]:
    print(source_data["HD", "Name", "R0A"])
    # Make a SkyCoord object
    c = skycoord_from_table_row(source_data)
    sia_params['POS'] = f"circle {c.to_string()} 0.0"
    # Perform a search around the specified coordinates
    r = requests.get(SIA_URL, params=sia_params)
    tab = Table.read(io.BytesIO(r.content), format='votable')
    # Expand the image size for bigger bows: double once per size
    # threshold exceeded (60*R0A suggests R0A is in degrees and the
    # thresholds are arcsec-scale -- TODO confirm units)
    expand = 1.0
    for threshold in 40.0, 80.0, 160.0:
        if 60*source_data["R0A"] > threshold:
            expand *= 2
    image_size = BASE_IMAGE_SIZE_ARCMIN*expand
    image_params = {
        "center": f"{c.ra.deg:.4f},{c.dec.deg:.4f}",
        "size": f"{image_size}, {image_size} arcmin",
        "gzip": 0,
    }
    # Now fetch images in each band
    for data in tab:
        print(
            f"Fetching image ({image_size} arcmin square) from",
            data['access_url'].decode(),
        )
        r = requests.get(data['access_url'], params=image_params)
        hdulist = fits.open(io.BytesIO(r.content))
        # Get name of WISE bandpass as a unicode string
        bpname = data['energy_bandpassname'].decode()
        hdulist.writeto(
            OUTPUT_IMAGE_DIR / f"HD{source_data['HD']:06d}-{bpname}.fits",
            overwrite=True,
        )
|
"""Module for Loading and Transformation of the given data file
Owner: Venkateshwaran Loganathan
Created: 19 July 2018"""
#import necessary modules
import sys
import os
import locale
import json
class Auto1ETL:
    """Load the configured vehicle data file and transform it into typed rows.

    All knobs (column separator, locale, output column order, N/A marker, ...)
    come from config.json located next to this module.
    """

    def __init__(self, filePath=''):
        """Read the config, open the data file and prepare column metadata.

        filePath: optional path to the input file; when empty, the file named
        by config['inputDataFile'] next to this module is used.
        """
        # Identifies the current directory of this file
        self.currDir = os.path.dirname(os.path.realpath(__file__))
        self.filePath = filePath
        try:
            '''Loading the config file.
            //TODO: Can be extracted from environment variable settings as well
            '''
            # The `with` block closes the file; the original also called
            # close() redundantly inside it.
            with open(self.currDir + '/' + 'config.json', 'r') as config_file:
                self.config = json.load(config_file)
        except FileNotFoundError:
            print("The specified config file is not found. Please check the filename and try again")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; original best-effort reporting is preserved.
            print("Unexpected error:", sys.exc_info()[0])
        try:
            if self.filePath == '':
                self.filePath = self.currDir + '/' + self.config['inputDataFile']
            with open(self.filePath, 'r') as data_file:
                #Getting the first line to identify the columns in the given data file. The sepChar is a configurable parameter
                self.colDef = data_file.readline().strip().split(self.config['sepChar'])
        except AttributeError:
            print("The config attribute is missing. Please check and try again")
        except FileNotFoundError:
            print("The specified data input file is not found. Please check the filename and try again")
        except Exception:
            print("Unexpected error:", sys.exc_info()[0])
        #Setting up the column headers definition: name -> position in a line
        self.dictColDef = {}
        for counter, item in enumerate(self.colDef):
            self.dictColDef[item] = counter
        #Setting the locale to German, this takes care of utf-8 and german digit settings
        locale.setlocale(locale.LC_ALL, self.config['locale'])
        #Series of lambda functions that transforms the data into the specified format given in the problem statement
        self.transformationFunctions = {
            'engine-location': (lambda x: 0 if x == self.config['engLocn'] else 1), #Coding engine location front to be 0 and rear to be 1
            'num-of-cylinders': (lambda x: self.config['words2Num'][x]), #Function to convert number names to numbers //TODO: can be extended and built as a separate functionality
            'engine-size': (lambda x: int(x)),
            'weight': (lambda x: int(x)),
            'horsepower': (lambda x: locale.atof(x)), #converting to float
            'aspiration': (lambda x: 0 if x == self.config['aspiration'] else 1), #Boolean representation of aspiration
            'price': (lambda x: locale.atof(x)/100), #conversion of cents to Euros
            'make': (lambda x: str(x)) #No Change :)
        }
        self.transformedData = []

    def transformData(self, line):
        """Transforms the data line by line by calling the respective lambda functions"""
        splittedLine = line.split(self.config['sepChar'])
        tempList = []
        for item in self.config['order']:
            tempList.append(self.transformationFunctions[item](splittedLine[self.dictColDef[item]]))
        self.transformedData.append(tempList)

    def loadAndTransform(self):
        """Executes the program line by line and convert them"""
        with open(self.filePath) as dataTransform:
            next(dataTransform)  # skip the header line
            for line in dataTransform:
                if self.config['NAChar'] in line:
                    continue  # drop rows containing the missing-value marker
                self.transformData(line.strip())
        #returns the converted data after adding the columns definition in the top
        return [self.config['order']] + self.transformedData
#if __name__ == "__main__":
#auto1etl = Auto1ETL() |
#!/usr/bin/env python3
__all__ = ["expand"]
from typing import List, Tuple
from functools import wraps
def find_braces(s: str) -> Tuple:
    """Return the indices of the first '{' and the first '}' in *s*."""
    open_idx = s.index("{")
    close_idx = s.index("}")
    return (open_idx, close_idx)
def string_contains_set_of_braces(s: str) -> bool:
    """True iff *s* contains a '{' with a '}' somewhere after it."""
    open_idx = s.find("{")
    return open_idx >= 0 and s.find("}") > open_idx
def split_brace_contents(s: str) -> List[str]:
    """Split comma-separated brace contents into the individual alternatives."""
    parts = s.split(",")
    return parts
def expand(s: str) -> List[str]:
    """Expand the first {a,b,...} group in *s* and recurse until none remain.

    Returns the list of fully expanded strings, preserving alternative order.
    (The small helper predicates from this module are inlined here.)
    """
    open_idx = s.find("{")
    close_idx = s.find("}")
    if not (close_idx > open_idx >= 0):
        return [s]
    prefix = s[:open_idx]
    suffix = s[close_idx + 1:]
    # One string per comma-separated alternative inside the first brace pair.
    candidates = [prefix + choice + suffix
                  for choice in s[open_idx + 1:close_idx].split(",")]
    # There might still be more strings to expand
    result = []
    for candidate in candidates:
        if candidate.find("}") > candidate.find("{") >= 0:
            result += expand(candidate)
        else:
            result.append(candidate)
    return result
|
"""
Python exposes a terse and intuitive syntax for performing
slicing on lists and strings. This makes it easy to reference
only a portion of a list or string.
This Stack Overflow answer provides a brief but thorough
overview: https://stackoverflow.com/a/509295
Use Python's slice syntax to achieve the following:
Colon character (:) start(beginning) : end(ending) stop
Left; inclusive. Right; exclusive
"""
a = [2, 4, 1, 7, 9, 6]
# Output the second element: 4
# (index 1; slicing would be a[1:2], which yields the list [4])
print(a[1])
# Output the second-to-last element: 9
# (negative indices count from the end)
print(a[-2])
# Output the last three elements in the array: [7, 9, 6]
# (a[-3:]: from third-from-last through the end)
print(a[-3:])
# Output the two middle elements in the array: [1, 7]
# (start index inclusive, stop index exclusive)
print(a[2:4])
# Slices do not end AT the stop index, but at the element before it.
# Output every element except the first one: [4, 1, 7, 9, 6]
print(a[1:])
# Output every element except the last one: [2, 4, 1, 7, 9]
print(a[:-1])
# For string s...
s = "Hello, world!"
# Output just the 8th-12th characters: "world"
# (0-based indices 7..11, so the slice is s[7:12])
print(s[7:12])
# Generated by Django 2.1.2 on 2018-11-08 19:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: only updates help_text/verbose_name on
    # Category.disclaimer_is_visible (no schema change).
    dependencies = [
        ('products', '0020_auto_20181005_1301'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='disclaimer_is_visible',
            field=models.BooleanField(default=False, help_text='Застереження для категорії, відображатиметься на сайті, коли активувати', verbose_name='Застереження<br/>показувати/<br/>не показувати'),
        ),
    ]
|
# Python language basics 4
# control flow
# if statements
is_game_over = False
p_0_x_pos = 0
e_0_x_pos = 3
e_1_x_pos = 5
p_0_x_pos += 2 # p_0_x_pos = 2
if p_0_x_pos == e_0_x_pos: # False so skip code below
    is_game_over = True
elif p_0_x_pos == e_1_x_pos: # False so skip code below
    is_game_over = True
else: # Carried out if all above tests fail to execute
    e_0_x_pos += 1
    e_1_x_pos += 1
## Another way below: combine both collision checks with `or`
if p_0_x_pos == e_0_x_pos or p_0_x_pos == e_1_x_pos:
    is_game_over = True
else: # Carried out if all above tests fail to execute
    e_0_x_pos += 1
    e_1_x_pos += 1
# Python language basics 5
# while loops
# for in loops
is_game_over = False
p_x_pos = 2
e_x_pos = 3
end_x_pos = 10
# Player advances 3 per turn, enemy 1; loop ends on collision or finish line.
while not is_game_over:
    print(p_x_pos)
    print(e_x_pos)
    if p_x_pos == e_x_pos:
        print('you lose')
        is_game_over = True
    elif p_x_pos >= end_x_pos:
        print('you win')
        is_game_over = True
    else:
        p_x_pos += 3
        e_x_pos += 1
# Apply each relative movement to a running position.
x_pos = 5
movements = [1, -2, 6, -3, -2, 4]
for movement in movements:
    x_pos += movement
    print(x_pos)
|
# -*- coding: utf-8 -*-
#1
# Tiered discount: higher purchase amounts get bigger discounts
# (>=38000: 30% off, >=28000: 20%, >=18000: 10%, >=8000: 5%).
total=0.0
# float() replaces eval(): eval on raw user input executes arbitrary code
# (security hole); float() accepts the same numeric inputs safely.
a = float(input())
if a >= 38000.0:
    total = a * 0.7
elif a >= 28000.0:
    total = a * 0.8
elif a >= 18000.0:
    total = a * 0.9
elif a >= 8000.0:
    total = a * 0.95
print(total)
input()  # pause so the console window stays open
import os
import sys
import time
from random import randrange

from novaclient.client import Client
import swiftclient.client
config = {'user':os.environ['OS_USERNAME'],
'key':os.environ['OS_PASSWORD'],
'tenant_name':os.environ['OS_TENANT_NAME'],
'authurl':os.environ['OS_AUTH_URL']}
conn = swiftclient.client.Connection(auth_version=2, **config)
config = {'username':os.environ['OS_USERNAME'],
'api_key':os.environ['OS_PASSWORD'],
'project_id':os.environ['OS_TENANT_NAME'],
'auth_url':os.environ['OS_AUTH_URL'],
}
nova = Client('2',**config)
container_name = "dj_container"
exists = False
(response, bucket_list) = conn.get_account()
for bucket in bucket_list:
if (bucket == container_name):
exists == True
if (exists == False):
conn.put_container(container_name)
instancename = "dj_broker1"
if not nova.keypairs.findall(name="Svensskey"):
with open(os.path.expanduser('svensskey.pem')) as fpubkey:
nova.keypairs.create(name="Svensskey", public_key=fpubkey.read())
image = nova.images.find(name="Ubuntu Server 14.04 LTS (Trusty Tahr)")
flavor = nova.flavors.find(name="m1.medium")
user_data = open('userdata_broker.yml', 'r')
instance = nova.servers.create(name=instancename, image=image, flavor=flavor, key_name="Svensskey", userdata=user_data)
user_data.close()
# Poll at 5 second intervals, until the status is no longer 'BUILD'
status = instance.status
while status == 'BUILD':
time.sleep(5)
# Retrieve the instance again so the status field updates
instance = nova.servers.get(instance.id)
status = instance.status
print "status: %s" % status
# Assign Floating IP
iplist = nova.floating_ips.list()
if (len(iplist) < 1):
print "No IP:s available!"
sys.exit(0)
random_index = randrange(0,len(iplist))
ip_obj = iplist[random_index] # Pick random address
floating_ip = getattr(ip_obj, 'ip')
print "Attaching IP:"
print floating_ip
#ins = nova.servers.find(name=instancename)
instance.add_floating_ip(floating_ip)
|
#
#
# This code is not well maintained, mostly for reference if we revisit the
# deep particle simulation/ related experiments.
#
#
import numpy as np
import cv2
from fauxtograph import VAE, GAN, VAEGAN, get_paths, image_resize
import matplotlib.pyplot as plt
# '%matplotlib tk' is an IPython magic and a SyntaxError in a plain .py
# module, so this file could not even be imported. The line is kept as a
# comment; run the magic manually when using this under IPython/Jupyter.
# %matplotlib tk
loader ={}
loader['enc'] = 'VAEGAN/new_arch_test_epoch20_enc.h5'
loader['dec'] = 'VAEGAN/new_arch_test_epoch20_dec.h5'
loader['disc'] = 'VAEGAN/new_arch_test_epoch20_disc.h5'
loader['enc_opt'] = 'VAEGAN/new_arch_test_epoch20_enc_opt.h5'
loader['dec_opt'] = 'VAEGAN/new_arch_test_epoch20_dec_opt.h5'
loader['disc_opt'] = 'VAEGAN/new_arch_test_epoch20_disc_opt.h5'
loader['meta'] = 'VAEGAN/new_arch_test_epoch20_meta.json'
vg2 = VAEGAN.load(flag_gpu=False, **loader)
# Sample latent vectors (scaled standard normal) and decode them.
shape = 3, vg2.latent_width
random_data = np.random.standard_normal(shape).astype('f')*3.
fake_images = vg2.inverse_transform(random_data, test=True)
# Round-trip the fakes through the encoder/decoder.
reconstruct = vg2.inverse_transform(vg2.transform(fake_images))
# real_drop1 = cv2.imread('/Users/kevingordon/cims/drops/normalized_gray/d10-1.png')
# real_drop2 = cv2.imread('/Users/kevingordon/cims/drops/normalized_gray/d100-1.png')
# real_drop3 = cv2.imread('/Users/kevingordon/cims/drops/normalized_gray/d1000-1.png')
paths = get_paths('/Users/kevingordon/cims/drops/tmp_subset/')
real_images = vg2.load_images(paths)
real_recon1 = vg2.inverse_transform(vg2.transform(real_images))
# 4x10 grid: reals / real reconstructions / fakes / fake reconstructions.
plt.figure(figsize=(16,12))
for i in range(3):
    ax = plt.subplot(4, 10, i+1)
    plt.imshow(real_images[i].transpose(1,2,0))
    plt.axis("off")
    if i==4:
        ax.set_title("Randomly Sampled Real Images")
    ax = plt.subplot(4, 10, 10+i+1)
    plt.imshow(real_recon1[i])
    plt.axis("off")
    if i==4:
        ax.set_title("Reconstruction of Randomly Sampled Real Images")
    ax = plt.subplot(4, 10, 20+i+1)
    plt.imshow(fake_images[i])
    plt.axis("off")
    if i==4:
        ax.set_title("Randomly Sampled Simulated Images")
    ax = plt.subplot(4, 10, 30+i+1)
    plt.imshow(reconstruct[i])
    plt.axis("off")
    if i==4:
        ax.set_title("Reconstruction of Randomly Sampled Simulated Images")
plt.show()
# -*- coding: utf-8 -*-
import string
import sys
from avro import datafile, io, schema
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
__author__ = 'yd'
import avro.ipc as ipc
import avro.protocol as protocol
# Avro RPC protocol definition and the (host, port) of the herring-box server.
PROTOCOL = protocol.parse(open("../../../avro/herring-box.avpr").read())
server_addr = ('localhost', 9090)
def sendData(command, data):
    """Send one {command, data} message to the herring-box avro RPC server."""
    transceiver = ipc.HTTPTransceiver(server_addr[0], server_addr[1])
    requestor = ipc.Requestor(PROTOCOL, transceiver)
    payload = {'command': command, 'data': data}
    params = {'message': payload}
    print("Result: " + requestor.request('send', params))
    transceiver.close()
if __name__ == '__main__':
    # Read "left,right" pairs from stdin and write them to an avro container.
    # Fixes over the original: the output path was "" (open() failed outright);
    # the schema string had broken nested quoting and declared a 'file' field
    # unrelated to the {'left','right'} records appended below; and
    # string.split(...) is a Python-2-only idiom.
    avro_file = "pairs.avro"
    writer = open(avro_file, 'wb')
    datum_writer = io.DatumWriter()
    schema_object = schema.parse("""{ "type": "record",
       "name": "Pair",
       "doc": "A pair of strings.",
       "fields": [
         {"name": "left", "type": "string"},
         {"name": "right", "type": "string"}
       ]
    }""")
    dfw = datafile.DataFileWriter(writer, datum_writer, schema_object)
    for line in sys.stdin.readlines():
        (left, right) = line.strip().split(',')
        dfw.append({'left': left, 'right': right})
    dfw.close()
class DataSend:
    """Stream a file's bytes into an avro container as {'id', 'data'} records."""
    SCHEMA = schema.parse(open("../avro/herring-box-data.avpc").read())

    def testWrite(self, id, filename, bufferSize=8024):
        """Chunk *filename* into an avro data file written to filename + '.avro'.

        Fixes over the original, which could never work: it opened the source
        file itself in 'wb' (truncating it), read from the literal name
        "filename" in the invalid mode "rw", appended the chunk before
        checking for EOF, and tested the *builtin* `bytes` (always truthy)
        so the loop never terminated.
        """
        record = {'id': id, 'data': None}
        with open(filename + '.avro', 'wb') as fd:
            with DataFileWriter(fd, DatumWriter(), self.SCHEMA) as writer:
                with open(filename, 'rb') as source:
                    while True:
                        chunk = source.read(bufferSize)
                        if not chunk:
                            break
                        record['data'] = chunk
                        writer.append(record)
import sys
import tkinter as tk
import os
import matplotlib
import tensorflow as tf
import numpy as np
import pyaudio
import wave
import subprocess
from PIL import ImageTk, Image
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from tkinter.filedialog import askopenfilename
from gmc.dataset.utils import mel_spec_plot as msp
from gmc.core.models import nn
from gmc.dataset import features, musicset
from gmc.core.cache import store
from gmc.conf import settings
from gmc.core import handler
class GmcApp:
    # Tk front-end for the gmc music-genre classifier: pick or record an
    # audio file, show its mel-spectrogram, and classify it with the saved
    # TensorFlow model.
    def __init__(self, root):
        # Build the main window: a placeholder image plus three buttons.
        self.root = root
        self.img = tk.PhotoImage(file="icon.gif")
        self.canvas = tk.Label(root, image=self.img)
        self.canvas.image = self.img
        self.canvas.grid(row=0, column=0)
        self.prediction = None  # Label widget holding the last predicted genre
        root.wm_title("Music Classifier")
        tk.Button(root, text = "Select File", command = lambda: self.openFile()).grid(row=1, column=0, pady=5)
        tk.Button(root, text = "Record Audio", command = lambda: self.record()).grid(row=1, column=1, pady=5)
        tk.Button(root, text = "Classify", command = lambda: self.classify()).grid(row=1, column=2, pady=5)
    def plot (self, filepath):
        # Replace the current canvas with a mel-spectrogram plot of filepath.
        if self.prediction is not None:
            self.prediction.destroy()
        if self.canvas is not None:
            self.canvas.destroy()
        fig = msp(filepath)
        canvas = FigureCanvasTkAgg(fig, master=self.root)
        self.canvas = canvas.get_tk_widget()
        self.canvas.grid(row=0, column=0)
        canvas.draw()
    def openFile(self):
        # Let the user choose an audio file and plot it.
        self.fileName = askopenfilename(initialdir = ".")
        self.plot(self.fileName)
    def classify(self):
        # Restore the trained network from its checkpoint and run the
        # current file's features through it.
        storage = store(os.path.join(settings.BRAIN_DIR, 'nn'))
        save_path = storage['save.path']
        meta_path = save_path + '.meta'
        saver = tf.train.import_meta_graph(meta_path)
        data = self.get_features()
        n_f = data.shape[0]
        data = data.reshape((1, n_f))  # single-sample batch
        prediction = None
        with tf.Session() as sess:
            saver.restore(sess, save_path)
            graph = tf.get_default_graph()
            x = graph.get_tensor_by_name('x:0')
            y_ = graph.get_tensor_by_name('y_:0')
            keep_prob = graph.get_tensor_by_name('keep_prob:0')
            result = sess.run(y_, feed_dict={x : data, keep_prob:1})[0]
            idx = np.argmax(result)
            # Map the argmax index back to a genre via the one-hot encoding.
            dataset = musicset.MusicSet()
            dataset.one_hot_encode_genres()
            for genre in dataset.genres:
                if dataset.encoded_genres[genre][idx] == 1:
                    prediction = genre
        if self.prediction is not None:
            self.prediction.destroy()
        self.prediction = tk.Label(self.root, text=prediction)
        self.prediction.config(font=("Courier", 36))
        self.prediction.grid(row=0, column=1)
    def get_features(self):
        # Concatenate every configured feature extractor's output for the
        # currently selected file into one flat vector.
        result = None
        for f in settings.FEATURES:
            feat_func = getattr(features, f)
            if result is None:
                result = feat_func(self.fileName)
            else:
                result = np.hstack((result, feat_func(self.fileName)))
        return result
    def record(self):
        # Capture 10 seconds of stereo audio, save it as WAV, then downmix
        # to one channel with ffmpeg and plot/select the result.
        FORMAT = pyaudio.paInt16
        CHANNELS = 2
        RATE = 22050
        CHUNK = 1024
        RECORD_SECONDS = 10
        WAVE_OUTPUT_FILENAME = os.path.join(settings.BRAIN_DIR, "file.wav")
        FINAL_OUTPUT_FILENAME = os.path.join(settings.BRAIN_DIR, "output.wav")
        audio = pyaudio.PyAudio()
        # start Recording
        stream = audio.open(format=FORMAT, channels=CHANNELS,
                        rate=RATE, input=True,
                        frames_per_buffer=CHUNK)
        print("recording...")
        frames = []
        for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
            data = stream.read(CHUNK)
            frames.append(data)
        print("finished recording")
        # stop Recording
        stream.stop_stream()
        stream.close()
        audio.terminate()
        waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
        waveFile.setnchannels(CHANNELS)
        waveFile.setsampwidth(audio.get_sample_size(FORMAT))
        waveFile.setframerate(RATE)
        waveFile.writeframes(b''.join(frames))
        waveFile.close()
        # '-map_channel 0.0.0' keeps only the first channel of the recording.
        p = subprocess.Popen(['ffmpeg', '-y', '-i', WAVE_OUTPUT_FILENAME,
            '-map_channel', '0.0.0', FINAL_OUTPUT_FILENAME],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p_out, p_err = p.communicate()
        self.fileName = FINAL_OUTPUT_FILENAME
        self.plot(FINAL_OUTPUT_FILENAME)
def show_app():
    """Build the Tk root window, wrap it in GmcApp and run the main loop."""
    # NOTE(review): presumably performs framework/CLI setup before the GUI
    # starts — confirm what handler.execute_from_command_line does.
    handler.execute_from_command_line()
    root = tk.Tk()
    app = GmcApp(root)
    # Intercept the window-manager close button so we can clean up plots.
    root.protocol('WM_DELETE_WINDOW', lambda: close(app, root))  # root is your root window
    root.mainloop()
def close(app, root):
    """Window-close callback: shut down matplotlib, then destroy the Tk root."""
    plt.close()
    root.destroy()
if __name__ == '__main__':
    # Launch the GUI only when executed as a script.
    show_app()
# Fibonacci numbers
# Print the n-th Fibonacci number using only temporary variables,
# loops and conditionals. n is read from input.
# (The original loop printed the whole sequence — starting with 1 even for
# n == 0 — instead of the single n-th number the task asks for.)

def fib(n):
    """Return the n-th Fibonacci number, with fib(0) == 0 and fib(1) == 1."""
    prev, cur = 0, 1
    for _ in range(n):
        prev, cur = cur, prev + cur
    return prev

if __name__ == '__main__':
    num = int(input('Введите число '))
    print(fib(num))
|
# for loops in python
magicians = ['candice', 'duque', 'jess', 'ali']
for magician in magicians:
    print(magician.title())

revelers = ['candice', 'duque', 'jess', 'ali']
costumes = ['wayne', 'garth', 'clown', 'edie gray']
# Pair each reveler with their own costume via zip.  The original nested
# loop (flagged "doesn't work the way I want it to") printed every costume
# for every reveler instead of one costume per person.
for reveler, costume in zip(revelers, costumes):
    print(reveler.title() + ", you looked so great as ")
    print(costume)
    print("\n")

############## Exercises
# 4.1
pizzas = ['green lantern', 'margherita', 'heart throb']
for pizza in pizzas:
    print("I could eat " + pizza.title() + " pizza anytime at all.")
    print(pizza.title() + " pizza is the bestest.\n")
print("\nI just <3 the za.\n")

# 4.2
imaginary_animals = ['narwhal', 'dragon', 'unicorn']
for imaginary_animal in imaginary_animals:
    print("People say that " + imaginary_animal + "s aren't real, but I don't believe them.")
print("\nI think they're all based in reality at least.")
|
# -*- coding=utf8 -*-
from numpy import *
import matplotlib.pyplot as plt
import math
def loadDataSet(fileName):
    """Load a whitespace-separated data file (Python 2 code).

    Each line holds `cnt` feature values followed by the class label; a
    constant 1.0 is appended to every sample as the bias/intercept term.
    Returns (dataMat, labelMat) as numpy matrices of shape
    (samples, cnt + 1) and (samples, 1).
    """
    # Number of feature columns = tokens on the first line minus the label.
    cnt = len(open(fileName).readline().split()) - 1
    print cnt
    dataMat = []
    labelMat = []
    fr = open(fileName)
    for line in fr.readlines():
        lineArr = line.strip().split()
        curLine = []
        for i in range(cnt):
            curLine.append(float(lineArr[i]))
        curLine.append(1.0)  # bias term
        dataMat.append(curLine)
        labelMat.append(float(lineArr[-1]))  # last column is the label
    return mat(dataMat), mat(labelMat).T
# Load the training data.  m = number of samples, n = number of columns
# (features + bias); both are used as module-level globals by the
# derivative/training functions below.
dataMat, labelMat = loadDataSet('dataset.txt')
m, n = shape(dataMat)
print dataMat
print labelMat
print m, n
def sigmoid(x):
    """Logistic function: 1 / (1 + e^(-x))."""
    exponent = exp(-x)
    return 1 / (1 + exponent)
# L2 norm
def norm(vec):
    """Return the Euclidean (L2) norm of the first row of `vec`."""
    squared = sum(item ** 2 for item in vec[0])
    return sqrt(squared)
def firstDerivative(dataMat, labelMat, beta):
    """Gradient of the negative log-likelihood at `beta` (shape (n, 1)).

    Relies on the module-level globals m (sample count) and n (column
    count) set after loadDataSet().
    """
    grad = zeros((n, 1))
    for row in range(m):
        e = math.exp(dataMat[row, :] * beta)
        residual = labelMat[row] - e / (1.0 + e)  # y_i - p_i
        grad -= float(residual) * dataMat[row, :].T
    return grad
def secondDerivative(dataMat, labelMat, beta):
    """Hessian of the negative log-likelihood at `beta` (n x n matrix).

    Relies on the module-level globals m and n set after loadDataSet().
    """
    hess = mat(zeros((n, n)))
    for row in range(m):
        e = math.exp(dataMat[row, :] * beta)
        weight = e / ((1.0 + e) ** 2)  # p_i * (1 - p_i)
        hess += dataMat[row, :].T * dataMat[row, :] * weight
    return hess
def train(dataMat, labelMat, eps):
    """Fit logistic-regression weights by Newton's method (Python 2 code).

    Iterates beta <- beta - H^-1 * g until the gradient's L2 norm drops
    below `eps`.  Relies on the module-level global n for the shape.
    """
    beta = zeros((n, 1))
    # Note: the gradient is computed in the loop condition and again in the
    # body, doubling the per-iteration work.
    while norm(firstDerivative(dataMat, labelMat, beta).T) > eps:
        first = firstDerivative(dataMat, labelMat, beta)
        second = secondDerivative(dataMat, labelMat, beta)
        beta -= second.I * first  # Newton step: H^-1 * gradient
        print beta.T
    return beta
# Train on the loaded dataset with convergence tolerance 1e-4.
weight = train(dataMat, labelMat, 0.0001)
def test(dataMat, labelMat, weight):
    """Print the training error rate: predict class 1 when
    sigmoid(x * w) > 0.5, and count disagreements with the labels."""
    cnt = 0
    for i in range(m):
        if sigmoid(dataMat[i,:]*weight) > 0.5:
            if labelMat[i]!=1.0:
                cnt+=1
        else:
            if labelMat[i]!=0.0:
                cnt+=1
    print "the error rate:",cnt*1.0/m
# Evaluate on the same data that was used for training.
test(dataMat, labelMat, weight)
def showLogRegres(dataMat, labelMat, weight):
    """Scatter-plot the 2-D samples colored by class label and draw the
    fitted decision boundary.  Only supports exactly 3 columns
    (2 features + bias); returns 1 otherwise."""
    # notice: train_x and train_y is mat datatype
    weight=mat(weight)
    numSamples, numFeatures = shape(dataMat)
    if numFeatures != 3:
        print "Sorry! I can not draw because the dimension of your data is not 2!"
        return 1
    # draw all samples
    for i in xrange(numSamples):
        if int(labelMat[i, 0]) == 0:
            plt.plot(dataMat[i, 0], dataMat[i, 1], 'or')  # red circles = class 0
        elif int(labelMat[i, 0]) == 1:
            plt.plot(dataMat[i, 0], dataMat[i, 1], 'ob')  # blue circles = class 1
    # draw the classify line
    min_x = min(dataMat[:, 0])[0, 0]
    max_x = max(dataMat[:, 0])[0, 0]
    weights = weight.getA()  # convert mat to array
    # Boundary: w0*x + w1*y + w2 = 0  =>  y = (-w2 - w0*x) / w1
    y_min_x = float(-weights[2] - weights[0] * min_x) / weights[1]
    y_max_x = float(-weights[2] - weights[0] * max_x) / weights[1]
    plt.plot([min_x, max_x], [y_min_x, y_max_x], '-g')
    plt.xlabel('X1');
    plt.ylabel('X2')
    plt.show()
# Visualize the fitted model on the training data.
showLogRegres(dataMat, labelMat, weight)
import unittest
from Calculator import Calculator
class MyTestCase(unittest.TestCase):
    """String-calculator kata tests for Calculator.add."""

    # Shared calculator instance; Calculator.add appears stateless here.
    stub = Calculator()

    def test_empty(self):
        self.assertEqual(self.stub.add(""), 0)

    def test_single(self):
        self.assertEqual(self.stub.add("1"), 1)

    def test_two(self):
        self.assertEqual(self.stub.add("1,2"), 3)

    def test_three(self):
        # Renamed from a duplicate `test_two`, which silently shadowed the
        # two-number test above so that test never ran.
        self.assertEqual(self.stub.add("1,2,1"), 4)

    def test_newline_delim(self):
        self.assertEqual(self.stub.add("1\n2,3"), 6)

    def test_given_delim(self):
        self.assertEqual(self.stub.add("//;\n1;2"), 3)

    def test_negative(self):
        with self.assertRaises(Exception) as context:
            self.stub.add("-1")
        # print(...) is valid on both Python 2 and 3; the original used a
        # Python-2-only print statement.
        print(str(context.exception))
        self.assertTrue('negatives not allowed [-1]' in str(context.exception))

    def test_limit(self):
        # Numbers greater than 1000 are ignored by add().
        self.assertEqual(self.stub.add("1001,2"), 2)

    def test_delim_in_bracket(self):
        self.assertEqual(self.stub.add("//[***]\n1***2***3"), 6)

    def test_multiple_delim_in_bracket(self):
        self.assertEqual(self.stub.add("//[*][%]\n1*2%3"), 6)

    def test_multiple_longdelim_in_bracket(self):
        self.assertEqual(self.stub.add("//[***][%%%]\n1***2%%%3"), 6)
if __name__ == '__main__':
    # Run the suite when executed directly.
    unittest.main()
|
# Generated by Django 2.2.6 on 2019-12-18 07:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the optional free-text `district`
    # column (max 20 chars, nullable/blank) to the consumer model.

    dependencies = [
        ('consumers', '0008_consumer_hab_id'),
    ]

    operations = [
        migrations.AddField(
            model_name='consumer',
            name='district',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
    ]
|
import sys
class p1(dict):
    """dict subclass whose finalizer announces itself (used below to
    demonstrate reference-cycle destruction behavior)."""

    def __del__(self):
        print("删除1")
class p2(dict):
    """Second dict subclass with a self-announcing finalizer, so the two
    halves of the demo cycle are distinguishable in the output."""

    def __del__(self):
        print("删除2")
# Build a reference cycle: a holds b and b holds a, so neither refcount
# can drop to zero on its own.
a = p1()
b = p2()
a["aa"] = b
b["aa"] = a
print("OK")
# NOTE(review): whether the "删除" finalizers ever run depends on the
# cycle collector / interpreter shutdown behavior — confirm the intended
# lesson of this demo before relying on specific output.
|
import sys
import urllib
import json
import argparse
import urllib.request
import unicodedata
import collections
import os
import xml.etree.ElementTree as ET
import csv
import glob
import urllib.parse
# Read the header of the Genji text CSV, then dump the member labels of
# each kuronet IIIF JSON file into a per-volume CSV under data/old/.
# Fixes vs. original: the CSV handle is now closed (with-block), the
# unused `map = {}` that shadowed the builtin is gone, and the reader/file
# handles no longer reuse the same name `f`.
with open("data/genjitext.csv") as csv_file:
    reader = csv.reader(csv_file, delimiter=",")
    header = next(reader)
print(header)

files = glob.glob("../../docs/iiif/kuronet/*.json")
for path in files:
    # Volume id = file name without directory or extension.
    vol_str = path.split("/")[-1].split(".")[0]
    with open(path, 'r') as fin:
        data = json.load(fin)
    rows = []
    members = data["selections"][0]["members"]
    for member in members:
        rows.append([member["label"]])
    with open('data/old/' + vol_str + '.csv', 'w') as fout:
        writer = csv.writer(fout, lineterminator='\n')  # use \n line endings
        writer.writerows(rows)
|
from rest_framework import routers
from .api import LocationViewSet, UserViewSet, DeliverymanViewSet, DeliveryViewSet
from Car.api import Car_modelViewSet, CarViewSet, Car_rentViewSet
from Bike.api import Bike_modelViewSet, BikeViewSet, Bike_rentViewSet
router = routers.DefaultRouter()

# (url prefix, viewset, basename) for every resource exposed by the API.
_registrations = [
    ('users', UserViewSet, 'users'),
    ('locations', LocationViewSet, 'locations'),
    ('deliverymans', DeliverymanViewSet, 'deliverymans'),
    ('deliverys', DeliveryViewSet, 'deliverys'),
    ('car_models', Car_modelViewSet, 'car_models'),
    ('cars', CarViewSet, 'cars'),
    ('car_rents', Car_rentViewSet, 'car_rents'),
    ('bike_models', Bike_modelViewSet, 'bike_models'),
    ('bikes', BikeViewSet, 'bikes'),
    ('bike_rents', Bike_rentViewSet, 'bike_rents'),
]
for prefix, viewset, basename in _registrations:
    router.register(prefix, viewset, basename)

urlpatterns = router.urls
from collections import Counter
from functools import reduce
def solution_my(clothes):
    """Count the outfits wearable from `clothes` (list of [name, category]
    pairs): at most one item per category, at least one item overall.

    The correct count is prod(count_c + 1) - 1 over categories c (each
    category offers its items plus the "skip" option; subtract the
    all-skip outfit).  The original `len(clothes) + prod(counts)` formula
    coincides with this only for one or two categories and is wrong for
    three or more.
    """
    # Group item names by category.
    items = {}
    for name, category in clothes:
        items.setdefault(category, []).append(name)
    # Multiply per-category choices (items + skip), then drop "wear nothing".
    answer = 1
    for category in items:
        answer *= len(items[category]) + 1
    return answer - 1
def solution(clothes):
    """Count outfits (at most one item per category, at least one item):
    prod(count_c + 1) - 1 over the per-category item counts."""
    counts = Counter(category for _, category in clothes)
    total = 1
    for count in counts.values():
        total *= count + 1
    return total - 1
# Sample case: two headgear items and one eyewear item
# -> (2 + 1) * (1 + 1) - 1 = 5 combinations.
clothes = [
    ["yellow_hat", "headgear"],
    ["blue_sunglasses", "eyewear"],
    ["green_turban", "headgear"]
]
print(solution(clothes))
|
import speech_recognition as sr
AUDIO_FILE = ("calimp3.wav")  # import the audio file

r = sr.Recognizer()  # initialize the recognizer

# Load the entire audio file into memory for recognition.
with sr.AudioFile(AUDIO_FILE) as source:
    audio = r.record(source)

s = None  # transcription result; stays None when recognition fails
try:
    s = r.recognize_google(audio)  # store the audio as text in s
except sr.UnknownValueError:
    print("Could't understand the voice")
except sr.RequestError:
    print("Could't get the result")

# Only write the transcript when recognition succeeded.  The original
# always executed f.write(s), which raised NameError on any failure
# (s was never bound), and also called f.close() inside the with-block.
if s is not None:
    with open("output.txt", "w") as f:
        f.write(s)
    print("Audio stored in output.txt")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.