import pytest
from Programs import amsterdam
def test_1():
assert amsterdam.amsterdam("I have been in Amsterdam","am") == 0
def test_2():
assert amsterdam.amsterdam("Am I in Amsterdam","am") == 1
def test_3():
assert amsterdam.amsterdam("I am in Amsterdam am I?","am") == 2
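# Illustrative sketch, not part of the original tests: one implementation
# consistent with the assertions above counts whole words matching the key,
# case-insensitively (plain substring counting would fail test_1, since
# "Amsterdam" contains "am").
# def amsterdam(sentence, key):
#     return sum(1 for word in sentence.split() if word.lower() == key.lower())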
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader.processors import MapCompose,TakeFirst,Join  # post-process values gathered by the item loader
import datetime
from scrapy.loader import ItemLoader
import re
class ArticleSpyderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
def date_convert_str(value):
'''Convert a scraped date string into a date object.'''
try:  # parse the date
create_date = datetime.datetime.strptime(value, "%Y/%m/%d").date()
except Exception:
create_date = datetime.datetime.now().date()
return create_date
def get_nums(value):
match = re.match(r".*?(\d+).*", value)
if match:
nums = int(match.group(1))
else:  # e.g. when the comment count is 0
nums = 0
return nums
def return_value(value):
'''Identity processor: keeps the value as-is so the default output processing is not applied.'''
return value
class JobboleArticleItem(scrapy.Item):
title = scrapy.Field()
create_date = scrapy.Field(
input_processor=MapCompose(date_convert_str),  # date conversion
)
url = scrapy.Field()  # the url is variable-length
url_md5_id = scrapy.Field()  # MD5 of the url, giving a fixed-length id
front_image_url = scrapy.Field(
output_processor=MapCompose(return_value)  # override the default output processor
)
front_image_path = scrapy.Field()  # local path where the image is stored
comment_nums = scrapy.Field(
input_processor=MapCompose(get_nums)  # extract the number with a regex
)
fav_nums = scrapy.Field(
input_processor=MapCompose(get_nums)
)
tags = scrapy.Field(
output_processor=Join(',')  # join the tags into one string
)
content = scrapy.Field()
vote_num = scrapy.Field()
class ArticleItemLoader(ItemLoader):
'''Custom item loader inheriting from ItemLoader.
- Defines the default output processor for item fields, similar to calling
extract_first() on a css selector.
- Without it, every field's output would be a list.
'''
default_output_processor = TakeFirst()
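# Illustrative usage sketch, not part of the original file: inside a spider
# callback the loader is typically driven like this; the CSS selector and the
# 'response' object are assumptions for illustration.
def example_parse_article(self, response):
    item_loader = ArticleItemLoader(item=JobboleArticleItem(), response=response)
    item_loader.add_css('title', '.entry-header h1::text')  # TakeFirst applied on output
    item_loader.add_value('url', response.url)
    article_item = item_loader.load_item()
    yield article_item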
|
from handlers import AdminHandler, MainHandler, UrlHandler
from models import Base as ModelsBase
from models import engine as models_engine
from security import import_key
from sqlalchemy.orm import sessionmaker
from tornado import ioloop, web
from tornado.options import define, options, parse_command_line
define("port", default=8888, help="Port for webserver to run")
# get options from command line or use defaults
parse_command_line()
db_engine = models_engine
Session = sessionmaker(bind=db_engine)
db_session = Session()
PUBLIC_KEY = import_key('public_key.pem')
PRIVATE_KEY = import_key('private_key.pem')
class MyApplication(web.Application):
"""Main class for the application.
We set up the db and create or update tables here at startup."""
def __init__(self, *args, **kwargs):
self.session = kwargs.pop('session')
super(MyApplication, self).__init__(*args, **kwargs)
def create_database(self):
ModelsBase.metadata.create_all(db_engine)
application = MyApplication([
(r"/", MainHandler),
(r"/submit_url", UrlHandler, dict(
db_session=db_session, public_key=PUBLIC_KEY)),
(r"/admin", AdminHandler, dict(
db_session=db_session, private_key=PRIVATE_KEY)),
(r"/content/(.*)", web.StaticFileHandler, {'path': './'})
], session=db_session)
if __name__ == "__main__":
application.create_database()
application.listen(options.port)
ioloop.IOLoop.instance().start()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 18 01:03:23 2015
@author: Ricky
"""
from flask import Flask, render_template, request, redirect
from toyota_functions import *
from edmunds import Edmunds
import random
import os
import decimal  # needed for the price formatting in add_car below
api_key = 'cj3k5hqkyjzup8hzqqwz86t8' # edmunds api key
api = Edmunds(api_key)
# call a list of all Toyota models that are new (2014 & 2015)
def models_call():
toyota = api.make_call('/api/vehicle/v2/toyota?fmt=json&api_key='+
api_key + '&state=new')
models_list = return_models(toyota)
return models_list
# call the available years for each model
def years_call(carmodel):
models = models_call()
for model in models:
if model['name'] == carmodel:
years_list = []
for year in model['years']:
years_list.append(year)
return years_list
# call the dictionary of styles that are available for each model
def styles_call(carmodel, caryear):
models = models_call()
for model in models:
if model['name'] == carmodel:
for year in model['years']:
if year == caryear:
styles = api.make_call('/api/vehicle/v2/toyota/'+ model['name']
+'/'+str(year)+'/styles?fmt=json&api_key='+api_key+'&view=full')
styles_list = return_styles(styles)
return styles_list
app = Flask(__name__)
unsold_cars = [] # hold all cars that are still unsold including information
sold_cars = [] # list of cars that have been sold to customers, to display on sold page
deleted_cars = [] # ids of cars that have been deleted, so that they aren't re-added
@app.route('/')
def home():
return redirect('index.html')
@app.route('/index.html', methods = ['POST', 'GET'])
@app.route('/', methods = ['GET'])
def index():
m_list = models_call() # api call for all toyota models
cars = []
# add customer info to cars that are sold
if (request.method == 'POST' and request.form.get('carid')):
#check to see if user is deleting a car
if (request.form['submit'] == "Delete this car"):
carmodel = request.form['updatemodel']
car_id = int(request.form.get('carid'))
years_list = years_call(carmodel)
deleted = request.form['submit']
if car_id not in deleted_cars:
deleted_cars.append(car_id)
return render_template('index.html', models_list = m_list, years_list = years_list, carmodel = carmodel, car_styles = cars, deleted = deleted)
elif (request.form['submit'] == "Reset"):
return redirect('index.html')
else:
# otherwise sell the user a car!
sold_car_id = int(request.form.get('carid'))
for item in unsold_cars:
if item['options']['id'] == sold_car_id:
return render_template('sellcar.html', sold_car_id = sold_car_id, style = item)
# otherwise load cars from the dropdown lists on the left sidebar
elif request.method == 'POST':
carmodel = request.form['model_list'] # only display selected model from dropdown
years_list = years_call(carmodel) # api call for available years for that model
car_year = request.form['year_list'] # only display cars from selected year
if car_year == "2014" or car_year == "2015":
results = "Showing matching results for "
car_year = int(car_year) #convert dropdown year to int
if carmodel == "Fj Cruiser" or carmodel == "Rav4 Ev": #cruiser and rav4 only have 2014
car_year = 2014
car_styles = styles_call(carmodel, car_year) #do an api call for all different styles
# add cars from api call to cars array
for item in car_styles:
item['options']['id'] = int(item['options']['id'])
if item['options']['id'] not in deleted_cars:
if not any(d['options']['id'] == item['options']['id'] for d in sold_cars):
if not any(d['options']['id'] == item['options']['id'] for d in unsold_cars):
unsold_cars.append(item)
for item in unsold_cars:
if item['options']['id'] not in deleted_cars:
if not any(d['options']['id'] == item['options']['id'] for d in cars):
if item['options']['name'] == carmodel:
cars.append(item)
# if user has checked any of the search checkboxes
if (request.form.getlist('vehicle')):
search_queries = request.form.getlist('vehicle')
vehicle_list = []
for item in search_queries:
for car in cars:
if not any(d['options']['id'] == car['options']['id'] for d in vehicle_list):
# Search for cars in particular price range
if item == "< $20k":
if float(car['options']['price']) < 20000.0:
vehicle_list.append(car)
if item == "$20k-$24,999":
if 24999.00 >= float(car['options']['price']) >= 20000.00:
vehicle_list.append(car)
if item == "$25k-$29,999":
if 29999.00 >= float(car['options']['price']) >= 25000.00:
vehicle_list.append(car)
if item == "> $30k":
if 30000.00 <= float(car['options']['price']):
vehicle_list.append(car)
# Search for cars with specified search parameters
if item in car['options'].values():
vehicle_list.append(car)
if item in car['options']['options']:
vehicle_list.append(car)
# if there are no search query items
if not request.form.getlist('vehicle'):  # getlist returns an empty list, never None
return render_template('index.html', models_list = m_list, years_list = years_list, carmodel = carmodel, car_year = car_year, car_styles = cars, results = results)
else:
# return the page of search results
return render_template('index.html', models_list = m_list, years_list = years_list, carmodel = carmodel, car_year = car_year, car_styles = vehicle_list, search_queries = search_queries, results = results)
else:
# return the page of all car styles for model and year
return render_template('index.html', models_list = m_list, years_list = years_list, carmodel = carmodel, car_year = car_year, car_styles = cars, results = results)
else:
year_display = "Please select a year."
# return just list of models and years
return render_template('index.html', models_list = m_list, years_list = years_list, carmodel = carmodel,
car_year = car_year, year_display = year_display)
else:
#display a blank page
m_list = models_call()
y_list = []
model_display = "Please select a model."
return render_template('index.html', models_list = m_list, years_list = y_list, model_display = model_display)
# the page that actually allows you to sell the car
@app.route('/sellcar.html', methods=['POST', 'GET'])
def sell():
if request.method == 'GET':
return redirect('index.html')
else:
# get the customer's info from the form
sold_car_id = int(request.form.get('soldcarid'))
first_name = request.form.get('firstname')
last_name = request.form.get('lastname')
phone_number = request.form.get('phone')
address = request.form.get('address')
address2 = request.form.get('address2')
city = request.form.get('city')
state = request.form.get('state')
cust_zip = request.form.get('cust_zip')
notes = request.form.get('notes')
# if the sold car doesn't already exist in the sold array, add it
if not any(d['options']['id'] == sold_car_id for d in sold_cars):
add_sold_car(sold_car_id, first_name, last_name, phone_number, address, address2, city, state, cust_zip,
notes, unsold_cars, sold_cars)
# then remove it from the available cars list
sell_car(unsold_cars, sold_car_id)
# return the page of all car styles for model and year
carmodel = request.form['updatemodel'] # only display selected model from dropdown
years_list = years_call(carmodel) # api call for available years for that model
car_year = request.form['updateyear'] # only display cars from selected year
if car_year == "2014" or car_year == "2015":
results = "Showing matching results for "
car_year = int(car_year) #convert dropdown year to int
if carmodel == "Fj Cruiser" or carmodel == "Rav4 Ev": #cruiser and rav4 only have 2014
car_year = 2014
car_styles = styles_call(carmodel, car_year) #do an api call for all different styles
# add cars from api call to cars array
for item in car_styles:
item['options']['id'] = int(item['options']['id'])
if not any(d['options']['id'] == item['options']['id'] for d in sold_cars):
if not any(d['options']['id'] == item['options']['id'] for d in unsold_cars):
unsold_cars.append(item)
# go to the page of sold cars
return render_template('sold.html', sold_cars_list = sold_cars)
# load the page of cars that have been sold to customers
@app.route('/sold.html', methods = ['POST', 'GET'])
def soldcars():
if (request.method == 'POST' and request.form.get('carid')):
#if (request.form['submit'] == "Update"):
car_id = int(request.form['carid'])
notes = request.form['notes']
print(car_id)
print(notes)
#update_notes(car_id, notes, sold_cars)
return render_template('sold.html', sold_cars_list = sold_cars)
else:
return render_template('sold.html', sold_cars_list = sold_cars)
@app.route('/addcar.html', methods = ['POST', 'GET'])
def add_car():
m_list = models_call() # api call for all toyota models
if (request.method == 'POST' and request.form.get('package')):
name = request.form.get('name')
year = request.form.get('year')
package = request.form.get('package')
transmission_type = request.form.get('transmission')
warranty = request.form.get('warranty')
if request.form.get('style') != "":
style = request.form.get('style')
else:
style = "N/A"
if request.form.get('submodel') != "":
submodel = request.form.get('submodel').title()
else:
submodel = "N/A"
if request.form.get('trim') != "":
trim = request.form.get('trim').title()
else:
trim = "N/A"
if request.form.get('horsepower') != "":
horsepower = request.form.get('horsepower')
else:
horsepower = "N/A"
if request.form.get('cylinders') != "":
cylinder = request.form.get('cylinders')
else:
cylinder = "N/A"
if request.form.get('fuel_type') != "":
fuel_type = request.form.get('fuel_type').title()
else:
fuel_type = "N/A"
if request.form.get('speeds') != "":
num_speeds = request.form.get('speeds')
else:
num_speeds = "N/A"
if request.form.get('mpg_hwy') != "":
mpg_highway = request.form.get('mpg_hwy')
else:
mpg_highway = "N/A"
if request.form.get('mpg_city') != "":
mpg_city = request.form.get('mpg_city')
else:
mpg_city = "N/A"
if request.form.get('price') != "":
price = "{:.2f}".format(decimal.Decimal(float(request.form.get('price'))))
else:
price = "N/A"
if request.form.get('vehicle_style') != "":
vehicle_style = request.form.get('vehicle_style')
else:
vehicle_style = "N/A"
if request.form.get('vehicle_size') != "":
vehicle_size = request.form.get('vehicle_size')
else:
vehicle_size = "N/A"
if request.form.getlist('option'):  # getlist returns a list, so test truthiness
options = request.form.getlist('option')
else:
options = "N/A"
option_dict = {
"name": name,
"year": year,
"id": random.randrange(300000000,399999999),
"style": style,
"submodel": submodel,
"trim": trim,
"horsepower": horsepower,
"cylinders": cylinder,
"fuel_type": fuel_type,
"transmission_type": transmission_type,
"number_of_speeds": num_speeds,
"mpg_highway": mpg_highway,
"mpg_city": mpg_city,
"package": package,
"price": price,
"vehicle_style": vehicle_style,
"vehicle_size": vehicle_size,
"warranty": warranty,
"options": options
}
car = {
"options": option_dict
}
if not any(d['options']['id'] == car['options']['id'] for d in unsold_cars):
unsold_cars.append(car)
years_list = years_call(name)
added = request.form['submit']
return render_template('index.html', models_list = m_list, years_list = years_list, added = added)
elif request.method == 'POST':
carmodel = request.form['model_list'] # only display selected model from dropdown
years_list = years_call(carmodel) # api call for available years for that model
car_year = request.form['year_list'] # only display cars from selected year
if car_year == "2014" or car_year == "2015":
car_year = int(car_year) #convert dropdown year to int
if carmodel == "Fj Cruiser" or carmodel == "Rav4 Ev": #cruiser and rav4 only have 2014
car_year = 2014
added = "Car has been successfully added."
# return just list of models and years
return render_template('addcar.html', models_list = m_list, years_list = years_list, carmodel = carmodel, car_year = car_year, added = added)
else:
m_list = models_call()
y_list = []
return render_template('addcar.html', models_list = m_list, years_list = y_list)
if __name__ == '__main__':
"""
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
"""
app.run(debug=True) |
from rest_framework.pagination import (
LimitOffsetPagination,
PageNumberPagination
)
class PostLimitOffsetPagination(LimitOffsetPagination):
max_limit = 10
default_limit = 10
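# Illustrative sketch, not part of the original file: a pagination class only takes
# effect once wired into a view; 'Post' and 'PostSerializer' are assumed names.
# from rest_framework.generics import ListAPIView
# class PostListAPIView(ListAPIView):
#     queryset = Post.objects.all()
#     serializer_class = PostSerializer
#     pagination_class = PostLimitOffsetPagination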
class PagePageNumberPagination(PageNumberPagination):
page_size = 10 |
import os
from selenium import webdriver
import time
# output directory
# OUTPUT_DIR = '/Users/xxxx/Documents/运动'
base_path = os.path.abspath(".")
OUTPUT_DIR = os.path.join(base_path, "pexels")
# keyword list: a txt file named after each keyword is created in the output directory
SEARCH_KEY_WORDS = ["smoking", "smokers"]
# number of pages (scroll iterations)
PAGE_NUM = 100
repeateNum = 0
preLen = 0
def getSearchUrl(keyWord):
# note: both branches currently build the same URL; the isEn() check is kept as
# a hook for handling non-English keywords differently later
if isEn(keyWord):
return 'https://www.pexels.com/search/' + keyWord + "/"
else:
return 'https://www.pexels.com/search/' + keyWord + "/"
def isEn(keyWord):
return all(ord(c) < 128 for c in keyWord)
# launch the Chrome browser
driver = webdriver.Chrome(executable_path="C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe")
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
def output(SEARCH_KEY_WORD):
global repeateNum
global preLen
print('Searching for ' + SEARCH_KEY_WORD + ' images, please wait...')
# for Sogou image search (e.g. searching for tulips) this could be configured as: http://pic.sogou.com/pics?query=%D3%F4%BD%F0%CF%E3&di=2&_asf=pic.sogou.com&w=05009900&sut=9420&sst0=1523883106480
# URL of the page to crawl; originally a Google image search url
url = getSearchUrl(SEARCH_KEY_WORD)
# for Sogou this would be configured as: '//div[@id="imgid"]/ul/li/a/img'
# xpath of the target img elements; the Google xpath is kept for reference but superseded below
xpath = '//div[@id="rg"]/div/div/a/img'
xpath = "//div[@class='photos']//div//div/article/a[1]/img"
# 浏览器打开爬取页面
driver.get(url)
outputFile = os.path.join(OUTPUT_DIR, SEARCH_KEY_WORD + '.txt')
outputSet = set()
# simulate scrolling to load more images
pos = 0
m = 0 # image counter (unused)
# while True:
for i in range(PAGE_NUM):
pos += i*600  # scroll further down each iteration
js = "document.documentElement.scrollTop=%d" % pos
driver.execute_script(js)
time.sleep(10)
for element in driver.find_elements_by_xpath(xpath):
img_url = element.get_attribute('src')
if img_url is not None and img_url.startswith('http'):
outputSet.add(img_url)
# if preLen == len(outputSet):
# if repeateNum == 2:
# repeateNum = 0
# preLen = 0
# break
# else:
# repeateNum = repeateNum + 1
# else:
# repeateNum = 0
# preLen = len(outputSet)
# if driver.find_element_by_xpath("//*[@id='smb']"):
# try:
# driver.find_element_by_xpath("//*[@id='smb']").click()
# print("show more")
# except Exception:
# print("could not reach the load-more element")
# else:
# print("no more data to load")
print('Writing ' + SEARCH_KEY_WORD + ' image urls, please wait...')
file = open(outputFile, 'a')
for val in outputSet:
file.write(val + '\n')
file.close()
print(SEARCH_KEY_WORD + ' image urls written')
print(len(outputSet))
for val in SEARCH_KEY_WORDS:
output(val)
driver.close() |
import numpy as np
import copy
from multiagent.core import World, Agent, Landmark,Nest
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 10
num_agents = 3
num_landmarks = 10
num_nest = 1
world.collaborative = True # whether agents share rewards
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.size = 0.04
# add landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
print("world.landmarks:",world.landmarks)
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
landmark.size = 0.04
#add nest
world.nests = [Nest() for i in range(num_nest)]
for i, nest in enumerate(world.nests):
nest.name = 'nest %d' % i
nest.collide = False
nest.movable = False
nest.size = 0.15
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
'''
# assign goals to agents
for agent in world.agents:
agent.goal_a = None
agent.goal_b = None
# want other agent to go to the goal landmark
world.agents[0].goal_a = world.agents[1]
world.agents[0].goal_b = np.random.choice(world.landmarks)
world.agents[1].goal_a = world.agents[0]
world.agents[1].goal_b = np.random.choice(world.landmarks)
'''
# random properties for agents
for i, agent in enumerate(world.agents):
agent.color = np.array([0.35, 0.35, 0.85])
agent.foraging_capability = True
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.25, 0.25, 0.25])
landmark.becaught = False
# random properties for nests
for i, nest in enumerate(world.nests):
nest.color = np.array([0.78,0.04,0.25])
'''
# special colors for goals
world.agents[0].goal_a.color = world.agents[0].goal_b.color
world.agents[1].goal_a.color = world.agents[1].goal_b.color
'''
# set random initial states
for i, nest in enumerate(world.nests):
#nest.state.p_pos = np.random.uniform(-1,+1, world.dim_p)
nest.state.p_pos = [0,0]
nest.state.p_vel = np.zeros(world.dim_p)
for agent in world.agents:
#agent.state.p_pos = copy.deepcopy(nest.state.p_pos)
agent.state.p_pos = np.random.uniform(-1,+1, world.dim_p)
#print("agent position",agent.state.p_pos)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
landmark.state.p_pos = np.random.uniform(-1,+1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
input("here")
rew = 0
collisions = 0
gotten_targets = 0
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
collisions += 1
for l in world.landmarks:
if self.is_collision(l,agent):
if (agent.foraging_capability == True and l.becaught == False):
agent.foraging_capability = False
l.becaught = True
l.color = np.array([1,1,1])
rew += 1
break
for n in world.nests:
if self.is_collision(n,agent):
if agent.foraging_capability == False:
agent.foraging_capability = True
rew += 100
gotten_targets += 1
return (rew, collisions, gotten_targets)
def is_collision(self, agent1, agent2):
#print("agent1 position",agent1.state.p_pos)
#print("agent2 position",agent2.state.p_pos)
element_delta_pos = agent1.state.p_pos - agent2.state.p_pos
#print("delta_pos:", element_delta_pos)
dist = np.sqrt(np.sum(np.square( element_delta_pos)))
#print("dist:",dist)
dist_min = agent1.size + agent2.size
#print("dist_min:",dist_min)
return dist < dist_min
def reward(self, agent, world):
'''
if agent.goal_a is None or agent.goal_b is None:
return 0.0
dist2 = np.sum(np.square(agent.goal_a.state.p_pos - agent.goal_b.state.p_pos))
return -dist2
'''
# Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions
rew = 0
'''
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
rew -= min(dists)
'''
# agents are penalized for exiting the screen, so that they can be caught by the adversaries
def bound(x):
if x < 0.9:
return 0
if x < 1.0:
return (x - 0.9) * 10
return min(np.exp(2 * x - 2), 10)
for p in range(world.dim_p):
x = abs(agent.state.p_pos[p])
rew -= bound(x)
if agent.foraging_capability:
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
rew -= min(dists)
if not (agent.foraging_capability):
for n in world.nests:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - n.state.p_pos))) for a in world.agents]
rew -= min(dists)
if agent.collide:
'''
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
'''
for l in world.landmarks:
#print( "collision2",self.is_collision(l,agent))
if self.is_collision(l,agent):
#print("collision l&a")
#print("agent",agent.name,"before agent.foraging_capability:",agent.foraging_capability)
# input()
if (agent.foraging_capability == True and l.becaught == False):
agent.foraging_capability = False
# print("agent",agent.name,"after agent.foraging_capability:",agent.foraging_capability)
#input()
#l.state.p_pos = agent.state.p_pos
l.becaught = True
l.color = np.array([1,1,1])
rew += 1
#input()
#print(l.name)
#world.landmarks.remove(l)
#print("now world.landmarks:",world.landmarks)
break
for n in world.nests:
#print( "collision3",self.is_collision(n,agent))
if self.is_collision(n,agent):
#print("collision n&a")
#input()
if agent.foraging_capability == False:
agent.foraging_capability = True
rew += 100
return rew
def observation(self, agent, world):
# goal color
goal_color = [np.zeros(world.dim_color), np.zeros(world.dim_color)]
'''
if agent.goal_b is not None:
goal_color[1] = agent.goal_b.color
'''
# get positions of all entities in this agent's reference frame
entity_pos = []
for entity in world.landmarks:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
# entity colors
entity_color = []
for entity in world.landmarks:
entity_color.append(entity.color)
# communication of all other agents
comm = []
for other in world.agents:
if other is agent: continue
comm.append(other.state.c)
return np.concatenate([agent.state.p_vel] + entity_pos + [goal_color[1]] + comm)
|
import abc
from earthquake.steps_converter import StepItem
class Engine(metaclass=abc.ABCMeta):
@abc.abstractmethod
def init_engine(self):
pass
@abc.abstractmethod
def park_engine(self):
pass
@abc.abstractmethod
def move(self, step_item: StepItem) -> None:
pass
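# Illustrative sketch, not part of the original module: a minimal concrete engine
# satisfying the abstract interface above; the print-based behaviour is an
# assumption for demonstration only.
class PrintingEngine(Engine):
    def init_engine(self):
        print("engine initialised")
    def park_engine(self):
        print("engine parked")
    def move(self, step_item: StepItem) -> None:
        print("moving:", step_item)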
|
# -*- coding:utf-8 -*-
# Author: Jorden Hai
class Foo(object):  # class defined the usual way, with the class keyword
def __init__(self,name):
self.name = name
def func(self):
print("Hello Jorden")
obj = Foo("ALEX")
# the same kind of class built dynamically with type(); the attribute functions
# must exist as standalone functions so they can be passed in the class dict
def func(self):
print("Hello Jorden")
def __init__(self,name,age):
self.name = name
self.age = age
jh = type('jh',(object,),{'func':func,
'__init__':__init__})
|
# Write a Python program to count the number of occurrences of a specific character in a string.
import time
str_1 = input("Please enter the string: ")
a = input("Please enter the character you want to count the occurrences of: ")
print("Calculating its occurrences ... ")
time.sleep(1)
print("Occurrences:", str_1.count(a), "times")
|
some_string = "hello"
string_iterator = iter(some_string)
some_list = [1,2,3,4,5]
list_iterator = iter(some_list)
# We can call next(iterator) to get the next value
# in the container
# print(next(string_iterator))
# print(next(list_iterator))
def some_generator():
yield 1
yield 2
yield 3
# for value in some_generator():
# print(value)
# print(next(some_generator()))
def fibonacci():
first,second = 0,1
while True:
yield first
first,second=second,first+second
print(next(fibonacci()))
for value in fibonacci():
if value > 100:
break
print(value, " ")
list_comprehension_example = [n**2 for n in range(11)]
generator_expression_example = (n**2 for n in range(11))
print(list_comprehension_example) # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
print(generator_expression_example) # <generator object <genexpr> at 0x01413EA0>
print(next(generator_expression_example))
print(next(generator_expression_example))
print(next(generator_expression_example)) |
# Receive a natural number N (e.g. via a command-line argument) and split the
# input file line-wise into N parts.
# The same processing with the split command:
# split -l 9 hightemp.txt
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
def Judge_Remainder(divided,divisor):
remainder = divided % divisor
quotient = int(divided/divisor)
if remainder == 0:
return quotient
else:
return quotient+1
N = int(sys.argv[1])
datas = [line for line in open('hightemp.txt')]
unity=Judge_Remainder(len(datas),N)
for i in range(unity):
with open(str(i+1)+'.txt','w') as fout:
fout.write("".join(datas[i*N:(i+1)*N]))
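# Usage sketch (assumption): `python this_script.py 3` writes consecutive
# chunks of hightemp.txt to 1.txt, 2.txt, ...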
|
import pifightermatrix as Matrix
import datetime
import logging
import configparser
import queue
Mode = 0 # Initial Setting - not a real mode
KickButtMode = 1
WorkoutMode =2
UserName =""
# Create 2 queues for talking between the threads - one for TCP and one for UDP
UDPCommSendQueue = queue.Queue()
UDPCommRecQueue = queue.Queue()
TCPCommSendQueue = queue.Queue()
TCPCommRecQueue = queue.Queue()
def InitialiseSystem():
global Mode
global UserName
#Set up the mode, getting challengers name
#UserName = input("Challenger's name:")
#UserHealthPoints = 200
#print(UserName + " is not exactly fierce sounding - Can I call you THE Dragon?")
# Set up the LED Matrix
Matrix.Setup()
# Ask what mode to be used.
Mode = input("Desired Mode [1=Fight someone 2= Workout]")
if (int(Mode) == KickButtMode):
print("Kick some butt mode!")
elif (int(Mode)==WorkoutMode):
print("Workout as too wimpy to fight yet")
else:
print("Invalid Mode - must mean Kick Some Butt Mode")
Mode = KickButtMode
def SetUpLoggingAndConfig():
global SAMPLE_SLEEP
global DISPLAY_FREQ
global STD_PUNCH_WAIT
global CMD_FLASH_TIME
global BETWEEN_SEQ_REST
global SERVER_HOST
global TCP_PORT
global UDP_PORT
global config
# Setting up logging - add in time to it. Create a filename using time functions
Now = datetime.datetime.now()
LogFileName = 'log/pi-fighter_' + Now.strftime("%y%m%d%H%M") + ".log"
# Sets up the logging - no special settings.
logging.basicConfig(filename=LogFileName,level=logging.DEBUG)
# Setting up to read config file
config = configparser.RawConfigParser()
config.read('pi-fighter.cfg')
#Get the sampling rate to use
SAMPLE_SLEEP = config.getint('TIMING', 'SAMPLE_SLEEP')
# Get Display Sample - how often to update the display based on how many samples are taken before displaying
DISPLAY_FREQ = config.getint('TIMING', 'DISPLAY_FREQ')
# Get how long to typically wait between punches
STD_PUNCH_WAIT = config.getint('TIMING', 'STD_PUNCH_WAIT')
# Get how long to flash a command on the display
CMD_FLASH_TIME = config.getint('TIMING', 'CMD_FLASH_TIME')
BETWEEN_SEQ_REST = config.getint('TIMING', 'BETWEEN_SEQ_REST')
# Get Server Information
SERVER_HOST= config['SERVER']['SERVER_HOST']
UDP_PORT= int (config['SERVER']['UDP_PORT'])
TCP_PORT= int (config['SERVER']['TCP_PORT'])
# Getting everything ready
InitialiseSystem() # initial setup
SetUpLoggingAndConfig() # Gather config info and start logging.
|
from nltk import word_tokenize
import pandas as pd
'''
Module that splits sentences into one word per row and then tags each word with B-SYM or I-SYM.
The output file has three columns: sentence number, one word per row, and the tag for that row.
'''
def tokenize_sentences(frame):
'''
:param frame: Data frame that has the sentence_id and one sentence per row - 2 columns in total
:return: data frame that tags symptoms as B-SYM if the word is the beginning of a symptom and I-SYM if it is a continuing word of a symptom
'''
words = []
for j, row in frame.iterrows():
for word, temptag in zip(word_tokenize(row['Sentence']), word_tokenize(row['Token'])):
if temptag == 'BSYM':
tag = 'B-SYM'
elif temptag == 'ISYM':
tag = 'I-SYM'
else:
tag = 'O'
words.append((row['Sentence_ID'], word, tag))
tag_df = pd.DataFrame(words, columns=['Sentence_ID', 'Words', 'Tag'])
return tag_df
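# Illustrative sketch, not part of the original module: the input shape expected
# by tokenize_sentences; column names follow the docstring, the sample row is an
# assumption (tags in 'Token' align one-to-one with the words of 'Sentence').
# _demo = pd.DataFrame({'Sentence_ID': [1],
#                       'Sentence': ['patient reports severe headache'],
#                       'Token': ['O O BSYM ISYM']})
# print(tokenize_sentences(_demo))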
def remove_duplicate_sentence_ids(df):
'''
Takes in a data frame where the same sentence number is repeated on many rows and returns a df
in which each sentence number appears only once, on the first row of its words
'''
is_duplicate = df['Sentence_ID'].duplicated()
df['Sentence_ID'] = df['Sentence_ID'].where(~is_duplicate, ' ')
tagged_data = df[['Sentence_ID', 'Words', 'Tag']]
return tagged_data |
import numpy,random
inversion = numpy.random.triangular(-130000,-100000,-80000)
rescate = numpy.random.triangular(16000,20000,26000)
infacion = numpy.random.triangular(15,20,25)
flujos = 0
anios= []
for x in range(0,5):
r = random.random()
if r < 0.2:
flujos+= 20000
anios.append(20000)
elif r >= 0.2 and r < 0.4:
flujos+= 30000
anios.append(30000)
elif r >= 0.4 and r < 0.6:
flujos+= 40000
anios.append(40000)
elif r >= 0.6 and r < 0.8:
flujos+= 50000
anios.append(50000)
else:
flujos+= 60000
anios.append(60000)
npv = numpy.npv(infacion,anios)  # note: numpy.npv was removed in NumPy 1.20 (use numpy_financial.npv there)
impuestos = npv*0.5
trema = npv*0.2
print(f"The NPV is ${npv} and the investment is ${inversion}")
print("Subtracting taxes and adding the salvage value:")
npv -= impuestos
npv -= trema
npv += rescate
print(npv)
if npv > inversion:
print("It is not worthwhile")
else:
print("It is worthwhile") |
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.dummy_operator import DummyOperator
from airflow.sensors.external_task_sensor import ExternalTaskSensor
from operators import (CreateEMRClusterOperator,ClusterCheckSensor,SubmitSparkJobToEmrOperator)
import boto3
from airflow import AirflowException
import logging
region_name="us-west-2"
emr_conn=None
try:
emr_conn = boto3.client('emr', region_name=region_name)
except Exception as e:
logging.error("could not create EMR connection: %s", e)
raise AirflowException("emr_connection failed!")
default_args = {
'owner': 'decapstone-immigration',
'start_date': datetime(2018,1,1),
'depends_on_past':False,
'retries':1,
'retry_delay':timedelta(minutes=5),
'email_on_retry':False,
'provide_context': True
}
# Initialize the DAG: create the EMR cluster and then wait for the ETL dag to complete
dag = DAG('cluster_dag',
default_args=default_args,
concurrency=3,
schedule_interval=None,
description='Create EMR cluster, wait for ETL to complete immigration transformation. Terminate cluster',
)
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
create_cluster=CreateEMRClusterOperator(
task_id = "create_emr_cluster",
dag = dag,
region_name=region_name,
emr_connection=emr_conn,
cluster_name="immigration_cluster",
release_label='emr-5.9.0',
master_instance_type='m3.xlarge',
num_core_nodes=3,
core_node_instance_type='m3.2xlarge'
)
check_cluster = ClusterCheckSensor(
task_id="check_cluster_waiting",
dag=dag,
poke=60,
emr=emr_conn,
)
end_operator = DummyOperator(task_id='End_execution', dag=dag)
start_operator >> create_cluster >> check_cluster >> end_operator
|
#!/usr/bin/env python
import argparse
from createConfigFiles import *
@timeit
def condor_control(original_dir ="./SubmittedJobs/" , JECVersions_Data=["Autumn18_V4"], JetLabels=["AK4CHS"], systematics=["", "PU", "JEC", "JER"], internal_option="-l", processes=[], extratext=""):
count = 0
list_processes = []
nProcess = 48
time_ = 1
for newJECVersion in JECVersions_Data:
for newJetLabel in JetLabels:
for sys in systematics:
for dir in ["", "up", "down"]:
if sys == "" and dir != "":
continue
if sys == "JER" and dir != "":
continue
if sys == "JER" and dir == "":
dir = "nominal"
path = os.path.join(original_dir,newJECVersion,newJetLabel+extratext,sys,dir)
for sample in sorted(os.listdir(path)):
if not ".xml" in sample:
continue
if all(not control in sample for control in processes): continue
if internal_option:
command = ['sframe_batch.py', internal_option, os.path.join(path,sample)]
else:
command = ['sframe_batch.py', os.path.join(path,sample)]
command = [path]+command
list_processes.append(command)
if internal_option == "-f":
nProcess = 20
if internal_option == "":
time_ = 0.5
print(len(list_processes))
parallelise(list_processes, nProcess, cwd=True, time_=time_)
@timeit
def delete_workdir(original_dir ="./SubmittedJobs/" , JECVersions_Data=["Autumn18_V4", "Autumn18_V4"], JetLabels=["AK4CHS", "AK8Puppi"], systematics=["", "PU", "JEC", "JER"],extratext=""):
add_name = original_dir[original_dir.find("SubmittedJobs")+len("SubmittedJobs"):-1]
for sample in ["DATA", "QCD"]:
for newJECVersion in JECVersions_Data:
for newJetLabel in JetLabels:
for sys in systematics:
for dir in ["", "up", "down"]:
if sys == "" and dir != "":
continue
if sys == "JER" and dir != "":
continue
if sys == "JER" and dir == "":
dir = "nominal"
path = userPathSframeOutput+"/"+newJECVersion+"/"+newJetLabel+extratext+"/"+sys+"/"+dir+"/"
if os.path.isdir(path):
for workdir in sorted(os.listdir(path)):
if "workdir" in workdir:
cmd = "rm -fr %s" % (path+workdir)
a = os.system(cmd)
print(cmd)
path = original_dir+newJECVersion+"/"+newJetLabel+extratext+"/"+sys+"/"+dir+"/"
if os.path.isdir(path):
for workdir in sorted(os.listdir(path)):
if "workdir" in workdir:
cmd = "rm -fr %s" % (path+workdir)
a = os.system(cmd)
def main_program(option="", internal_option="", study="Standard", processes=[], others=[], JECVersions_Data=[], JECVersions_MC=[], JetLabels=[], systematics=[], original_dir="./SubmittedJobs/", original_file="JER2018.xml", year="2018", isMB=False, test_trigger=False, isThreshold=False, isLowPt=False, isL1Seed=False, isECAL=False, extratext=""):
if option == "new":
createConfigFiles(study, processes, others, JECVersions_Data, JECVersions_MC, JetLabels, systematics, original_dir, original_file, outdir, year, isMB, test_trigger, isThreshold,isLowPt,isL1Seed,isECAL,extratext)
elif option == "remove" or option == "delete":
delete_workdir(original_dir, JECVersions_Data, JetLabels, systematics, extratext)
else:
condor_control(original_dir, JECVersions_Data, JetLabels, systematics, internal_option, processes, extratext)
##################################################
# #
# MAIN Program #
# #
##################################################
USER = os.environ["USER"]
try:
option = sys.argv[1]
except:
option = ""
if option == "resubmit":
internal_option = "-r"
elif option == "submit":
internal_option = "-s"
elif option == "add" or option == "merge":
internal_option = "-f"
elif option == "list":
internal_option = "-l"
elif option == "new":
internal_option = ""
elif option == "remove" or option == "delete":
internal_option = ""
elif option == "split":
internal_option = ""
else:
internal_option = ""
QCD_process= []
Data_process= []
# QCD_process.append("QCDHT50to100_2018")
# QCD_process.append("QCDHT100to200_2018")
# QCD_process.append("QCDHT200to300_2018")
# QCD_process.append("QCDHT300to500_2018")
# QCD_process.append("QCDHT500to700_2018")
# QCD_process.append("QCDHT700to1000_2018")
# QCD_process.append("QCDHT1000to1500_2018")
# QCD_process.append("QCDHT1500to2000_2018")
# QCD_process.append("QCDHT2000toInf_2018")
# Data_process.append("DATA_RunA_2018")
# Data_process.append("DATA_RunB_2018")
# Data_process.append("DATA_RunC_2018")
# Data_process.append("DATA_RunD_2018")
#
# QCD_process.append("QCDHT50to100_UL16preVFP")
# QCD_process.append("QCDHT100to200_UL16preVFP")
# QCD_process.append("QCDHT200to300_UL16preVFP")
# QCD_process.append("QCDHT300to500_UL16preVFP")
# QCD_process.append("QCDHT500to700_UL16preVFP")
# QCD_process.append("QCDHT700to1000_UL16preVFP")
# QCD_process.append("QCDHT1000to1500_UL16preVFP")
# QCD_process.append("QCDHT1500to2000_UL16preVFP")
# QCD_process.append("QCDHT2000toInf_UL16preVFP")
# QCD_process.append("QCDHT50to100_UL16postVFP")
# QCD_process.append("QCDHT100to200_UL16postVFP")
# QCD_process.append("QCDHT200to300_UL16postVFP")
# QCD_process.append("QCDHT300to500_UL16postVFP")
# QCD_process.append("QCDHT500to700_UL16postVFP")
# QCD_process.append("QCDHT700to1000_UL16postVFP")
# QCD_process.append("QCDHT1000to1500_UL16postVFP")
# QCD_process.append("QCDHT1500to2000_UL16postVFP")
# QCD_process.append("QCDHT2000toInf_UL16postVFP")
# Data_process.append("DATA_RunB_UL16preVFP")
# Data_process.append("DATA_RunC_UL16preVFP")
# Data_process.append("DATA_RunD_UL16preVFP")
# Data_process.append("DATA_RunE_UL16preVFP")
# Data_process.append("DATA_RunF_UL16preVFP")
# Data_process.append("DATA_RunF_UL16postVFP")
# Data_process.append("DATA_RunG_UL16postVFP")
# Data_process.append("DATA_RunH_UL16postVFP")
#
#
# QCD_process.append("QCDHT50to100_UL17")
# QCD_process.append("QCDHT100to200_UL17")
# QCD_process.append("QCDHT200to300_UL17")
# QCD_process.append("QCDHT300to500_UL17")
# QCD_process.append("QCDHT500to700_UL17")
# QCD_process.append("QCDHT700to1000_UL17")
# QCD_process.append("QCDHT1000to1500_UL17")
# QCD_process.append("QCDHT1500to2000_UL17")
# QCD_process.append("QCDHT2000toInf_UL17")
# QCD_process.append("QCDPt15to30_UL17")
# QCD_process.append("QCDPt30to50_UL17")
# QCD_process.append("QCDPt50to80_UL17")
# QCD_process.append("QCDPt80to120_UL17")
# QCD_process.append("QCDPt120to170_UL17")
# QCD_process.append("QCDPt170to300_UL17")
# QCD_process.append("QCDPt300to470_UL17")
# QCD_process.append("QCDPt470to600_UL17")
# QCD_process.append("QCDPt600to800_UL17")
# QCD_process.append("QCDPt800to1000_UL17")
# QCD_process.append("QCDPt1000to1400_UL17")
# QCD_process.append("QCDPt1400to1800_UL17")
# QCD_process.append("QCDPt1800to2400_UL17")
# QCD_process.append("QCDPt2400to3200_UL17")
# QCD_process.append("QCDPt3200toInf_UL17")
# Data_process.append("DATA_RunB_UL17")
# Data_process.append("DATA_RunC_UL17")
# Data_process.append("DATA_RunD_UL17")
# Data_process.append("DATA_RunE_UL17")
# Data_process.append("DATA_RunF_UL17")
#
#
#
# QCD_process.append("QCDHT50to100_UL18")
# QCD_process.append("QCDHT100to200_UL18")
# QCD_process.append("QCDHT200to300_UL18")
# QCD_process.append("QCDHT300to500_UL18")
# QCD_process.append("QCDHT500to700_UL18")
# QCD_process.append("QCDHT700to1000_UL18")
# QCD_process.append("QCDHT1000to1500_UL18")
# QCD_process.append("QCDHT1500to2000_UL18")
# QCD_process.append("QCDHT2000toInf_UL18")
# Data_process.append("DATA_RunA_UL18")
# Data_process.append("DATA_RunB_UL18")
# Data_process.append("DATA_RunC_UL18")
# Data_process.append("DATA_RunD_UL18")
QCD_process.append("QCD_Flat_2022")
QCD_process.append("QCDPt50to80_2022")
QCD_process.append("QCDPt80to120_2022")
QCD_process.append("QCDPt120to170_2022")
QCD_process.append("QCDPt170to300_2022")
QCD_process.append("QCDPt300to470_2022")
QCD_process.append("QCDPt470to600_2022")
QCD_process.append("QCDPt600to800_2022")
QCD_process.append("QCDPt800to1000_2022")
QCD_process.append("QCDPt1000to1400_2022")
QCD_process.append("QCDPt1400to1800_2022")
QCD_process.append("QCDPt1800to2400_2022")
QCD_process.append("QCDPt2400to3200_2022")
QCD_process.append("QCDPt3200toInf_2022")
Data_process.append("DATA_RunC_2022")
Data_process.append("DATA_RunD_2022")
# JECVersions_Data = ["Autumn18_V4"]
# JetLabels = ["AK4CHS", "AK8Puppi"]
# systematics = ["", "PU", "JEC", "JER"]
# year = "2018"
# year = "UL16preVFP"
# year = "UL16postVFP"
# year = "UL17"
# year = "UL18"
year = "2022"
studies = []
# studies.append("Standard")
studies.append("L1L2Residual")
# studies.append("L1L2")
# studies.append("eta_JER")
# studies.append("eta_L2R")
# studies.append("eta_narrow")
# studies.append("eta_simple")
print "Running for: ", studies
time.sleep(2)
outdir = "DiJetJERC_DiJetHLT"
original_file = outdir+".xml"
original_dir_ = os.getcwd()
# QCDSamples = ["QCDPt","QCDHT", "DATA"]
QCDSamples = ["QCD", "DATA"]
QCDSamples = ["QCD_Flat", "DATA"]
processes = list(filter(lambda sample: year in sample and any(QCD in sample for QCD in QCDSamples), QCD_process+Data_process))
others = list(set(QCD_process+Data_process)-set(processes))
JECVersions_Data = {}
JECVersions_MC = {}
JECVersions_Data["2017"] = ["Fall17_17Nov2017_V32"]
JECVersions_MC["2017"] = ["Fall17_17Nov2017_V32"]
JECVersions_Data["2018"] = ["Autumn18_V19"]
JECVersions_MC["2018"] = ["Autumn18_V19"]
JECVersions_Data["UL16preVFP"] = ["Summer19UL16APV_V3"]
JECVersions_MC["UL16preVFP"] = ["Summer19UL16APV_V3"]
JECVersions_Data["UL16postVFP"] = ["Summer19UL16_V2"]
JECVersions_MC["UL16postVFP"] = ["Summer19UL16_V2"]
JECVersions_Data["UL17"] = ["Summer19UL17_V5"]
JECVersions_MC["UL17"] = ["Summer19UL17_V5"]
JECVersions_Data["UL18"] = ["Summer19UL18_V5"]
JECVersions_MC["UL18"] = ["Summer19UL18_V5"]
JECVersions_Data["2022"] = ["Winter22Run3_V1"]
JECVersions_MC["2022"] = ["Winter22Run3_V1"]
# JetLabels = ["AK4CHS","AK8Puppi", "AK4Puppi"]
JetLabels = ["AK4CHS"]
# JetLabels = ["AK4Puppi"]
# JetLabels = ["AK4CHS", "AK4Puppi"]
# JetLabels = ["AK8Puppi", "AK4Puppi"]
# systematics = [""]
systematics = ["", "PU", "JEC", "JER"]
# systematics = ["", "PU", "JEC"]
# systematics = ["PU", "JEC"]
# systematics = ["PU"]
# systematics = [""]
# systematics = ["JEC"]
# systematics = ["JER"]
for study in studies:
userPathSframeOutput="/nfs/dust/cms/user/"+USER+"/sframe_all/"+outdir+"/"+year+"/"+study+"/"
original_dir = original_dir_
original_dir += "/SubmittedJobs/"+year+"/"+study+"/"
main_program(option, internal_option, study, processes, others, JECVersions_Data[year], JECVersions_MC[year], JetLabels, systematics, original_dir, original_file, year)
|
from django.shortcuts import render, redirect,get_object_or_404
from django.views import generic
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from .models import Post
from .forms import CommentForm
from .forms import *
# Create your views here.
@login_required
def blogpost(request):
form=PostForm()
if request.method == "POST":
form=PostForm(request.POST)
if form.is_valid():
blog_post=form.save(commit=False)
author=request.user
#author=User.objects.get(user = author_name)
print(author)
blog_post.author= author
blog_post.save()
return redirect('home')
return render(request,'blogpost.html',{'form':form})
def logout_view(request):
logout(request)
return redirect('home')
def signup(request):
form =SignupForm()
if request.method == "POST":
form=SignupForm(request.POST)
if form.is_valid():
user=form.save()
user.set_password(user.password)
user.save()
return redirect('home')
return render(request,'registration/signup.html',{'form':form})
def post_detail(request, slug):
template_name = 'post_detail.html'
post = get_object_or_404(Post, slug=slug)
comments = post.comments.filter(active=True)
new_comment = None
# Comment posted
if request.method == 'POST':
comment_form = CommentForm(data=request.POST)
if comment_form.is_valid():
# Create Comment object but don't save to database yet
new_comment = comment_form.save(commit=False)
# Assign the current post to the comment
new_comment.post = post
# Save the comment to the database
new_comment.save()
else:
comment_form = CommentForm()
return render(request, template_name, {'post': post,
'comments': comments,
'new_comment': new_comment,
'comment_form': comment_form})
class PostList(generic.ListView):
queryset = Post.objects.filter(status=1).order_by('-created_on')
template_name = 'index.html'
paginate_by = 3
def CategoryView(request,category):
post_list = Post.objects.filter(category=category)
return render (request,'index.html',{'post_list':post_list})
"""class PostDetail(generic.DetailView):
model = Post
template_name = 'post_detail.html'"""
def DeletePost(request,id):
if request.method == 'POST':
post = Post.objects.get(id = id)
post.delete()
return redirect('home')
return render(request,'delete.html')
|
from BlaseBallClient import *
from DBConnectors import *
def main():
bbc = BlaseBallClient(MongoDBConnector())
bbc.track_scores()
if __name__ == '__main__':
main() |
import numpy as np
import cv2
# HSV color thresholds for RED
THRESHOLD_LOW_R1 = (0, 170, 50)
THRESHOLD_HIGH_R1 = (4, 255, 255)
# HSV color thresholds for RED
THRESHOLD_LOW_R2 = (171, 170, 50)
THRESHOLD_HIGH_R2 = (178, 255, 255)
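# (red wraps around the hue axis in HSV, hence the two separate red ranges above)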
# HSV color threshold for GREEN
THRESHOLD_LOW_G = (45, 100, 50)
THRESHOLD_HIGH_G= (75, 255, 255)
# Minimum required radius of enclosing circle of contour
MIN_RADIUS = 10
# Initialize camera
cam = cv2.VideoCapture(0)
# Main loop
while True:
# Get image from camera
ret_val, img = cam.read()
# Blur image to remove noise
img_filter = cv2.GaussianBlur(img.copy(), (3, 3), 0)
# Convert image from BGR to HSV
img_filter = cv2.cvtColor(img_filter, cv2.COLOR_BGR2HSV)
# Set pixels to white if in color range (binary bitmap) for each color
img_binary_R1 = cv2.inRange(img_filter.copy(), THRESHOLD_LOW_R1, THRESHOLD_HIGH_R1)
img_binary_R2 = cv2.inRange(img_filter.copy(), THRESHOLD_LOW_R2, THRESHOLD_HIGH_R2)
img_binary_G = cv2.inRange(img_filter.copy(), THRESHOLD_LOW_G, THRESHOLD_HIGH_G)
# Gathers all binary bitmap
img_binary = img_binary_R2 + img_binary_G + img_binary_R1
# Find center of object using contours
img_contours = img_binary.copy()
contours = cv2.findContours(img_contours, cv2.RETR_EXTERNAL, \
cv2.CHAIN_APPROX_SIMPLE)[-2]
# Find the largest contour
center = None
radius = 0
if len(contours) > 0:
c = max(contours, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
if M["m00"] > 0:
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
if radius < MIN_RADIUS:
center = None
# Print out the location and size (radius) of the largest detected contour
if center is not None:
# Draw a green circle around the detected object
cv2.circle(img, center, int(round(radius)), (0, 255, 0))
size = radius * 2
distance = 70 * 41 / size  # rough distance estimate from apparent size (hard-coded calibration constants)
print(str(center) + " " + str(distance))
# Show image windows
cv2.imshow('webcam', img)
cv2.imshow('binary', img_binary)
cv2.imshow('contours', img_contours)
cv2.waitKey(1) |
import click
import knowlify
import config
@click.command()
@click.argument('filename_or_url', type=click.STRING, default='https://en.wikipedia.org/wiki/Mathematics')
@click.option('-p','path', type=click.STRING, default=None)
def main(filename_or_url, path):
page = knowlify.get_page(filename_or_url)
file_path = knowlify.output_page(page, path)
with knowlify.engine.MicroServerEngine(file_path=file_path) as f:
f.open_page()
while True:
pass  # keep the process alive so the micro server keeps serving the page
return None
if __name__ == '__main__':
main()
|
from transformers import ElectraTokenizer, ElectraForTokenClassification
from ner_pipeline import NerPipeline
from pprint import pprint
tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-small-finetuned-naver-ner")
model = ElectraForTokenClassification.from_pretrained("monologg/koelectra-small-finetuned-naver-ner")
ner = NerPipeline(model=model,
tokenizer=tokenizer,
ignore_labels=[],
ignore_special_tokens=True)
texts = [
"문재인 대통령은 28일 서울 코엑스에서 열린 ‘데뷰 (Deview) 2019’ 행사에 참석해 젊은 개발자들을 격려하면서 우리 정부의 인공지능 기본구상을 내놓았다. 출처 : 미디어오늘 (http://www.mediatoday.co.kr)",
"2017년 장점마을 문제가 본격적으로 이슈가 될 무렵 임 의원은 장점마을 민관협의회 위원들과 여러 차례 마을과 금강농산을 찾아갔다.",
"2009년 7월 FC서울을 떠나 잉글랜드 프리미어리그 볼턴 원더러스로 이적한 이청용은 크리스탈 팰리스와 독일 분데스리가2 VfL 보훔을 거쳐 지난 3월 K리그로 컴백했다. 행선지는 서울이 아닌 울산이었다"
]
pprint(ner(texts))
|
#FabianGonzalez.py
#I pledge my honor that I have abided by the Stevens Honor System. Fabian Gonzalez.
def main():
print("This program converts all lowercase text to all uppercase text.")
infileName= input("Which file would you like to convert into uppercase text?:")
outfileName= input("In Which file would you like to write the converted text in?:")
infile= open(infileName, "r")
outfile=open(outfileName, "w")
for i in infile:
print(i.upper())
outfile.write(i.upper())
infile.close()
outfile.close()
print("The converted, uppercase text has been written to "+outfileName)
print()
main()
input('Press ENTER to exit') |
from modelWithMultiplierAndNumberOfMachines import *
mBreakdown2 = []
kBreakdown2 = []
for k in range(1,51):
m = 1000
while True:
# Run the simulation
# https://etherscan.io/chart/hashrate
# 28/09/2018 total hash rate of Ethereum is equal to 266 TH/s
targetTotalHashRate = 266e12 # H/s e.g. total hash rate of Ethereum
numMiningPools = 10
averageMaxHashRatePerMiningPool = targetTotalHashRate / numMiningPools # H/s Sum of the hash rates of all units of all machines of all mining pools
technologicalMaximumHashRatePerUnit = 30e6 # H/s
energyConsumptionPerUnit = 140 # W
unitsPerMachine = 10
energyConsumptionPerMachine = energyConsumptionPerUnit * unitsPerMachine
maxHashRatePerMachine = technologicalMaximumHashRatePerUnit * unitsPerMachine # H/s each machine contains 'unitsPerMachine' units with a hash rate of 'technologicalMaximumHashRatePerUnit'
averageNumMachinePerMiningPool = averageMaxHashRatePerMiningPool / maxHashRatePerMachine
initialReward = 3 # ETH
blockTime = 15 # s
initialCurrencyValueWrtFiat = 200 # Euro
steps = 10 # In the case of Ethereum each step is about 15 seconds, 172800 steps is about 1 month
np.random.seed(1) # If seed is not fixed there is noise in the plot, but the shape is the same
# superMiningPool parameters are changed in order to simulate different scenarios
# note that a lambda is used because in order to initialize an agent its model is required
superMiningPool = lambda model: MiningPool(0, k, m, model)
network = Network(superMiningPool, numMiningPools, averageNumMachinePerMiningPool, maxHashRatePerMachine, energyConsumptionPerMachine, initialReward, blockTime, initialCurrencyValueWrtFiat)
for i in range(steps):
network.step()
if network.totalHashRate == network.schedule.agents[0].hashRate:
break
if network.totalHashRate == network.schedule.agents[0].hashRate:
#print(str(k) + ' ' + str(m))
kBreakdown2.append(k)
mBreakdown2.append(m)
break
m += 100
mBreakdown3 = []
kBreakdown3 = []
for k in range(1,51):
m = 1000
while True:
# Run the simulation
# https://etherscan.io/chart/hashrate
# 28/09/2018 total hash rate of Ethereum is equal to 266 TH/s
targetTotalHashRate = 266e12 # H/s e.g. total hash rate of Ethereum
numMiningPools = 10
averageMaxHashRatePerMiningPool = targetTotalHashRate / numMiningPools # H/s Sum of the hash rates of all units of all machines of all mining pools
technologicalMaximumHashRatePerUnit = 30e6 # H/s
energyConsumptionPerUnit = 140 # W
unitsPerMachine = 10
energyConsumptionPerMachine = energyConsumptionPerUnit * unitsPerMachine
maxHashRatePerMachine = technologicalMaximumHashRatePerUnit * unitsPerMachine # H/s each machine contains 'unitsPerMachine' units with a hash rate of 'technologicalMaximumHashRatePerUnit'
averageNumMachinePerMiningPool = averageMaxHashRatePerMiningPool / maxHashRatePerMachine
initialReward = 3 # ETH
blockTime = 15 # s
initialCurrencyValueWrtFiat = 200 # Euro
steps = 10 # In the case of Ethereum each step is about 15 seconds, 172800 steps is about 1 month
np.random.seed(1) # TODO Investigate why plots with seed equal to 1 and 2 have different shape for low k
# superMiningPool parameters are changed in order to simulate different scenarios
# note that a lambda is used because in order to initialize an agent its model is required
superMiningPool = lambda model: MiningPool(0, k, m, model)
network = Network(superMiningPool, numMiningPools, averageNumMachinePerMiningPool, maxHashRatePerMachine, energyConsumptionPerMachine, initialReward, blockTime, initialCurrencyValueWrtFiat)
for i in range(steps):
network.step()
if network.totalHashRate == 0:
break
if network.totalHashRate == 0:
#print(str(k) + ' ' + str(m))
kBreakdown3.append(k)
mBreakdown3.append(m)
break
m += 1000
plt.title('Only super mining pool is active (1) and no mining pool is active (2) for m and k', y=1.08)
plt.xlabel('k')
plt.ylabel('m')
#plt.yscale('log')
plt.plot(kBreakdown2, mBreakdown2, label='1')
plt.plot(kBreakdown3, mBreakdown3, label='2')
plt.legend()
plt.savefig('plots/Only super mining pool is active (1) and no mining pool is active (2) for m and k', bbox_inches='tight')
plt.clf()
|
import requests
import pandas as pd
import time
# Note that the apikey parameter in the url string should be replaced with your own api key which can be obtained for free
# at https://www.alphavantage.co/support/
def get_exchange_rates(apikey, symbol="BTC"):
"""
Downloads daily historical time series for Bitcoin (BTC) traded in the USD market, refreshed daily at midnight (UTC).
params: apikey (str), symbol (str)
returns: dataframe
"""
url = 'https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_DAILY&symbol={}&market=USD&apikey={}'.format(symbol, apikey)
r = requests.get(url)
data = r.json()
df = pd.DataFrame.from_dict(data["Time Series (Digital Currency Daily)"], orient="index").sort_index(axis=1)
df = df.rename(columns={ '1a. open (USD)': 'Open (USD)', '2a. high (USD)': 'High (USD)', '3a. low (USD)': 'Low (USD)', '4a. close (USD)': 'Close (USD)', '5. volume': 'Volume', '6. market cap (USD)': 'Market Cap (USD)'})
df = df[['Open (USD)', 'High (USD)', 'Low (USD)', 'Close (USD)', 'Volume', 'Market Cap (USD)']]
df.to_csv("prices.csv")
return df
def get_SMA(apikey, symbol="BTC", time_period=50):
"""
Downloads the daily simple moving average (SMA) values for Bitcoin in USD. Since the SMA is considered to react relatively
slowly to price changes, we use a time period of 50 days. Additionally, since the SMA is usually calculated from closing prices,
we set the series type parameter to close.
params: apikey (str), symbol (str), time_period (positive int)
returns dataframe
"""
url = 'https://www.alphavantage.co/query?function=SMA&symbol={}USD&interval=daily&time_period={}&series_type=close&apikey={}'.format(symbol, time_period, apikey)
r = requests.get(url)
data = r.json()
df = pd.DataFrame.from_dict(data['Technical Analysis: SMA'], orient="index").sort_index(ascending=False)
df.to_csv("sma.csv")
return df
def get_EMA(apikey, symbol="BTC", time_period=20):
"""
Downloads the daily exponential moving average (EMA) values for Bitcoin in USD. Since the EMA is used as a shorter-term indicator, we use a time period of 20 days.
Additionally, since moving averages are usually calculated from closing prices, we also use closing prices for the EMA series type parameter.
params: apikey (str), symbol (str), time_period (positive int)
returns dataframe
"""
url = 'https://www.alphavantage.co/query?function=EMA&symbol={}USD&interval=daily&time_period={}&series_type=close&apikey={}'.format(symbol, time_period, apikey)
r = requests.get(url)
data = r.json()
df = pd.DataFrame.from_dict(data['Technical Analysis: EMA'], orient="index").sort_index(ascending=False)
df.to_csv("ema.csv")
return df
def get_RSI(apikey, symbol="BTC", time_period=14):
"""
Downloads the daily relative strength index (RSI) values for Bitcoin. Popular value for time period of the
indicator is 14 which we set as the default value
params: apikey (str), symbol (str), time_period (positive int)
returns dataframe
"""
url = 'https://www.alphavantage.co/query?function=RSI&symbol={}USD&interval=daily&time_period={}&series_type=close&apikey={}'.format(symbol, time_period, apikey)
r = requests.get(url)
data = r.json()
df = pd.DataFrame.from_dict(data["Technical Analysis: RSI"], orient="index").sort_index(ascending=False)
df.to_csv("rsi.csv")
return df
def get_BBANDS(apikey, symbol="BTC", time_period=20, nbdevup=2, nbdevdn=2, matype=0):
"""
Downloads the daily Bollinger Bands values for Bitcoin. Here we use the standard Bollinger Band formula where we
set the centerline as a 20 day simple moving average (SMA) and use a 2x multiplier for the upper and lower bands.
Hence, time_period is 20, nbdevup and nbdevdn are both 2, and matype is 0 where 0 signifies SMA. Check alpha vantage
documentation for more information.
params: apikey (str), symbol (str), time_period (positive int), nbdevup(positive int)
nbdevdn (postive int), matype (int [0,8])
returns: df
"""
url = 'https://www.alphavantage.co/query?function=BBANDS&symbol={}USD&interval=daily&time_period={}&series_type=close&nbdevup={}&nbdevdn={}&matype={}&apikey={}'.format(symbol, time_period, nbdevup, nbdevdn, matype, apikey)
r = requests.get(url)
data = r.json()
df = pd.DataFrame.from_dict(data["Technical Analysis: BBANDS"], orient="index").sort_index(axis=1)
df.to_csv("bbands.csv")
return df
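# Illustrative sketch (an addition, not part of the original pipeline): the standard
# Bollinger Bands can also be reproduced locally from a pandas Series of closing prices,
# which is handy for sanity-checking the API output. The helper name is hypothetical.
def _bbands_local(close, time_period=20, nbdev=2):
    """Return (middle, upper, lower) bands computed from a Series of closing prices."""
    middle = close.rolling(time_period).mean()  # 20-day SMA centerline
    std = close.rolling(time_period).std()      # rolling standard deviation
    return middle, middle + nbdev * std, middle - nbdev * std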
def get_MACD(apikey, symbol="BTC", fastperiod=12, slowperiod=26, signalperiod=9):
"""
Downloads the moving average convergence / divergence (MACD) values. The MACD represents a trend
following indicator that highlights the short-term price momentum and whether it follows the direction
of the long-term price momentum or if a trend is near. The indicator uses the difference between
a slow period EMA and fast period EMA which is popularly set to 12 and 26, respectively. Likewise,
there is a signal line which is generally defined by a 9 period EMA.
params: apikey (str), symbol (str), fastperiod (positive int), slowperiod (positive int), signalperiod (positive int)
returns: dataframe
"""
    url = 'https://www.alphavantage.co/query?function=MACD&symbol={}USD&interval=daily&series_type=close&fastperiod={}&slowperiod={}&signalperiod={}&apikey={}'.format(symbol, fastperiod, slowperiod, signalperiod, apikey)
r = requests.get(url)
data = r.json()
df = pd.DataFrame.from_dict(data["Technical Analysis: MACD"], orient="index").sort_index(axis=1)
df.to_csv("macd.csv")
return df
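# Illustrative sketch (an addition, not part of the original pipeline): the MACD line is
# the fast EMA minus the slow EMA, and the signal line is an EMA of the MACD itself,
# mirroring the parameters requested from the API above. The helper name is hypothetical.
def _macd_local(close, fastperiod=12, slowperiod=26, signalperiod=9):
    """Return (macd, signal) Series computed from a pandas Series of closing prices."""
    fast_ema = close.ewm(span=fastperiod, adjust=False).mean()
    slow_ema = close.ewm(span=slowperiod, adjust=False).mean()
    macd = fast_ema - slow_ema  # short-term vs. long-term momentum
    signal = macd.ewm(span=signalperiod, adjust=False).mean()
    return macd, signal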
def get_STOCH(apikey, symbol="BTC", fastkperiod=14, slowkperiod=3, slowdperiod=3, slowkmatype=0, slowdmatype=0):
"""
Downloads the daily stochastic oscillator (STOCH) values. The indicator shows momentum by comparing the
closing price with a range of its prices over a certain period of time. Generally uses simple moving average
hence the default values of slowkmatype and slowdmatype. Additional parameters are the fastkperiod,
slowkperiod, and slowdperiod which are commonly set to 14 for the fast parameter and 3 for the slow parameters.
    params: apikey (str), symbol (str), fastkperiod (positive int), slowkperiod (positive int), slowdperiod (positive int),
    slowkmatype (int [0,8]), slowdmatype (int [0,8])
returns: dataframe
"""
url = 'https://www.alphavantage.co/query?function=STOCH&symbol={}USD&interval=daily&fastkperiod={}&slowkperiod={}&slowdperiod={}&slowkmatype={}&slowdmatype={}&apikey={}'.format(symbol, fastkperiod, slowkperiod, slowdperiod, slowkmatype, slowdmatype, apikey)
r = requests.get(url)
data = r.json()
df = pd.DataFrame.from_dict(data["Technical Analysis: STOCH"], orient="index").sort_index(axis=1)
df.to_csv("stoch.csv")
return df
def get_data(apikey):
"""
    Calls the get_ functions to retrieve the necessary data. Since we are using the free API, which is limited
    to 5 calls/minute, we pause between batches of calls so that we don't go over the limit. We then merge
    the data into a single dataframe using outer union logic and write it to the current directory
    as a csv file.
params: apikey (str)
returns: dataframe
"""
exchange_rates = get_exchange_rates(apikey)
sma = get_SMA(apikey)
ema = get_EMA(apikey)
rsi = get_RSI(apikey)
bbands = get_BBANDS(apikey)
time.sleep(60) # Wait a minute before using the API again
macd = get_MACD(apikey)
stoch = get_STOCH(apikey)
datasets = [exchange_rates, sma, ema, rsi, bbands, macd, stoch]
data = pd.concat(datasets, axis=1)
data = data[::-1]
data = data.dropna(axis=0)
data.to_csv("data.csv")
return data
if __name__ == "__main__":
apikey = "S8YIUGVLMYAG3S4E"
data = get_data(apikey)
|
import PySimpleGUI as sg
from datetime import datetime
import pandas as pd
df = pd.read_csv(r"C:\Users\avivy\PycharmProjects\pythonProject\pysimplegui\test_for_main.csv", index_col=False)
first_col = df.iloc[:, 0].values
second_col = df.iloc[:, 1].values
third_col = df.iloc[:, 2].values
# print(df.loc[df['name'] == 'aviv'])
# print(df.loc[df['name'].str.contains('ha')])
# print(df['ID'].value_counts())
# print(df['ID'].value_counts())
buttons_names = [first_col[0], first_col[1], first_col[2]]
def make_win2():
    sg.theme('DarkGreen4')  # modern replacement for the deprecated ChangeLookAndFeel
layout = [
[sg.Submit('Submit', font='consolas 10'), sg.Button('Exit', font='consolas 10')],
[sg.Button('All checked', font='consolas 10', enable_events=True, key='Check_All'),
sg.Button('All unchecked', font='consolas 10', enable_events=True, key='Uncheck_All')],
[sg.HorizontalSeparator()]] + [
[sg.Checkbox(f'{first_col[i]}', enable_events=True, font='consolas 10', key=f'{first_col[i]}')
for i in range(len(first_col))],
[sg.T(first_col[0]), sg.InputOptionMenu(first_col)],
[sg.T(df.sort_values('AWS'), font='consolas 10')],
[sg.InputOptionMenu(first_col)]]
window = sg.Window('Checklist01', layout, finalize=True)
while True:
event, values = window.read(timeout=100)
if event == sg.WINDOW_CLOSED or event == 'Exit':
break
        elif event == 'Submit':
            dateTimeObj = datetime.now()
            with open('lolo.py', 'a+') as f:  # context manager guarantees the file is closed
                f.write('Checklist01 = [\n')
                f.write("'" + str(dateTimeObj) + "',\n")
                f.write(str(values) + ']\n')
            sg.popup('You have successfully submitted')
if event == 'Check_All':
for i in range(len(first_col)):
window[f'{first_col[i]}'].update(True)
elif event == 'Uncheck_All':
for i in range(len(first_col)):
window[f'{first_col[i]}'].update(False)
window.close()
def main():
make_win2()
while True: # Event Loop
        window, event, values = sg.read_all_windows()
        if window is None:  # read_all_windows() returns None once every window is closed
            break
        if event == sg.WIN_CLOSED or event == 'Exit':
            window.close()
        elif '-OUTPUT-' in window.AllKeysDict:  # guard: the checklist window has no '-OUTPUT-' element
            window['-OUTPUT-'].update('Other window is closed')
main()
|
class Solution:
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
https://leetcode.com/problems/3sum-closest/discuss/7871/Python-O(N2)-solution
https://leetcode.com/problems/3sum-closest/discuss/7873/A-n2-Solution-Can-we-do-better?page=1
"""
answer = nums[0] + nums[1] + nums[2]
sortlist = sorted(nums)
        for i in range(len(sortlist) - 2):
            left, right = i + 1, len(sortlist) - 1
            while left < right:
                current = sortlist[i] + sortlist[left] + sortlist[right]  # renamed to avoid shadowing built-in sum()
                if current == target:
                    return current
                if abs(current - target) < abs(answer - target):
                    answer = current
                if current < target:
                    left += 1
                else:
                    right -= 1
        return answer
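# Minimal usage sketch (added for illustration): for the classic LeetCode example the
# closest achievable sum is 2, since -1 + 2 + 1 is nearest to the target 1.
if __name__ == "__main__":
    assert Solution().threeSumClosest([-1, 2, 1, -4], 1) == 2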
|
# Processes - threads - coroutines
# Example: when the Thunder (Xunlei) app downloads a movie, it first splits the movie into many small chunks and then downloads them
# Here the Thunder app downloading the movie is a process
# Many small chunks make up a thread
# Each thread contains many chunks, and each chunk can in turn be called a coroutine
from collections.abc import Iterable
def task1(n):
    for i in range(n):
        print("Carrying brick {}".format(i))
        yield i
def task2(n):
    for i in range(n):
        print("Listening to song {}".format(i))
        yield None
g1 = task1(5)
print(g1)
g2 = task2(6)
while True:
    try:
        g1.__next__()
        print(g1.__next__())
        g2.__next__()
    except StopIteration:  # stop once either generator is exhausted
        break
# Iterable objects: 1. generators 2. tuples, lists, sets, dicts, strings 3. integers are NOT iterable
# How to check whether an object is iterable
list1 = [1, 2, 4, 5, 6, 7]
f = isinstance(list1, Iterable)
print(f)  # prints True, so the object is iterable
'''
An iterator is a way of accessing the elements of a collection; it is an object that remembers its traversal position.
An iterator starts from the first element of the collection and advances until every element has been visited.
An iterator never moves backwards.
An object that can be called by the next() function, returning the next value each time, is called an iterator.
Is everything iterable necessarily an iterator? No.
For example, a list is iterable,
but a list is not an iterator: calling next() on it raises an error.
A generator is an iterator and can be driven with next().
'''
# print(next(list1))  # raises an error at run time, showing that a list is not an iterator
list2 = [2, 3, 4, 56, 2]  # list2 is not yet an iterator
# Turn the list into an iterator
list2 = iter(list2)  # the iter() function converts any iterable into an iterator
# list2 is now an iterator, so the next() function can be called on it
print(next(list2))
'''
Generators vs. iterators
Every generator is an iterator, but iterators are not limited to generators,
because any iterable can be turned into an iterator via the iter() function.
''' |
import connexion
import six
import os
import googleapiclient.discovery
import json
from swagger_server.models.vm import VM # noqa: E501
from swagger_server import util
creds = os.environ['HOME'] + '/.cloudmesh/configuration_gce_419.json'
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = creds
with open(creds) as json_data:
d = json.load(json_data)
project = d['project_id']
def vms_get(): # noqa: E501
"""vms_get
Returns a list of VMs # noqa: E501
:rtype: List[VM]
"""
vms = []
compute = googleapiclient.discovery.build('compute', 'v1')
zones = compute.zones().list(project=project).execute()
results=[]
for zone in zones['items']:
instances = compute.instances().list(project=project, zone=zone['name']).execute()
if 'items' in instances.keys():
results = results + instances['items']
for result in results:
vm = VM(id=result['id'],
creation_timestamp=result['creationTimestamp'],
name=result['name'],
                description=result.get('description', ''),  # 'description' may be absent on some instances
machine_type=result['machineType'],
status=result['status'],
zone=result['zone'],
can_ip_forward=result['canIpForward'])
vms.append(vm)
return vms
def vms_id_get(id): # noqa: E501
"""vms_id_get
Returns information on a VM instance # noqa: E501
:param id: ID of VM to fetch
:type id: str
:rtype: VM
"""
    vms = vms_get()
    # vms is a list, so select the entry whose id matches rather than indexing with a boolean
    for vm in vms:
        if vm.id == id:
            return vm
    return None
|
from django.apps import AppConfig
class ValdesangelConfig(AppConfig):
name = 'valdesangel'
|
# File to perform segmentation of given gray scaled image into different regions based on shade
# and maintain count per shade
import numpy as np
import cv2
# extended display showing count of different areas in gray scale image per index for readability
def extended_display(arr):
index = 0
print("Shade --> Count" )
print("---------------")
for element in arr:
print(f'{index} --> {int(element)}\n')
index += 1
# to display count of different areas in gray scale image
def display(arr):
for element in arr:
print(int(element))
# function to retrieve (threshold value , max value, threshold type) per gray shade retrieved to extract
# binary image from grayscale through threshold process
def get_threshold_maxVal_perGrayShade(shade):
if shade == 0: #black
return 128,255, cv2.THRESH_BINARY_INV
elif shade == 255: #white
return 225,255, cv2.THRESH_BINARY
else:
return shade+10, 255, cv2.THRESH_TOZERO_INV
# function to count areas per shade retrieved
def count_areas_per_shade(levels, isBin, image, gray):
    np_count = np.zeros([256, 1])
    for shade in levels:
        threshold, maxVal, thres_type = get_threshold_maxVal_perGrayShade(shade)
        if isBin == 0:
            canvas_str = np.zeros(image.shape, np.uint8)
        ret, thresh = cv2.threshold(gray, threshold, maxVal, thres_type)
        erode = cv2.erode(thresh, None, iterations=3)
        contours_str = "contours" + str(shade)
        contours, hierarchy = cv2.findContours(erode, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        count = len(contours)
        # print(f'{contours_str}: {count}')
        np_count[shade] = count
        if isBin == 0:
            for cont in contours:
                cv2.drawContours(canvas_str, cont, -1, (0, 255, 0), 3)
            # *** uncomment this code to view different contours per gray scale ***
            # cv2.imshow(contours_str, canvas_str)
            # cv2.waitKey(0)
    return np_count
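# Usage sketch (an addition for illustration; 'segments.png' is a hypothetical file name):
# load an image, derive the distinct gray levels actually present, count the regions per
# shade, and print the counts per shade index.
def example_usage(path="segments.png"):
    image = cv2.imread(path)                        # BGR image as loaded by OpenCV
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # single-channel gray-scale version
    levels = np.unique(gray)                        # shades present in the image
    counts = count_areas_per_shade(levels, 0, image, gray)
    extended_display(counts)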
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 10 15:18:25 2020
@author: Aryaman
"""
stan = float(input("Enter the step angle in degrees: "))
dist = float(input("Enter distance to be covered in cm: "))
dia = float(input("Enter wheel diameter in cm: "))
circum = dia * 3.14            # wheel circumference, using pi ~= 3.14
distep = stan / 360 * circum   # distance covered per step
totste = dist / distep         # total steps needed
print("The number of steps to cover " + str(dist) + " cm is " + str(totste) + " steps")
|
"""py-motmetrics - metrics for multiple object tracker (MOT) benchmarking.
Christoph Heindl, 2017
https://github.com/cheind/py-motmetrics
"""
import argparse
import glob
import os
import logging
import motmetrics as mm
import pandas as pd
import datetime
import numpy as np
from collections import OrderedDict
from pathlib import Path
from HATracking.visualize import show_tracks
OUTPUT_FORMAT_STRING = "MOT_summary_{}_{}.txt"
OUTPUT_FOLDER = "data/ADL/py_mot_metric_scores"
FRAMES_PER_CHUNK = 10000
def format_output(input_filename):
"""
Format in the style needed for latex
"""
with open(input_filename, "w") as infile:
for line in infile:
print(line.split())
def parse_args():
parser = argparse.ArgumentParser(description="""
Compute metrics for trackers using MOTChallenge ground-truth data.
Files
-----
All file content, ground truth and test files, have to comply with the
format described in
Milan, Anton, et al.
"Mot16: A benchmark for multi-object tracking."
arXiv preprint arXiv:1603.00831 (2016).
https://motchallenge.net/
Structure
---------
Layout for ground truth data
<GT_ROOT>/<SEQUENCE_1>/gt/gt.txt
<GT_ROOT>/<SEQUENCE_2>/gt/gt.txt
...
Layout for test data
<TEST_ROOT>/<SEQUENCE_1>.txt
<TEST_ROOT>/<SEQUENCE_2>.txt
...
Sequences of ground truth and test will be matched according to the `<SEQUENCE_X>`
string.""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('groundtruths', type=str,
help='Directory containing ground truth files.')
parser.add_argument('tests', type=str,
help='Directory containing tracker result files')
parser.add_argument('--output-folder', type=str,
help='Where to write the score file, defaults to one level up from the preds',
default=None)
parser.add_argument('--loglevel', type=str, help='Log level',
default='info')
parser.add_argument('--fmt', type=str, help='Data format',
default='mot15-2D')
parser.add_argument('--solver', type=str, help='LAP solver to use')
parser.add_argument('--frames-per-chunk', default=FRAMES_PER_CHUNK,
type=int, help='The number of frames per chunk')
parser.add_argument('--vis', type=str, default=None,
help="visualize tracks. Options are 'gt', 'pred', 'both'. No input will result in no visualization")
parser.add_argument('--video-folder', type=str,
help='The folder containing the ADL videos')
return parser.parse_args()
def compare_dataframes(gts, ts):
accs = []
names = []
for k, tsacc in ts.items():
if k in gts:
logging.info('Comparing {}...'.format(k))
accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5))
names.append(k)
else:
logging.warning('No ground truth for {}, skipping.'.format(k))
return accs, names
def ADL_scorer(args):
gtfiles = sorted(glob.glob(os.path.join(args.groundtruths, '*')))
tsfiles = sorted([f for f in sorted(glob.glob(os.path.join(args.tests, '*'))) if not os.path.basename(f).startswith('eval')])
logging.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
logging.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
logging.info('Default LAP solver \'{}\''.format(mm.lap.default_solver))
logging.info('Loading files.')
print("GT files: {}\n TS files: {} ".format(gtfiles, tsfiles))
mm.io.loadtxt(tsfiles[0], args.fmt)
gt = OrderedDict([(Path(f).parts[-1][-8:-4], mm.io.loadtxt(f, fmt=args.fmt, min_confidence=1)) for f in gtfiles])
ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt=args.fmt)) for f in tsfiles])
print("GT keys: {}\n TS keys: {}".format(gt.keys(),ts.keys()))
mh = mm.metrics.create()
accs, names = compare_dataframes(gt, ts)
logging.info('Running metrics')
summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)
print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))
logging.info('Completed')
exit()
def chunk_df(df_dict, chunk_size=10000, verbose=False):
"""
    Break the video up into chunks of chunk_size frames
df_dict : OrderedDict[(string, pd.df)]
The data
chunk_size : int
How many frames per chunk
"""
output_dict = OrderedDict()
for key, df in df_dict.items():
index = df.index.to_frame(index=False)
frame_ids = index['FrameId']
max_frame = max(frame_ids)
        for i in range(0, max_frame + 1, chunk_size):  # +1 so a frame exactly at max_frame is kept
inds = frame_ids.between(i, i + chunk_size - 1, inclusive=True)
# TODO determine why values is required, seems kinda dumb
new_chunk = df.loc[inds.values, :]
new_key = "{}_{}".format(key, i)
output_dict[new_key] = new_chunk
if verbose:
print("new chunk : {}".format(new_chunk))
return output_dict
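# Minimal sketch (added for illustration) of what chunk_df produces: a frame indexed by
# ('FrameId', 'Id'), as mm.io.loadtxt returns, is split into fixed-size frame ranges,
# each stored under a suffixed key such as 'seq_0' and 'seq_10'. The helper name is hypothetical.
def _chunk_df_example():
    index = pd.MultiIndex.from_tuples([(0, 1), (5, 1), (10, 2)],
                                      names=["FrameId", "Id"])
    df = pd.DataFrame({"X": [1.0, 2.0, 3.0]}, index=index)
    chunks = chunk_df(OrderedDict([("seq", df)]), chunk_size=10)
    print(list(chunks.keys()))  # ['seq_0', 'seq_10']: frames 0-9 and 10-19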
if __name__ == '__main__':
args = parse_args()
if args.output_folder is None: # no output folder was specified
# Take all but the last folder
args.output_folder = os.path.join(*os.path.split(args.tests)[:-1])
# set up logging
loglevel = getattr(logging, args.loglevel.upper(), None)
if not isinstance(loglevel, int):
raise ValueError('Invalid log level: {} '.format(args.loglevel))
logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s - %(message)s', datefmt='%I:%M:%S')
if args.solver:
mm.lap.default_solver = args.solver
# TODO look into that movable stuff
# sort them just for prettier output
gtfiles = sorted(glob.glob(os.path.join(args.groundtruths, '*/gt/gt.txt')))
tsfiles = sorted([f for f in glob.glob(os.path.join(args.tests, '*.txt'))
if not os.path.basename(f).startswith('eval')])
logging.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
logging.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
logging.info('Default LAP solver \'{}\''.format(mm.lap.default_solver))
logging.info('Loading files.')
print("GT files: {}\n TS files: {} ".format(gtfiles, tsfiles))
mm.io.loadtxt(tsfiles[0], fmt=args.fmt)
gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt=args.fmt,
min_confidence=1)) for f in gtfiles[:1]])
ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0],
mm.io.loadtxt(f, fmt=args.fmt)) for f in tsfiles[:1]])
if args.vis is not None:
# loop over the shared keys
gt_keys = gt.keys()
ts_keys = ts.keys()
shared_keys = list(gt_keys | ts_keys)
for key in shared_keys:
video_file = "{}.mp4".format(key)
video_file = os.path.join(args.video_folder, video_file)
# TODO clean this up so it's more readable
output_folder = "vis_{}.avi".format(args.vis)
output_folder = os.path.join(args.output_folder, "vis")
os.makedirs(output_folder, exist_ok=True)
output_file = "{}_{}.avi".format(key, args.vis)
output_file = os.path.join(output_folder, output_file)
current_ts = ts[key]
current_gt = gt[key]
print("Going to write visualizations to {}".format(output_file))
if args.vis == "both":
show_tracks(video_file, output_file, current_ts, current_gt)
elif args.vis == "gt":
show_tracks(video_file, output_file, current_gt)
elif args.vis == "pred":
show_tracks(video_file, output_file, current_ts)
else:
raise ValueError("The vis option {} was not included".format(args.vis))
new_ts = chunk_df(ts)
new_gt = chunk_df(gt)
NUM_ROWS = 600000
#ts = OrderedDict([(k, v.iloc[:NUM_ROWS, :]) for k, v in ts.items()])
print("GT keys: {}\n TS keys: {}".format(gt.keys(), ts.keys()))
print("new GT keys: {}\n new TS keys: {}".format(new_gt.keys(), new_ts.keys()))
# compute the metrics
mh = mm.metrics.create()
accs, names = compare_dataframes(new_gt, new_ts)
logging.info('Running metrics')
print(mm.metrics)
summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)
rendered_summary = mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names)
print(rendered_summary)
print(args)
test_folder = os.path.split(args.tests)[-1]
score_file = OUTPUT_FORMAT_STRING.format(test_folder,
str(datetime.datetime.now()).replace(" ", ""))
output_file = os.path.join(args.output_folder, score_file)
with open(output_file, "w") as outfile:
outfile.write(rendered_summary)
outfile.write("\n")
outfile.write(str(args))
logging.info('Completed')
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" OpenViszla platform definitions.
This is a non-core platform. To use it, you'll need to set your LUNA_PLATFORM variable:
> export LUNA_PLATFORM="luna.gateware.platform.openvizsla:OpenVizslaPlatform
"""
from amaranth import *
from amaranth.build import *
from amaranth.vendor.xilinx_spartan_3_6 import XilinxSpartan6Platform
from amaranth_boards.resources import *
from .core import LUNAPlatform
__all__ = ["OpenVizslaPlatform"]
class OpenVizslaClockDomainGenerator(Elaboratable):
""" OpenVizsla clock domain generator.
Assumes the ULPI PHY will be providing a USB clock.
"""
def __init__(self, *, clock_frequencies=None, clock_signal_name=None):
pass
def elaborate(self, platform):
m = Module()
# Create our domains; but don't do anything else for them, for now.
m.domains.sync = ClockDomain()
m.domains.usb = ClockDomain()
m.domains.fast = ClockDomain()
m.d.comb += [
ClockSignal("sync") .eq(ClockSignal("usb")),
ClockSignal("fast") .eq(ClockSignal("usb"))
]
return m
class OpenVizslaPlatform(XilinxSpartan6Platform, LUNAPlatform):
""" Board description for OpenVizsla USB analyzer. """
name = "OpenVizsla"
device = "xc6slx9"
package = "tqg144"
speed = "3"
default_clk = "clk_12MHz"
clock_domain_generator = OpenVizslaClockDomainGenerator
default_usb_connection = "target_phy"
#
# I/O resources.
#
resources = [
# Clocks.
Resource("clk_12MHz", 0, Pins("P50", dir="i"), Clock(12e6), Attrs(IOSTANDARD="LVCMOS33")),
# Buttons / LEDs.
*ButtonResources(pins="P67", attrs=Attrs(IOSTANDARD="LVCMOS33")),
*LEDResources(pins="P57 P58 P59", attrs=Attrs(IOSTANDARD="LVCMOS33")),
# Core ULPI PHY.
ULPIResource("target_phy", 0,
data="P120 P119 P118 P117 P116 P115 P114 P112", clk="P123",
dir="P124", nxt="P121", stp="P126", rst="P127", rst_invert=True,
attrs=Attrs(IOSTANDARD="LVCMOS33")
),
# FTDI FIFO connection.
Resource("ftdi", 0,
Subsignal("clk", Pins("P51")),
Subsignal("d", Pins("P65 P62 P61 P46 P45 P44 P43 P48")),
Subsignal("rxf_n", Pins("P55")),
Subsignal("txe_n", Pins("P70")),
Subsignal("rd_n", Pins("P41")),
Subsignal("wr_n", Pins("P40")),
Subsignal("siwua_n", Pins("P66")),
Subsignal("oe_n", Pins("P38")),
Attrs(IOSTANDARD="LVCMOS33", SLEW="FAST")
),
# Trigger in/out pins.
Resource("trigger_in", 0, Pins("P75"), Attrs(IOSTANDARD="LVCMOS33")),
Resource("trigger_out", 0, Pins("P74"), Attrs(IOSTANDARD="LVCMOS33")),
]
connectors = [
Connector("spare", 0,
"- - P102 P101 P100 P99 P98 P97 P95 P94 P93 P92" # continued
"P88 P87 P85 P84 P83 P82 P81 P80 P79 P78 P75 P74"
)
]
def toolchain_program(self, products, name):
""" Programs the OpenVizsla's FPGA. """
try:
from openvizsla import OVDevice
from openvizsla.libov import HW_Init
except ImportError:
raise ImportError("pyopenvizsla is required to program OpenVizsla boards")
# Connect to our OpenVizsla...
device = OVDevice()
failed = device.ftdi.open()
if failed:
raise IOError("Could not connect to OpenVizsla!")
# ... and pass it our bitstream.
try:
with products.extract(f"{name}.bit") as bitstream_file:
HW_Init(device.ftdi, bitstream_file.encode('ascii'))
finally:
device.ftdi.close()
|
from rendering import Geom2d, Transform
import numpy as np
from utils import dist
class Device(Geom2d):
def __init__(self, env, parent, kp=np.array([[-1, 0], [1, 0]]), color=(1,0,0,0.5), geom_type=None, filled=True):
self.env = env
self.kp = kp
self.geom = None
self.color = color
self.parent = parent
self.parent.devices.append(self)
super().__init__(env=self.env, kp=self.kp, color=self.color, parent=parent, filled=filled)
self.geom = super()._render()
self.geom.add_attr(self.env.move_to_center)
def _render(self):
return self.geom
class BlobFinder(Device):
def __init__(self, env, parent, radius, color=(1,0,0,0.5), geom_type=None, filled=True):
self.radius = radius
kp=np.array([[-radius, 0], [radius, 0]])
Device.__init__(self, env, parent, kp=kp, color=color, filled=filled)
    def read(self):
        """Return the agents within sensing radius, relative to the parent."""
        blob = []
        for a in self.env.agents:
            if a != self.parent and dist(self.parent, a) < self.radius:
                # relative position, color, and edge-to-edge distance of the detected agent
                blob.append({'pos2d': a.loc() - self.parent.loc(), 'color': a.color,
                             'dist': dist(self.parent, a) - self.parent.sz / 2 - a.sz / 2})
        return blob
|
# encoding=utf8
def depuratorQRY():
# dato = 'prueba'
importedQRY = '''INSERT INTO `contento-bi.MetLife.descargable-plantilla-ventas-inspector` (
CAMPANA_PLANTILLA,
TABLA_COD_PLAN,
TABLA_COD_SUBPLAN,
TABLA_CODIGO_PRODUCTO,
CANAL_PLANTILLA,
TIPO_DOCUMENTO,
DOCUMENTO,
NOMBRE,
APELLIDO_1,
APELLIDO_2,
GENERO,
FECHA_NACIMIENTO,
CODIGO_PROFESION,
OCUPACION,
COD_CIUDAD_RESIDENCIA,
DEPARTAMENTO,
DIRECCION,
TEL_RESIDENCIA,
INDCORE_PLANTILLA,
BENF1NOM,
BENF1APE,
BENF1APE2,
BENF1PARENTESCO,
BENF1PORCENTAJE,
BENF2NOM,
BENF2APE,
BENF2APE2,
BENF2PARENTESCO,
BENF2PORCENTAJE,
BENF3NOM,
BENF3APE,
BENF3APE2,
BENF3PARENTESCO,
BENF3PORCENTAJE,
BENF4NOM,
BENF4APE,
BENF4APE2,
BENF4PARENTESCO,
BENF4PORCENTAJE,
BENF5NOM,
BENF5APE,
BENF5APE2,
BENF5PARENTESCO,
BENF5PORCENTAJE,
BENF6NOM,
BENF6_APEL1,
BENF6_APEL2,
BENF6_PARENTESCO,
BENF6_PORCENTAJE,
BENF7NOM,
BENF7_APELL1,
BENF7_APELL2,
BENF7_PARENTESCO,
BENF7_PORCENTAJE,
BENF8NOM,
BENF8_APELL1,
BENF8_APELL2,
BENF8_PARENTESCO,
BENF8_PORCENTAJE,
BENF9NOM,
BENF9_APELL1,
BENF9_APELL2,
BENF9_PARENTESCO,
BENF9_PORCENTAJE,
BENF10NOM,
BENF10_APELL1,
BENF10_APELL2,
BENF10_PARENTESCO,
BENF10_PORCENTAJE,
FRECUE_PLANTILLA,
OBSSOM_PLANTILLA,
CALLCENTER_PLANTILLA,
WOLKVOX_FECHA_CREACION,
CONVENIO_PLANTILLA,
ENTIFI_PLANTILLA,
REFERENCIA,
DIGCHEQ_PLANTILLA,
FEVETA_PLANTILLA,
FORPAG_PLANTILLA,
MAIL,
TIPO_ENVIO,
INDACD_PLANTILLA,
AGENTE_DOCUMENTO,
AGENTE,
WOLKVOX_PHONECALL,
TABLA_PRIMA_MENSUAL,
WOLKVOX_IDCALL,
FECHA_CARGA,
CELULAR,
TEL_OFICINA,
CELULAR2,
WOLKVOX_TIPIFY_DESC,
WOLKVOX_TIPIFY_CODE,
TIPIFICACION_COD_GESTION,
COBTOT,
MESESG,
NUM_GRA,
ESTADO_CIVIL,
COMENTARIOS,
CIUDAD_LAB,
TARJETA,
NUMCUO,
FMUNCUE,
FECHA_DE_VENCIMIENTO,
WOLKVOX_FECHA_MODIFICACION,
SEGMENTO,
TIPO_DE_BASE,
BENFADICIONAL,
BENFADICIONALAPE,
BENFADICIONALAPE2,
BENFADICIONALNUMDOC,
BENFADICIONALTIPODOC,
BENFADICIONALFECHANAC,
BENFADICIONALPARENTESCO,
BENFADICIONALOCUPACION,
BENFADICIONALGENERO,
BENFADICIONAL2,
BENFADICIONAL2APE,
BENFADICIONAL2APE2,
BENFADICIONAL2NUMDOC,
BENFADICIONAL2TIPODOC,
BENFADICIONAL2FECHANAC,
BENFADICIONAL2PARENTESCO,
BENFADICIONAL2OCUPACION,
BENFADICIONAL2GENERO,
BENFADICIONAL3,
BENFADICIONAL3APE,
BENFADICIONAL3APE2,
BENFADICIONAL3NUMDOC,
BENFADICIONAL3TIPODOC,
BENFADICIONAL3FECHANAC,
BENFADICIONAL3PARENTESCO,
BENFADICIONAL3OCUPACION,
BENFADICIONAL3GENERO,
BENFHIJO,
BENFHIJOAPE,
BENFHIJOAPE2,
BENFHIJONUMDOCUMENTO,
BENFHIJOTIPODOC,
BENFHIJOFECHANAC,
BENFHIJOPARENTESCO,
BENFHIJOOCUPACION,
BENFHIJOGENERO,
BENFHIJO2,
BENFHIJO2APE,
BENFHIJO2APE2,
BENFHIJO2NUMDOCUMENTO,
BENFHIJO2TIPODOC,
BENFHIJO2FECHANAC,
BENFHIJO2PARENTESCO,
BENFHIJO2OCUPACION,
BENFHIJO2GENERO,
BENFHIJO3,
BENFHIJO3APE,
BENFHIJO3APE2,
BENFHIJO3NUMDOCUMENTO,
BENFHIJO3TIPODOC,
BENFHIJO3FECHANAC,
BENFHIJO3PARENTESCO,
BENFHIJO3OCUPACION,
BENFHIJO3GENERO,
BENFHIJO4,
BENFHIJO4APE,
BENFHIJO4APE2,
BENFHIJO4NUMDOCUMENTO,
BENFHIJO4TIPODOC,
BENFHIJO4FECHANAC,
BENFHIJO4PARENTRESCO,
BENFHIJO4OCUPACION,
BENFHIJO4GENERO,
BENFHIJO5,
BENFHIJO5APE,
BENFHIJO5APE2,
BENFHIJO5NUMDOCUMENTO,
BENFHIJO5TIPODOC,
BENFHIJO5FECHANAC,
BENFHIJO5PARENTESCO,
BENFHIJO5OCUPACION,
BENFHIJO5GENERO,
IPDIAL_CODE,
CAMPANA,
FECHA_CARGUE_A_BIGQUERY,
DURATION,
ALERTS,
PLAAUT,
TIPO_DE_PRODUCTO,
NUM_GRABA,
VALOR_CUPO,
NOMBRE_CRM,
APELLIDO_1_CRM,
APELLIDO_2_CRM,
CAMPANA2
)
(
--start of the cleansing SELECT
SELECT DATOS_HARDCODEADOS.Codigo_Campana CAMPANA_PLANTILLA,
TABLA_COD_PLAN,
TABLA_COD_SUBPLAN,
CASE WHEN CAMPANA = 'Plantilla_venta_colsubsidio_cupo_bienvenida' OR CAMPANA = 'Plantilla_venta_colsubsidio_cupo_stock' THEN '945'
ELSE TABLA_CODIGO_PRODUCTO END TABLA_CODIGO_PRODUCTO,
'SPO' CANAL_PLANTILLA,
CASE WHEN TIPO_DOCUMENTO = 'CC' THEN 'C'
ELSE TIPO_DOCUMENTO END TIPO_DOCUMENTO,
DOCUMENTO,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BD_INICIAL.NOMBRE IS NULL THEN REPLACE(CRM_VENTAS.NOMBRE,'ñ','Ñ')
ELSE REPLACE(BD_INICIAL.NOMBRE,'ñ','Ñ') END) NOMBRE,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BD_INICIAL.APELLIDO_1 IS NULL THEN REPLACE(CRM_VENTAS.APELLIDO_1,'ñ','Ñ')
ELSE REPLACE(BD_INICIAL.APELLIDO_1,'ñ','Ñ') END) APELLIDO_1,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BD_INICIAL.APELLIDO_2 IS NULL THEN REPLACE(CRM_VENTAS.APELLIDO_2,'ñ','Ñ')
ELSE REPLACE(BD_INICIAL.APELLIDO_2,'ñ','Ñ') END) APELLIDO_2,
GENERO,
CASE WHEN INSTR(CRM_VENTAS.FECHA_NACIMIENTO,"/") > 0 THEN REPLACE(CAST(PARSE_DATE('%d/%m/%Y',CRM_VENTAS.FECHA_NACIMIENTO) AS STRING),'-','')
WHEN LENGTH(CRM_VENTAS.FECHA_NACIMIENTO) = 8 THEN CRM_VENTAS.FECHA_NACIMIENTO
WHEN LENGTH(CRM_VENTAS.FECHA_NACIMIENTO) > 10 THEN 'FECHA DE CRM ERRADA' -- e.g. 19992-09-04
WHEN CRM_VENTAS.FECHA_NACIMIENTO = "" THEN
CASE WHEN INSTR(BD_INICIAL.FECHA_DE_NACIMIENTO,"/") > 0 THEN
REPLACE(CAST(PARSE_DATE('%d/%m/%Y',BD_INICIAL.FECHA_DE_NACIMIENTO) AS STRING),'-','')
WHEN INSTR(BD_INICIAL.FECHA_DE_NACIMIENTO,"-") > 0 THEN
REPLACE(CAST(PARSE_DATE('%d-%m-%y',CONCAT(SPLIT(BD_INICIAL.FECHA_DE_NACIMIENTO,"-")[OFFSET(0)],
'-',
MES_EN_FORMATO_NUM,
'-',
SPLIT(BD_INICIAL.FECHA_DE_NACIMIENTO,"-")[OFFSET(2)]
))AS STRING),'-','')
WHEN LENGTH(BD_INICIAL.FECHA_DE_NACIMIENTO) = 5 THEN REPLACE(CAST(DATE_ADD('1899-12-30', INTERVAL CAST(BD_INICIAL.FECHA_DE_NACIMIENTO AS INT64) DAY) AS STRING),'-','')
ELSE 'SIN FECHA DE NACIMIENTO'
END END FECHA_NACIMIENTO,
CODIGO_PROFESION,
OCUPACION,
MetLife.depuradorCodCiuReal(COD_CIUDADES.CODREAL,CODREAL_V2) COD_CIUDAD_RESIDENCIA, # CODREAL is the real city code
MetLife.depuradorDeptoReal(COD_CIUDADES.DEPTO,DEPTO_V2) DEPARTAMENTO,
MetLife.depuradorCaracteresEspeciales(
MetLife.depuradorDirVacia( --> sole parameter of the third function
MetLife.depuradorFinalizacionDir( --> parameter 1 of the second function
UPPER(MetLife.depuradorNomDirecciones(CRM_VENTAS.DIRECCION)), --> parameter 1 of the first function
UPPER(BD_INICIAL.DIR_RES), --> parameter 2 of the first function
DESCIU), --> parameter 3 of the first function
DESCIU) --> parameter 2 of the second function
) DIRECCION,
--> there is a scenario with no match on id_call, in which case the substring cannot be taken
MetLife.depuradorEligeTelMasPositivo(TEL_NUMBER, CELULAR, TEL_RESIDENCIA, TEL_OFICINA, CELULAR2, WOLKVOX_PHONECALL) TEL_RESIDENCIA, --> the 9 prefix is excluded
INDCORE_PLANTILLA,
-- >> BENEFICIARY 1 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF1NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF1NOM) END) BENF1NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF1NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF1APE) = 'ERROR' THEN ''
ELSE UPPER(BENF1APE) END END) BENF1APE,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF1NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF1APE2) = 'ERROR' THEN ''
ELSE UPPER(BENF1APE2) END END) BENF1APE2,
CASE WHEN BENF1PARENTESCO = '' OR MetLife.depuradorNombres(BENF1NOM) = 'ERROR' THEN '0'
WHEN BENF1PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF1PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF1PARENTESCO END BENF1PARENTESCO,
CASE WHEN BENF1PORCENTAJE = '' OR MetLife.depuradorNombres(BENF1NOM) = 'ERROR' THEN '0'
ELSE BENF1PORCENTAJE END BENF1PORCENTAJE,
-- >> BENEFICIARY 2 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF2NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF2NOM) END) BENF2NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF2NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF2APE) = 'ERROR' THEN ''
ELSE UPPER(BENF2APE) END END) BENF2APE,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF2NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF2APE2) = 'ERROR' THEN ''
ELSE UPPER(BENF2APE2) END END) BENF2APE2,
CASE WHEN BENF2PARENTESCO = '' OR MetLife.depuradorNombres(BENF2NOM) = 'ERROR' THEN '0'
WHEN BENF2PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF2PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF2PARENTESCO END BENF2PARENTESCO,
CASE WHEN BENF2PORCENTAJE = '' OR MetLife.depuradorNombres(BENF2NOM) = 'ERROR' THEN '0'
ELSE BENF2PORCENTAJE END BENF2PORCENTAJE,
-- >> BENEFICIARY 3 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF3NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF3NOM) END) BENF3NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF3NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF3APE) = 'ERROR' THEN ''
ELSE UPPER(BENF3APE) END END) BENF3APE,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF3NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF3APE2) = 'ERROR' THEN ''
ELSE UPPER(BENF3APE2) END END) BENF3APE2,
CASE WHEN BENF3PARENTESCO = '' OR MetLife.depuradorNombres(BENF3NOM) = 'ERROR' THEN '0'
WHEN BENF3PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF3PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF3PARENTESCO END BENF3PARENTESCO,
CASE WHEN BENF3PORCENTAJE = '' OR MetLife.depuradorNombres(BENF3NOM) = 'ERROR' THEN '0'
ELSE BENF3PORCENTAJE END BENF3PORCENTAJE,
-- >> BENEFICIARY 4 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF4NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF4NOM) END) BENF4NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF4NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF4APE) = 'ERROR' THEN ''
ELSE UPPER(BENF4APE) END END) BENF4APE,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF4NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF4APE2) = 'ERROR' THEN ''
ELSE UPPER(BENF4APE2) END END) BENF4APE2,
CASE WHEN BENF4PARENTESCO = '' OR MetLife.depuradorNombres(BENF4NOM) = 'ERROR' THEN '0'
WHEN BENF4PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF4PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF4PARENTESCO END BENF4PARENTESCO,
CASE WHEN BENF4PORCENTAJE = '' OR MetLife.depuradorNombres(BENF4NOM) = 'ERROR' THEN '0'
ELSE BENF4PORCENTAJE END BENF4PORCENTAJE,
-- >> BENEFICIARY 5 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF5NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF5NOM) END) BENF5NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF5NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF5APE) = 'ERROR' THEN ''
ELSE UPPER(BENF5APE) END END) BENF5APE,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF5NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF5APE2) = 'ERROR' THEN ''
ELSE UPPER(BENF5APE2) END END) BENF5APE2,
CASE WHEN BENF5PARENTESCO = '' OR MetLife.depuradorNombres(BENF5NOM) = 'ERROR' THEN '0'
WHEN BENF5PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF5PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF5PARENTESCO END BENF5PARENTESCO,
CASE WHEN BENF5PORCENTAJE = '' OR MetLife.depuradorNombres(BENF5NOM) = 'ERROR' THEN '0'
ELSE BENF5PORCENTAJE END BENF5PORCENTAJE,
-- >> BENEFICIARY 6 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF6NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF6NOM) END) BENF6NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF6NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF6_APEL1) = 'ERROR' THEN ''
ELSE UPPER(BENF6_APEL1) END END) BENF6_APEL1,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF6NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF6_APEL2) = 'ERROR' THEN ''
ELSE UPPER(BENF6_APEL2) END END) BENF6_APEL2,
CASE WHEN BENF6_PARENTESCO = '' OR MetLife.depuradorNombres(BENF6NOM) = 'ERROR' THEN '0'
WHEN BENF6_PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF6_PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF6_PARENTESCO END BENF6_PARENTESCO,
CASE WHEN BENF6_PORCENTAJE = '' OR MetLife.depuradorNombres(BENF6NOM) = 'ERROR' THEN '0'
ELSE BENF6_PORCENTAJE END BENF6_PORCENTAJE,
-- >> BENEFICIARY 7 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF7NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF7NOM) END) BENF7NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF7NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF7_APELL1) = 'ERROR' THEN ''
ELSE UPPER(BENF7_APELL1) END END) BENF7_APELL1,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF7NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF7_APELL2) = 'ERROR' THEN ''
ELSE UPPER(BENF7_APELL2) END END) BENF7_APELL2,
CASE WHEN BENF7_PARENTESCO = '' OR MetLife.depuradorNombres(BENF7NOM) = 'ERROR' THEN '0'
WHEN BENF7_PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF7_PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF7_PARENTESCO END BENF7_PARENTESCO,
CASE WHEN BENF7_PORCENTAJE = '' OR MetLife.depuradorNombres(BENF7NOM) = 'ERROR' THEN '0'
ELSE BENF7_PORCENTAJE END BENF7_PORCENTAJE,
-- >> BENEFICIARY 8 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF8NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF8NOM) END) BENF8NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF8NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF8_APELL1) = 'ERROR' THEN ''
ELSE UPPER(BENF8_APELL1) END END) BENF8_APELL1,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF8NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF8_APELL2) = 'ERROR' THEN ''
ELSE UPPER(BENF8_APELL2) END END) BENF8_APELL2,
CASE WHEN BENF8_PARENTESCO = '' OR MetLife.depuradorNombres(BENF8NOM) = 'ERROR' THEN '0'
WHEN BENF8_PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF8_PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF8_PARENTESCO END BENF8_PARENTESCO,
CASE WHEN BENF8_PORCENTAJE = '' OR MetLife.depuradorNombres(BENF8NOM) = 'ERROR' THEN '0'
ELSE BENF8_PORCENTAJE END BENF8_PORCENTAJE,
-- >> BENEFICIARY 9 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF9NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF9NOM) END) BENF9NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF9NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF9_APELL1) = 'ERROR' THEN ''
ELSE UPPER(BENF9_APELL1) END END) BENF9_APELL1,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF9NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF9_APELL2) = 'ERROR' THEN ''
ELSE UPPER(BENF9_APELL2) END END) BENF9_APELL2,
CASE WHEN BENF9_PARENTESCO = '' OR MetLife.depuradorNombres(BENF9NOM) = 'ERROR' THEN '0'
WHEN BENF9_PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF9_PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF9_PARENTESCO END BENF9_PARENTESCO,
CASE WHEN BENF9_PORCENTAJE = '' OR MetLife.depuradorNombres(BENF9NOM) = 'ERROR' THEN '0'
ELSE BENF9_PORCENTAJE END BENF9_PORCENTAJE,
-- >> BENEFICIARY 10 << --
MetLife.depuradorCaracteresEspeciales(CASE WHEN MetLife.depuradorNombres(BENF10NOM) = 'ERROR' THEN ''
ELSE UPPER(BENF10NOM) END) BENF10NOM,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF10NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF10_APELL1) = 'ERROR' THEN ''
ELSE UPPER(BENF10_APELL1) END END) BENF10_APELL1,
MetLife.depuradorCaracteresEspeciales(CASE WHEN BENF10NOM = '' THEN ''
ELSE CASE WHEN MetLife.depuradorNombres(BENF10_APELL2) = 'ERROR' THEN ''
ELSE UPPER(BENF10_APELL2) END END) BENF10_APELL2,
CASE WHEN BENF10_PARENTESCO = '' OR MetLife.depuradorNombres(BENF10NOM) = 'ERROR' THEN '0'
WHEN BENF10_PARENTESCO = '1' THEN 'ERROR, SE DIGITO 1'
WHEN LENGTH(BENF10_PARENTESCO) > 2 THEN 'ERROR DIGITACION'
ELSE BENF10_PARENTESCO END BENF10_PARENTESCO,
CASE WHEN BENF10_PORCENTAJE = '' OR MetLife.depuradorNombres(BENF10NOM) = 'ERROR' THEN '0'
ELSE BENF10_PORCENTAJE END BENF10_PORCENTAJE,
'M' FRECUE_PLANTILLA,
CASE WHEN LENGTH(CRM_VENTAS.OBSSOM_PLANTILLA) = 0 THEN 'COLOMBIANO'
WHEN LENGTH(CRM_VENTAS.OBSSOM_PLANTILLA) = 10 THEN 'COLOMBIANO'
WHEN LENGTH(CRM_VENTAS.OBSSOM_PLANTILLA) = 6 THEN CRM_VENTAS.OBSSOM_PLANTILLA
WHEN LENGTH(CRM_VENTAS.OBSSOM_PLANTILLA) = 4 THEN CONCAT(CRM_VENTAS.OBSSOM_PLANTILLA,"00")
WHEN LENGTH(CRM_VENTAS.OBSSOM_PLANTILLA) = 2 THEN CONCAT(CRM_VENTAS.OBSSOM_PLANTILLA,".000")
END OBSSOM_PLANTILLA,
'CBPS' CALLCENTER_PLANTILLA,
CAST(CAST(PARSE_DATETIME('%d/%m/%Y %H:%M',WOLKVOX_FECHA_CREACION) AS DATE) AS STRING) WOLKVOX_FECHA_CREACION,
CASE WHEN CAMPANA = 'Plantilla_ventas_mix'
OR CAMPANA = 'Plantilla_ventas_mix_migracion_vida'
OR CAMPANA = 'Plantilla_ventas_compra_recurrente'
OR CAMPANA = 'Plantilla_ventas_mix_migracion_vida_stock'
THEN CONVENIO_PLANTILLA
ELSE DATOS_HARDCODEADOS.CONVENIO END CONVENIO_PLANTILLA,
ENTIFI_PLANTILLA,
CASE WHEN LENGTH(CRM_VENTAS.REFERENCIA) != 10 OR CRM_VENTAS.REFERENCIA IS NULL THEN
CASE WHEN LENGTH(BD_INICIAL.REFERENCIA) != 10 OR BD_INICIAL.REFERENCIA IS NULL THEN 'REVISAR NUM REFERENCIA'
ELSE BD_INICIAL.REFERENCIA END
ELSE CRM_VENTAS.REFERENCIA END REFERENCIA,
DIGCHEQ_PLANTILLA,
DATOS_HARDCODEADOS.FEVETA_PLANTILLA,
FORPAG_PLANTILLA,
CASE WHEN MetLife.depuradorMail(MAIL) = 'ERROR' THEN ''
ELSE MetLife.depuradorMail(MAIL) END MAIL,
CASE WHEN MetLife.depuradorMail(MAIL) = 'ERROR' THEN 'F'
WHEN MetLife.depuradorMail(MAIL) != 'ERROR' AND LENGTH(MAIL) > 0 THEN 'E'
ELSE 'F' END TIPO_ENVIO,
'S' INDACD_PLANTILLA,
AGENTE_DOCUMENTO AGENTE_DOCUMENTO,
AGENTE AGENTE,
SUBSTR(WOLKVOX_PHONECALL,2) WOLKVOX_PHONECALL,
TABLA_PRIMA_MENSUAL,
CRM_VENTAS.WOLKVOX_IDCALL,
FECHA_CARGA,
CASE WHEN LENGTH(CELULAR) = 10 THEN CELULAR
WHEN LENGTH(CELULAR) = 11 THEN SUBSTR(CELULAR,2)
ELSE MetLife.depuradorEligeTelMasPositivo(TEL_NUMBER, CELULAR, TEL_RESIDENCIA, TEL_OFICINA, CELULAR2, WOLKVOX_PHONECALL)
END CELULAR,
CASE WHEN LENGTH(TEL_OFICINA) = 10 THEN TEL_OFICINA
WHEN LENGTH(TEL_OFICINA) = 11 THEN SUBSTR(TEL_OFICINA,2)
ELSE MetLife.depuradorEligeTelMasPositivo(TEL_NUMBER, CELULAR, TEL_RESIDENCIA, TEL_OFICINA, CELULAR2, WOLKVOX_PHONECALL)
END TEL_OFICINA,
CASE WHEN LENGTH(CELULAR2) = 10 THEN CELULAR2
WHEN LENGTH(CELULAR2) = 11 THEN SUBSTR(CELULAR2,2)
ELSE MetLife.depuradorEligeTelMasPositivo(TEL_NUMBER, CELULAR, TEL_RESIDENCIA, TEL_OFICINA, CELULAR2, WOLKVOX_PHONECALL)
END CELULAR2,
WOLKVOX_TIPIFY_DESC,
WOLKVOX_TIPIFY_CODE,
TIPIFICACION_COD_GESTION,
'N' COBTOT,
DATOS_HARDCODEADOS.MESESG MESESG,
DOCUMENTO NUM_GRA,
ESTADO_CIVIL,
COMENTARIOS,
CIUDAD_LAB,
TARJETA,
'' NUMCUO,
'' FMUNCUE,
'0' FECHA_DE_VENCIMIENTO,
WOLKVOX_FECHA_MODIFICACION,
SEGMENTO,
TIPO_DE_BASE,
--> The occupation and kinship values are swapped because the CRM returns the info this way
MetLife.depuradorCaracteresEspeciales(UPPER(BENFADICIONAL)) BENFADICIONAL,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFADICIONALAPE)) BENFADICIONALAPE,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFADICIONALAPE2)) BENFADICIONALAPE2,
CASE WHEN BENFADICIONALNUMDOC = "" THEN '0'
ELSE BENFADICIONALNUMDOC END BENFADICIONALNUMDOC,
BENFADICIONALTIPODOC,
CASE WHEN BENFADICIONALFECHANAC = "" THEN '0'
ELSE CASE WHEN INSTR(BENFADICIONALFECHANAC,"/") > 0 THEN REPLACE(CAST(PARSE_DATE('%d/%m/%Y',BENFADICIONALFECHANAC) AS STRING),"-","")
WHEN LENGTH(BENFADICIONALFECHANAC) = 8 THEN BENFADICIONALFECHANAC
ELSE BENFADICIONALFECHANAC --> because it came with a -
END END BENFADICIONALFECHANAC,
CASE WHEN BENFADICIONALOCUPACION = "" then '0'
ELSE BENFADICIONALOCUPACION END BENFADICIONALPARENTESCO,
CASE WHEN BENFADICIONALPARENTESCO = "" THEN "0"
ELSE BENFADICIONALPARENTESCO END BENFADICIONALOCUPACION,
BENFADICIONALGENERO,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFADICIONAL2)) BENFADICIONAL2,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFADICIONAL2APE)) BENFADICIONAL2APE,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFADICIONAL2APE2)) BENFADICIONAL2APE2,
CASE WHEN BENFADICIONAL2NUMDOC = "" THEN '0'
ELSE BENFADICIONAL2NUMDOC END BENFADICIONAL2NUMDOC,
BENFADICIONAL2TIPODOC,
CASE WHEN BENFADICIONAL2FECHANAC = "" THEN '0'
ELSE CASE WHEN INSTR(BENFADICIONAL2FECHANAC,"/") > 0 THEN REPLACE(CAST(PARSE_DATE('%d/%m/%Y',BENFADICIONAL2FECHANAC) AS STRING),"-","")
WHEN LENGTH(BENFADICIONAL2FECHANAC) = 8 THEN BENFADICIONAL2FECHANAC
ELSE BENFADICIONAL2FECHANAC --> because it came with a -
END END BENFADICIONAL2FECHANAC,
CASE WHEN BENFADICIONAL2OCUPACION = "" THEN '0'
ELSE BENFADICIONAL2OCUPACION END BENFADICIONAL2PARENTESCO,
CASE WHEN BENFADICIONAL2PARENTESCO = "" THEN '0'
ELSE BENFADICIONAL2PARENTESCO END BENFADICIONAL2OCUPACION,
BENFADICIONAL2GENERO,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFADICIONAL3)) BENFADICIONAL3,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFADICIONAL3APE)) BENFADICIONAL3APE,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFADICIONAL3APE2)) BENFADICIONAL3APE2,
CASE WHEN BENFADICIONAL3NUMDOC = "" THEN '0'
ELSE BENFADICIONAL3NUMDOC END BENFADICIONAL3NUMDOC,
BENFADICIONAL3TIPODOC,
CASE WHEN BENFADICIONAL3FECHANAC = "" THEN '0'
ELSE CASE WHEN INSTR(BENFADICIONAL3FECHANAC,"/") > 0 THEN REPLACE(CAST(PARSE_DATE('%d/%m/%Y',BENFADICIONAL3FECHANAC) AS STRING),"-","")
WHEN LENGTH(BENFADICIONAL3FECHANAC) = 8 THEN BENFADICIONAL3FECHANAC
ELSE BENFADICIONAL3FECHANAC --> because it came with a -
END END BENFADICIONAL3FECHANAC,
CASE WHEN BENFADICIONAL3OCUPACION = "" THEN '0'
ELSE BENFADICIONAL3OCUPACION END BENFADICIONAL3PARENTESCO,
CASE WHEN BENFADICIONAL3PARENTESCO = "" THEN '0'
ELSE BENFADICIONAL3PARENTESCO END BENFADICIONAL3OCUPACION,
BENFADICIONAL3GENERO,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO)) BENFHIJO,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJOAPE)) BENFHIJOAPE,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJOAPE2)) BENFHIJOAPE2,
CASE WHEN BENFHIJONUMDOCUMENTO = "" THEN '0'
ELSE BENFHIJONUMDOCUMENTO END BENFHIJONUMDOCUMENTO,
BENFHIJOTIPODOC,
CASE WHEN BENFHIJOFECHANAC = "" THEN '0'
ELSE CASE WHEN INSTR(BENFHIJOFECHANAC,"/") > 0 THEN REPLACE(CAST(PARSE_DATE('%d/%m/%Y',BENFHIJOFECHANAC) AS STRING),"-","")
WHEN LENGTH(BENFHIJOFECHANAC) = 8 THEN BENFHIJOFECHANAC
ELSE BENFHIJOFECHANAC --> because it came with a -
END END BENFHIJOFECHANAC,
CASE WHEN BENFHIJOOCUPACION = "" THEN '0'
ELSE BENFHIJOOCUPACION END BENFHIJOPARENTESCO,
CASE WHEN BENFHIJOPARENTESCO = "" THEN '0'
ELSE BENFHIJOPARENTESCO END BENFHIJOOCUPACION,
BENFHIJOGENERO,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO2)) BENFHIJO2,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO2APE)) BENFHIJO2APE,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO2APE2)) BENFHIJO2APE2,
CASE WHEN BENFHIJO2NUMDOCUMENTO = "" THEN '0'
ELSE BENFHIJO2NUMDOCUMENTO END BENFHIJO2NUMDOCUMENTO,
BENFHIJO2TIPODOC,
CASE WHEN BENFHIJO2FECHANAC = "" THEN '0'
ELSE CASE WHEN INSTR(BENFHIJO2FECHANAC,"/") > 0 THEN REPLACE(CAST(PARSE_DATE('%d/%m/%Y',BENFHIJO2FECHANAC) AS STRING),"-","")
WHEN LENGTH(BENFHIJO2FECHANAC) = 8 THEN BENFHIJO2FECHANAC
ELSE BENFHIJO2FECHANAC --> because it came with a -
END END BENFHIJO2FECHANAC,
CASE WHEN BENFHIJO2OCUPACION = "" THEN '0'
ELSE BENFHIJO2OCUPACION END BENFHIJO2PARENTESCO,
CASE WHEN BENFHIJO2PARENTESCO = "" THEN '0'
ELSE BENFHIJO2PARENTESCO END BENFHIJO2OCUPACION,
BENFHIJO2GENERO,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO3)) BENFHIJO3,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO3APE)) BENFHIJO3APE,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO3APE2)) BENFHIJO3APE2,
CASE WHEN BENFHIJO3NUMDOCUMENTO = "" THEN '0'
ELSE BENFHIJO3NUMDOCUMENTO END BENFHIJO3NUMDOCUMENTO,
BENFHIJO3TIPODOC,
CASE WHEN BENFHIJO3FECHANAC = "" THEN '0'
ELSE CASE WHEN INSTR(BENFHIJO3FECHANAC,"/") > 0 THEN REPLACE(CAST(PARSE_DATE('%d/%m/%Y',BENFHIJO3FECHANAC) AS STRING),"-","")
WHEN LENGTH(BENFHIJO3FECHANAC) = 8 THEN BENFHIJO3FECHANAC
ELSE BENFHIJO3FECHANAC --> because it came with a -
END END BENFHIJO3FECHANAC,
CASE WHEN BENFHIJO3OCUPACION = "" THEN '0'
ELSE BENFHIJO3OCUPACION END BENFHIJO3PARENTESCO,
CASE WHEN BENFHIJO3PARENTESCO = "" THEN '0'
ELSE BENFHIJO3PARENTESCO END BENFHIJO3OCUPACION,
BENFHIJO3GENERO,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO4)) BENFHIJO4,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO4APE)) BENFHIJO4APE,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO4APE2)) BENFHIJO4APE2,
CASE WHEN BENFHIJO4NUMDOCUMENTO = "" THEN '0'
ELSE BENFHIJO4NUMDOCUMENTO END BENFHIJO4NUMDOCUMENTO,
BENFHIJO4TIPODOC,
CASE WHEN BENFHIJO4FECHANAC = "" THEN '0'
ELSE CASE WHEN INSTR(BENFHIJO4FECHANAC,"/") > 0 THEN REPLACE(CAST(PARSE_DATE('%d/%m/%Y',BENFHIJO4FECHANAC) AS STRING),"-","")
WHEN LENGTH(BENFHIJO4FECHANAC) = 8 THEN BENFHIJO4FECHANAC
ELSE BENFHIJO4FECHANAC --> because it came with a -
END END BENFHIJO4FECHANAC,
CASE WHEN BENFHIJO4OCUPACION = "" THEN '0'
ELSE BENFHIJO4OCUPACION END BENFHIJO4PARENTRESCO,
CASE WHEN BENFHIJO4PARENTRESCO = "" THEN '0'
ELSE BENFHIJO4PARENTRESCO END BENFHIJO4OCUPACION,
BENFHIJO4GENERO,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO5)) BENFHIJO5,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO5APE)) BENFHIJO5APE,
MetLife.depuradorCaracteresEspeciales(UPPER(BENFHIJO5APE2)) BENFHIJO5APE2,
CASE WHEN BENFHIJO5NUMDOCUMENTO = "" THEN '0'
ELSE BENFHIJO5NUMDOCUMENTO END BENFHIJO5NUMDOCUMENTO,
BENFHIJO5TIPODOC,
CASE WHEN BENFHIJO5FECHANAC = "" THEN '0'
ELSE CASE WHEN INSTR(BENFHIJO5FECHANAC,"/") > 0 THEN REPLACE(CAST(PARSE_DATE('%d/%m/%Y',BENFHIJO5FECHANAC) AS STRING),"-","")
WHEN LENGTH(BENFHIJO5FECHANAC) = 8 THEN BENFHIJO5FECHANAC
ELSE BENFHIJO5FECHANAC --> because it came with a -
END END BENFHIJO5FECHANAC,
CASE WHEN BENFHIJO5OCUPACION = "" THEN '0'
ELSE BENFHIJO5OCUPACION END BENFHIJO5PARENTESCO,
CASE WHEN BENFHIJO5PARENTESCO = "" THEN '0'
ELSE BENFHIJO5PARENTESCO END BENFHIJO5OCUPACION,
BENFHIJO5GENERO,
IPDIAL_CODE,
CAMPANA,
FECHA_CARGUE_A_BIGQUERY,
-->> TMO field <<--
CASE WHEN TABLA_TMO.DURATION IS NULL THEN 'ID_CALL NOT FOUND'
ELSE TABLA_TMO.DURATION END DURATION,
-->> Alerts field <<--
CASE WHEN SAFE_CAST(BENF1PORCENTAJE AS INT64) IS NULL AND
SAFE_CAST(BENF2PORCENTAJE AS INT64) IS NULL AND
SAFE_CAST(BENF3PORCENTAJE AS INT64) IS NULL AND
SAFE_CAST(BENF4PORCENTAJE AS INT64) IS NULL AND
SAFE_CAST(BENF5PORCENTAJE AS INT64) IS NULL AND
SAFE_CAST(BENF6_PORCENTAJE AS INT64) IS NULL AND
SAFE_CAST(BENF7_PORCENTAJE AS INT64) IS NULL AND
SAFE_CAST(BENF8_PORCENTAJE AS INT64) IS NULL AND
SAFE_CAST(BENF9_PORCENTAJE AS INT64) IS NULL AND
SAFE_CAST(BENF10_PORCENTAJE AS INT64) IS NULL THEN 'NOT ALERTS'
ELSE
CASE WHEN
--> IFNULL is added so the sum can operate over the columns that do carry data
IFNULL(SAFE_CAST(BENF1PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF2PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF3PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF4PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF5PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF6_PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF7_PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF8_PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF9_PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF10_PORCENTAJE AS INT64),0)
= 0 THEN 'NOT ALERTS'
WHEN
--> IFNULL is added so the sum can operate over the columns that do carry data
IFNULL(SAFE_CAST(BENF1PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF2PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF3PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF4PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF5PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF6_PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF7_PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF8_PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF9_PORCENTAJE AS INT64),0)+
IFNULL(SAFE_CAST(BENF10_PORCENTAJE AS INT64),0)
<> 100 THEN 'CHECK PART% BENEF'
ELSE 'NOT ALERTS'
END END ALERTS,
PLAAUT,
DATOS_HARDCODEADOS.TIPO_DE_PRODUCTO,
NUM_GRABA,
VALOR_CUPO,
MetLife.depuradorCaracteresEspeciales(REPLACE(CRM_VENTAS.NOMBRE,'ñ','Ñ')) NOMBRE_CRM,
MetLife.depuradorCaracteresEspeciales(REPLACE(CRM_VENTAS.APELLIDO_1,'ñ','Ñ')) APELLIDO_1_CRM,
MetLife.depuradorCaracteresEspeciales(REPLACE(CRM_VENTAS.APELLIDO_2,'ñ','Ñ')) APELLIDO_2_CRM,
CAMPANA2
FROM (SELECT *
-- >> an EXCEPT is done to pull out wolkvox_idcall so it can be processed in the next field of this select << --
EXCEPT(WOLKVOX_IDCALL),
-- >> the apostrophe generated by the wolkvox crm is corrected << --
CASE WHEN SUBSTR(WOLKVOX_IDCALL,1,1) = "'" THEN SUBSTR(WOLKVOX_IDCALL,2)
WHEN SUBSTR(WOLKVOX_IDCALL,1,1) = "*" THEN SUBSTR(WOLKVOX_IDCALL,2)
ELSE WOLKVOX_IDCALL END WOLKVOX_IDCALL,
-- >>> A CASE WHEN builds a date field to be used in the WHERE of the outer query <<< --
-- >>> on 09-06-2021 the restriction was removed so that sales from any date can be loaded <<< --
CASE WHEN LENGTH(SPLIT(WOLKVOX_FECHA_CREACION," ")[OFFSET(0)]) = 10
THEN CONCAT(SUBSTR(WOLKVOX_FECHA_CREACION,7,4),'-',SUBSTR(WOLKVOX_FECHA_CREACION,4,2),'-',SUBSTR(WOLKVOX_FECHA_CREACION,1,2))
WHEN LENGTH(SPLIT(WOLKVOX_FECHA_CREACION," ")[OFFSET(0)]) = 9
THEN CONCAT(SUBSTR(WOLKVOX_FECHA_CREACION,6,4),'-',SUBSTR(WOLKVOX_FECHA_CREACION,3,2),'-',SUBSTR(WOLKVOX_FECHA_CREACION,1,1))
END WOLKVOX_FECHA_CREACION_PARA_FILTRO
FROM
-- >> duplicates are removed so that unique sales are delivered << --
(SELECT *
FROM (SELECT *,
ROW_NUMBER() OVER (PARTITION BY WOLKVOX_IDCALL) X
FROM `contento-bi.MetLife.bases_ventas_crm_wolkvox` )
WHERE X = 1)
) CRM_VENTAS
-- >>> TMO and id_agent are extracted <<< --
-- >>> A SELECT is done inside the JOIN to save memory <<< --
LEFT JOIN (SELECT ID_CALL,
DURATION,
TEL_NUMBER,
ID_AGENT
FROM `contento-bi.MetLife.exportable-duraciones-tabla`) TABLA_TMO
ON CRM_VENTAS.WOLKVOX_IDCALL = TABLA_TMO.ID_CALL
-- >>> ID_AGENT is taken to extract the agent's name and ID number <<< --
-- LEFT JOIN (SELECT ID_COLABORADOR,
-- ID_GRABADOR,
-- NOMBRE_COLABORADOR,
-- ID_CLIENTE
-- FROM `contento-bi.Contento.Jerarquias_Metas`) JERARQ
-- ON JERARQ.ID_GRABADOR = TABLA_TMO.ID_AGENT AND
-- JERARQ.ID_CLIENTE = '42' --> 42 corresponds to Metlife <--
-- >>> First and last names are extracted from the initial databases <<< --
LEFT JOIN ( SELECT NUM_DOCUMENTO,
NOMBRE,
APELLIDO_1,
APELLIDO_2,
DIR_RES,
MUNRESIDENCIA,
FECHA_DE_NACIMIENTO,
REFERENCIA,
MES_EN_FORMATO_NUM,
VALOR_CUPO,
DEPAR_RESIDENCIA,
CAMPANA2
FROM
(SELECT NUM_DOCUMENTO,
NOMBRE,
APELLIDO_1,
APELLIDO_2,
DIR_RES,
MUNRESIDENCIA,
REFERENCIA,
VALOR_CUPO,
CAMPANA2,
DEPAR_RESIDENCIA,
--> below, the month names are translated, to be used in the scenario where the initial base
-- brings dates in dd-mmm-yy format and, additionally, the birth date did not come from the CRM sales db
CASE WHEN INSTR(FECHA_DE_NACIMIENTO,"-") > 0 THEN
CASE WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'ene' then '01'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'feb' then '02'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'mar' then '03'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'abr' then '04'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'may' then '05'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'jun' then '06'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'jul' then '07'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'ago' then '08'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'sep' then '09'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'oct' then '10'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'nov' then '11'
WHEN SUBSTR(FECHA_DE_NACIMIENTO, INSTR(FECHA_DE_NACIMIENTO,"-")+1,3) = 'dic' then '12'
ELSE 'fecha no tiene formato dd-mmm-yy'
END END MES_EN_FORMATO_NUM,
FECHA_DE_NACIMIENTO,
--> TO GUARANTEE THE MATCH, DATA IS TAKEN FROM THE INITIAL DB AS UNIQUE RECORDS,
-- AND IF THEY DO NOT MATCH, THE NULLS ARE BROUGHT IN ANYWAY
-- the nulls correspond to clients that arrive in crm sales but are not in the initial base
ROW_NUMBER() OVER(PARTITION BY NUM_DOCUMENTO ORDER BY FECHA_CARGUE_A_BIGQUERY DESC) ROW
FROM `contento-bi.MetLife.bases_iniciales`)
WHERE ROW = 1 OR ROW IS NULL
) BD_INICIAL
ON BD_INICIAL.NUM_DOCUMENTO = CRM_VENTAS.DOCUMENTO
-- >>> Extracts the campaign types based on the campaign names <<< --
LEFT JOIN `contento-bi.MetLife.base_datos_hardcodeados` DATOS_HARDCODEADOS
ON DATOS_HARDCODEADOS.Nombre_Campana = CRM_VENTAS.CAMPANA
-- >>> To insert unique records <<< --
LEFT JOIN (SELECT WOLKVOX_IDCALL
FROM `contento-bi.MetLife.descargable-plantilla-ventas-inspector`) DESCARGABLE
ON DESCARGABLE.WOLKVOX_IDCALL = CRM_VENTAS.WOLKVOX_IDCALL
-- >>> this extracts the city descriptions based on the crm codciu in case there is no address <<< --
LEFT JOIN (SELECT CODREAL,
DEPTO,
LLAVE_CODCIUD_CODDPTO,
DESCIU
FROM `contento-bi.MetLife.codigos_ciudades_string_only`) COD_CIUDADES
--IFNULL is added because the concatenation breaks when the initial db has no data
ON COD_CIUDADES.LLAVE_CODCIUD_CODDPTO = CONCAT(CRM_VENTAS.COD_CIUDAD_RESIDENCIA,IFNULL(BD_INICIAL.DEPAR_RESIDENCIA,""))
-- >>> The real city code is brought in, based on the concatenation of city and department from the initial bases
-- >>> This is done for the scenario where the result of the previous join is null.
LEFT JOIN (SELECT CODREAL CODREAL_V2,
DEPTO DEPTO_V2,
LLAVE_CODCIUD_CODDPTO
FROM `contento-bi.MetLife.codigos_ciudades_string_only`) COD_CIUDADES_V2
ON COD_CIUDADES_V2.LLAVE_CODCIUD_CODDPTO = CONCAT(IFNULL(BD_INICIAL.MUNRESIDENCIA,""),IFNULL(BD_INICIAL.DEPAR_RESIDENCIA,""))
-- >>> Development contract: information from the last 4 calendar days (counting today) is filtered, and the Null check is used to append only unique records <<< --
WHERE DESCARGABLE.WOLKVOX_IDCALL IS NULL
)
'''
    return importedQRY
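# Minimal usage sketch (an assumption added for illustration; the original file only builds
# the query string): the INSERT returned by depuratorQRY() can be executed with the
# google-cloud-bigquery client, assuming credentials are configured. The helper name is hypothetical.
def run_depuratorQRY():
    from google.cloud import bigquery
    client = bigquery.Client(project="contento-bi")  # project name taken from the query above
    client.query(depuratorQRY()).result()  # blocks until the insert job finishes
 |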
# -*- coding: utf-8 -*-
__author__ = 'yesdauren'
import urllib.request
import re
from shutil import copyfile
import time
import datetime
from datetime import date
from datetime import datetime
import os.path
import zipfile
import xlrd
from xlrd import open_workbook
import sys
import io
import csv
import logging
from sys import argv
# from parsers import settings
dir_path = os.path.dirname(os.path.realpath(__file__))
# create logger
logging.basicConfig(format='%(levelname)s \t %(asctime)s \t %(module)s \t %(message)s', level=logging.INFO,
filename=dir_path + "/logs/load_list.log")
host = argv[1]
username = argv[2]
password = argv[3]
database = argv[4]
if password == 'nopass':
password = ''
import pymysql.cursors
# Connect to the database
connection = pymysql.connect(host=host,
user=username,
password=password,
db=database,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor,
local_infile=True)
dirs = []
def findFilials():
for filename in os.listdir(dir_path + '/files/stat.gov.kz/legal_entity'):
dirs.append(filename)
dirs.sort(key=lambda x: time.mktime(time.strptime(x, "%d.%m.%y")))
dirs.reverse()
current_dir = dirs[0]
# print(current_dir)
csv_fields = [
'BIN',
'name'
]
with open(dir_path + '/files/stat.gov.kz/legal_entity/'+ current_dir +'/csv/filials.csv', 'w', encoding='UTF-8') as csvfile_write:
csv_writer = csv.DictWriter(csvfile_write, fieldnames=csv_fields,delimiter='\t', quotechar='"', escapechar='\\',quoting=csv.QUOTE_NONNUMERIC, lineterminator='\n')
with io.open(dir_path + '/files/stat.gov.kz/legal_entity/' + current_dir + '/csv/legal_entity.csv', encoding='utf-8') as file:
reader = csv.reader(file, delimiter='\t')
for row in reader:
BIN = row[0]
name = row[1]
if(len(BIN) > 11):
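# editor's note: in a Kazakhstani BIN the 6th digit marks a subdivision
# ('1' = branch/filial), which is presumably what this check relies on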
if(BIN[5] == '1'):
csv_writer.writerow({
'BIN': BIN,
'name': name,
})
copyfile(dir_path + '/files/stat.gov.kz/legal_entity/'+ current_dir +'/csv/filials.csv', "interprises_parsers/tmp/filials.csv")
logging.info(dir_path + '/files/stat.gov.kz/legal_entity/'+ current_dir +'/csv/filials.csv' + " was copied to interprises_parsers/tmp/ folder")
def import_filials_to_db():
try:
with connection.cursor() as cursor:
sqlfile = dir_path + "/filials.sql"
for line in open(sqlfile, encoding='UTF-8'):
if len(line.strip()) == 0: # lines from open() keep their trailing newline, so strip before testing
continue
cursor.execute(line)
result = cursor.fetchone()
connection.commit()
print("filials were imported to db")
except Exception as e:
print("import to db error: %s" % str(e))
finally:
connection.close()
findFilials()
import_filials_to_db()
|
"""
TSurvey WSGI application
"""
from . import db
from . import schemas
from datetime import date
from flask import Flask, request, render_template
from flask.json import jsonify
app = Flask("tsurvey")
app.debug = True
@app.route("/<uuid:token>/", methods=["GET"])
def home(token):
"""
Display a HTML page
"""
return render_template("base.html", token=token)
@app.route("/tokens/<uuid:token>/", methods=["GET", "POST"])
def token(token):
"""
Get a list of questions or post a list of answers
"""
# Fetching the survey based on the Token
try:
token = db.Token.get(db.Token.id == token)
survey = token.survey
except (db.Token.DoesNotExist, db.Token.Invalid) as e:
# If the token is invalid or does not exist, always return a File
# Not Found error.
return jsonify({"message": "The token '{}' is invalid or does not exist".format(token)}), 404
# Handle the request
if request.method == "GET":
# Returns a list of questions for a survey
response = jsonify({
"title": survey.name,
"questions": [q.to_json() for q in survey.questions]
})
response.headers['Accept'] = 'application/json'
return response
elif request.method == "POST":
# Add answers to a survey
try:
token.complete(request.get_json(force=True))
except db.Token.BadAnswers as e:
# Bad answers for that survey
return jsonify({"message": str(e)}), 400
except Exception as e:
# Other errors
return jsonify({"message": str(e)}), 500
return jsonify({"message": "The answers have been added"}), 200
# Requests other than POST or GET are not supported
# Not needed with the method kwargs in the route, but it's better to leave
# it just in case someone changes the route without updating the function
return jsonify({"message": "Method Not Allowed"}), 405
@app.route("/surveys/", methods=["GET", "POST"])
def surveys():
"""
List or create surveys
"""
if request.method == "GET":
# List surveys
# TODO: Add authentication
# Parse the request arguments
page = int(request.args.get("page", 0))
count = int(request.args.get("count", 20))
# Collect all surveys
surveys = db.Survey.select().paginate(page, count)
# Send the Response
return jsonify([s.to_json() for s in surveys])
elif request.method == "POST":
# Add a new survey
# TODO: Add authentication
data = request.get_json()
try:
schemas.validate_survey(data)
except schemas.ValidationError as e:
return jsonify({"message": str(e).split("\n")[0]}), 400
if "expiration_date" in data:
# Quite hackish, but should work
# TODO: make a cleaner version of this
expiration_date = date(*[int(i) for i in data["expiration_date"].split("-")])
else:
expiration_date = None
survey = db.Survey.create(name=data["name"],
questions=data["questions"],
expiration_date=expiration_date)
return jsonify(survey.to_json())
# Requests other than POST or GET are not supported
# Not needed with the method kwargs in the route, but it's better to leave
# it just in case someone changes the route without updating the function
return jsonify({"message": "Method Not Allowed"}), 405
@app.route("/surveys/<uuid:survey_id>/",
methods=["GET", "PUT", "PATCH", "DELETE"])
def survey(survey_id):
"""
Manage one survey
"""
if request.method == "GET":
survey = db.Survey.get(survey_id)
return jsonify(survey.to_json())
elif request.method == "PUT":
# TODO: Implement this
return jsonify({"message": "Not Implemented"}), 501
elif request.method == "PATCH":
# TODO: Implement this
return jsonify({"message": "Not Implemented"}), 501
elif request.method == "DELETE":
# TODO: Implement this
return jsonify({"message": "Not Implemented"}), 501
# Requests other than GET, PUT, PATCH or DELETE are not supported
# Not needed with the method kwargs in the route, but it's better to leave
# it just in case someone changes the route without updating the function
return jsonify({"message": "Method Not Allowed"}), 405
@app.route("/surveys/<uuid:survey_id>/tokens/", methods=["POST"])
def survey_token(survey_id):
"""
Manage tokens for a survey
"""
survey = db.Survey.get(db.Survey.id == survey_id)
if request.method == "POST":
# Add a new token
email = request.args.get("email", None)
token = survey.add_token(email=email)
return jsonify({"token": str(token.id)})
# Requests other than POST are not supported
# Not needed with the method kwargs in the route, but it's better to leave
# it just in case someone changes the route without updating the function
return jsonify({"message": "Method Not Allowed"}), 405
|
from gather_data import *
consumer_key = "XXXXX"
consumer_secret = "XXXXX"
access_token = "XXXXX"
access_token_secret = "XXXXX"
def start_tweets_api():
listener = TweetListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, listener)
print stream
while True:
try:
stream.sample(languages=['en'])
except Exception as ex:
print str(ex)
if __name__ == "__main__":
start_tweets_api()
|
# -*- coding: utf-8 -*-
import sys
import contextlib as _contextlib
@_contextlib.contextmanager
def with_sys_path(dirname):
sys.path.insert(0, dirname)
try:
yield
finally:
sys.path.remove(dirname)
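
# A minimal usage sketch (editor's addition): the temporary directory and the
# module name "demo_module" are made up here purely for illustration.
if __name__ == '__main__':
    import os
    import tempfile
    demo_dir = tempfile.mkdtemp()
    with open(os.path.join(demo_dir, 'demo_module.py'), 'w') as f:
        f.write('VALUE = 42\n')
    with with_sys_path(demo_dir):
        import demo_module  # importable only while demo_dir is on sys.path
    print(demo_module.VALUE)
    assert demo_dir not in sys.path  # the path is removed again on exit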
|
import logging
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from gwhiz.lib.base import BaseController, render
log = logging.getLogger(__name__)
from gwhiz.model import meta
from gwhiz import model
class KeyController(BaseController):
def list(self,id):
key = meta.Session.query(model.Key).get(id)
c.cat = key
c.works = meta.Session.query(model.Work).filter_by(key=key)
c.movements = meta.Session.query(model.Movement).filter_by(key=key)
#c.works = works.filter(model.Work.key.contains(style)).all()
c.info = 'Viewing by Key: %s'%key.name
return render('/catalog/combinedlist.html')
|
import operator # to re-sort the dictionary
class Constraint():
def __init__(self, variables):
self.variables = variables
#abstractmethod
def satisfied(self, assignment, variable): # call sites pass the variable as a second argument
...
# Heuristics
def minimum_remaining_values(unassigned_vars, domains): #next Xi
number_of_var = []
for var in unassigned_vars:
count = 0
for value in domains[var]:
count += 1
number_of_var.append(count)
return unassigned_vars[number_of_var.index(min(number_of_var))]
def degree_heuristic(unassigned_vars, constraints): #next Xi
number_of_constraints = []
for var in unassigned_vars:
count = 0
for constraint in constraints[var]:
for variable in constraint.variables:
if variable in unassigned_vars and variable != var: # count the constraints over the unassigned variables
count += 1
number_of_constraints.append(count)
return unassigned_vars[number_of_constraints.index(max(number_of_constraints))]
def least_constraining_value(var, domains, constraints, unassigned): # chooses the order in which to try the domain values of the given Xi
domain_count = {}
assignment = {}
for value in domains[var]: # for each value of the next_var
count = 0
for constraint in constraints[var]:
for neighbour in constraint.variables:
if neighbour != var: #and neighbour in unassigned:
for n_value in domains[neighbour]:
assignment = {}
assignment[var] = value
assignment[neighbour] = n_value
# check the constraints
for constraint in constraints[var]:
if not constraint.satisfied(assignment, var):
count += 1
domain_count[value] = count
if len(assignment) != 0:
sorted_domain_count= sorted(domain_count.items(), key=operator.itemgetter(1))
result = []
for x in sorted_domain_count:
result.append(x[0])
return result
return domains
def forward_checking(csp, assignment, next_var, unassigned, new_domains):
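# returns False on a dead end, otherwise a dict mapping each variable to the values pruned from its domain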
if not csp.consistent(next_var,assignment):
return False
removed_values = {}
for var in csp.variables:
removed_values[var] = []
new_assignment = assignment.copy()
for constraint in csp.constraints[next_var]: # all the constraints of the next var
for neighbour in constraint.variables: # all the neighbours in those constraints
to_remove = []
to_remove.clear()
if neighbour != next_var and neighbour in unassigned:
for value in new_domains[neighbour]:
new_assignment[neighbour] = value
# check the constraints
if not constraint.satisfied(new_assignment, next_var):
to_remove.append(value)
if len(to_remove)==len(csp.domains[neighbour]):
return False
removed_values[neighbour] = to_remove
# all-different constraint: remove the assigned value from every other variable's domain
for var in csp.variables:
if var != next_var:
if assignment[next_var] not in removed_values[var]:
removed_values[var].append(assignment[next_var])
for v in csp.variables:
if len(new_domains[v])==0:
return False
return removed_values
class CSP():
def __init__(self, variables, domains):
self.variables = variables # variables to be constrained
self.domains = domains # domains of the variables
self.constraints = {} # constraints on the variables
for variable in self.variables:
self.constraints[variable] = []
def add_constraint(self, constraint):
for variable in constraint.variables:
if variable in self.variables:
self.constraints[variable].append(constraint)
else:
print('Warning: ', variable, ' is not defined!')
# checks whether the assignment satisfies all the constraints for the given variable
def consistent(self, variable, assignment):
for constraint in self.constraints[variable]:
if not constraint.satisfied(assignment,variable):
return False
return True
def backtracking_search(self, assignment = {}, local_domains = None):
if local_domains == None:
local_domains = self.domains.copy()
# If true, every variable has been assigned a value from its domain
if len(assignment) == len(self.variables):
return assignment
x = []
for v in self.variables:
if v not in assignment:
x.append(v)
unassigned = x
# fetch the next variable to assign according to a suitable heuristic
'''Switch heuristic here'''
#next_var: V = unassigned[0] # FIFO heuristic
#next_var: V = minimum_remaining_values(unassigned, self.domains) # minimum-remaining-values heuristic
next_var = degree_heuristic(unassigned, self.constraints) # degree heuristic
#self.domains[next_var] = least_constraining_value(next_var, self.domains, self.constraints, unassigned)
removed = {}
for value in local_domains[next_var]:
new_domains = local_domains.copy()
if removed == False:
removed = {}
for var in removed:
for removed_value in removed[var]:
local_domains[var].append(removed_value)
local_assignment = assignment.copy()
local_assignment[next_var] = value # take the first available value
# if the constraints are consistent, proceed with the recursive backtracking
#if self.consistent(next_var, local_assignment):
removed = forward_checking(self, local_assignment, next_var, unassigned, local_domains)
if removed != False:
for var in removed:
for removed_value in removed[var]:
if removed_value in local_domains[var]:
local_domains[var].remove(removed_value)
result = self.backtracking_search(local_assignment, local_domains)
if result is not None: # propagate a successful assignment; otherwise try the next value
return result
return None
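
# A minimal usage sketch (editor's addition): NotEqual is a hypothetical
# Constraint subclass written for this demo and is not part of the module
# above. Note that forward_checking also enforces an implicit all-different
# constraint, so the three variables end up with three distinct colors.
class NotEqual(Constraint):
    def __init__(self, a, b):
        super().__init__([a, b])
        self.a, self.b = a, b
    def satisfied(self, assignment, variable=None):
        # a constraint over a partially assigned pair is trivially satisfied
        if self.a not in assignment or self.b not in assignment:
            return True
        return assignment[self.a] != assignment[self.b]

if __name__ == '__main__':
    variables = ['A', 'B', 'C']
    domains = {v: ['red', 'green', 'blue'] for v in variables}
    csp = CSP(variables, domains)
    csp.add_constraint(NotEqual('A', 'B'))
    csp.add_constraint(NotEqual('B', 'C'))
    print(csp.backtracking_search())  # e.g. {'B': 'red', 'A': 'green', 'C': 'blue'}
|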
import json
import falcon
class JSONResource(object):
def on_get(self, request, response):
response.body = json.dumps({'message': 'Hello, world!'})
app = falcon.API()
app.add_route("/json", JSONResource())
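
# To try the app (editor's sketch): any WSGI server can host it; with the
# standard library's wsgiref, `GET http://localhost:8000/json` returns the greeting.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    with make_server('', 8000, app) as httpd:
        httpd.serve_forever()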
|
# Get the good stuff
import redis, json, mimeparse, os, sys
from bottle import route, run, request, response, abort
config = { 'servers': [{ 'host': 'localhost', 'port': 6379 }] }
if (len(sys.argv) > 1):
config = json.loads(sys.argv[1])
# Connect to a single Redis instance
client = redis.StrictRedis(host=config['servers'][0]['host'], port=config['servers'][0]['port'], db=0)
# Add a route for a user updating their rating of something which can be accessed as:
# curl -XPUT -H'Content-type: application/json' -d'{ "rating": 5, "source": "charles" }' http://localhost/rating/bob
# Response is a JSON object specifying the new rating for the entity:
# { rating: 5 }
@route('/rating/<entity>', method='PUT')
def put_rating(entity):
# Check to make sure JSON is ok
type = mimeparse.best_match(['application/json'], request.headers.get('Accept'))
if not type: return abort(406)
# Check to make sure the data we're getting is JSON
if request.headers.get('Content-Type') != 'application/json': return abort(415)
response.headers.append('Content-Type', type)
# Read in the data
data = json.load(request.body)
rating = data.get('rating')
source = data.get('source')
# Basic sanity checks on the rating
if isinstance(rating, int): rating = float(rating)
if not isinstance(rating, float): return abort(400)
# Update the rating for the entity
key = '/rating/'+entity
#client.set(key, rating)
client.hmset(source, {'tea':entity,'rating':rating })
#r.hmset('neha', {'tea':'a','rating':20,'avg':15})
#r.hmset('ted', {'tea':'b','rating':30,'avg':30})
# Return the new rating for the entity
return {
"rating": rating
}
# Add a route for getting the aggregate rating of something which can be accesed as:
# curl -XGET http://localhost/rating/bob
# Response is a JSON object specifying the rating for the entity:
# { rating: 5 }
@route('/rating/<entity>', method='GET')
def get_rating(entity):
keys = client.keys('*')
prev_sum=0
count=0
search_tea = entity
for key in keys:
if client.type(key) == 'hash':
#vals = client.hgetall(key)
#print float(client.hmget(key,'rating'))
rate = float(client.hmget(key,'rating')[0])
#print client.type(key),
tea = client.hmget(key,'tea')[0]
if tea == search_tea:
#print 'calculate tea : ', tea, rate
prev_sum=prev_sum+rate
count=count+1
if count ==0:
avg_rate = 0
else:
avg_rate=prev_sum/count
return {
"rating": avg_rate
}
# Add a route for deleting all the rating information which can be accessed as:
# curl -XDELETE http://localhost/rating/bob
# Response is a JSON object showing the new rating for the entity (always null)
# { rating: null }
@route('/rating/<entity>', method='DELETE')
def delete_rating(entity):
count = client.delete('/rating/'+entity)
if count == 0: return abort(404)
return { "rating": None }
# Fire the engines
if __name__ == '__main__':
run(host='0.0.0.0', port=os.getenv('PORT', 2500), quiet=True)
|
# Generated by Django 3.2.4 on 2021-06-27 02:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='atom',
name='Serie',
field=models.CharField(choices=[('', ''), ('Alkali metals', 'Alkali metals'), ('Alkaline earth metals', 'Alkaline earth metals'), ('Lanthanoids', 'Lanthanoids'), ('Actinoids', 'Actinoids'), ('Transition metals', 'Transition metals'), ('Post-transition metals', 'Post-transition metals'), ('Metalloids', 'Metalloids'), ('Reactive nonmetals', 'Reactive nonmetals'), ('Noble gases', 'Noble gases')], max_length=200),
),
]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import argparse
from dataloader.dataloader import InriaDataset
# UNet Model
# UNet Functions ----------------------------------------------------------------------------------
# Double Conv2D
def conv_block(in_channel, out_channel):
"""
in_channel : number of input channel, int
out_channel : number of output channel, int
Returns : Conv Block of 2x Conv2D with ReLU
"""
conv = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=3,padding=1),
nn.ReLU(inplace= True),
nn.Conv2d(out_channel, out_channel, kernel_size=3,padding=1),
nn.ReLU(inplace= True),
)
return conv
# centre-crop a tensor to the spatial size of a target tensor, so encoder and decoder feature maps can be concatenated
def crop(target_tensor, tensor): # x,c
"""
target_tensor : tensor whose spatial size is the crop target
tensor : tensor to crop
Returns : tensor centre-cropped to match the target's spatial size
"""
target_size = target_tensor.size()[2]
tensor_size = tensor.size()[2]
delta = tensor_size - target_size
delta = delta // 2
if (tensor_size - 2*delta)%2 == 0:
tens = tensor[:, :, delta:tensor_size- delta , delta:tensor_size-delta]
elif (tensor_size -2*delta)%2 ==1:
tens = tensor[:, :, delta:tensor_size- delta -1 , delta:tensor_size-delta -1]
return tens
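# worked example (editor's note): for a 10x10 input and a 6x6 target,
# delta = (10 - 6) // 2 = 2, so the centred tensor[:, :, 2:8, 2:8] is returned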
class EncoderBlock(nn.Module):
def __init__(self,input_channel, output_channel,depth,n_block):
super(EncoderBlock,self).__init__()
self.input_channel = input_channel
self.output_channel = output_channel
self.depth = depth
self.n_block = n_block
self.conv = conv_block(self.input_channel, self.output_channel)
self.pool = nn.MaxPool2d(kernel_size = 2, stride = 2)
# weight initialization
self.conv[0].apply(self.init_weights)
def init_weights(self,layer): #gaussian init for the conv layers
nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
def forward(self,x):
c = self.conv(x)
if self.depth != self.n_block :
y = self.pool(c)
else :
y = c # bottleneck block: no pooling, so reuse the conv output instead of recomputing it
return y,c
class DecoderBlock(nn.Module):
def __init__(self,input_channel, output_channel):
super(DecoderBlock,self).__init__()
self.input_channel = input_channel
self.output_channel = output_channel
self.conv_t = nn.ConvTranspose2d(self.input_channel,self.output_channel, kernel_size= 2, stride=2)
self.conv = conv_block(self.input_channel,self.output_channel)
self.conv[0].apply(self.init_weights)
def init_weights(self,layer): #gaussian init for the conv layers
nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
def forward(self,x,skip):
u = self.conv_t(x)
concat =torch.cat([u,skip],1)
x = self.conv(concat)
return x
# UNet Functions END ----------------------------------------------------------------------------------
# Original UNet ---------------------------------------------------------------------------------------
class OriginalUNet(nn.Module):
"""
UNet network for semantic segmentation
"""
def __init__(self, n_channels, conv_width, n_class, cuda = 1):
"""
initialization function
n_channels, int, number of input channel
conv_width, int list, depth of the convs
n_class = int, the number of classes
"""
super(OriginalUNet, self).__init__() #necessary for all classes extending the module class
self.is_cuda = cuda
self.n_class = n_class
## Encoder
# Conv2D (input channel, outputchannel, kernel size)
self.c1 = conv_block(3,16)
self.p1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c2 = conv_block(16,32)
self.p2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c3 = conv_block(32,64)
self.p3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c4 = conv_block(64,128)
self.p4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c5 = conv_block(128,256)
## Decoder
# Transpose & UpSampling Convblock
self.t6 = nn.ConvTranspose2d(256,128, kernel_size= 2, stride=2)
self.c6 = conv_block(256,128)
self.t7 = nn.ConvTranspose2d(128,64, kernel_size=2, stride=2)
self.c7 = conv_block(128,64)
self.t8 = nn.ConvTranspose2d(64,32, kernel_size=2, stride=2)
self.c8 = conv_block(64,32)
self.t9 = nn.ConvTranspose2d(32,16, kernel_size=2, stride=2)
self.c9 = conv_block(32,16)
# Final classifier layer
self.outputs = nn.Conv2d(16, n_class, kernel_size= 1)
#weight initialization
self.c1[0].apply(self.init_weights)
self.c2[0].apply(self.init_weights)
self.c3[0].apply(self.init_weights)
self.c4[0].apply(self.init_weights)
self.c5[0].apply(self.init_weights)
self.c6[0].apply(self.init_weights)
self.c7[0].apply(self.init_weights)
self.c8[0].apply(self.init_weights)
self.c9[0].apply(self.init_weights)
if cuda: #put the model on the GPU memory
self.cuda()
def init_weights(self,layer): #gaussian init for the conv layers
nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
def forward(self, input):
"""
the function called to run inference
"""
if self.is_cuda: #put data on GPU
input = input.cuda()
# Encoder (Left Side)
c1=self.c1(input)
p1=self.p1(c1)
c2=self.c2(p1)
p2=self.p2(c2)
c3=self.c3(p2)
p3=self.p3(c3)
c4=self.c4(p3)
p4=self.p4(c4)
c5=self.c5(p4)
# Decoder (Right Side)
u6=self.t6(c5)
y4 = crop(u6,c4)
concat4 = torch.cat([u6,y4],1)
x6=self.c6(concat4)
u7=self.t7(x6)
y3 = crop(u7,c3)
x7=self.c7(torch.cat([u7,y3],1))
u8=self.t8(x7)
y2 = crop(u8,c2)
x8=self.c8(torch.cat([u8,y2],1))
u9=self.t9(x8)
y1=crop(u9,c1)
x9=self.c9(torch.cat([u9,y1],1))
# Final Output Layer
out = self.outputs(x9)
return out
#--------------------------------------------------------------------------------------------------------
# Generic UNet:
#- selectable block type
#- number of stages
#- use of batch norm & dropout
class GenericUNet(nn.Module):
"""
UNet network for semantic segmentation
"""
def __init__(self, n_channels, conv_width, n_class, n_block, cuda = 1):
"""
initialization function
n_channels, int, number of input channel
conv_width, int list, depth of the convs
n_class = int, the number of classes
n_block = int, the number of blocks
"""
super(GenericUNet, self).__init__() #necessary for all classes extending the module class
self.is_cuda = cuda
self.n_class = n_class
self.n_block = n_block
self.conv_width = conv_width
self.enc = []
self.dec = []
#-------------------------------------------------------------
## Encoder
# Conv2D (input channel, outputchannel, kernel size)
for i in range(self.n_block):
self.enc.append(EncoderBlock(self.conv_width[i],self.conv_width[i+1],i+1,self.n_block))
#--------------------------------------------------------------
self.enc = nn.ModuleList(self.enc)
## Decoder
# Transpose & UpSampling Convblock
for i in range(self.n_block-1):
self.dec.append(DecoderBlock(self.conv_width[self.n_block+i],self.conv_width[self.n_block+i+1]))
self.dec = nn.ModuleList(self.dec)
# Final classifier layer
self.outputs = nn.Conv2d(self.conv_width[-1], self.n_class, kernel_size= 1)
if cuda: #put the model on the GPU memory
self.cuda()
def init_weights(self,layer): #gaussian init for the conv layers
nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
def forward(self, input):
"""
the function called to run inference
"""
if self.is_cuda: #put data on GPU
input = input.cuda()
#-------------------------------------------------
# Encoder (Left Side)
enc = []
skip = []
for i in range(self.n_block):
block_input = input if i == 0 else enc[i-1]
y, c = self.enc[i](block_input) # run each block once instead of twice
enc.append(y)
skip.append(c)
#--------------------------------------------------
# Decoder (Right Side)
dec = []
for i in range(self.n_block-1):
if i==0:
dec.append(self.dec[i](skip[self.n_block -1 -i],skip[self.n_block -2 -i]))
else :
dec.append(self.dec[i](dec[i-1],skip[self.n_block -2 -i]))
# Final Output Layer
out = self.outputs(dec[-1])
return out
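# A construction sketch (editor's addition; the widths below are illustrative,
# not from the original training setup). conv_width must cover both halves:
# for n_block = 3 the encoder consumes conv_width[0..3] and the decoder
# conv_width[3..5], e.g.:
# model = GenericUNet(n_channels=3, conv_width=[3, 16, 32, 64, 32, 16],
#                     n_class=2, n_block=3, cuda=0)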
# Generic UNet with encoder/decoder classes -------------------------------------------------------------------------
#-------------------------------------------------------------
# Encoder
class GenericUNetEncoder(nn.Module):
"""
UNet network for semantic segmentation
"""
def __init__(self, n_channels, conv_width, n_class, n_block, cuda = 1):
"""
initialization function
n_channels, int, number of input channel
conv_width, int list, depth of the convs
n_class = int, the number of classes
n_block = int, the number of blocks
"""
super(GenericUNetEncoder, self).__init__() #necessary for all classes extending the module class
self.is_cuda = cuda
self.n_class = n_class
self.n_block = n_block
self.conv_width = conv_width
self.enc = []
# Conv2D (input channel, outputchannel, kernel size)
for i in range(self.n_block):
self.enc.append(EncoderBlock(self.conv_width[i],self.conv_width[i+1],i+1,self.n_block))
self.enc = nn.ModuleList(self.enc)
if cuda: #put the model on the GPU memory
self.cuda()
def init_weights(self,layer): #gaussian init for the conv layers
nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
def forward(self, input):
"""
the function called to run inference
"""
if self.is_cuda: #put data on GPU
input = input.cuda()
enc = []
skip = []
for i in range(self.n_block):
block_input = input if i == 0 else enc[i-1]
y, c = self.enc[i](block_input) # run each block once instead of twice
enc.append(y)
skip.append(c)
return enc, skip
#-------------------------------------------------------------
# Decoder
class GenericUNetDecoder(nn.Module):
"""
UNet network for semantic segmentation
"""
def __init__(self, n_channels, conv_width, n_class, n_block,encoder, cuda = 1):
"""
initialization function
n_channels, int, number of input channel
conv_width, int list, depth of the convs
n_class = int, the number of classes
n_block = int, the number of blocks
"""
super(GenericUNetDecoder, self).__init__() #necessary for all classes extending the module class
self.is_cuda = cuda
self.n_class = n_class
self.n_block = n_block
self.conv_width = conv_width
self.skip = encoder[1]
self.dec= []
## Decoder
# Transpose & UpSampling Convblock
for i in range(self.n_block-1):
self.dec.append(DecoderBlock(self.conv_width[self.n_block+i],self.conv_width[self.n_block+i+1]))
self.dec = nn.ModuleList(self.dec)
# Final classifier layer
self.outputs = nn.Conv2d(self.conv_width[-1], self.n_class, kernel_size= 1)
if cuda: #put the model on the GPU memory
self.cuda()
def init_weights(self,layer): #gaussian init for the conv layers
nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
def forward(self, input):
"""
the function called to run inference
"""
dec = []
for i in range(self.n_block-1):
if i==0:
dec.append(self.dec[i](self.skip[self.n_block -1 -i],self.skip[self.n_block -2 -i]))
else :
dec.append(self.dec[i](dec[i-1],self.skip[self.n_block -2 -i]))
# Final Output Layer
out = self.outputs(dec[-1])
return out
class GenericUNetClass(nn.Module):
"""
UNet network for semantic segmentation
"""
def __init__(self, n_channels, conv_width, n_class, n_block,encoder, decoder,cuda = 1):
"""
initialization function
n_channels, int, number of input channel
conv_width, int list, depth of the convs
n_class = int, the number of classes
n_block = int, the number of blocks
"""
super(GenericUNetClass, self).__init__() #necessary for all classes extending the module class
self.is_cuda = cuda
self.n_class = n_class
self.n_block = n_block
self.conv_width = conv_width
self.encoder = encoder
self.decoder = decoder
def init_weights(self,layer): #gaussian init for the conv layers
nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
def forward(self, input):
"""
the function called to run inference
"""
pred_encoder = self.encoder(input)
decoder = self.decoder
pred = decoder(pred_encoder)
return pred
|
# microphone
import pyaudio
# button
import RPi.GPIO as GPIO
# lights
from lights import Lights
# environment
import os
import requests
from google_streaming.ordering import OrderingRecording
BUTTON = 17
GPIO.setmode(GPIO.BCM)
GPIO.setup(BUTTON, GPIO.IN)
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = './google_speech_credentials.json'
url = "http://138.68.71.39:3000/orderkeywords"
headers = {
'Content-Type': "application/json",
'Cache-Control': "no-cache"
}
menu_keywords = requests.request("GET", url, headers=headers)
def main():
lights = Lights(3)
button = False
voice_rec_thread = OrderingRecording(pyaudio.PyAudio(), menu_keywords)
print('ready')
while True:
state = GPIO.input(BUTTON)
if not state and not button:
voice_rec_thread.start()
button = True
if state and button:
lights.change(255, 0, 0)
voice_rec_thread.stop()
voice_rec_thread.join(5)
voice_rec_thread = OrderingRecording(pyaudio.PyAudio(), menu_keywords)
lights.change(0, 0, 0)
button = False
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import requests
import json
client_id = "i4gi2fi00r"
client_secret = "R1xBbgeApgGYZ90ZJFbmpD7bkGYCifd093xeczkW"
headers = {
"X-NCP-APIGW-API-KEY-ID": client_id,
"X-NCP-APIGW-API-KEY": client_secret,
"Content-Type": "application/json"
}
language = "ko" # Language of document (ko, ja )
model = "news" # Model used for summaries (general, news)
tone = "2" # Converts the tone of the summarized result. (0, 1, 2, 3)
summaryCount = "3" # This is the number of sentences for the summarized document.
url= "https://naveropenapi.apigw.ntruss.com/text-summary/v1/summarize"
title= "'하루 2000억' 판 커지는 간편송금 시장"
content = "간편송금 이용금액이 하루 평균 2000억원을 넘어섰다. 한국은행이 17일 발표한 '2019년 상반기중 전자지급서비스 이용 현황'에 따르면 올해 상반기 간편송금서비스 이용금액(일평균)은 지난해 하반기 대비 60.7% 증가한 2005억원으로 집계됐다. 같은 기간 이용건수(일평균)는 34.8% 늘어난 218만건이었다. 간편 송금 시장에는 선불전자지급서비스를 제공하는 전자금융업자와 금융기관 등이 참여하고 있다. 이용금액은 전자금융업자가 하루평균 1879억원, 금융기관이 126억원이었다. 한은은 카카오페이, 토스 등 간편송금 서비스를 제공하는 업체 간 경쟁이 심화되면서 이용규모가 크게 확대됐다고 분석했다. 국회 정무위원회 소속 바른미래당 유의동 의원에 따르면 카카오페이, 토스 등 선불전자지급서비스 제공업체는 지난해 마케팅 비용으로 1000억원 이상을 지출했다. 마케팅 비용 지출규모는 카카오페이가 491억원, 비바리퍼블리카(토스)가 134억원 등 순으로 많았다."
data = {
"document": {
"title": title,
"content" : content
},
"option": {
"language": language,
"model": model,
"tone": tone,
"summaryCount" : summaryCount
}
}
print(json.dumps(data, indent=4, sort_keys=True))
response = requests.post(url, data=json.dumps(data), headers=headers)
rescode = response.status_code
if(rescode == 200):
print (response.text)
else:
print("Error : " + response.text) |
import sys
import numpy as np
from astropy.table import Table
import statsmodels.api as sm
from bow_projection import Spline_R_theta_from_grid
class Dragoid(object):
def __init__(self, alpha, mu=None, lowess_frac=None):
if mu is None:
astring = 'dust-couple-stream'
else:
astring = 'dust-couple-div-stream'
astring += f'-alpha{int(100*alpha):03d}'
if mu is not None:
astring += f'-mu{int(100*mu):03d}'
astring += '.tab'
self.label = fr"$\alpha_\mathrm{{drag}} = {alpha:.02f}$"
if mu is not None:
self.label += ', ' + fr"$\mu = {mu:.02f}$"
t = Table.read(astring, format='ascii.tab')
dth = np.pi/len(t)
self.thgrid = t['theta'] + 0.5*dth
self.Rgrid = t['R']/t['R'][0]
self.thgrid = np.concatenate([-self.thgrid[::-1], self.thgrid])
self.Rgrid = np.concatenate([self.Rgrid[::-1], self.Rgrid])
if lowess_frac is not None:
# Optionally smooth the shape before fitting spline
Rsmooth = sm.nonparametric.lowess(
self.Rgrid, self.thgrid, frac=lowess_frac,
is_sorted=True, return_sorted=False)
# Gradually transition between smooth version for low
# theta and the original version for theta > 60.0 deg
smooth_mix = np.exp(-(self.thgrid/np.radians(45.0))**2)
self.Rgrid = self.Rgrid*(1. - smooth_mix) + Rsmooth*smooth_mix
self.splinefit = Spline_R_theta_from_grid(
theta_grid=self.thgrid, R_grid=self.Rgrid)
def __call__(self, theta):
# When called as a function, give the spline fitted result
return self.splinefit(theta)
if __name__ == "__main__":
from matplotlib import pyplot as plt
import seaborn as sns
lib_name = sys.argv[0].replace('.py', '')
figfile = f"test_{lib_name}_radius.pdf"
sns.set_style('ticks')
fig, ax = plt.subplots()
th = np.linspace(-np.pi, np.pi, 1001)
th_dg = np.degrees(th)
alphas = [0.25, 0.5, 1.0, 2.0] + [4.0, 4.0]
mus = [None]*4 + [0.2, 0.8]
for alpha, mu in zip(alphas, mus):
shape = Dragoid(alpha=alpha, mu=mu, lowess_frac=0.1)
ax.plot(np.degrees(shape.thgrid), shape.Rgrid,
color='b', alpha=0.2, lw=2, label='_nolabel_')
ax.plot(th_dg, shape(th), lw=0.8, label=shape.label)
ax.legend(title=r"Dragoid shapes")
ax.set(
xlabel=r"Polar angle: $\theta$, degrees",
ylabel=r"$R$",
xlim=[0, 180],
yscale='log',
ylim=[0.9, 200.0],
xticks=[0, 30, 60, 90, 120, 150, 180],
)
sns.despine()
fig.tight_layout()
fig.savefig(figfile)
print(figfile, end='')
|
from django.contrib import admin
from .models import *
@admin.register(Provincialstaff,Districtstaff,Staff,Title,Position,Userlevel)
class ViewAdmin(admin.ModelAdmin):
pass
|
from sqlalchemy.orm.exc import NoResultFound
from bitcoin_acks.database import session_scope
from bitcoin_acks.github_data.github_data import GitHubData
from bitcoin_acks.github_data.graphql_queries import user_graphql_query
from bitcoin_acks.logging import log
from bitcoin_acks.models import Users
class UsersData(GitHubData):
def get(self, login: str) -> dict:
variables = {
'userLogin': login
}
json_object = {
'query': user_graphql_query,
'variables': variables
}
log.debug('getting user', json_object=json_object)
r = self.graphql_post(json_object=json_object)
user = r.json()['data']['user']
return user
def upsert(self, data: dict) -> str:
with session_scope() as session:
try:
user_record = (
session.query(Users)
.filter(Users.login == data['login'])
.one()
)
except NoResultFound:
# if the login is not in the db, query github to get the ID
data = self.get(login=data['login'])
try:
user_record = (
session.query(Users)
.filter(Users.id == data['id'])
.one()
)
except NoResultFound:
user_record = Users()
user_record.id = data['id']
session.add(user_record)
for key, value in data.items():
setattr(user_record, key, value)
session.commit()
return user_record.id
|
# -*- coding: utf-8 -*-
from const import *
import sys, os, time
import json, struct
from errors import err
from config import config
class request:
def __init__(self, data):
if not isinstance(data, dict):
raise TypeError('request data must be a dict') # __init__ cannot return False
self.version = '2013111910'
self.uid = data["uuid"]
self.service = data["service"]
self.method = data["method"]
self.params = data["params"]
self.params['uuid'] = self.uid
def dump(self):
# obj = {"uid": self.uid, "service": self.service, "method": self.method, "version": str(self.version), "params": self.params}
obj = {"uid": self.uid, "method": self.method, "params": self.params}
self.format(obj['params'])
try:
encodedjson = json.dumps(obj)
encodedjson = (encodedjson + config['GAME']['msgsuffix'])
encodedjson = struct.pack(str(len(encodedjson)) + 's', encodedjson)
return encodedjson
except:
print 'json.dumps except', obj
def format(self, obj):
self.__format(obj)
if isinstance(obj, dict) == True:
for key, val in obj.items():
if isinstance(val, dict) == True:
self.__format(obj[key])
elif isinstance(val, list) == True:
for k, v in enumerate(obj[key]):
self.__format(obj[key][k])
else:
pass
def __format(self, obj):
if isinstance(obj, dict) == False:
return
# map the verbose wire keys onto their compact V_* aliases (constants from const)
key_map = {
'uniqid': V_UNIQID,
'locaX': V_INDEX,
'status': V_STATUS,
'type': V_TYPE,
'armor': V_ARMOR,
'aTime': V_ATTACK_TIME,
'sTime': V_HERO_SKILL_TIME,
'weapon': V_WEAPON,
'desktopOpp': V_DESKTOP_OPP,
'desktopSelf': V_DESKTOP_SELF,
'skillCardId': V_SKILL_CARD_ID,
'crystal': V_CRYSTAL,
}
for key, val in obj.items():
if key in key_map:
obj[key_map[key]] = val
del obj[key]
if __name__ == '__main__':
# c= request({})
print request.format
print request._request__format # the name-mangled private helper
|
from django.contrib.auth.models import User
from django.db import models
from edtech.models.choices import Choice
from edtech.models.questions import Question
from djutil.models import TimeStampedModel
from edtech.models.mixins import DefaultPermissions
class UserQuestionAnswer(TimeStampedModel, DefaultPermissions):
user = models.ForeignKey(User)
question = models.ForeignKey(Question)
choice = models.ForeignKey(Choice, null=True)
is_correct = models.BooleanField(default=False)
session_end = models.BooleanField(default=False)
|
from office365.directory.directoryObject import DirectoryObject
from office365.directory.directoryObjectCollection import DirectoryObjectCollection
from office365.onedrive.drive import Drive
from office365.outlook.contact_collection import ContactCollection
from office365.outlook.event_collection import EventCollection
from office365.outlook.message_collection import MessageCollection
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.teams.team_collection import TeamCollection
class User(DirectoryObject):
"""Represents an Azure AD user account. Inherits from directoryObject."""
def delete_object(self, permanent_delete=False):
"""
:param permanent_delete: Permanently deletes the user from directory
:type permanent_delete: bool
"""
super(User, self).delete_object()
if permanent_delete:
deleted_user = self.context.directory.deletedUsers[self.id]
deleted_user.delete_object()
return self
@property
def drive(self):
"""Retrieve the properties and relationships of a Drive resource."""
if self.is_property_available('drive'):
return self.properties['drive']
else:
return Drive(self.context, ResourcePath("drive", self.resource_path))
@property
def contacts(self):
"""Get a contact collection from the default Contacts folder of the signed-in user (.../me/contacts),
or from the specified contact folder."""
if self.is_property_available('contacts'):
return self.properties['contacts']
else:
return ContactCollection(self.context, ResourcePath("contacts", self.resource_path))
@property
def events(self):
"""Get an event collection or an event."""
if self.is_property_available('events'):
return self.properties['events']
else:
return EventCollection(self.context, ResourcePath("events", self.resource_path))
@property
def messages(self):
"""Get an event collection or an event."""
if self.is_property_available('messages'):
return self.properties['messages']
else:
return MessageCollection(self.context, ResourcePath("messages", self.resource_path))
def send_mail(self, message):
"""Send a new message on the fly"""
qry = ServiceOperationQuery(self, "sendmail", None, message)
self.context.add_query(qry)
return self
@property
def joinedTeams(self):
"""Get the teams in Microsoft Teams that the user is a direct member of."""
if self.is_property_available('joinedTeams'):
return self.properties['joinedTeams']
else:
return TeamCollection(self.context, ResourcePath("joinedTeams", self.resource_path))
@property
def memberOf(self):
"""Get groups and directory roles that the user is a direct member of."""
if self.is_property_available('memberOf'):
return self.properties['memberOf']
else:
return DirectoryObjectCollection(self.context, ResourcePath("memberOf", self.resource_path))
@property
def transitiveMemberOf(self):
"""Get groups, directory roles that the user is a member of. This API request is transitive, and will also
return all groups the user is a nested member of. """
if self.is_property_available('transitiveMemberOf'):
return self.properties['transitiveMemberOf']
else:
return DirectoryObjectCollection(self.context, ResourcePath("transitiveMemberOf", self.resource_path))
def set_property(self, name, value, persist_changes=True):
super(User, self).set_property(name, value, persist_changes)
# fallback: create a new resource path
if self._resource_path is None:
if name == "id" or name == "userPrincipalName":
self._resource_path = ResourcePath(
value,
self._parent_collection.resource_path)
return self
|
from pages.base import BasePage
from utilites.locators import LoginPageLocators
from utilites.static_data import BMCData
"""
This Login Page Class File in responsible for login into the home page.
So this will require username & password from the PageLocators Class.
written by: jiaul_islam
"""
class LoginPage(BasePage):
def __init__(self, driver) -> None:
super().__init__(driver)
def enter_username_textbox(self) -> None:
""" Search & Enter the data in username textbox """
self._driver.find_element(*LoginPageLocators.USERNAME_TEXTBOX).clear()
self.write(LoginPageLocators.USERNAME_TEXTBOX, BMCData.USERNAME)
def enter_password_textbox(self) -> None:
""" Search & Enter the data in password textbox """
self._driver.find_element(*LoginPageLocators.PASSWORD_TEXTBOX).clear()
self.write(LoginPageLocators.PASSWORD_TEXTBOX, BMCData.PASSWORD)
def click_login_button(self) -> None:
""" Click the Login Button on login page """
self.click(LoginPageLocators.LOGIN_BUTTON)
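
# A usage sketch (editor's addition): "driver" is assumed to be a Selenium
# WebDriver instance already navigated to the login page.
# login_page = LoginPage(driver)
# login_page.enter_username_textbox()
# login_page.enter_password_textbox()
# login_page.click_login_button()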
|
# Generated by Django 2.0.3 on 2018-05-04 10:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('web', '0030_singlevideo_views'),
]
operations = [
migrations.RemoveField(
model_name='singlevideo',
name='views',
),
]
|
print("CHild Branch Repo")
|
# -*- coding: utf-8 -*-
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def tree2str(self, t):
if t is None:
return ""
result = [str(t.val)]
if t.left is not None or t.right is not None:
result.extend(["(", self.tree2str(t.left), ")"])
if t.right is not None:
result.extend(["(", self.tree2str(t.right), ")"])
return "".join(result)
if __name__ == "__main__":
solution = Solution()
t0_0 = TreeNode(1)
t0_1 = TreeNode(2)
t0_2 = TreeNode(3)
t0_3 = TreeNode(4)
t0_1.left = t0_3
t0_0.right = t0_2
t0_0.left = t0_1
assert "1(2(4))(3)" == solution.tree2str(t0_0)
t1_0 = TreeNode(1)
t1_1 = TreeNode(2)
t1_2 = TreeNode(3)
t1_3 = TreeNode(4)
t1_1.right = t1_3
t1_0.right = t1_2
t1_0.left = t1_1
assert "1(2()(4))(3)" == solution.tree2str(t1_0)
|
a = input("enter the name=")
b = int(input("marks in english="))
c = int(input("marks in accounts="))
d = int(input("marks in economics="))
e = int(input("marks in buisness="))
f = int(input("marks in IP="))
total = b+c+d+e+f
n = (total/500)*100
if n>=90:
print("A Grade")
elif n>=80:
print("B Grade")
elif n>=70:
print("C Grade")
elif n>=60:
print("D Grade")
elif n>=40:
print("E Grade")
else:
print("F Grade")
|
# Generated by Django 3.1 on 2020-10-08 12:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prefLabel', models.CharField(max_length=100)),
('identifier', models.URLField(verbose_name='Identifier')),
],
),
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('headline', models.CharField(max_length=100)),
('body', models.TextField()),
('image_url', models.URLField(blank=True, null=True, verbose_name='Image URL')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='KnownRisk',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='PersonAndOrganization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='Email')),
('website', models.URLField(blank=True, verbose_name='Website')),
],
),
migrations.CreateModel(
name='RiskMitigator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='RiskModifier',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='RouteAccessRestrictionTerm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=250)),
],
),
migrations.CreateModel(
name='RouteDesignation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=250)),
('url', models.URLField(verbose_name='Formal Definition URL')),
],
),
migrations.CreateModel(
name='RouteDesignationTerm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('term', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='RouteGuide',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200, verbose_name='Name')),
('url', models.URLField(blank=True, verbose_name='Trackback URL')),
('date_published', models.DateField(null=True, verbose_name='Date Published')),
('date_modified', models.DateField(null=True, verbose_name='Date Modified')),
('description', models.TextField(blank=True, verbose_name='Description')),
('headline', models.CharField(blank=True, max_length=200, null=True, verbose_name='Headline (Brief Description)')),
('distance', models.CharField(max_length=9, verbose_name='Distance')),
('is_loop', models.BooleanField(blank=True, default=True, null=True, verbose_name='Is Loop')),
('id_as_url', models.URLField(verbose_name='ID (URL)')),
('activity', models.ManyToManyField(blank=True, to='protoroute.Activity')),
('additional_info', models.ManyToManyField(blank=True, related_name='additional_info', to='protoroute.Article', verbose_name='Additional Info')),
('author', models.ManyToManyField(blank=True, null=True, to='protoroute.PersonAndOrganization', verbose_name='Author')),
('categories', models.ManyToManyField(blank=True, related_name='categories', to='protoroute.Category', verbose_name='Category')),
],
),
migrations.CreateModel(
name='RouteGuideSegment',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200, verbose_name='Name')),
('url', models.URLField(blank=True, null=True, verbose_name='Trackback URL')),
('date_published', models.DateField(blank=True, verbose_name='Date Published')),
('date_modified', models.DateField(blank=True, verbose_name='Date Modified')),
('description', models.TextField(blank=True, verbose_name='Description')),
('headline', models.CharField(blank=True, max_length=200, verbose_name='Headline (Brief Description)')),
('is_loop', models.BooleanField(default=True, verbose_name='Is Loop')),
('id_as_url', models.URLField(verbose_name='ID (URL)')),
('sequence', models.IntegerField(verbose_name='Segment Number')),
('activity', models.ManyToManyField(to='protoroute.Activity')),
('additional_info', models.ManyToManyField(blank=True, related_name='seg_additional_info', to='protoroute.Article', verbose_name='Additional Info')),
('author', models.ManyToManyField(to='protoroute.PersonAndOrganization', verbose_name='Author')),
],
),
migrations.CreateModel(
name='RoutePoint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Name')),
('is_access_point', models.BooleanField()),
('is_preferred_access_point', models.BooleanField(verbose_name='Is Preferred Access Point')),
('description', models.TextField(verbose_name='Description')),
('headline', models.CharField(blank=True, max_length=200, null=True, verbose_name='Headline (Brief Description)')),
('same_as', models.URLField(blank=True, null=True, verbose_name='Same As')),
('is_start_point', models.BooleanField(default=False, verbose_name='Is Start Point')),
('is_end_point', models.BooleanField(default=False, verbose_name='Is End Point')),
],
),
migrations.CreateModel(
name='SuggestedEquipment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Surface',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('surface', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='VerificationRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_verified', models.DateField(verbose_name='Date Verified')),
('route_guide', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_verification_record', to='protoroute.routeguide')),
('route_guide_segment', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_verification_record', to='protoroute.routeguidesegment')),
('verified_by', models.ManyToManyField(to='protoroute.PersonAndOrganization', verbose_name='Verified By')),
],
),
migrations.CreateModel(
name='UserGeneratedContent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spatial_coverage', models.CharField(max_length=500)),
('associated_media', models.CharField(max_length=500)),
('accountable_person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='protoroute.personandorganization')),
('creator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_by', to='protoroute.personandorganization')),
('route_guide', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_generated_content', to='protoroute.routeguide')),
],
),
migrations.CreateModel(
name='TransportNote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('transport_mode', models.CharField(choices=[('Bus', 'Bus'), ('Rail', 'Rail'), ('Road', 'Road'), ('Foot', 'Foot'), ('Bicycle', 'Bicycle')], max_length=100)),
('description', models.CharField(max_length=500)),
('routepoint', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rp_transport_note', to='protoroute.routepoint')),
],
),
migrations.CreateModel(
name='RouteSegmentGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('id_as_url', models.URLField(verbose_name='@id')),
('name', models.CharField(max_length=100)),
('description', models.CharField(max_length=250)),
('alternatives', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seg_route_segment_group', to='protoroute.routesegmentgroup', verbose_name='Alternative Group To')),
('segments', models.ManyToManyField(related_name='rg_route_segment_group', to='protoroute.RouteGuideSegment', verbose_name='Includes Segments')),
],
),
migrations.CreateModel(
name='RouteRiskAdvisory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('risk_description', models.CharField(max_length=250)),
('user_safety_feedback', models.CharField(max_length=500)),
('is_maintained', models.BooleanField()),
('risk_information_url', models.URLField()),
('traffic_description', models.CharField(max_length=500)),
('known_risk', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='protoroute.knownrisk')),
('maintained_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='maintains', to='protoroute.personandorganization', verbose_name='Is Maintained By')),
('risk_mitigator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='protoroute.riskmitigator')),
('risk_modifier', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='protoroute.riskmodifier')),
('route_guide', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_risk_advisory', to='protoroute.routeguide')),
('route_guide_segment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_risk_advisory', to='protoroute.routeguidesegment')),
],
),
migrations.CreateModel(
name='RouteLegalAdvisory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=250)),
('legal_defurl', models.URLField(verbose_name='Legal Definition URL')),
('route_designation', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='protoroute.routedesignation', verbose_name='Route Designation')),
('route_guide', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_legal_advisory', to='protoroute.routeguide')),
('route_guide_segment', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_legal_advisory', to='protoroute.routeguidesegment')),
],
),
migrations.AddField(
model_name='routeguidesegment',
name='point_of_interest',
field=models.ManyToManyField(blank=True, to='protoroute.RoutePoint'),
),
migrations.AddField(
model_name='routeguidesegment',
name='route_guide',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_route_guide', to='protoroute.routeguide'),
),
migrations.AddField(
model_name='routeguide',
name='route_point',
field=models.ManyToManyField(blank=True, to='protoroute.RoutePoint'),
),
migrations.AddField(
model_name='routeguide',
name='suggested_equipment',
field=models.ManyToManyField(blank=True, related_name='equipment', to='protoroute.SuggestedEquipment', verbose_name='Equipment'),
),
migrations.AddField(
model_name='routeguide',
name='surfaces',
field=models.ManyToManyField(blank=True, related_name='surfaces', to='protoroute.Surface', verbose_name='Surface'),
),
migrations.CreateModel(
name='RouteGradient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('max_gradient', models.CharField(max_length=10)),
('avg_gradient', models.CharField(max_length=10)),
('total_elevation_gain', models.CharField(max_length=9, verbose_name='Total Elevation Gain')),
('total_elevation_loss', models.CharField(max_length=9, verbose_name='Total Elevation Loss')),
('gradient_term', models.CharField(max_length=100, verbose_name='Gradient Term')),
('gradient_defurl', models.URLField(verbose_name='Gradient Definition URL')),
('description', models.CharField(max_length=250)),
('route_guide', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_gradient', to='protoroute.routeguide')),
('route_guide_segment', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_gradient', to='protoroute.routeguidesegment')),
],
),
migrations.CreateModel(
name='RouteDifficulty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('difficulty_term', models.CharField(max_length=15)),
('description', models.CharField(max_length=250)),
('difficulty_defurl', models.URLField(verbose_name='Difficulty Definition URL')),
('activity', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='protoroute.activity', verbose_name='Activity')),
('route_guide', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_difficulty', to='protoroute.routeguide')),
('route_guide_segment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_difficulty', to='protoroute.routeguidesegment')),
],
),
migrations.AddField(
model_name='routedesignation',
name='legal_advisory',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_route_designation', to='protoroute.routelegaladvisory'),
),
migrations.AddField(
model_name='routedesignation',
name='term',
field=models.ManyToManyField(related_name='terms', to='protoroute.RouteDesignationTerm', verbose_name='Route Designation Term'),
),
migrations.CreateModel(
name='RouteAccessRestriction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=250)),
('information_url', models.URLField()),
('timespan', models.CharField(max_length=50)),
('route_guide', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_access_restriction', to='protoroute.routeguide')),
('route_guide_segment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_access_restriction', to='protoroute.routeguidesegment')),
('terms', models.ManyToManyField(blank=True, to='protoroute.RouteAccessRestrictionTerm')),
],
),
migrations.CreateModel(
name='Provenance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('provenance_url', models.URLField(verbose_name='Provenance')),
('version', models.DateField()),
('description', models.CharField(max_length=250)),
('publisher', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='protoroute.personandorganization')),
('route_guide', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_provenance', to='protoroute.routeguide')),
('route_guide_segment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_provenance', to='protoroute.routeguidesegment')),
],
),
migrations.CreateModel(
name='MapReference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('map_series', models.CharField(max_length=50)),
('map_number', models.CharField(max_length=10)),
('grid_reference', models.CharField(max_length=10)),
('publisher', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='publisher', to='protoroute.personandorganization', verbose_name='publisher')),
('routepoint', models.ManyToManyField(related_name='rp_mapref', to='protoroute.RoutePoint')),
],
),
migrations.CreateModel(
name='MapImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('map_type', models.CharField(choices=[('RouteMap', 'RouteMap'), ('ElevationMap', 'ElevationMap'), ('CustomMap', 'CustomMap')], max_length=12)),
('image', models.URLField()),
('encoding_format', models.CharField(max_length=40)),
('route_guide', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_mapimage', to='protoroute.routeguide')),
('route_guide_segment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_mapimage', to='protoroute.routeguidesegment')),
],
),
migrations.CreateModel(
name='IndicativeDuration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('duration', models.CharField(max_length=10, verbose_name='Duration (8601)')),
('activity', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='protoroute.activity', verbose_name='Activity')),
('route_guide', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_duration', to='protoroute.routeguide')),
('route_guide_segment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_duration', to='protoroute.routeguidesegment')),
],
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('caption', models.CharField(max_length=250, verbose_name='Caption')),
('url', models.URLField(verbose_name='Image URL')),
('encoding_format', models.CharField(max_length=40, verbose_name='Encoding Format')),
('size', models.CharField(max_length=20, verbose_name='Size')),
('width', models.IntegerField(verbose_name='Width')),
('height', models.IntegerField(verbose_name='Height')),
('route_guide', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='image', to='protoroute.routeguide')),
],
),
migrations.CreateModel(
name='GeoPath',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('map_type', models.CharField(choices=[('RouteMap', 'RouteMap'), ('ElevationMap', 'ElevationMap'), ('CustomMap', 'CustomMap')], max_length=12)),
('url', models.URLField()),
('encoding_format', models.CharField(max_length=40)),
('route_guide', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_geopath', to='protoroute.routeguide')),
('route_guide_segment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_geopath', to='protoroute.routeguidesegment')),
],
),
migrations.CreateModel(
name='GeoCoordinates',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('latitude', models.FloatField(verbose_name='Latitude')),
('longitude', models.FloatField(verbose_name='Longitude')),
('postal_code', models.CharField(blank=True, max_length=10, null=True, verbose_name='Post Code')),
('routepoint', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rp_geo', to='protoroute.routepoint')),
],
),
migrations.AddField(
model_name='article',
name='author',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='protoroute.personandorganization', verbose_name='Author'),
),
migrations.CreateModel(
name='AmenityFeature',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=75)),
('routepoint', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rp_amenity', to='protoroute.routepoint')),
],
),
migrations.CreateModel(
name='AccessibilityDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=250)),
('route_guide', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rg_access_description', to='protoroute.routeguide')),
('route_guide_segment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seg_access_description', to='protoroute.routeguidesegment')),
],
),
]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 7 01:59:51 2017
@author: ROZIN
"""
from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'text',) |
# Generated by Django 2.2.13 on 2020-07-10 07:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0045_auto_20200710_1210'),
]
operations = [
migrations.AlterField(
model_name='about',
name='aboutus',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='contact',
name='contactus',
field=models.CharField(blank=True, max_length=255),
),
]
|
import json  # to import post.json
from blog.models import Post
# open and load the json data
with open('post.json') as f:
posts_json = json.load(f)
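# A minimal sketch of the post.json shape this loop assumes (field values are
# hypothetical; only the three keys read below are required):
# [
#     {"title": "First Post", "content": "...", "user_id": 1},
#     {"title": "Second Post", "content": "...", "user_id": 2}
# ]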
# Loop through JSON data
for post in posts_json:
"""
input:
title: the title of the json element
content: the cotent of the json element
author_id: the user number of the json element, which is used as the
ForeignKey to connect the blog site to the User database.
Still trying to verify, but SQL convention is that the blog primary
key would author and the foreign key should be author_id.
output:
After interation, it will post the JSON elements as new posts in blog
"""
    new_post = Post(title=post['title'],
                    content=post['content'],
                    author_id=post['user_id'])  # avoid shadowing the loop variable
    new_post.save()
|
# -*- coding=utf-8 -*-
# @Time: 2020/10/11 8:40 PM
# Author: 王文娜
# @File: 网上代码.py
# @Software: PyCharm
import time
import re
from urllib import request,parse
import random
class maoyan(object):
def __init__(self):
        self.url = 'https://maoyan.com/board/4?offset={}'  # main() fills in the offset via str.format
self.ua_list=['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0']
    def get_page(self, url):
        headers = {'User-agent': random.choice(self.ua_list)}
        req = request.Request(url=url, headers=headers)
        res = request.urlopen(req)
        html = res.read().decode('utf-8')
        self.parse_page(html)

    def parse_page(self, html):
        pattern = re.compile('<div class="movie-item-info">.*?title="(.*?)".*?class="star">(.*?)</p>.*?releasetime">(.*?)</p>', re.S)
        r_list = pattern.findall(html)
        self.write_page(r_list)
def write_page(self,r_list):
one_film_dict={}
for rt in r_list:
one_film_dict['name']=rt[0].strip()
one_film_dict['star'] = rt[1].strip()
one_film_dict['time'] = rt[2].strip()
print(one_film_dict)
def main(self):
for offset in range(0,91,10):
url=self.url.format(offset)
self.get_page(url)
time.sleep(random.randint(1,3))
if __name__=='__main__':
start=time.time()
spider=maoyan()
spider.main()
end=time.time()
    print('Program execution time: %.2f' % (end - start))
|
# Implement the functionality inside this function.
# It is also fine to build the feature in a separate class, then create and run an instance of it here.
def service():
    print('Initializing the feature service')
|
# Generated by Django 2.1 on 2018-08-07 03:09
from django.db import migrations, models
import django.db.models.deletion
import team.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MemberSocialNetwork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.CharField(max_length=200)),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='SocialNetwork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('icon', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='TeamMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('avatar', models.ImageField(upload_to=team.models.upload_avatar)),
('name', models.CharField(max_length=100)),
('position', models.CharField(max_length=100)),
('bio', models.TextField()),
('social_network', models.ManyToManyField(through='team.MemberSocialNetwork', to='team.SocialNetwork')),
],
),
migrations.AddField(
model_name='membersocialnetwork',
name='member',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='team.TeamMember'),
),
migrations.AddField(
model_name='membersocialnetwork',
name='network',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='team.SocialNetwork'),
),
]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 11 03:33:23 2017
@author: ADITYA
"""
#IMPORTING DEPENDENCIES
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv(r'DatasetsCreated/datasetOne.csv')
X = dataset.iloc[:, 0:17].values
y = dataset.iloc[:, -1].values
#splitting the dataset into test and training
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
#feature scaling
from sklearn.preprocessing import StandardScaler
stanScalerX = StandardScaler()
X_train = stanScalerX.fit_transform(X_train)
X_test = stanScalerX.transform(X_test)
#--------------------------------------------------------------------------------------------
#MAKING THE ANN
#importing keras libraries
import keras
from keras.models import Sequential
from keras.layers import Dense
classifier = Sequential()
classifier.add(Dense(units = 10, kernel_initializer = 'uniform', activation = 'relu', input_dim = 17))
#classifier1.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))
classifier.add(Dense(activation='relu', units=6, kernel_initializer='uniform'))
classifier.add(Dense(activation='sigmoid', units=1, kernel_initializer='uniform'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
classifier.fit(X_train, y_train, batch_size = 10, epochs = 250)
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
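# cm follows sklearn's convention -- rows are true classes, columns are predictions:
# [[TN, FP],
#  [FN, TP]]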
##TESTING THE DATASET ----------------------------------------------------------------------------
testDataset = pd.read_csv(r'DatasetsCreated/testSet.csv', header=None)
Xtesting = testDataset.iloc[:, 0:17].values
Xtesting = stanScalerX.transform(Xtesting)
new_prediction = classifier.predict(Xtesting)
new_predictionBinary = (new_prediction > 0.5)  # threshold the sigmoid outputs at 0.5
#
|
# -*-coding:utf-8-*-
import os
import torch
import cv2
from torch.utils import data
from PIL import Image, ImageFile
import pandas as pd
from torchvision import transforms
class MyCustomDataset(data.Dataset):
def __init__(self, csv_file, data_dir_raw, data_dir_exp, root_dir, transform):
self.pairs = pd.read_csv(csv_file, sep=',', header=None)
self.data_dir_raw = data_dir_raw
self.data_dir_exp = data_dir_exp
self.root_dir = root_dir
self.transform = transform
def __len__(self):
"""Return the number of images."""
return len(self.pairs)
def __getitem__(self, index):
"""Return one image and its corresponding unpaired image"""
ImageFile.LOAD_TRUNCATED_IMAGES = True
num_row, num_col = self.pairs.shape
if num_col == 1:
img_path1 = os.path.join(self.root_dir, self.data_dir_raw, str(self.pairs.iloc[index, 0]))
img_path2 = os.path.join(self.root_dir, self.data_dir_exp, str(self.pairs.iloc[index, 0])) # paired high quality image
image1 = Image.open(img_path1)
# image1 = image1.convert("L")
image2 = Image.open(img_path2)
# image2 = image2.convert("L")
name = str(self.pairs.iloc[index, 0])
imgName, _ = name.split('.', 1)
if self.transform:
try:
image1 = self.transform(image1)
image2 = self.transform(image2)
                except Exception:
print("Cannot transform images: {} and {}".format(img_path1, img_path2))
return image1, image2, imgName
elif num_col == 2:
img_path1 = os.path.join(self.root_dir, self.data_dir_raw, str(self.pairs.iloc[index, 0])) # low-quality image
img_path2 = os.path.join(self.root_dir, self.data_dir_exp, str(self.pairs.iloc[index, 1])) # unpaired high quality image
#img_path3 = os.path.join(self.root_dir, self.data_dir_exp, str(self.pairs.iloc[index, 1])) # paired high quality image
image1 = Image.open(img_path1)
image2 = Image.open(img_path2)
# print(len(image2.split()))
# print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
#image2 = cv2.imread(img_path2,1)
#image3 = Image.open(img_path3)
#image3 = cv2.imread(img_path3,1)
name = str(self.pairs.iloc[index, 0])
imgName, _ = name.split('.', 1)
if self.transform:
try:
image1 = self.transform(image1)
image2 = self.transform(image2)
#image3 = self.transform(image3)
                except Exception:
                    print("Cannot transform images: {} and {}".format(img_path1, img_path2))
return image1, image2, imgName
class DataLoader():
def __init__(self, dataset, data_dir_raw, data_dir_exp, csv_file, root_dir, image_size, resize_size, batch_size, shuffle, num_workers, dropLast):
self.dataset = dataset
self.data_dir_raw = data_dir_raw
self.data_dir_exp = data_dir_exp
self.csv_file = csv_file
self.root_dir = root_dir
self.image_size = image_size
self.batch_size = batch_size
self.resize_size = resize_size
self.shuffle = shuffle
self.num_workers = num_workers
self.dropLast = dropLast
def __make_power_32(self, img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
print('image resized from {:} x {:} to {:} x {:}'.format(ow, oh, w, h))
return img.resize((w, h), method)
def transform(self, MakePower32, RandomHorizontalFlip, RandomVerticalFlip, ColorJitter, RandomCrop, CenterCrop, Resize, ToTensor, Normalize):
transform_options = []
if MakePower32:
transform_options.append(transforms.Lambda(lambda img: self.__make_power_32(img, base=32, method=Image.BICUBIC)))
if RandomHorizontalFlip:
transform_options.append(transforms.RandomHorizontalFlip(p=0.5))
if RandomVerticalFlip:
transform_options.append(transforms.RandomVerticalFlip(p=0.5))
if ColorJitter:
transform_options.append(transforms.ColorJitter(brightness=0, contrast=0.15, saturation=0))
if RandomCrop:
transform_options.append(transforms.RandomCrop(self.image_size, padding=0, pad_if_needed=False))
if CenterCrop:
transform_options.append(transforms.CenterCrop(self.image_size))
if Resize:
transform_options.append(transforms.Resize(self.resize_size))
if ToTensor:
transform_options.append(transforms.ToTensor())
if Normalize:
transform_options.append(transforms.Normalize([0.5], [0.5]))
# transform_options.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
transform = transforms.Compose(transform_options)
return transform
def load_trainSet(self):
"""Build and return the training data loader"""
train_transform = self.transform(False, False, False, False, False, False, False, True, True)
trainSet = MyCustomDataset(
csv_file=self.csv_file,
data_dir_raw=self.data_dir_raw,
data_dir_exp=self.data_dir_exp,
root_dir=self.root_dir,
transform=train_transform
)
return trainSet
def load_valSet(self):
"""Build and return the validation data loader"""
val_transform = self.transform(False, False, False, False, False, False, False, True, True)
valSet = MyCustomDataset(
csv_file=self.csv_file,
data_dir_raw=self.data_dir_raw,
data_dir_exp=self.data_dir_exp,
root_dir=self.root_dir,
transform=val_transform
)
return valSet
def load_testSet(self):
"""Build and return the validation data loader"""
test_transform = self.transform(False, False, False, False, False, False, False, True, True)
testSet = MyCustomDataset(
csv_file=self.csv_file,
data_dir_raw=self.data_dir_raw,
data_dir_exp=self.data_dir_exp,
root_dir=self.root_dir,
transform=test_transform
)
return testSet
def loader(self):
"""Build and return a data loader"""
if self.dataset == 'train':
self.dataset_in = self.load_trainSet()
dataLoader = torch.utils.data.DataLoader(
dataset=self.dataset_in,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.dropLast,
# pin_memory=True
)
return dataLoader
elif self.dataset == 'val':
self.dataset_in = self.load_valSet()
dataLoader = torch.utils.data.DataLoader(
dataset=self.dataset_in,
batch_size=1,
# shuffle=self.shuffle,
shuffle=False,
num_workers=self.num_workers,
drop_last=self.dropLast,
# pin_memory=True
)
return dataLoader
elif self.dataset == 'test':
self.dataset_in = self.load_testSet()
dataLoader = torch.utils.data.DataLoader(
dataset=self.dataset_in,
batch_size=1,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.dropLast,
# pin_memory=True
)
return dataLoader
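# A minimal usage sketch (directory names and the CSV file are hypothetical; the
# nine booleans passed to transform() select MakePower32, RandomHorizontalFlip,
# RandomVerticalFlip, ColorJitter, RandomCrop, CenterCrop, Resize, ToTensor and
# Normalize, in that order):
#
# train_loader = DataLoader(dataset='train', data_dir_raw='raw', data_dir_exp='expert',
#                           csv_file='pairs.csv', root_dir='/data', image_size=256,
#                           resize_size=286, batch_size=8, shuffle=True,
#                           num_workers=4, dropLast=True).loader()
# for image1, image2, img_name in train_loader:
#     ...  # image1 is the raw image, image2 its (un)paired reference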
|
# -*- coding: utf-8 -*-
from .main import get_app, get_db
__all__ = [
    "get_app",
    "get_db",
]
|
data = open("input.txt", "r")
data = data.read().split(",")
check = True
index = 0
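# Memory layout: each instruction is four cells -- [opcode, src1, src2, dst] --
# where opcode "1" adds, "2" multiplies and "99" halts. For example, the program
# "1,0,0,0,99" reads cell 0 twice (1 + 1) and stores 2 back into cell 0.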
while check is True:
    section = data[index]
    if section == "99":  # opcode 99 halts the program
        check = False
        continue
    firstNum = int(data[int(data[index + 1])])
    secondNum = int(data[int(data[index + 2])])
    if section == "1":  # opcode 1: addition
        total = firstNum + secondNum
    if section == "2":  # opcode 2: multiplication
        total = firstNum * secondNum
    data[int(data[index + 3])] = str(total)
    index = index + 4
print("Your answer is... " + data[0]) |
"""Simulated annealing beta schedulers"""
import torch
import math
class BetaScheduler(object):
"""
Scheduler base class for the simulated annealing strategy.
Any beta cooling strategy should inherit from this class and implement the get_beta method.
"""
def __init__(self, init_beta):
self.init_beta = init_beta
self.beta = init_beta
self.iteration = 0
self.batch_size = None
def step(self, energies):
self.iteration += 1
self.beta = self.get_beta(energies)
def get_beta(self, energies):
raise NotImplementedError()
class ConstantBetaScheduler(BetaScheduler):
"""
A trivial cooling strategy where beta is kept constant.
"""
def __init__(self, init_beta):
super().__init__(init_beta)
def get_beta(self, energies):
return self.init_beta
class StepBetaScheduler(BetaScheduler):
"""
A simple cooling strategy where beta is increased by a factor gamma every step_size iterations.
"""
def __init__(self, init_beta, step_size, gamma):
super().__init__(init_beta)
self.step_size = step_size
self.gamma = gamma
def get_beta(self, energies):
return self.init_beta * self.gamma ** (self.iteration // self.step_size)
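class ExponentialBetaScheduler(BetaScheduler):
    """
    An example of the inheritance pattern described in BetaScheduler (not one of
    the original schedulers): beta grows geometrically on every iteration.
    """
    def __init__(self, init_beta, gamma):
        super().__init__(init_beta)
        self.gamma = gamma

    def get_beta(self, energies):
        return self.init_beta * self.gamma ** self.iteration

# Usage sketch -- the training loop and the energies tensor are hypothetical:
# scheduler = StepBetaScheduler(init_beta=0.1, step_size=100, gamma=2.0)
# for energies in training_batches:
#     scheduler.step(energies)
#     beta = scheduler.beta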
|
from pyUbiForge.misc.file_object import FileObjectDataWrapper
from pyUbiForge.misc.file_readers import BaseReader
class Reader(BaseReader):
file_type = '939B245D'
def __init__(self, file_object_data_wrapper: FileObjectDataWrapper):
file_object_data_wrapper.read_bytes(22)
file_object_data_wrapper.read_file() # gameplay surface nav type
count = file_object_data_wrapper.read_uint_32()
for _ in range(count):
file_object_data_wrapper.read_file()
file_object_data_wrapper.read_bytes(22)
file_object_data_wrapper.read_float_32()
file_object_data_wrapper.read_file()
file_object_data_wrapper.read_bytes(39)
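# Implied binary layout, reconstructed from the reads above: a 22-byte header,
# one embedded file (the gameplay surface nav type), a uint32 count followed by
# that many embedded files, then 22 more bytes, one float32, one more embedded
# file, and a final 39 bytes that this reader skips.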
|
#!/usr/bin/python3
# filename: runscripts.py
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
import pyperclip
from MyGUI import cleanup
from MyGUI import scp_to_from
from MyGUI import list_this
def callback_scp_from():
entry = pyperclip.paste()
scp_to_from.scp_from(entry)
messagebox.showinfo(message='Commands printed in Console')
def callback_scp_to():
entry = pyperclip.paste()
scp_to_from.scp_to(entry)
messagebox.showinfo(message='Commands printed in Console')
def callback_cleanup():
text = pyperclip.paste()
cleanup.clean_up_markup(text)
def callback_list():
mylist = pyperclip.paste()
list_this.list_no_quotes(mylist)
def callback_list_quotes():
mylist = pyperclip.paste()
list_this.list_quotes(mylist)
# create window
def main():
root = tk.Tk()
style = ttk.Style()
root.title('Danny\'s Tools')
# set styling
style.theme_use('default')
style.configure('TButton',
background='firebrick',
foreground='white smoke',
font='Helvetica 16',
width=21,
borderwidth=2)
style.map('TButton',
foreground=[('pressed', 'sea green'),
('active', 'firebrick')],
background=[('pressed', '!focus', 'cyan'),
('active', 'white smoke')],
relief=[('pressed', 'groove'),
('!pressed', 'raised')])
# add widgets with callbacks
    # pack() returns None, so there is nothing useful to keep in a variable
    ttk.Button(root, text="Copy From / Zip", command=callback_scp_from).pack()
    ttk.Button(root, text="Copy To", command=callback_scp_to).pack()
    ttk.Button(root, text="Cleanup Markup", command=callback_cleanup).pack()
    ttk.Button(root, text="Make List: No Quotes", command=callback_list).pack()
    ttk.Button(root, text='Make List: Quotes', command=callback_list_quotes).pack()
# run
root.mainloop()
if __name__ == "__main__":
main()
|
import torch
import librosa
import torchaudio
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from vctk import VCTK
from torch.utils.data import DataLoader
import torchfile  # used below in __main__ to load the precomputed spectrogram
from models import *
N_FFT = 512*4
n_iter=2000
def transform_stft(signal, pad=True):
D = librosa.stft(signal, n_fft=N_FFT)
S, phase = librosa.magphase(D)
S = np.log1p(S)
if(pad):
S = librosa.util.pad_center(S, 1700)
return S, phase
def reconstruction(S, phase):
exp = np.expm1(S)
comple = exp * np.exp(phase)
istft = librosa.istft(comple)
return istft
def load_audio(audio_path):
signal, fs = librosa.load(audio_path)
return signal, fs
def save_file(audio, phase, fs, filename, path = '../save/plots/preprocess/', save_audio = False):
matplotlib.pyplot.imsave(path+filename+'.png', audio[:, 5000:10000])
print("==> Saved Spectogram")
if(save_audio):
audio_res = reconstruction(audio, phase)
print(audio_res.shape)
librosa.output.write_wav(path+"audio/"+filename+".wav", audio_res, fs)
print("==> Saved Audio")
def inp_transform(inp):
inp = inp.astype(np.float32)
inp = inp.flatten()
inp, phase = transform_stft(inp, pad=False)
inp = torch.Tensor(inp)
inp = inp.unsqueeze(0)
inp = inp.unsqueeze(0)
return inp, phase
def test_preprocessing(audio_file, dir = "/home/nevronas/dataset/vctk/raw/"):
signal, fs = load_audio(dir+audio_file+".wav")
#signal=librosa.core.resample(signal,fs,44100)
print("Signal Size : ", signal.shape)
signal, phase = inp_transform(signal)
print("Processed Size : ", signal.shape)
signal = signal[0].numpy()
signal = signal[0]
save_file(signal, phase, fs, audio_file, save_audio = True)
def phase_restore(mag, random_phases, N=50):
    """Iteratively re-estimate a plausible phase for a magnitude spectrogram (Griffin-Lim style)."""
    p = np.exp(1j * (random_phases))
    for i in range(N):
        _, p = librosa.magphase(librosa.stft(
            librosa.istft(mag * p), n_fft=N_FFT))
    return p
if __name__ == '__main__':
test_preprocessing("vocals", "/home/nevronas/dataset/dualaudio/DSD100/Sources/Dev/076 - Little Chicago's Finest - My Own/")
test_preprocessing("other", "/home/nevronas/dataset/dualaudio/DSD100/Sources/Dev/076 - Little Chicago's Finest - My Own/")
test_preprocessing("drums", "/home/nevronas/dataset/dualaudio/DSD100/Sources/Dev/076 - Little Chicago's Finest - My Own/")
# test_preprocessing("p351_423")
    spectr = torchfile.load("/home/nevronas/dataset/dualaudio/DSD100/Sources/Dev/076 - Little Chicago's Finest - My Own/")
    S = np.zeros([N_FFT // 2 + 1, spectr.shape[1]])
    random_phase = S.copy()
    np.random.shuffle(random_phase)
    p = phase_restore((np.exp(S) - 1), random_phase, N=n_iter)
    # ISTFT
    y = librosa.istft((np.exp(S) - 1) * p)
    sr = 22050  # assumed sample rate (librosa's default for load)
    librosa.output.write_wav('../save/plots/preprocess/kuch_bhi.wav', y, sr, norm=False)
|
import ccxt
ym=None
yE=True
yz=object
yR=dir
yU=str
ys=input
yV=enumerate
ya=zip
yP=abs
yw=int
yd=len
yn=range
yY=max
yH=False
yh=float
yq=getattr
yp=map
yc=open
yl=round
Wc=ccxt.bittrex
Wp=ccxt.poloniex
import time
Wl=time.time
WX=time.sleep
import tensorflow as tf
Wf=tf.Session
Wb=tf.train
Wv=tf.float32
WC=tf.placeholder
WM=tf.Graph
import numpy as np
Wj=np.yl
Wu=np.subtract
Wx=np.set_printoptions
WT=np.stack
Wt=np.zeros
WN=np.squeeze
import socketio
import pause
WS=pause.minutes
WQ=pause.seconds
from flask import Flask,request,jsonify,session
We=request.sid
WL=request.json
from datetime import datetime,timedelta
Wk=datetime.now
yK=datetime.utcnow
WD=datetime.time
Wo=datetime.date
from flask_cors import CORS
import backtrader as bt
yW=bt.SignalStrategy
yI=bt.TimeFrame
yi=bt.num2date
yG=bt.sizers
yA=bt.Order
yr=bt.feeds
yF=bt.brokers
yJ=bt.ind
yg=bt.Cerebro
yB=bt.SIGNAL_LONGSHORT
from flask_socketio import SocketIO,emit
K=Flask(__name__)
F=SocketIO(K)
W=ym
y=yE
J=CORS(K)
class WH(yz):
def __init__(B,A,i):
B.exchanges=[A,i]
def WF(yR,U,a,f,M,KP):
with Wf(graph=WM())as G:
print(We)
F.emit('logs','Loading model...',namespace='/test',room=KP)
r=Wb.import_meta_graph('altmodel/1/netfile.meta')
r.restore(G,save_path='altmodel/1/netfile')
m=WC(Wv,shape=[3,11,1])
E=G.graph.get_operation_by_name('Adam')
z=G.graph.get_tensor_by_name('Softmax:0')
ys=G.graph.get_tensor_by_name('Placeholder:0')
i=G.graph.get_tensor_by_name("Placeholder_1:0")
i1=G.graph.get_tensor_by_name("Placeholder_2:0")
i2=G.graph.get_tensor_by_name("Placeholder_3:0")
R=[E,z]
U=U.reshape(1,3,11,31)
U=U/U[:,:,-1,0,ym,ym]
Wx(suppress=yE)
d2=Wt((1,11))
la="last w:"+' '+yU(a)
F.emit('logs',la,namespace='/test',room=KP)
        w=WN(G.run(z,feed_dict={ys:1,i:U,i1:a}))
w=w[-11:]
V=Wu(a,w).tolist()[0]
print(V)
t='Transaction Vector: '+yU(V)
F.emit('logs',t,namespace='/test',room=KP)
a=V
Wy(f,V,M,a,KP)
def Wy(f,V,M,a,KP):
print(M,'exchange')
print(V)
P=M.fetch_balance()
P=[P[x]['free']for x in f]
print(P)
w=[]
Ws=f
for n in Ws:
if n=='USDT':
n='BTC/USDT'
Y=M.fetch_ticker(n)
print(Y)
w.append(Y['info']['lowestAsk'])
else:
n=n+'/BTC'
Y=M.fetch_ticker(n)
print(Y)
w.append(Y['info']['lowestAsk'])
print(w)
H=[]
print(V)
for h,(n,c)in yV(ya(f,V)):
print(c)
if c>0:
if n=="USDT":
q="BTC/USDT"
                p=M.create_order(q,amount=c,price=w[h],side='buy',type='limit')
F.emit('logs',yU(p),namespace='/test',room=KP)
else:
q=n+'/BTC'
                p=M.create_order(q,amount=c,price=w[h],side='buy',type='limit')
F.emit('logs',yU(p),namespace='/test',room=KP)
if c<0:
c=yP(c)
if n=="USDT":
q="BTC/USDT"
                l=M.create_order(q,amount=c,price=w[h],side='sell',type='limit')
F.emit('logs',yU(l),namespace='/test',room=KP)
else:
q=n+'/BTC'
                l=M.create_order(q,amount=c,price=w[h],side='sell',type='limit')
F.emit('logs',yU(l),namespace='/test',room=KP)
WS(30)
U,X,f,M=WJ(M,a)
X=X.reshape(1,-1)
    b=WF('1',U,X,f,M,KP)
def WJ(M,KP,last_w=ym):
C=Wk()-timedelta(hours=15,minutes=30)
print(C)
C=C.strftime("%Y-%m-%d %H:%M:%S")
v=M.parse8601(C)
x=M.fetch_ohlcv('ETH/BTC',timeframe='30m',since=v,limit=37)
t=M.fetch_ohlcv('LTC/BTC',timeframe='30m',since=v,limit=37)
N=M.fetch_ohlcv('XRP/BTC',timeframe='30m',since=v,limit=37)
print(N)
u=M.fetch_ohlcv('BTC/USDT',timeframe='30m',since=v,limit=37)
print(u)
for li in u:
li[:]=[1/x for x in li]
T=M.fetch_ohlcv('ETC/BTC',timeframe='30m',since=v,limit=37)
j=M.fetch_ohlcv('DASH/BTC',timeframe='30m',since=v,limit=37)
S=M.fetch_ohlcv('XMR/BTC',timeframe='30m',since=v,limit=37)
Q=M.fetch_ohlcv('XEM/BTC',timeframe='30m',since=v,limit=37)
e=M.fetch_ohlcv('FCT/BTC',timeframe='30m',since=v,limit=37)
L=M.fetch_ohlcv('GNT/BTC',timeframe='30m',since=v,limit=37)
k=M.fetch_ohlcv('ZEC/BTC',timeframe='30m',since=v,limit=37)
f=[x,t,N,u,T,j,S,Q,e,L,k]
o=['ETH','LTC','XRP','USDT','ETC','DASH','XMR','XEM','FCT','GNT','ZEC']
F.emit('logs',yU(o),namespace='/test',room=KP)
D=-1
L=[]
for n in f:
D+=1
for KF in n:
KW=o[D]
Ky=KF[0]
KJ=KF[2]
KB=KF[3]
KA=KF[4]
L.append([KW,Ky,KJ,KB,KA])
import pandas as pd
yO=pd.DataFrame
df=yO(L,columns=['coin','date','low','high','close'])
df=df.drop(['date'],axis=1)
if last_w==ym:
last_w=[[-0.98176,0.018265,0.01821,0.018255,-0.981775,0.01824,0.01822,0.018235,0.01827003,0.018145,0.01816]]
Ki=df[df.coin=='ETH']
KI=df[df.coin=='LTC']
Kg=df[df.coin=='XRP']
Kr=df[df.coin=='USDT']
KG=df[df.coin=='ETC']
KO=df[df.coin=='DASH']
Km=df[df.coin=='XMR']
KE=df[df.coin=='XEM']
Kz=df[df.coin=='FCT']
KR=df[df.coin=='GNT']
KU=df[df.coin=='ZEC']
Ki=Ki.drop(['coin'],axis=1).iloc[-31:]
KI=KI.drop(['coin'],axis=1).iloc[-31:]
Kg=Kg.drop(['coin'],axis=1).iloc[-31:]
Kr=Kr.drop(['coin'],axis=1).iloc[-31:]
KG=KG.drop(['coin'],axis=1).iloc[-31:]
KO=KO.drop(['coin'],axis=1).iloc[-31:]
Km=Km.drop(['coin'],axis=1).iloc[-31:]
KE=KE.drop(['coin'],axis=1).iloc[-31:]
Kz=Kz.drop(['coin'],axis=1).iloc[-31:]
KR=KR.drop(['coin'],axis=1).iloc[-31:]
KU=KU.drop(['coin'],axis=1).iloc[-31:]
li=[Ki,KI,Kg,Kr,KG,KO,Km,KE,Kz,KR,KU]
for l in li:
print(l.shape)
U=WT((Ki.values,KI.values,Kg.values,Kr.values,KG.values,KO.values,Km.values,KE.values,Kz.values,KR.values,KU.values))
U=U.reshape(3,11,31)
return U,last_w,o,M
@K.route('/')
def WB():
return "grettings wanderer"
@F.on('rl',namespace='/test')
def WA(message):
print(message,'this is the message')
print(We)
F.emit('logs','STARTING BOT...',namespace='/test',room=We)
Ks=message['KEY']
KV=message['SECRET']
Ka={'apiKey':Ks,'secret':KV,'nonce':lambda:yU(yw(Wl()*1000000000))}
M=Wp(Ka)
KP=We
P=M.fetch_balance()
    F.emit('balances',yU(P),namespace='/test',room=We)
print(M.secret,M.apiKey)
Kd=Wk()
Kn=Kd-timedelta(hours=8,minutes=30)
U,X,f,M=WJ(M,KP)
F.emit('coins',yU(f),namespace='/test',room=We)
print(X)
b=WF('1',U,X,f,M,KP)
'''
BEGIN ARBITRAGE
'''
class Wh(yz):
def __init__(B):
B.exchange=ym
B.config=ym
B.value=ym
def Wi(a,b):
KY=((a-b)/a)*100
return KY
def WI(alist,wanted_parts=1):
KH=yd(alist)
return[alist[i*KH//wanted_parts:(i+1)*KH//wanted_parts]for i in yn(wanted_parts)]
def Wg(w,bases,yY,o,Fr,KD):
global W,y,KT
O=Wh()
O1=Wh()
WJ=[]
Kh=yd(w)
Kq=[]
for t in w[:Kh]:
print(t)
t=[c for item in t for c in item]
print(t)
TT=t[1::4]
Kq.append(t[2::4][0])
Kp=t[3::4]
for Kc,Fz in ya(TT,Kp):
try:
try:
Y=Kc.fetch_ticker(Fz)
WJ.append(Y['last'])
print((Y,'this is the ticker'))
except:
Y=Kc.fetch_ticker(Fz)
WJ.append(Y['info']['Last'])
print((Y,'this is the ticker'))
except:
print('something_wrong')
F.emit('logs',yE,namespace='/test',broadcast=yE)
F.emit('logs','One or more pairs unavailable',namespace='/test',broadcast=yE)
y=yH
print(WJ)
Kl=""
for p in WJ:
Ws=yU(p)
Kl+=Ws+'\n'
em='Prices($): '+Kl
F.emit('logs',em,namespace='/test',broadcast=yE)
WX(2)
if y==yE:
KX,Kf=WI(WJ,Kh)
for KM,(v,v1)in yV(ya(KX,Kf)):
print(v,v1)
D=Wi(v,v1)
print(D)
D=Wj(D,decimals=3)
F.emit('logs','Difference: '+yU(D)+'%'+' [Value 1: $'+yU(v)+' Value 2: $'+yU(v1)+']',namespace='/test',broadcast=yE)
WQ(3)
if yP(D)>yh(Fr):
if v>v1:
Kb=bases[KM]
print(Kb)
KC=Kq[1]
Kv=Kq[0]
Kx=KC.describe()['fees']
Kt=Kx['trading']['maker']
KN=Kx['trading']['taker']
wi=Kx['funding']['withdraw']
print(wi)
try:
Ku=Kx['funding']['withdraw'][Kb]
KT=v1+(v1*Kt)+Ku
Kj=Kb+'/USDT'
try:
W=Kv.fetch_deposit_address(Kb)
print(W)
U='Wallet Address for Transfer:'+yU(W)
F.emit('logs',U,namespace='/test',broadcast=yE)
except:
try:
W=Kv.create_deposit_address(Kb)
print(W)
W=W['address']
print(W)
U='Wallet Address for Transfer:'+yU(W)
F.emit('logs',U,namespace='/test',broadcast=yE)
except:
F.emit('logs',yE,namespace='/test',broadcast=yE)
F.emit('logs','Exchange does not allow wallet creation via API, or API down',namespace='/test',broadcast=yE)
continue
KS=(Wi(v,KT))
print((Kt,KN,Ku))
if KT and v>v1:
try:
KC.create_limit_buy_order(Kj,yY,v*.001)
WP=KC.fetch_balance()[Kb]
p="Starting buy order on"+yU(o[1])+'for'+yU(Kb)
F.emit('logs',p,namespace='/test',broadcast=yE)
KC.withdraw(Kb,WP,W)
WX(3)
KQ=Kv.fetch_balance()[Kb]
Wr(Kv,Kb,Kj,WP,KQ,o[0])
except:
F.emit('logs',"Problem parsing deposit address, or Not enough funds",namespace='/test',)
except:
Ku=Kx['funding']['withdraw']
D="Can't dynamically parse withdrawl fees, here are the currencies we can:"+yU(Ku)
F.emit('logs',D,namespace='/test',broadcast=yE)
elif v1>v:
Kb=bases[KM]
print(Kb)
KC=Kq[0]
Kv=Kq[1]
Kx=KC.describe()['fees']
Kt=Kx['trading']['maker']
KN=Kx['trading']['taker']
wi=Kx['funding']['withdraw']
print(wi)
try:
Ku=Kx['funding']['withdraw'][Kb]
KT=v+(v*Kt)+Ku
Kj=Kb+'/USDT'
try:
W=Kv.fetch_deposit_address(Kb)
W=W['address']
print(W)
U='Wallet Address for Transfer:'+yU(W)
F.emit('logs',U,namespace='/test',broadcast=yE)
except:
try:
W=Kv.create_deposit_address(Kb)
W=W['address']
print(W)
U='Wallet Address for Transfer:'+yU(W)
F.emit('logs',U,namespace='/test',broadcast=yE)
except:
F.emit('logs',yE,namespace='/test',broadcast=yE)
F.emit('logs','Exchange does not allow wallet creation via API, or API down',namespace='/test',broadcast=yE)
KS=(Wi(v,KT))
print((Kt,KN,Ku))
except:
Ku=Kx['funding']['withdraw']
D="Can't dynamically parse withdrawl fees, here are the currencies we can:"+yU(Ku)
F.emit('logs',yE,namespace='/test',broadcast=yE)
F.emit('logs',D,namespace='/test',broadcast=yE)
if KT and v1>KT*.01:
try:
KC.create_limit_buy_order(Kj,yY,v*.001)
WP=KC.fetch_balance()[Kb]
p="Starting buy order on"+yU(o[0])+'for'+yU(Kb)
F.emit('logs',p,namespace='/test',broadcast=yE)
KC.withdraw(Kb,WP,W)
WX(3)
KQ=Kv.fetch_balance()[Kb]
Wr(Kv,Kb,Kj,WP,KQ,o[1])
except:
F.emit('logs',yE,namespace='/test',broadcast=yE)
F.emit('logs',"Problem parsing deposit address, or Not enough funds",namespace='/test',broadcast=yE)
print(Kq)
WX(5)
Wg(w,bases,yY,o,Fr,KD)
return 'something'
Ke=0
def Wr(trader,Kb,Kj,amount,KL,exch):
global Ke
Ke=trader.fetch_balance()[Kb]
while Ke==KL:
Ke=trader.fetch_balance()[Kb]
E="funds arrived at"+yU(exch)
F.emit('logs',E,namespace='/test',broadcast=yE)
Kk=yU(Kb)+'/USDT'
Ww=trader.fetch_ticker(Kk)['last']*.001
trader.create_limit_sell_order(Kj,Ke,Ww)
A='Selling'+' '+yU(Kb)+' '+"to USDT"
F.emit('logs',A,namespace='/test',broadcast=yE)
@K.route('/balance_arbi',methods=['POST'])
def WG():
U=WL
KD,FK,FW,Kp,Fy=U["EXCHANGES"],U["KEYS"],U["SECRETS"],U["CURRENCY"],U["USD"]
KD=[item for items in KD for item in items.split(",")]
FK=[item for items in FK for item in items.split(",")]
FW=[item for items in FW for item in items.split(",")]
FJ=[]
for h,(Ks,KV)in yV(ya(FK,FW)):
if KD[h]=='bitfinex':
FB={'apiKey':Ks,'secret':KV,'nonce':lambda:yU(yw(Wl()*100000))}
FJ.append(FB)
else:
FB={'apiKey':Ks,'secret':KV,'nonce':lambda:yU(yw(Wl()*1000))}
FJ.append(FB)
FA=[]
    for i,c in yV(FJ):
if KD[i]=='bitfinex':
Fi=yq(ccxt,'bitfinex2')
FA.append(Fi(c))
else:
Fi=yq(ccxt,KD[i])
FA.append(Fi(c))
FI=[]
for ex in FA:
b=ex.fetch_balance()
c=[]
print(b)
for h,(Ks,value)in yV(b.items()):
try:
print(value)
WQ(3)
if 'free' in value.keys():
if value['free']>0.0:
Fg=' '+yU(Ks)+': '+yU('{0:.5f}'.format(value['free']))
c.append(Fg)
else:
c.append('Wallets are empty')
except:
print('here')
FI.append(c)
di={}
di.setdefault('exchangeA',[])
di.setdefault('exchangeB',[])
for i,bal in yV(FI):
if i==0:
di['exchangeA']=bal
else:
di['exchangeB']=bal
return jsonify(di)
@K.route('/arbitrage',methods=['POST'])
def WO():
F.emit('logs','STARTING BOT...',namespace='/test',broadcast=yE)
U=WL
KD,FK,FW,Kp,Fy,Fr=U["EXCHANGES"],U["KEYS"],U["SECRETS"],U["CURRENCY"],U["USD"],U["TRADE_PERC"]
KD=[item for items in KD for item in items.split(",")]
FK=[item for items in FK for item in items.split(",")]
FW=[item for items in FW for item in items.split(",")]
Kp=[item for items in Kp for item in items.split(",")]
FG=yd(KD)
FJ=[]
print(Fr)
FO=WH(KD[0],KD[1])
Fr=Fr[0]
print(KD,'exchanges')
for h,(Ks,KV)in yV(ya(FK,FW)):
if KD[h]=='bitstamp':
FB={'uid':'mzxy9253','apiKey':Ks,'secret':KV,'nonce':lambda:yU(yw(Wl()*1000))}
else:
FB={'apiKey':Ks,'secret':KV,'nonce':lambda:yU(yw(Wl()*1000000))}
FJ.append(FB)
FA=[]
    for i,c in yV(FJ):
Fi=yq(ccxt,KD[i])
FA.append(Fi(c))
Fm=[]
print(FA,'indicators')
FE=ym
for n,c in ya(KD,FJ):
print(c)
if n=='kraken':
FE=yF.CCXTBroker(exchange=n,currency='USD',config=c)
else:
FE=yF.CCXTBroker(exchange=n,currency='USDT',config=c)
Fm.append(FE)
w=[]
for i,c in yV(FA):
print(i)
Y=ym
for h,Fz in yV(Kp):
a=Fz
if 'kraken'==KD[1]:
Fz=yU(Fz)+'/USD'
Y=FA[i]
w.append([a,Y,c,Fz])
else:
Fz=yU(Fz)+'/USDT'
Y=FA[i]
w.append([a,Y,c,Fz])
FR=WI(w,FG)
print(FR)
t=Wg(FR,Kp,Fy,KD,Fr,FO)
'''
BEGIN Ema
'''
Ks=''
KV=''
FU=ym
@F.on('ema',namespace='/test')
class Wq(yW):
global FE,Fk,helper
global Fd
Fs=0
FV=(('stop_loss',0.1),('take_profit',0.2),('low',14),('high',90))
def Wm(B,txt,dt=ym):
global FE,Fk,Fd
''' Logging function for this strategy'''
dt=dt or B.datas[0].Wo(0)
Fa=B.datas[0].WD()
print('%s - %s, %s'%(dt.isoformat(),Fa,txt))
def __init__(B):
global FE,Fk,Fd,M
global FB
B.dataclose=B.datas[0].close
FP,Fw=yJ.EMA(period=B.p.low),yJ.EMA(period=B.p.high)
B.signal_add(yB,yJ.CrossUp(FP,Fw))
B.signal_add(yB,yJ.CrossDown(FP,Fw))
B.crossover=yJ.CrossOver(FP,Fw)
B.crossup=yJ.CrossUp(FP,Fw)
B.crossdown=yJ.CrossDown(FP,Fw)
B.Wd=yJ.MomentumOscillator(period=B.p.high)
if FB!={}:
print(yU(FB)+'this is it')
if M=='bitfinex':
Fd=yq(ccxt,'bitfinex2')
Fd.trades=0
Fd.orders=ym
elif M=='hitbtc':
Fd=yq(ccxt,'hitbtc2')
Fd.trades=0
Fd.orders=ym
else:
Fd=yq(ccxt,M)
Fd.trades=0
Fd.orders=ym
B.buyprice=ym
B.buycomm=ym
B.order=ym
B.signal=0
B.price_at_signal=0
B.trades=0
def WE(B):
if FB!={}:
Fd.trades=0
Fn=0
def Wz(B,trade):
global FE,Fk,Fd
if not trade.isclosed or Fd.trades:
return
Wm= B.Wm('OPERATION PROFIT, GROSS %.2f, NET %.2f'%(trade.pnl,trade.pnlcomm))
F.emit('logs',Wm,namespace='/test',broadcast=yE)
return Wm
def WR(B,order):
global FE,Fk,Fd
if order.status in[order.Margin,order.Rejected]:
pass
if order.status in[order.Submitted,order.Accepted]:
return
elif order.status==order.Cancelled:
Wm=B.Wm(' '.join(yp(yU,['CANCEL ORDER. Type :',order.info['name'],"/ DATE :",B.data.num2date(order.executed.dt).date().isoformat(),"/ PRICE :",order.executed.price,"/ SIZE :",order.executed.size,])))
F.emit('logs',Wm,namespace='/test',broadcast=yE)
return Wm
elif order.status==order.Completed:
if 'name' in order.info:
Wm=B.Wm("%s: REF : %s / %s / PRICE : %.3f / SIZE : %.2f / COMM : %.2f"%(order.info['name'],order.ref,B.data.num2date(order.executed.dt).date().isoformat(),order.executed.price,order.executed.size,order.executed.comm))
F.emit('logs',Wm,namespace='/test',broadcast=yE)
return Wm
else:
if order.isbuy():
FY=order.executed.price*(1.0-B.params.stop_loss)
FH=order.executed.price*(1.0+B.params.take_profit)
Fh=(FE.getcash()*0.5)
Fq=Fh/B.data.close[0]
Fp=Fd.sell(exectype=yA.StopTrailLimit,Ww=FY,size=Fq)
Fp.addinfo(name="STOP")
Fc=Fd.sell(exectype=yA.StopTrailLimit,Ww=FH,size=Fq,oco=Fp)
Fc.addinfo(name="PROFIT")
Wm=B.Wm("SignalPrice : %.3f Buy: %.3f, Stop: %.3f, Profit : %.3f"%(B.price_at_signal,order.executed.price,FY,FH))
F.emit('logs',Wm,namespace='/test',broadcast=yE)
return Wm
elif order.issell():
FY=order.executed.price*(1.0+B.params.stop_loss)
FH=order.executed.price*(1.0-B.params.take_profit)
Fl=(Fk.getcash()*0.5)
FX=Fl/B.data.close[0]*-1
Fp=Fd.buy(exectype=yA.StopTrailLimit,Ww=FY,size=FX)
Fp.addinfo(name="STOP")
Fc=Fd.buy(exectype=yA.StopTrailLimit,Ww=FH,size=FX,oco=Fp)
Fc.addinfo(name="PROFIT")
Wm=B.Wm("SignalPrice: %.3f Sell: %.3f, Stop: %.3f, Profit : %.3f"%(B.price_at_signal,order.executed.price,FY,FH))
F.emit('logs',Wm,namespace='/test',broadcast=yE)
return Wm
def WU(B):
global FE,Fk,Fd,FU
for U in B.datas:
print('*'*5,'NEXT:',yi(U.datetime[0]),U._name,U.yc[0],U.high[0],U.low[0],U.close[0],U.volume[0],yI.getname(U._timeframe),yd(U))
Ff=('*'*5,'NEXT:',yi(U.datetime[0]),U._name,U.yc[0],U.high[0],U.low[0],U.close[0],U.volume[0],yI.getname(U._timeframe),yd(U))
FM=""
Ky='Date: '+yU(yi(U.datetime[0]).strftime('%Y-%m-%d'))
yc='Open: '+yU(U.yc[0])
KJ='Low: '+yU(U.low[0])
KB='High: '+yU(U.high[0])
Fb='Volume: '+yU(U.volume[0])
FC=Ky+'\n'+yc+'\n'+KJ+'\n'+KB+'\n'+Fb
FM+=FC
F.emit('logs',FM,namespace='/test',broadcast=yE)
print('binanceUSDT Value: ',FE.getcash())
Fv='Exchange USDT: '+yU(FE.getcash())
Fx='BTC: '+yU(Fk.getcash())
FU='EMA Trend Value: '+yU(B.Wd[0])
Kl=""
Kl+=Fv+'\n'
Kl+=Fx+'\n'
F.emit('logs',Kl,namespace='/test',broadcast=yE)
print(B.Wd[0])
Fh=(FE.getcash()*0.5)
Fq=Fh
Fl=(Fk.getcash()*0.5)
FX=Fl
Ft=(Fq*B.data.close[0])*(1-0.2)
FH=(Fq*B.data.close[0])*(1+0.3)
FN=(Fq*B.data.close[0])*(1+0.2)
if not Fd.trades:
if B.crossup:
B.Wm('CrossUp')
Fd.create_order(symbol='BTC/USDT',type='LIMIT',side='BUY',amount=15,Ww=yl(B.data.close[0],1),params={'timeInForce':'GTC','quantity':1,'price':B.data.close[0]})
if B.Wd[0]<B.Wd[-1]and B.Wd[-2]:
B.Wm('Greedy CrossUp')
Fd.create_order(symbol='BTC/USDT',type='LIMIT',side='BUY',amount=15,Ww=yl(B.data.close[0],1),params={'timeInForce':'GTC','quantity':1,'price':B.data.close[0]})
elif B.crossdown:
B.Wm('Crossdown')
Fd.create_order(symbol='BTC/USDT',type='LIMIT',side='SELL',amount=.0018,Ww=yl(B.data.close[0],1),params={'timeInForce':'GTC','quantity':1,'price':B.data.close[0]})
if B.Wd[0]>B.Wd[-1]and B.Wd[-2]:
B.Wm('Greedy Crossdown')
Fd.create_order(symbol='BTC/USDT',type='LIMIT',side='SELL',amount=.0018,Ww=yl(B.data.close[0],1),params={'timeInForce':'GTC','quantity':1,'price':B.data.close[0]})
else:
return
@F.on('connect',namespace='/test')
def Ws():
print('connect + thats sid')
KP=We
F.emit('connected',namespace='/test',broadcast=yE)
KP='this is sid'+yU(KP)
F.emit('connected',KP,namespace='/test',broadcast=yE)
print(KP)
return KP
@K.route('/fries',methods=['GET'])
def WV():
Fu=Wc()
Fx=yU(Fu.fetch_ticker('BTC/USDT')['last'])
Ki=yU(Fu.fetch_ticker('ETH/USDT')['last'])
Kg=yU(Fu.fetch_ticker('XRP/USDT')['last'])
KI=yU(Fu.fetch_ticker('LTC/USDT')['last'])
FT=yU(Fu.fetch_ticker('BCH/USDT')['last'])
Fx=Fx[0:6]
Ki=Ki[0:6]
Kg=Kg[0:6]
KI=KI[0:6]
FT=FT[0:6]
p=[Fx,Ki,Kg,KI,FT]
return jsonify(p)
@K.route('/balances_rl',methods=['POST'])
def Wa():
FB=ym
print(yw(Wl()))
if WL['EXCHANGE']=='bitfinex':
FB={'apiKey':WL['API_KEY'],'secret':WL['API_SECRET'],'nonce':lambda:yU(yw(Wl()*10001))}
elif WL['EXCHANGE']=='hitbtc':
FB={'apiKey':WL['API_KEY'],'secret':WL['API_SECRET'],'nonce':lambda:yU(yw(Wl()*10001))}
elif WL['EXCHANGE']=='poloniex':
FB={'apiKey':WL['KEY'],'secret':WL['SECRET'],'nonce':lambda:yU(yw(Wl()*1000000000))}
else:
FB={'apiKey':WL['API_KEY'],'secret':WL['API_SECRET'],'nonce':lambda:yU(yw(Wl()*1000))}
Fi=ym
if WL['EXCHANGE']=='bitfinex':
Fi=yq(ccxt,'bitfinex2')
    elif WL['EXCHANGE']=='hitbtc':
Fi=yq(ccxt,'hitbtc2')
else:
Fi=yq(ccxt,WL['EXCHANGE'])
Fj=Fi(FB)
b=Fj.fetch_balance()
c=[]
print(b)
if WL['EXCHANGE']=='poloniex':
for Ks,value in b.items():
if 'free' in value.keys():
if value['free']>0.0:
Fg=' '+yU(Ks)+': '+yU('{0:.5f}'.format(value['free']))
c.append(Fg)
elif WL['EXCHANGE']=='bitfinex':
WQ(10)
for Ks,value in b.items():
print(value)
if value!=[]and value!={}:
Fg=yU(Ks)+': '+yU('{0:.5f}'.format(value['free']))
c.append(Fg)
else:
c='Wallets are empty or API Issue'
elif WL['EXCHANGE']=='hitbtc':
WQ(10)
for Ks,value in b.items():
print(value)
if value!=[]and value!={}:
Fg=yU(Ks)+': '+yU('{0:.5f}'.format(value['free']))
c.append(Fg)
else:
c='Wallets are empty or API Issue'
else:
for Ks,value in b.items():
if 'Balance' in value.keys():
if value['Balance']>0.0:
Fg=yU(Ks)+': '+yU('{0:.5f}'.format(value['Balance']))
c.append(Fg)
print(c)
return jsonify(c)
@K.route('/balances',methods=['POST'])
def WP():
FB=ym
print(yw(Wl()))
if WL['EXCHANGE']=='bitfinex':
FB={'apiKey':WL['API_KEY'],'secret':WL['API_SECRET'],'nonce':lambda:yU(yw(Wl()*10001))}
elif WL['EXCHANGE']=='hitbtc':
FB={'apiKey':WL['API_KEY'],'secret':WL['API_SECRET'],'nonce':lambda:yU(yw(Wl()*10001))}
elif WL['EXCHANGE']=='poloniex':
FB={'apiKey':WL['API_KEY'],'secret':WL['API_SECRET'],'nonce':lambda:yU(yw(Wl()*1000000000))}
else:
FB={'apiKey':WL['API_KEY'],'secret':WL['API_SECRET'],'nonce':lambda:yU(yw(Wl()*1000))}
Fi=ym
if WL['EXCHANGE']=='bitfinex':
Fi=yq(ccxt,'bitfinex2')
    elif WL['EXCHANGE']=='hitbtc':
Fi=yq(ccxt,'hitbtc2')
else:
Fi=yq(ccxt,WL['EXCHANGE'])
Fj=Fi(FB)
b=Fj.fetch_balance()
c=[]
print(b)
if WL['EXCHANGE']=='poloniex':
for Ks,value in b.items():
if 'free' in value.keys():
if value['free']>0.0:
Fg=' '+yU(Ks)+': '+yU('{0:.5f}'.format(value['free']))
c.append(Fg)
elif WL['EXCHANGE']=='bitfinex':
WQ(10)
for Ks,value in b.items():
print(value)
if value!=[]and value!={}:
Fg=yU(Ks)+': '+yU('{0:.5f}'.format(value['free']))
c.append(Fg)
else:
c='Wallets are empty or API Issue'
elif WL['EXCHANGE']=='hitbtc':
WQ(10)
for Ks,value in b.items():
print(value)
if value!=[]and value!={}:
Fg=yU(Ks)+': '+yU('{0:.5f}'.format(value['free']))
c.append(Fg)
else:
c='Wallets are empty or API Issue'
else:
for Ks,value in b.items():
if 'Balance' in value.keys():
if value['Balance']>0.0:
Fg=yU(Ks)+': '+yU('{0:.5f}'.format(value['Balance']))
c.append(Fg)
print(c)
return jsonify(c)
@F.on('/prices',namespace='/test')
def Ww():
Fu=Wc()
Fu.fetch_balance()
Fx=yU(Fu.fetch_ticker('BTC/USDT')['last'])
Ki=yU(Fu.fetch_ticker('ETH/USDT')['last'])
Kg=yU(Fu.fetch_ticker('XRP/USDT')['last'])
Fx=Fx[:6]
Ki=Ki[:6]
Kg=Kg[:6]
p=[Fx,Ki,Kg]
F.emit(p,namespace='/test',broadcast=yE)
return p
@K.route('/trend',methods=['GET'])
def Wd():
global FU
return jsonify(FU)
FS=ym
@F.on('runner',namespace='/test')
def Wn(FQ):
global cross,helper,M
global FE,Fk,FB,FS
while FQ['API_KEY']!='':
Ks=FQ['API_KEY']
KV=FQ['API_SECRET']
M=FQ['EXCHANGE']
F.emit('logs','Starting bot...',namespace='/test',broadcast=yE)
print(Ks,KV)
print('they are above')
if Ks!='':
print('got here')
Fe=yg()
FL=yK()-timedelta(minutes=240)
            if M=='poloniex' or M=='bitfinex':
FB={'apiKey':Ks,'secret':KV,'nonce':lambda:yU(yw(Wl()*1000000000))}
else:
FB={'apiKey':Ks,'secret':KV,'nonce':lambda:yU(yw(Wl()*1000))}
F.emit('ema',namespace='/test')
FE=yF.CCXTBroker(exchange=M,currency='USDT',config=FB)
FE=FE
F.emit('logs',yU(FE.getcash())+' - USDT BALANCE',namespace='/test',broadcast=yE)
Fk=yF.CCXTBroker(exchange=M,currency='BTC',config=FB)
F.emit('logs',yU(Fk.getcash())+' - BTC BALANCE',namespace='/test',broadcast=yE)
if M=='poloniex':
FS=yr.CCXT(exchange=M,symbol="BTC/USDT",timeframe=yI.Minutes,compression=5,config=FB)
elif M=='bitfinex':
FS=yr.CCXT(exchange=M,symbol="BTC/USD",timeframe=yI.Minutes,compression=5,config=FB)
elif M=='gateio':
FS=yr.CCXT(exchange=M,symbol="BTC/USD",config=FB)
else:
FS=yr.CCXT(exchange=M,symbol="BTC/USDT",timeframe=yI.Minutes,compression=1,config=FB)
Fo=FE.getcash()
Fe.adddata(FS)
Fe.addsizer(yG.PercentSizer,percents=10)
Fe.addstrategy(strategy=cross,stop_loss=0.1,take_profit=0.08,low=14,high=90)
print('gotem')
            Fe.run()
Fe.plot()
return
else:
return
@F.on('end_connection',namespace='/test')
def WY(KP,msg):
F.disconnect(KP,namespace='/test')
F.disconnect(KP)
print('disconnecting...')
F.emit('logs',"disconnecting...",namespace='/test',room=KP)
return 'Disconnected'
KT=0
Ke=0
if __name__=='__main__':
    F.run(K)
|
"""
Author : Lily
Date : 2018-09-18
QQ : 339600718
C.P.U. C.P.U. CPU-s
抓取思路:在初始页面抓取省份列表,做为参数,请求到具体的stores信息
locator_index : http://www.cpuchina.cn/index.php?controller=site&action=store_search
url(post,json,参数 keyword: 北京市) : http://www.cpuchina.cn/index.php?controller=ajax&mod=site&act=search_store
"""
import requests
import re
import datetime
import json
from lxml import etree
fileanme = "CPU-s" + re.sub('[^0-9]', '', str(datetime.datetime.now())) + ".csv"
f = open(fileanme, 'w', encoding='utf-8')
f.write('stor_id, store_name, region, store_address, tel,postcode, latlong, visiblity,\n')
index_url = "http://www.cpuchina.cn/index.php?controller=site&action=store_search"
store_url = 'http://www.cpuchina.cn/index.php?controller=ajax&mod=site&act=search_store'
provinces_html = requests.get(index_url).text
provinces_lxml = etree.HTML(provinces_html)
provinces = provinces_lxml.xpath('//div[@class="shopR_contL_tips"]/ul/li/text()')
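# Each province name is POSTed as the "keyword" parameter; the JSON response is
# assumed to look roughly like this (field names inferred from the CSV header
# written above):
# {"data": [{"stor_id": ..., "store_name": ..., "region": ..., ...}, ...]}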
for pro in provinces:
print(pro)
data = {"keyword":pro}
stores_html = requests.post(store_url, data=data).text
stores_json = json.loads(stores_html)
print(stores_json)
for store in stores_json["data"]:
print(store.keys())
for k, v in store.items():
v = str(v).replace(',', ',').replace('\n', '')
f.write(v + ',')
f.write('\n')
f.close()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import scipy.stats as st
import statsmodels.api as sm
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from statsmodels.stats import outliers_influence
from statsmodels.compat import lzip
#from descstats import MyPlot, Univa
import warnings
warnings.filterwarnings(action="ignore", module="sklearn",
message="^internal gelsd")
###############################################################
# Linear Regression Analysis
###############################################################
def linear_regression_analysis(linear_regression):
""" Compute and plot a complete analysis of a linear regression computed with Stats Models.
Args:
linear_regression (Stats Models Results): the result obtained with Stats Models.
"""
# Data
resid = linear_regression.resid_pearson.copy()
resid_index = linear_regression.resid.index
exog = linear_regression.model.exog
endog = linear_regression.model.endog
fitted_values = linear_regression.fittedvalues
influences = outliers_influence.OLSInfluence(linear_regression)
p = exog.shape[1] # Number of features
n = len(resid) # Number of individuals
    # Parameters
color1 = "#3498db"
color2 = "#e74c3c"
##############################################################################
    # Statistical tests #
##############################################################################
    # Homoscedasticity - Breusch-Pagan test
##########################################
    names = ['Lagrange multiplier statistic',
             'p-value', 'f-value', 'f p-value']
breusch_pagan = sm.stats.diagnostic.het_breuschpagan(resid, exog)
print(lzip(names, breusch_pagan))
    # Normality test - Shapiro-Wilk
###################################
print(f"Shapiro pvalue : {st.shapiro(resid)[1]}")
##############################################################################
    # Shape analyses #
##############################################################################
    # Histogram of residuals
##########################
data = resid
    data_filter = data[(data > -5) & (data < 5)]  # keep residuals strictly between -5 and 5
len_data = len(data)
len_data_filter = len(data_filter)
ratio = len_data_filter / len_data
fig, ax = plt.subplots()
plt.hist(data_filter, bins=20, color=color1)
plt.xlabel("Residual values")
plt.ylabel("Number of residuals")
plt.title(f"Histogramme des résidus de -5 à 5 ({ratio:.2%})")
    # Normal distribution vs residuals (QQ plot / normal probability plot)
#############################################################
data = pd.Series(resid).sort_values()
len_data = len(data)
normal = pd.Series(np.random.normal(size=len_data)).sort_values()
fig, ax = plt.subplots()
plt.scatter(data, normal, c=color1)
plt.plot((-4, 4), (-4, 4), c=color2)
plt.xlabel("Residuals")
plt.ylabel("Normal distribution")
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.title("Residuals vs Normal (QQ Plot)")
# Plot
plt.show()
def plot_sortie_acf(y_acf, y_len, pacf=False):
"représentation de la sortie ACF"
if pacf:
y_acf = y_acf[1:]
plt.figure(figsize=(14, 6))
plt.bar(range(len(y_acf)), y_acf, width=0.1)
plt.xlabel('lag')
plt.ylabel('ACF')
plt.axhline(y=0, color='black')
plt.axhline(y=-1.96/np.sqrt(y_len), color='b',
linestyle='--', linewidth=0.8)
plt.axhline(y=1.96/np.sqrt(y_len), color='b',
linestyle='--', linewidth=0.8)
plt.ylim(-1, 1)
plt.show()
return
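# Usage sketch for linear_regression_analysis (column names are hypothetical):
# model = ols("y ~ x1 + x2", data=df).fit()
# linear_regression_analysis(model)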
|
import sys
from .application import Application
application = Application()
application.apply(sys.argv[1:]) |
from pkg_resources import get_distribution
__version__ = get_distribution("betterproto").version
|
from setuptools import setup
setup(
name='mdat',
version='0.3.0',
packages=['mdat'],
url='https://github.com/ctsit/mdat',
license='Apache 2.0',
author='pbc',
author_email='ctsit@ctsi.ufl.edu',
description='A decision aid designed to select the best of two or more alternatives given responses to a list of criteria',
long_description=open('README.md').read(),
install_requires=[
"jsonschema",
],
entry_points={
'console_scripts': [
'mdat = mdat.__main__:main',
],
},
tests_require=[
"pytest",
"jsonschema",
],
test_suite='tests',
)
|
#!/usr/bin/env python
import infoblox
import requests
import os
requests.packages.urllib3.disable_warnings()
querystring = {
"_return_fields" : ["host"],
"ipv4addr": os.environ['nicIP_0']
}
headers = {}
url = "https://10.110.1.45/wapi/v1.0/record:host_ipv4addr"
response = requests.request("GET", url, headers=headers, params=querystring, verify=False, auth=('admin', 'infoblox'))
# response.json()
fqdn = response.json()[0]['host']
iba_api = infoblox.Infoblox('10.110.1.45', 'admin', 'infoblox', '1.6', 'default', 'default', False)
try:
    # Delete the host record for the fqdn resolved above
    ip = iba_api.delete_host_record(fqdn)
except Exception as e:
    print(e)
|
from filecache import filecache
import tvdb_api
import time
from nab.database import Database
from nab.season import Season
from nab.episode import Episode
_t = tvdb_api.Tvdb()
@filecache(7 * 24 * 60 * 60)
def show_search(term):
return _t.search(term)
def show_get(show):
try:
if "tvdb" in show.ids:
return _t[int(show.ids["tvdb"])]
# search for longest names first (avoid searching for initials)
for title in reversed(sorted(show.titles, key=len)):
result = show_search(title)
if len(result):
return _t[int(result[0]["id"])]
except (tvdb_api.tvdb_error, KeyError):
# deal with errors where no match found
# also deals with KeyError bug in tvdb API
pass
TVDB.log.debug("Couldn't find %s" % show)
return None
class TVDB(Database):
def get_show_titles(self, show):
data = show_get(show)
if data is None:
return []
titles = [data["seriesname"]]
try:
titles += show_search(data["seriesname"])[0]["aliasnames"]
except KeyError:
pass
return titles
def get_show_ids(self, show):
data = show_get(show)
if data is None:
return {}
return {"tvdb": data["id"]}
def get_banner(self, show):
return show_get(show)['banner']
def get_seasons(self, show):
data = show_get(show)
if data is None:
return []
return [Season(show, senum) for senum in data]
def get_episodes(self, season):
data = show_get(season.show)[season.num]
if data is None:
return []
episodes = []
for epnum in data:
airstr = data[epnum]["firstaired"]
if airstr is not None:
try:
aired = time.mktime(time.strptime(airstr, "%Y-%m-%d"))
except OverflowError:
aired = 0 # Doctor Who is REALLY old
else:
aired = None
title = data[epnum]["episodename"]
if title:
# only add titled episodes
episodes.append(Episode(season, epnum, title, aired))
return episodes
TVDB.register("tvdb")
|
# Program to find the time taken by SHA-1 algorithm for collisions for different number of bits
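# Rough birthday-bound expectation (illustrative): comparing ch hex characters
# gives 16**ch possible strings, so a collision is expected after roughly
# sqrt(pi/2 * 16**ch) hashes -- about 2e4 hashes for ch = 7.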
# importing required libraries
import random
import hashlib
import time
import xlwt
from xlwt import Workbook
from xlrd import open_workbook
from xlutils.copy import copy
# Open excel sheet for storing data
rb = open_workbook("data-dict.xls")
wb = copy(rb)
sheet1 = wb.get_sheet(0)
for ch in range(1, 14): # ch denotes the number of characters or hexadecimal digits to be compared
sheet1.write(4 * (ch) - 3, 0, "Number of characters = " + str(ch))
sheet1.write(4 * (ch) - 2, 0, "POSITIONS")
sheet1.write(4 * (ch) - 1, 0, "TIME") # printing the required row headings
for i in range(10):
randlist = random.sample(range(40), ch) # generating a list of ch random positions
print(randlist)
total_time = 0
count = 100 # count = number of trials for each set of positions
for j in range(count):
collission = {} # initialising the dictionary
start_time = time.time()
while 1:
input = random.randint(1000, 100000000000000000000000000) # random number generator
# hashlib generates the hash value and stores the digest in result
# in this program, the algorithm used is SHA-1
# the algorithm can be changed by replacing 'sha1' in 'hashlib.sha1' with 'md5', 'sha256' or 'sha512'
result = hashlib.sha1(str(candidate).encode()).hexdigest()
# Generating a string by concatenating characters from the randomly chosen positions
hashstr = ""
for k in randlist:
hashstr = hashstr + str(result)[k]
if hashstr in collisions: # checks if the substring had been generated earlier
print(candidate) # printing collision details to the output console
print(collisions[hashstr])
print("Digit " + str(ch) + " collision " + str(j + 1) + " in " + str(i + 1))
print(time.time() - start_time)
break
else:
collisions[hashstr] = candidate # record the new substring in the dictionary
final_time = (time.time() - start_time) # calculating the time taken for collision
total_time = total_time + final_time
total_time = total_time / count
print(total_time)
# code for writing the data values into the excel sheet
sheet1.write(4 * (ch) - 2, i + 1, str(randlist))
sheet1.write(4 * (ch) - 1, i + 1, total_time)
wb.save('data-dict.xls')
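# A rough cross-check for the timings above (an addition, not part of the
# measurement): by the birthday bound, a collision over ch hex characters,
# i.e. a space of 16**ch values, is expected after about sqrt(pi/2 * 16**ch)
# hashes, so each extra character multiplies the work by roughly a factor of 4.
import math
for ch in range(1, 14):
    expected = math.sqrt(math.pi / 2 * 16 ** ch)
    print("chars = %2d: ~%.0f hashes expected before a collision" % (ch, expected))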
|
import urllib2
from bs4 import BeautifulSoup
url = "https://www.packtpub.com/all"
response = urllib2.urlopen(url)
soup = BeautifulSoup(response,"html.parser")
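# The soup object is built but never used above; a minimal follow-up sketch
# (the tag choice is an assumption about the page markup, not verified
# against packtpub.com) that prints the text of every link on the page:
for link in soup.find_all('a'):
    text = link.get_text(strip=True)
    if text:
        print(text)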
|
import django_filters
from visits.models import Visit
class VisitFilter(django_filters.FilterSet):
patient_id__first_name = django_filters.CharFilter(lookup_expr='iexact')
patient_id__last_name = django_filters.CharFilter(lookup_expr='iexact')
class Meta:
model = Visit
fields = {'visit_date' : ['gt', 'lt', 'exact'],
'patient_id' : ['exact'],
'patient_id__first_name' : [],
'patient_id__last_name' : []}
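# A minimal usage sketch (the view and template names are assumptions, not
# part of this app): bind the filter to the request's GET parameters and
# read the filtered queryset from .qs.
from django.shortcuts import render

def visit_list(request):
    visit_filter = VisitFilter(request.GET, queryset=Visit.objects.all())
    return render(request, 'visits/visit_list.html',
                  {'filter': visit_filter, 'visits': visit_filter.qs})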
|
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
if getattr(settings, 'CMS_TEMPLATES', None): # avoid AttributeError when the setting is absent
cms_templates = settings.CMS_TEMPLATES
else:
cms_templates = (
('default.html', 'Default'),
)
class ExternalDocsBranch(models.Model):
origin = models.CharField(
max_length=200,
help_text=_('External branch location, ie: lp:snappy/15.04 or '
'https://github.com/ubuntu-core/snappy.git'))
branch_name = models.CharField(
max_length=200,
help_text=_('For use with git branches, ie: "master" or "15.04" '
'or "1.x".'),
blank=True)
post_checkout_command = models.CharField(
max_length=100,
help_text=_('Command to run after checkout of the branch.'),
blank=True)
active = models.BooleanField(default=True)
def __str__(self):
if self.branch_name:
return "{} - {}".format(self.origin, self.branch_name)
return "{}".format(self.origin)
class Meta:
verbose_name = "external docs branch"
verbose_name_plural = "external docs branches"
class ExternalDocsBranchImportDirective(models.Model):
external_docs_branch = models.ForeignKey(ExternalDocsBranch)
import_from = models.CharField(
max_length=150,
help_text=_('File or directory to import from the branch. '
'Ie: "docs/intro.md" (file) or '
'"docs" (complete directory), etc.'),
blank=True)
write_to = models.CharField(
max_length=150,
help_text=_('Article URL (for a specific file) or article namespace '
'for a directory or a set of files.'),
blank=True)
advertise = models.BooleanField(
default=True,
help_text=_('Should the imported articles be listed in the '
'navigation? Default: yes.'),
)
template = models.CharField(
max_length=50,
default=cms_templates[0][0],
choices=cms_templates,
help_text=_('Django CMS template to use for the imported articles. '
'Default: {}'.format(cms_templates[0][0])),
)
def __str__(self):
return "{} -- {}".format(self.external_docs_branch,
self.import_from)
class ImportedArticle(models.Model):
url = models.CharField(
max_length=300,
help_text=_('URL of article, e.g. snappy/guides/security'),
)
branch = models.ForeignKey(ExternalDocsBranch)
last_import = models.DateTimeField(
_('Datetime'), help_text=_('Datetime of last import.'))
def __str__(self):
return '{} -- {} -- {}'.format(
self.url, self.branch, self.last_import)
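# A minimal sketch (illustrative values only, not part of the original app)
# of how a branch and an import directive are wired together:
def create_example_branch():
    branch = ExternalDocsBranch.objects.create(
        origin='https://github.com/ubuntu-core/snappy.git',
        branch_name='master')
    ExternalDocsBranchImportDirective.objects.create(
        external_docs_branch=branch,
        import_from='docs',
        write_to='snappy/guides')
    return branch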
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import pickle
model_scores = {}
def predict_data(unscaled_predictions, original_data, flag=True):
""" Data prediction based on out-of-scale predictions and the original data
Args:
param unscaled_predictions: unscaled predictions returned by scaling function
param original_data: original training data
Returns:
return: Dataframe with the sum of last year's sales and their monthly dates
"""
result_list = []
dates = list(original_data[-12:].date)
sales = list(original_data[-12:].sales)
for x in range(0,len(sales)):
result_dict = {}
sum_predict = unscaled_predictions[x][0] + sales[x] if flag else unscaled_predictions[x] + sales[x]
result_dict['predict_value'] = int(sum_predict)
result_dict['date'] = dates[x]
result_list.append(result_dict)
df_result = pd.DataFrame(result_list)
return df_result
def load_original():
""" Loads training data from train.csv
Args: None
Return: Dataframe with data contained in data/train.csv
"""
original = pd.read_csv('data/train.csv')
original = original.drop(columns = ['store', 'item'])
original.date = pd.to_datetime(original.date, errors='coerce')
original = original.groupby(pd.Grouper(key='date', freq='1M')).sum()
original = original.reset_index()
original.date = original.date.dt.strftime("%Y-%m-01")
original.date = pd.to_datetime(original.date, format='%Y-%m-%d', errors='coerce')
return original
def plot_results(results, original_data, model_name):
""" Prints the results in graphical format
Args:
param results: Dataframe with results data to be implemented
param original_data: Dataframe with original data
param model_name: Model to be implemented
Return: None
"""
fig, ax = plt.subplots(figsize=(15,5))
sns.lineplot(x='date', y='sales', data=original_data, ax=ax,
label='Original', color='mediumblue')
sns.lineplot(x='date', y='predict_value', data=results, ax=ax,
label='Predicted', color='red')
ax.set(xlabel = "Date",
ylabel = "Sales",
title = f"{model_name} Sales Forecasting Prediction")
ax.legend()
sns.despine()
plt.savefig(f'model_output/{model_name}_forecast.png')
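# A minimal end-to-end sketch (the prediction values are made up; a real run
# would pass a trained model's unscaled output, and it assumes data/train.csv
# and a model_output/ directory exist):
if __name__ == '__main__':
    original = load_original()
    # pretend the model predicted a flat +500 sales change for each of the 12 months
    fake_predictions = [[500] for _ in range(12)]
    results = predict_data(fake_predictions, original, flag=True)
    plot_results(results, original, 'Baseline')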
|
# LEVEL 7
# http://www.pythonchallenge.com/pc/def/oxygen.html
# png code in grayscale
from PIL import Image
im = Image.open("data/oxygen.png")
pix = im.load()
w, h = im.size
for y in range(h):
repeated_pixels = 0
for x in range(w - 1):
repeated_pixels += 1
left_pixel = pix[x, y]
right_pixel = pix[x + 1, y]
if left_pixel != right_pixel:
break
if repeated_pixels > 3:
index = 0
seq_sizes = [0]
for x in range(w - 1):
seq_sizes[index] += 1
left_pixel = pix[x, y]
right_pixel = pix[x + 1, y]
if left_pixel != right_pixel:
if seq_sizes[index] < 3:
del seq_sizes[index]
break
seq_sizes.append(0)
index += 1
message_width = sum(seq_sizes)
pixels = []
message = ''
x = 0
while x < message_width:
pixels.append(pix[x, y])
message += chr(pix[x, y][0])
x += 7
print(message)
break
# the decoded message lists these ASCII codes; converting them yields the next level's keyword
next_codes = [105, 110, 116, 101, 103, 114, 105, 116, 121]
print(''.join([chr(code) for code in next_codes]))
|
from django.conf.urls import include, url
from django.views.generic import TemplateView
from accounts.views import UserRegistrationView
from django.contrib.auth.views import login,logout
urlpatterns = [
url(r'^new-user/$', UserRegistrationView.as_view(), name='user_registration'),
url(r'^login/$', login, {'template_name': 'login.html'},name='login'),
url(r'^logout/$', logout, {'next_page': '/'}, name='logout'),
] |
# Using __slots__
# Normally, once we define a class, we can bind arbitrary attributes and methods to it, which is very flexible
class Student(object):
pass
s = Student()
s.name = 'Test'
print(s.name)
# We can also try binding a method to a single instance
def set_age(self, age):
self.age = age
from types import MethodType # MethodType is needed to bind a method to an instance
s.set_age = MethodType(set_age, s) # bind the method to this one instance
s.set_age(25) # call the instance method
print(s.age)
# To make a method available to every instance, bind it to the class instead
def set_score(self, score): # define a score-setting method
self.score = score
Student.set_score = set_score # bind the method to the Student class
s = Student()
s.set_score(100)
print(s.score)
# Static languages can do the above easily too, but dynamic binding lets us add functionality to a class while the program is running, which is hard in a static language
'''
__slots__
Suppose we want to restrict instance attributes, e.g. only allow name and age on Student instances.
Python lets us define a special __slots__ variable in the class to limit which attributes its instances may bind.
Note that __slots__ only applies to instances of the class where it is defined; it has no effect on subclasses.
'''
class student(object):
__slots__ = ('name', 'age') # a tuple of the attribute names that may be bound
# test
s = student() # create a new instance
s.name = 'Michael' # bind attribute 'name'
s.age = 25 # bind attribute 'age'
# s.score = 99 # binding attribute 'score' would fail
print(s.name)
print(s.age)
# print(s.score) # this would raise an error, since anything other than name and age is disallowed
# test the effect on a subclass
class GraduateStudent(student):
pass
g = GraduateStudent()
g.score = 99 # the parent's restriction is not inherited by the subclass
print(g.score)
class Test1(object):
def get_score(self):
return self._score
def set_score(self, value):
if not isinstance(value, int): # isinstance restricts the parameter's type
raise ValueError('TestError')
if value < 0 or value > 100:
raise ValueError('score must be between 0 and 100!')
self._score = value
a = Test1()
a.set_score(88)
print(a.get_score())
'''
Python's built-in @property decorator turns a method into an attribute access.
This way we can both validate the parameter and still access the class's variable with simple attribute syntax.
'''
class Test2(object):
@property
def score(self): # define the property getter
return self._score
# Adding @property turns a getter method into an attribute; @property itself
# also creates the companion decorator @score.setter, which turns a setter
# method into attribute assignment, giving us a controllable attribute
@score.setter # define the property's setter
def score(self, value):
if not isinstance(value, int):
raise ValueError('TestError')
if value < 0 or value > 100:
raise ValueError('score must be between 0 and 100')
self._score = value
b = Test2()
b.score = 22
print(b.score)
'''
Multiple inheritance addresses the explosion in the number of classes as a
design hierarchy deepens: a subclass simply inherits from more than one
parent class, gaining the functionality of all of them.
'''
class Animal(object):
def pris(self):
print('This is a bird')
class flying(object):
def pri(self):
print('It can fly')
class hawk(Animal, flying): # hawk inherits Animal and flying, so it can use both of their methods
def __init__(self):
print('That is a hawk')
d = hawk()
d.pris()
d.pri() |
import socket
from IPy import IP
from colorama import Fore
class PortScan:
def __init__(self, target_addr, target_port):
self.target_addr = target_addr
self.target_port = target_port
def scan_target(self):
for port in range(1, self.target_port):
self.scan_port(port)
def check_addr(self):
try:
IP(self.target_addr)
return self.target_addr
except ValueError:
return socket.gethostbyname(self.target_addr)
def scan_port(self, target_port):
try:
converted_addr = self.check_addr()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
sock.connect((converted_addr, target_port))
try:
banner = sock.recv(1024).decode('utf8').strip('\n').strip('\r')
print(f'[*] Open port: = [{Fore.GREEN}{target_port}{Fore.WHITE}] : {banner}')
except (OSError, UnicodeDecodeError): # port open but no readable banner
print(f'[*] Open port: [{Fore.GREEN}{target_port}{Fore.WHITE}]')
sock.close()
except OSError: # connection refused, timed out or unreachable; treat the port as closed
pass
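# A hedged sketch (an addition, not part of the original scanner) of a faster
# variant: with a 0.5s timeout per port the sequential scan is slow, so run
# the independent scan_port calls concurrently on a thread pool.
from concurrent.futures import ThreadPoolExecutor

def scan_target_threaded(scanner, workers=50):
    # each scan_port call opens and closes its own socket, so the calls share no state
    with ThreadPoolExecutor(max_workers=workers) as pool:
        # map submits every port eagerly; exiting the with-block waits for completion
        list(pool.map(scanner.scan_port, range(1, scanner.target_port)))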
def main():
target = input('[+] Enter target address: ')
ports = int(input('[+] Input amount of ports (100 - first 100 ports): '))
scanner = PortScan(target, ports)
scanner.scan_target()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse, redirect
from time import gmtime, strftime
from django.contrib import messages
from django.utils.crypto import get_random_string
# the index function is called when root is visited
# def index(request):
# response = "Hello, This is your time page"
# return HttpResponse(response)
# def yourMethodFromUrls(request):
# context = {
# "somekey":"somevalue"
# }
# return render(request,'appname/page.html', context)
def index(request):
context = {
"time": strftime("%Y-%m-%d %I:%M %p %z", gmtime())
}
return render(request,'clock/index.html', context)
|
from Tkinter import *
from tkMessageBox import *
from tkFileDialog import *
from SQLinjector import *
import time
import websitedata
def checkvuln(wsite,name):
inject=[]
global result
for x in name:
sqlinject=x
inject.append(wsite.replace("FUZZ",sqlinject))
showinfo('Wait', "Checking website for vulnerability, please wait")
result=injector(inject)
process()
def deepXploit():
global columns
global version
global curr_user
global steal_usr
global passwrd
columns=detect_columns(wsite)
version=detect_version(wsite)
curr_user=detect_user(wsite)
steal_usr,passwrd=steal_users(wsite)
def xploit():
pro.destroy()
xploy=Tk()
showinfo('Exploit', "Website is under deep exploitation, please wait...")
xploy.geometry('1024x577')
xploy.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
xploy.title("SQL Injection Vulnerability Scanner")
Label(xploy,image=pic).grid(row=0,column=0,rowspan=20,columnspan=10)
Label(xploy,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=0,column=0,columnspan=10)
Label(xploy,text='Results:', font='Harrington 16 bold underline' ,bg='white').grid(row=2,column=0)
Label(xploy,text='No. of columns:-', font='Harrington 14 bold underline' ,bg='white').grid(row=6,column=0)
Label(xploy,text='Version:-', font='Harrington 14 bold underline' ,bg='white').grid(row=7,column=0)
Label(xploy,text='Current Database User:-', font='Harrington 14 bold underline' ,bg='white').grid(row=8,column=0)
## Label(xploy,text='Usernames & passwords:-', font='Harrington 14 bold underline' ,bg='white').grid(row=10,column=0)
for x in columns:
Label(xploy, text=x,font='Harrington 14 bold underline' ,bg='white').grid(row=6,column=(1+(int(columns.index(x)))))
## xploy.mainloop()
Label(xploy, text=version,font='Harrington 14 bold underline',bg='white').grid(row=7,column=1)
Label(xploy, text=curr_user,font='Harrington 14 bold underline' ,bg='white').grid(row=8,column=1)
## for x in steal_usr:
## Label(xploy,text=x,font='Harrington 14 bold underline' ,bg='white').grid(row=10,column=(1+(int(steal_usr.index(x)))))
## xploy.mainloop()
## for x in passwrd:
## Label(xploy,text=x,font='Harrington 14 bold underline' ,bg='white').grid(row=11,column=(1+(int(passwrd.index(x)))))
## xploy.mainloop()
xploy.mainloop()
def report():
p1.destroy()
global rep
rep=Tk()
rep.geometry('1024x577')
rep.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
rep.title("SQL Injection Vulnerability Scanner")
Label(rep,image=pic).grid(row=0,column=0,rowspan=10,columnspan=10)
Label(rep,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=0,column=0,columnspan=10)
Button(rep, text="back", bg='white', command=repback).grid(row=1, column=8)
Label(rep,text='Report:', font='Harrington 16 bold underline' ,bg='white').grid(row=2,column=0)
rep.mainloop()
def repback():
rep.destroy()
Home()
def process():
global pro
p1.destroy()
pro=Tk()
pro.geometry('1024x577')
pro.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
Label(pro,image=pic).grid(row=0,column=0,rowspan=20,columnspan=10)
pro.title("SQL Injection Vulnerability Scanner")
Label(pro,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=1,column=0,columnspan=10)
Label(pro,text='Processing:', font='Harrington 16 bold underline' ,bg='white').grid(row=2,column=0,sticky='W')
Label(pro,text='Testing errors:-', font='Harrington 14 bold ' ,bg='white').grid(row=3,column=0,sticky='W')
'''def testres(wsite,name):
inject=[]
for z in name:
y=(wsite.replace("FUZZ",z))
Label(pro,text='' , bg='white').grid(row=4,column=0,sticky='EWNS')
Label(pro,text=y, bg='white').grid(row=4,column=0,sticky='EW')
break'''
global i
i=int(0)
for x in result:
i=int(i+1)
Label(pro,text=x,font='Harrington 12 bold',bg='white').grid(row=5+i,column=0,sticky='NS')
if (len(result) != 0):
showinfo('Results','Website is vulnerable to sql injection')
Button(pro,text='Exploit',bg='white',command=lambda:[deepXploit(),xploit(),]).grid(row=10,column=5,sticky='W')
else :
showinfo('Results','Website is not vulnerable to sql injection')
pro.mainloop()
def checkres():
if not result:
showinfo('Results',"Not vulnerable")
def Home():
global p1
p1=Tk()
global s
p1.geometry('1024x577')
p1.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
Label(p1,image=pic).grid(row=0,column=0,rowspan=10,columnspan=10)
p1.title("SQL Injection Vulnerability Scanner")
Label(p1,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=0,column=0,columnspan=10)
Label(p1,text='Website:', font='Harrington 14 bold' ,bg='white').grid(row=2,column=0)
s=Entry(p1,bg='LightCyan4', cursor='dot')
s.grid(row=2,column=1,columnspan=5,sticky='EW')
Label(p1,text='Injection file select:', font='Harrington 14 bold' ,bg='white').grid(row=8,column=0)
def fileselect():
injectionfile=askopenfilename(title = "Select injection dictionary file",filetypes = (("text files","*.txt"),))
f = open(injectionfile, "r")
global name
name = f.read().splitlines()
print(name)
def webget():
global wsite
wsite=str(s.get()+"FUZZ")
print(wsite)
Button(p1, text='select file', command=fileselect, bg='white', cursor='dot').grid(row=8, column=1)
Button(p1, text="Check",bg='white',command=lambda:[webget(),checkvuln(wsite,name),]).grid(row=6,column=8, sticky='EWNS')
p1.mainloop()
Home()
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
import mock
from pants.bin.goal_runner import EngineInitializer
class GraphInvalidationTest(unittest.TestCase):
def _make_setup_args(self, *specs):
options = mock.Mock()
options.target_specs = specs
return dict(options=options)
def setup_legacy_product_graph(self, *specs):
kwargs = self._make_setup_args(*specs)
with EngineInitializer.open_legacy_graph(**kwargs) as (_, _, scheduler):
return scheduler.product_graph
def test_invalidate_fsnode(self):
product_graph = self.setup_legacy_product_graph('3rdparty/python::')
initial_node_count = len(product_graph)
self.assertGreater(initial_node_count, 0)
product_graph.invalidate_files(['3rdparty/python/BUILD'])
self.assertLess(len(product_graph), initial_node_count)
def test_invalidate_fsnode_incremental(self):
product_graph = self.setup_legacy_product_graph('3rdparty/python::')
node_count = len(product_graph)
self.assertGreater(node_count, 0)
# Invalidate the '3rdparty/python' Path's DirectoryListing first by touching a random file.
for filename in ('3rdparty/python/CHANGED_RANDOM_FILE', '3rdparty/python/BUILD'):
product_graph.invalidate_files([filename])
node_count, last_node_count = len(product_graph), node_count
self.assertLess(node_count, last_node_count)
|
from datetime import datetime, timedelta
from flask import Flask, request
from flask.helpers import make_response
from flask.json import jsonify
from flask_sqlalchemy import SQLAlchemy
import psycopg2
import jwt
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'POSTGRESQL_URL' # secret url
app.config['SECRET_KEY'] = 'CHANGE_ME' # needed by jwt.encode below; placeholder, set a real secret in production
db = SQLAlchemy(app)
conn = psycopg2.connect(host="localhost", port=5432,
database="db", user="postgres", password="password")
cur = conn.cursor()
cur.execute("SELECT * FROM Users")
query_results = cur.fetchall()
@app.route('/login')
def login():
auth = request.authorization
for i in range(cur.rowcount):
if auth and auth.username == query_results[i][1]:
if auth.password == query_results[i][2]:
token = jwt.encode({'user': auth.username, 'exp': datetime.utcnow(
) + timedelta(minutes=30)}, str(app.config['SECRET_KEY']))
sql = "UPDATE Users SET token = %s WHERE id = %s"
val = (token, query_results[i][0])
cur.execute(sql, val)
conn.commit()
return jsonify({'token': token})
return make_response('Could not verify!', 401, {'WWW-Authenticate': 'Basic realm="Login required"'})
@app.route('/protected')
def protected():
cur.execute("SELECT * FROM Users")
query_results = cur.fetchall()
token = request.args.get('token')
for j in range(len(query_results)): # check every user row instead of a hard-coded count
if token == query_results[j][3]:
return "<h1>Hello, token which is provided is correct </h1>"
return "<h1>Hello, Could not verify the token </h1>"
if __name__ == '__main__':
app.run(debug=True)
|
import os
from fabric.api import sudo, run, prompt, abort, local, settings, hide
from fabric.tasks import Task
from state import myenv, load_proj_env
def lpath_exists(path):
'''
use this instead of os.path.exists when testing whether a local path exists;
it respects the working-directory context set by lcd()
'''
with settings(hide('warnings'), warn_only=True):
return local("test -e '%s'" % path).succeeded
def path_exists(path):
#if files.exists(rel, verbose=True):
#FIXME: no idea why the above command does not work; the error was:
#Warning: run() encountered an error (return code 1) while executing 'test -e "$(echo /usr/local/nds/releases/20120510140214)"'
#run(...., shell=False) will get correct output
with settings(hide('warnings'), warn_only=True):
return run("test -e '%s'" % path).succeeded
def mine(*args, **kw):
#TODO: support myenv in shell running, for sudo,run,etc.
return sudo(*args, user=myenv.owner, **kw)
def is_owner(path):
uname = run('uname').stdout
if uname == 'FreeBSD':
return mine('id -u').stdout == run("stat -f'%%u' %s" % path).stdout
else: #if uname == 'Linux':
return mine('id -u').stdout == run("stat -c'%%u' %s" % path).stdout
def is_python_module(path):
return path_exists(os.path.join(path, '__init__.py'))
def symlink_python_module(path):
from distutils import sysconfig
lib = sysconfig.get_python_lib()
target = os.path.join(lib, os.path.basename(path))
if path_exists(target):
sudo('rm %s' % target)
sudo('ln -s %s %s' % (path, target))
class ProjTask(Task):
'''
base class for project oriented task
'''
proj = None
def set_proj(self, proj):
self.proj = proj
def run(self, proj=None, *args, **kw):
if proj:
self.set_proj(proj)
if not self.proj:
proj = prompt('No project found. Please specify project:')
if proj:
self.set_proj(proj)
else:
abort('Invalid project name:%s' % proj)
load_proj_env(self.proj)
self.work(*args, **kw)
def work(self, *args, **kw):
raise NotImplementedError
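# A minimal sketch of a concrete ProjTask (the service-restart command is an
# assumption): subclasses only implement work(); run() already handles
# project selection and loading the project environment.
class RestartTask(ProjTask):
    '''restart the project's service as its owning user'''
    name = 'restart'

    def work(self, *args, **kw):
        # myenv has been populated by load_proj_env() in ProjTask.run
        mine('supervisorctl restart %s' % self.proj)

restart = RestartTask()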
|
__all__ = ["evaluate","evaluate_utility"] |
from .gripper import *
|
from textblob import Sentence
from random import choice
import mysql_utils as mysql
from intents import intents
from datefinder import find_dates
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
def clean_sentence(sentence):
cleaned_sentence = ''
sentence = Sentence(sentence)
sentence = sentence.lower().words
sentence.sort()
for word in sentence:
cleaned_sentence += word.lemmatize() + ' '
return Sentence(cleaned_sentence.strip())
def score(sentences):
for i in range(len(sentences)):
sentences[i] = str(clean_sentence(sentences[i]))
x = vectorizer.fit_transform(sentences).toarray()
return cosine_similarity([x[0]], [x[1]])
def response(message, userinfo):
dates = [date for date in find_dates(message)]
journal = mysql.journal_from_dates(userinfo['id'], dates)
if journal is not None:
return journal, True
max_score = 0
best_class = {}
for intent in intents():
for pattern in intent['patterns']:
pattern = str(clean_sentence(pattern))
curr_score = score([message, pattern])[0][0]
if curr_score > max_score:
max_score = curr_score
best_class = intent
if max_score == 0:
best_class = intents()[-1]
if best_class['tag'] in list(userinfo.keys()):
response = choice(best_class['responses']).replace('<' + best_class['tag'] + '>', userinfo[best_class['tag']])
else:
response = choice(best_class['responses'])
return response, False
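# A quick usage sketch (the sentences are illustrative): score() returns a
# 1x1 cosine-similarity matrix for a pair of cleaned sentences, so values
# range from 0 (no shared words) to 1 (identical bags of lemmas).
if __name__ == '__main__':
    print(score(['what is my schedule today', 'show my schedule for today']))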
|
num = int(input("Enter a number: "))
nums = []
for count in range(1, num + 1):
    print(count, end=" ")  # print the current term
    if count < num:
        print("+", end=" ")  # separator after every term except the last
    nums.append(count)
print("=", sum(nums))
|