import requests
from bs4 import BeautifulSoup as bs
import time
from datetime import datetime
import pandas as pd
import os
def gshp_link_by_query(query):
# Find a researcher's Google Scholar homepage link by a query
kw = '+'.join(query.split())
search_link = 'https://scholar.google.com/citations?hl=en&view_op=search_authors&mauthors=' + kw
r = requests.get(search_link)
if r.status_code == 200:
soup = bs(r.content, 'html.parser')
if len(soup.select('.gsc_1usr')) == 1:
return 'https://scholar.google.com' + soup.select('.gsc_1usr')[0].find('a')['href']
elif len(soup.select('.gsc_1usr')) == 0:
print(f'No scholar was found given the input {query}')
return None
# If multiple scholars are found given the query, a manual inspection is required
elif len(soup.select('.gsc_1usr')) > 1:
            print(f'More than one scholar was found given the input {query}.'
                  f'\nSee: {search_link}')
return None
else:
print(f"A bad request. {r.status_code} Client Error.")
return None
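# Illustrative use of gshp_link_by_query (the query below is hypothetical, and a
# real call depends on Google Scholar being reachable without rate limiting):
#
#   url = gshp_link_by_query('Jane Doe Stanford')
#   if url:
#       print(url)  # e.g. https://scholar.google.com/citations?user=...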
def spw_filter(string):
"""Filter stopwords in a given string --> Titles of researchers' publications"""
stopwords = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're",
"you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his',
'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them',
'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these',
'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do',
'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while',
'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before',
'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under',
'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
'same', 'so', 'than', 'too', 'very', 'can', 'cannot', 'will', 'just', "don't", 'should', "should've",
'now', "aren't", "couldn't", "didn't", "doesn't", "hadn't", "hasn't", "haven't", "isn't", "mightn't",
"mustn't", "needn't", "shan't", "shouldn't", "wasn't", "weren't", "won't", "wouldn't"]
return [tk.strip() for tk in string.lower().split() if tk not in stopwords]
def ngram(tokens, n):
start = 0
end = len(tokens) - n + 1
res = []
for i in range(start, end):
res.append(' '.join(tokens[i:i+n]))
return res
def filtered_ngram(list_of_str, n):
ngram_res = []
if n == 1:
for st in list_of_str:
ngram_res.extend(spw_filter(st))
else:
tks = [spw_filter(st) for st in list_of_str]
        # To avoid creating unwanted n-grams that span different titles
for tk in tks:
if len(tk) >= n:
ngram_res.extend(ngram(tk, n))
return ngram_res
def counter(alist, most_common=None):
    if not isinstance(alist, list):
        alist = list(alist)
counts = dict()
for i in alist:
counts[i] = counts.get(i, 0) + 1
return sorted(counts.items(), key=lambda x: x[1], reverse=True)[:most_common]
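# Example of the title-processing helpers above (outputs shown for illustration):
#
#   tokens = spw_filter('A Study of Deep Learning for NLP')
#   # -> ['study', 'deep', 'learning', 'nlp']
#   ngram(tokens, 2)
#   # -> ['study deep', 'deep learning', 'learning nlp']
#   counter(['a', 'b', 'a'], most_common=1)
#   # -> [('a', 2)]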
class GSAnalyzer:
def __init__(self, wd, res_dir):
self.wd = wd
self.res_dir = res_dir if res_dir.endswith('/') else res_dir + '/'
def loading_gs_homepage(self, url, loading_sp=1, pages_to_load=5):
        # Load a researcher's GS homepage fully, or up to a given number of pages
self.wd.get(url)
self.url = url
# the publication list in the previous page before loading
pre_page_plist = self.wd.find_elements_by_class_name('gsc_a_tr')
# keep loading the webpage
show_more = self.wd.find_element_by_xpath('//*[@id="gsc_bpf_more"]/span/span[2]')
click_times = 0
while click_times < pages_to_load:
show_more.click()
click_times += 1
time.sleep(loading_sp)
cur_page_plist = self.wd.find_elements_by_class_name('gsc_a_tr')
if len(pre_page_plist) == len(cur_page_plist):
self.plist = cur_page_plist
break
else:
pre_page_plist = cur_page_plist
def list_of_texts_by_xpath(self, xpath):
targets = self.wd.find_elements_by_xpath(xpath)
return [target.text for target in targets]
def gs_basic_info(self):
# basic info: name, affiliation, homepage (if any), gs_url, specialization,
# all-time citation, past 5 year citation, date recorded
self.gs_name = self.wd.find_element_by_xpath('//*[@id="gsc_prf_in"]').text
try:
affiliation = self.wd.find_element_by_xpath('//*[@id="gsc_prf_i"]/div[2]/a').text
except:
affiliation = 'Unknown'
try:
homepage = self.wd.find_element_by_xpath('//*[@id="gsc_prf_ivh"]/a').get_attribute('href')
except:
homepage = 'Not available'
specialization = '; '.join(self.list_of_texts_by_xpath('//*[@id="gsc_prf_int"]/a'))
all_citation = self.wd.find_element_by_xpath('//*[@id="gsc_rsb_st"]/tbody/tr[1]/td[2]').text
past5y_citation = self.wd.find_element_by_xpath('//*[@id="gsc_rsb_st"]/tbody/tr[1]/td[3]').text
self.date = datetime.now().strftime('%Y-%m-%d')
return [self.gs_name, affiliation, homepage, self.url, specialization, all_citation, past5y_citation, self.date]
def citation_by_year(self):
# Return the citation number over the years
r = requests.get(self.url)
soup = bs(r.content, 'html.parser')
years = [int(y.text) for y in soup.select('#gsc_rsb_cit > div > div.gsc_md_hist_w > div > span')]
citations = [int(c.text) for c in soup.select('#gsc_rsb_cit > div > div.gsc_md_hist_w > div > a > span')]
return zip(years, citations)
def gs_publication_info(self):
        # Publication info: title, link (for more details), author(s),
        # citation, year, source (place of publication)
titles_links = self.wd.find_elements_by_xpath('//*[@id="gsc_a_b"]/tr/td[1]/a')
self.titles = [title.text for title in titles_links]
self.links = ['https://scholar.google.com' + link.get_attribute('data-href') for link in titles_links]
self.authors = self.list_of_texts_by_xpath('//*[@id="gsc_a_b"]/tr/td[1]/div[1]')
self.citations = self.list_of_texts_by_xpath('//*[@id="gsc_a_b"]/tr/td[2]')
self.years = self.list_of_texts_by_xpath('//*[@id="gsc_a_b"]/tr/td[3]')
self.source = self.list_of_texts_by_xpath('//*[@id="gsc_a_b"]/tr/td[1]/div[2]')
return zip(self.titles, self.links, self.authors, self.citations, self.years, self.source)
def titles_ngram_analysis(self, n_gram=2, most_used=20):
# Return unigram and specified ngram analysis of the titles
if not self.titles:
self.titles = self.list_of_texts_by_xpath('//*[@id="gsc_a_b"]/tr/td[1]/a')
unigram = filtered_ngram(self.titles, 1)
ngram_ = filtered_ngram(self.titles, n_gram)
fdist_ug = counter(unigram)
fdist_ug = fdist_ug[:most_used] if len(fdist_ug) >= most_used else fdist_ug
fdist_ng = counter(ngram_)
fdist_ng = fdist_ng[:most_used] if len(fdist_ng) >= most_used else fdist_ng
if len(fdist_ng) < len(fdist_ug):
for i in range(len(fdist_ug) - len(fdist_ng)):
fdist_ng.append(('', ''))
splitter = [''] * len(fdist_ug)
return zip(fdist_ug, splitter, fdist_ng)
def num_of_pub_by_year(self):
        # Return the number of publications per year
if not self.years:
self.years = self.list_of_texts_by_xpath('//*[@id="gsc_a_b"]/tr/td[3]')
years = [str(y) for y in self.years]
return counter(years)
def authors_analysis(self):
        # 1. The contribution of the researcher of interest to the publications
        #    he/she authored, shown as the frequency of each author position the
        #    researcher held.
        # 2. The list of co-authors, including the researcher him/herself.
if not self.authors:
self.authors = self.list_of_texts_by_xpath('//*[@id="gsc_a_b"]/tr/td[1]/div[1]')
auth_list = []
last_name = self.gs_name.split()[-1].strip()
contribution_index = []
for au in self.authors:
l = [a.strip() for a in au.split(',')]
auth_list.extend(l)
if last_name in au:
contribution_index.extend([l.index(i) + 1 for i in l if last_name in i])
else:
contribution_index.append('N/A')
ctr_fdist = counter(contribution_index)
ctr_fdist = [('Which author', 'Count')] + [('#_' + str(i), j) for i, j in ctr_fdist]
ctr_fdist += [('# of Pubs', len(self.authors)), ('', ''), ('Author', 'Count')]
auth_fdist = counter(auth_list)
return ctr_fdist + auth_fdist
def gs_profile_database(self, info):
# The basic info of the scholar searched will be aggregated into an excel file
path = self.res_dir + 'Aggregated GS Database.xlsx'
if os.path.exists(path):
df = pd.read_excel(path)
df.loc[df.shape[0]] = info
            print(f'File {path} updated!')
else:
df = pd.DataFrame(columns=[
'Name', 'Affiliation', 'Homepage', 'GScholarUrl', 'Specialization',
'Citation(All)', 'Citation(Past 5 Year)', 'Date Recorded'
])
df.loc[0] = info
            print(f'File {path} created!')
df.to_excel(path, index=False)
def gs_profile_generator(self, n_gram=2, most_used=20, add2database=True):
"""
:param n_gram: for the analysis of the publication titles
:param most_used: for the analysis of the publication titles
:param add2database: whether the researcher's basic info is saved in the aggregated database
:return: The researcher's GS profile and by default the Aggregated GS Database (basic info)
"""
info = self.gs_basic_info()
basic_info = pd.Series(info, index=[
'Name', 'Affiliation', 'Homepage', 'GScholarUrl', 'Specialization',
'Citation(All)', 'Citation(Past 5 Year)', 'Date Recorded'
])
citation_by_y = pd.DataFrame(self.citation_by_year(), columns=['Year', 'Citation'])
publication_info = pd.DataFrame(self.gs_publication_info(), columns=[
'Title', 'Link', 'Author', 'Citation', 'Year', 'Source'
])
titles_ngram = pd.DataFrame(self.titles_ngram_analysis(n_gram, most_used), columns=[
'Unigram', '', f'{n_gram}-gram'
])
pub_years = pd.DataFrame(self.num_of_pub_by_year(), columns=['Year', 'Count'])
authors = pd.DataFrame(self.authors_analysis())
if add2database:
self.gs_profile_database(info)
writer = pd.ExcelWriter(f'{self.res_dir}{self.gs_name} GSProfile_{self.date}.xlsx')
basic_info.to_excel(writer, sheet_name='Basic Info', header=False)
citation_by_y.to_excel(writer, sheet_name='Citation by Year', index=False)
publication_info.to_excel(writer, sheet_name='Publication Info', index=False)
titles_ngram.to_excel(writer, sheet_name='Titles Ngram', index=False)
pub_years.to_excel(writer, sheet_name='Pub Num by Year', index=False)
authors.to_excel(writer, sheet_name='Authors Analysis', header=False, index=False)
writer.save()
print(f'File {self.res_dir}{self.gs_name} GSProfile_{self.date}.xlsx saved!')
def gs_profiles_generators_by_urls(self, urls, loading_sp=1, pages_to_load=5, n_gram=2, most_used=20, add2database=True):
        if not isinstance(urls, list):
            # A single URL string was passed instead of a list; process it directly
try:
self.loading_gs_homepage(urls, loading_sp=loading_sp, pages_to_load=pages_to_load)
self.gs_profile_generator(n_gram=n_gram, most_used=most_used, add2database=add2database)
except:
self.close()
else:
for url in urls:
try:
self.loading_gs_homepage(url)
self.gs_profile_generator(n_gram=n_gram, most_used=most_used, add2database=add2database)
except:
print(f'Nothing found in {url}')
self.close()
def gs_profiles_generators_by_queries(self, queries, loading_sp=1, pages_to_load=5, n_gram=2, most_used=20, add2database=True):
        if not isinstance(queries, list):
url = gshp_link_by_query(queries)
if url is not None:
self.loading_gs_homepage(url, loading_sp=loading_sp, pages_to_load=pages_to_load)
self.gs_profile_generator(n_gram=n_gram, most_used=most_used, add2database=add2database)
else:
urls = []
for query in queries:
url = gshp_link_by_query(query)
if url is not None:
urls.append(url)
self.gs_profiles_generators_by_urls(urls, loading_sp=loading_sp, pages_to_load=pages_to_load, n_gram=n_gram, most_used=most_used, add2database=add2database)
def close(self):
self.wd.quit()
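if __name__ == '__main__':
    # Minimal usage sketch. Assumptions: Selenium 3.x with a local chromedriver
    # (GSAnalyzer relies on the legacy find_element_by_* API), and the query
    # below is purely illustrative.
    from selenium import webdriver
    os.makedirs('./results', exist_ok=True)
    wd = webdriver.Chrome()
    analyzer = GSAnalyzer(wd, res_dir='./results/')
    analyzer.gs_profiles_generators_by_queries('Jane Doe Stanford')
    analyzer.close()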
|
""" App widget functionalities """
from app_head import get_head
from app_body import get_body
from app_page import set_page
from app_loading import get_loading_head, get_loading_body
from app_ogp import set_ogp
from app_title import get_title
from app_metatags import get_metatags
from bootstrap import get_bootstrap
from font_awesome import get_font_awesome
from app_navbar import navbar
from googleanalytics import get_googleanalytics
from app_stylesheet import get_stylesheet
from app_cookie import get_sa_theme
from sa_func import redirect_if_not_logged_in
#-------------------------------------------------------------------------------
# Insert here module of the widget to load
#-------------------------------------------------------------------------------
from tradingview_chart import get_tradingview_chart
from tradingview_ecocal import get_tradingview_ecocal
from tradingview_fxcross import get_tradingview_fxcross
from tradingview_fxheatmap import get_tradingview_fxheatmap
from tradingview_screener import get_tradingview_screener
from tradingview_watchlist import get_tradingview_watchlist
from trades_tab import get_trades_box
from news_feed import get_newsfeed
#-------------------------------------------------------------------------------
def get_widget_content(burl, nonavbar, funcname, noflexheight):
""" xxx """
box_content = ''
box_class = 'box'
box_vh = 'height:100vh;width:100vw;margin-left:-15px;'+\
'overflow-x:hidden;overflow-y:hidden;'
if nonavbar is None:
box_class = 'box-top'
box_vh = 'height:89vh;'
if noflexheight is not None:
box_vh = ''
refresh_once = ''+\
'<script>'+\
'window.onresize = function(){ location.reload(); };'+\
'if(!window.location.hash) {'+\
' window.location = window.location + "#";'+\
' window.location.reload();'+\
'}'+\
'</script>'
box_content = refresh_once +\
'<div class="'+ box_class +'"></div>' +\
' <div style="'+ box_vh +'">'+\
eval(funcname)+\
' </div>'
return box_content
def get_widget_page(appname,
burl,
nonavbar,
funcname,
refresh_in_second,
noflexheight,
terminal):
""" xxx """
return_data = ''
navbarcontent = ''
metarefresh = ''
if nonavbar is None:
navbarcontent = navbar(burl, 0, terminal)
if refresh_in_second is not None:
metarefresh = '<meta http-equiv="refresh" content="'+ str(refresh_in_second) +'">'
return_data = get_head(get_loading_head() +\
get_googleanalytics() +\
get_title(appname) +\
metarefresh +\
get_metatags(burl) +\
redirect_if_not_logged_in(burl, '') +\
set_ogp(burl, 1, '', '') +\
get_bootstrap(get_sa_theme(), burl) +\
get_font_awesome() +\
get_stylesheet(burl))
return_data = return_data + get_body(get_loading_body(), navbarcontent +\
get_widget_content(burl,
nonavbar,
funcname,
noflexheight),'')
return_data = set_page(return_data)
return return_data
|
"""
__/\\\\\\\\\\\\______________________/\\\\\\\\\\\____/\\\________/\\\_
_\/\\\////////\\\__________________/\\\/////////\\\_\/\\\_______\/\\\_
_\/\\\______\//\\\________________\//\\\______\///__\/\\\_______\/\\\_
_\/\\\_______\/\\\_____/\\\\\______\////\\\_________\/\\\_______\/\\\_
_\/\\\_______\/\\\___/\\\///\\\_______\////\\\______\/\\\_______\/\\\_
_\/\\\_______\/\\\__/\\\__\//\\\_________\////\\\___\/\\\_______\/\\\_
_\/\\\_______/\\\__\//\\\__/\\\___/\\\______\//\\\__\//\\\______/\\\__
_\/\\\\\\\\\\\\/____\///\\\\\/___\///\\\\\\\\\\\/____\///\\\\\\\\\/___
_\////////////________\/////_______\///////////________\/////////_____
Created by Tomáš Sandrini
"""
import time
import functools
def time_usage(func):
"""
Prints time usage of a given function
"""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
beg_ts = time.time()
retval = func(*args, **kwargs)
end_ts = time.time()
print("elapsed time: %f" % (end_ts - beg_ts))
return retval
return wrapper
def trackcalls(func):
"""
Checks whether a function has been called
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
wrapper.has_been_called = True
return func(*args, **kwargs)
wrapper.has_been_called = False
return wrapper
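if __name__ == '__main__':
    # Illustrative use of the decorators defined above.
    @time_usage
    def slow_sum(n):
        return sum(range(n))

    @trackcalls
    def greet(name):
        return "Hello, %s" % name

    slow_sum(10 ** 6)               # prints "elapsed time: ..."
    greet("world")
    print(greet.has_been_called)    # True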
|
import sys
def load(f):
return f.read()
def build(f):
loaded = load(f)
loaded = loaded.replace('mov', '10')
loaded = loaded.replace('eax', '01')
loaded = loaded.replace(',', '')
loaded = loaded.replace('\t', ' ')
loaded = loaded.replace('0x', '')
loaded = loaded.replace('int', '20')
try:
bf = open(sys.argv[1][:-4] + '_compiled.vmc', 'w')
except:
bf = open('hello_compiled.vmc', 'w')
    bf.write(loaded)
    bf.close()
try:
f = open(sys.argv[1], 'r')
except:
f = open('hello.vm', 'r')
build(f)
f.close()
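# Illustrative input file (hello.vm), inferred from the substitutions above; the
# exact dialect is an assumption:
#
#   mov eax, 0x4
#   int 0x80
#
# which build() rewrites to the following hello_compiled.vmc:
#
#   10 01 4
#   20 80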
|
"""Contains classes to extract text data and to encode text corpora"""
import pdb
class CharEncoder():
"""
Contains data on an encoded text corpus with labelled characters, including unique characters and
mappings to/from characters to integers.
"""
def __init__(self, corpus):
"""
Args:
            corpus (str): the full text to encode; unique characters are
                extracted from it to build the integer mappings
"""
self.chars = tuple(set(corpus))
self.n_chars = len(self.chars)
self.int2char = dict(enumerate(self.chars))
self.char2int = {value: key for key, value in self.int2char.items()}
def label_sequences(self, text_sequences):
#this may be called "vectorizing?
return [[self.char2int[char] for char in sequence] for sequence in text_sequences]
def extract_shakespeare_data(path = "data/t8.shakespeare.txt"):
"""
Load the MIT online Shakespeare corpus from a text file.
Args:
path (str): path to Shakespare text file
Returns:
cleaned_text (str): entire cleaned text stripped of header/notes
"""
with open(path) as f:
text = f.read()
cleaned_text = ""
skip = False
for line in text.split("\n")[244:-1]:
if line[:2] == "<<":
skip = True
elif line[-2:] == ">>":
skip = False
continue
if skip or line == "":
continue
line = line+"\n"
cleaned_text += line
return cleaned_text
def extract_kjv_data(path = "data/kjv.txt"):
"""
Load the King James Version of the Bible.
"""
with open(path) as f:
text = f.read()
text = text[996:-18730]
return text
def make_sequences(text, sequence_length=100):
"""
Split a text into sequences of the same length in characters.
"""
n_sequences = len(text) // sequence_length
sequences = []
for i in range(0, n_sequences):
sequence = text[i*sequence_length : (i+1)*sequence_length]
sequences.append(sequence)
return sequences
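if __name__ == "__main__":
    # Illustrative pipeline (assumes data/t8.shakespeare.txt is present).
    text = extract_shakespeare_data()
    sequences = make_sequences(text, sequence_length=100)
    encoder = CharEncoder(text)
    labelled = encoder.label_sequences(sequences[:2])
    print(encoder.n_chars, len(labelled[0]))  # vocabulary size, 100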
|
"""
Created on 17 Apr 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdControlReceiver(object):
"""unix command line handler"""
def __init__(self):
"""
Constructor
"""
self.__parser = optparse.OptionParser(usage="%prog [-r] [-e] [-v]", version="%prog 1.0")
# optional...
self.__parser.add_option("--receipt", "-r", action="store_true", dest="receipt", default=False,
help="print receipt to stdout")
self.__parser.add_option("--echo", "-e", action="store_true", dest="echo", default=False,
help="echo data to stdout")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
@property
def receipt(self):
return self.__opts.receipt
@property
def echo(self):
return self.__opts.echo
@property
def verbose(self):
return self.__opts.verbose
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdControlReceiver:{receipt:%s, verbose:%s}" % (self.receipt, self.verbose)
|
import keras
from keras.preprocessing import image as image_utils
from imagenet_utils import decode_predictions
from imagenet_utils import preprocess_input
from vgg16 import VGG16
import numpy as np
import os
import argparse
#import cv2
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--image", required=True, help='path to the input image')
# args = vars(ap.parse_args())
#orig = cv2.imread(args["image"])
def predict_image(image_name):
image_path = os.path.join(os.getcwd(),'images',image_name)
print("[INFO] loading and preprocessing image...")
image = image_utils.load_img(image_path, target_size=(224, 224))
image = image_utils.img_to_array(image)
image = np.expand_dims(image, axis=0)
image = preprocess_input(image)
print(image.shape)
# load the VGG16 network
print("[INFO] loading network...")
model = VGG16(weights="imagenet")
# classify the image
print("[INFO] classifying image...")
preds = model.predict(image)
#report = decode_predictions(preds)
#print(report)
(inID, label, probability) = decode_predictions(preds)[0][0]
# display the predictions to our screen
print("ImageNet ID: {}, Label: {}".format(inID, label))
return label
#return report
# cv2.putText(orig, "Label: {}".format(label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
# cv2.imshow("Classification", orig)
# cv2.waitKey(0)
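if __name__ == "__main__":
    # Illustrative call; "dog.jpg" is a hypothetical file expected under ./images/.
    print(predict_image("dog.jpg"))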
|
#!/usr/bin/env python
#
# Copyright (c) 2012, JT Olds <hello@jtolds.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Pants
http://www.pants-lang.org/
IR tests
"""
__author__ = "JT Olds"
__author_email__ = "hello@jtolds.com"
from unittest import TestCase
from ast.parse import parse
from ir.convert import convert
class ConversionTests(TestCase):
def convert(self, code):
return convert(parse(code)).expressions
def testSimple(self):
self.assertEquals(repr(self.convert("x = { 3 }\nx.\n")), "["
"Assignment("
"Identifier('x', True, 1, 1), "
"Function([], Integer(3, 1, 7), [], [], 1, 5), "
"True, 1, 5), "
"ReturnValue("
"Identifier('ir_1', False, 2, 2), "
"Variable(Identifier('x', True, 2, 1), 2, 1), "
"[], [], 2, 2)"
"]")
self.assertEquals(repr(self.convert("x = { 3 }\nx()\n")), "["
"Assignment("
"Identifier('x', True, 1, 1), "
"Function([], Integer(3, 1, 7), [], [], 1, 5), "
"True, 1, 5), "
"ReturnValue("
"Identifier('ir_1', False, 2, 2), "
"Variable(Identifier('x', True, 2, 1), 2, 1), "
"[], [], 2, 2)"
"]")
self.assertEquals(repr(self.convert("x = { 3 }\nx()\n")), "["
"Assignment("
"Identifier('x', True, 1, 1), "
"Function([], Integer(3, 1, 7), [], [], 1, 5), "
"True, 1, 5), "
"ReturnValue("
"Identifier('ir_1', False, 2, 2), "
"Variable(Identifier('x', True, 2, 1), 2, 1), "
"[], [], 2, 2)"
"]")
self.assertEquals(repr(self.convert("x = { 3 }\nx.")), "["
"Assignment("
"Identifier('x', True, 1, 1), "
"Function([], Integer(3, 1, 7), [], [], 1, 5), "
"True, 1, 5), "
"ReturnValue("
"Identifier('ir_1', False, 2, 2), "
"Variable(Identifier('x', True, 2, 1), 2, 1), "
"[], [], 2, 2)"
"]")
self.assertEquals(repr(self.convert("x.x := 3\n")), "["
"ObjectMutation("
"Variable(Identifier('x', True, 1, 1), 1, 1), "
"Identifier('x', True, 1, 1), "
"Integer(3, 1, 8), 1, 1)"
"]")
self.assertEquals(repr(self.convert("{||}.")), "["
"Assignment("
"Identifier('ir_1', False, 1, 1), "
"Function("
"[], "
"Variable(Identifier('null', False, 1, 1), 1, 1), "
"[], [], 1, 1), "
"True, 1, 1), "
"ReturnValue("
"Identifier('ir_2', False, 1, 5), "
"Variable(Identifier('ir_1', False, 1, 1), 1, 1), "
"[], [], 1, 5)"
"]")
|
# This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
import os
from unittest.mock import patch
def test_fake_creds(monkeypatch):
from datacube_ows.startup_utils import initialise_aws_credentials
monkeypatch.setenv("AWS_DEFAULT_REGION", "")
initialise_aws_credentials()
monkeypatch.setenv("AWS_DEFAULT_REGION", "us-west-1")
monkeypatch.setenv("AWS_NO_SIGN_REQUEST", "false")
with patch("datacube_ows.startup_utils.configure_s3_access") as s3a:
s3a.return_value = None
initialise_aws_credentials()
assert os.getenv("AWS_NO_SIGN_REQUEST") is None
monkeypatch.setenv("AWS_NO_SIGN_REQUEST", "indubitably")
initialise_aws_credentials()
assert os.getenv("AWS_ACCESS_KEY_ID") == "fake"
def test_initialise_logger():
from datacube_ows.startup_utils import initialise_logger
log = initialise_logger("tim.the.testlogger")
assert log is not None
log.info("Test")
def test_initialise_ign_warn():
from datacube_ows.startup_utils import initialise_ignorable_warnings
initialise_ignorable_warnings()
def test_initialise_debugging(monkeypatch):
monkeypatch.setenv("PYDEV_DEBUG", "")
from datacube_ows.startup_utils import initialise_debugging
initialise_debugging()
def test_initialise_sentry(monkeypatch):
monkeypatch.setenv("SENTRY_KEY", "")
monkeypatch.setenv("SENTRY_PROJECT", "")
from datacube_ows.startup_utils import initialise_sentry
initialise_sentry()
monkeypatch.setenv("SENTRY_KEY", "dummy_key")
monkeypatch.setenv("SENTRY_PROJECT", "dummy_project")
try:
initialise_sentry()
except Exception:
pass
def test_prometheus_inactive(monkeypatch):
monkeypatch.setenv("prometheus_multiproc_dir", "")
from datacube_ows.startup_utils import ( # noqa: F401
initialise_prometheus, initialise_prometheus_register)
def test_supported_version():
from datacube_ows.protocol_versions import SupportedSvcVersion
ver = SupportedSvcVersion("wts", "1.2.3", "a", "b")
assert ver.service == "wts"
assert ver.service_upper == "WTS"
assert ver.version == "1.2.3"
assert ver.version_parts == ["1", "2", "3"]
assert ver.router == "a"
assert ver.exception_class == "b"
from datacube_ows.protocol_versions import supported_versions
supported = supported_versions()
assert supported["wms"].versions[0].service == "wms"
|
from remi.gui import *
from os.path import basename, dirname, join, exists, isfile
from time import time
import mimetypes
class LocalImage(Image):
def __init__(self, file_path_name=None, **kwargs):
super(LocalImage, self).__init__('/assets/please-select-image.png', **kwargs)
self.imagedata = None
self.mimetype = None
self.encoding = None
self.svg_index = None
self.file_path = None
self.load(file_path_name if file_path_name else './assets/please-select-image.png')
def load(self, file_path):
self.file_path = file_path
self.mimetype, self.encoding = mimetypes.guess_type(file_path)
with open(file_path, 'rb') as f:
self.imagedata = f.read()
# Try to read svg index
svg_index_path = join(dirname(file_path), '.' + basename(file_path) + '.svg')
if exists(svg_index_path) and isfile(svg_index_path):
with open(svg_index_path, 'rb') as f:
self.svg_index = f.read().decode('utf-8')
else:
self.svg_index = None
self.refresh()
def refresh(self):
i = int(time() * 1e6)
self.attributes['src'] = "/%s/get_image_data?update_index=%d" % (
id(self), i)
def get_image_data(self, update_index):
headers = {
'Content-type': self.mimetype if self.mimetype else 'application/octet-stream'}
return [self.imagedata, headers]
def save(self):
file_path = self.file_path
svg_index_path = join(dirname(file_path), '.' + basename(file_path) + '.svg')
if self.svg_index and exists(file_path) and (not exists(svg_index_path) or isfile(svg_index_path)):
with open(svg_index_path, 'wb') as f:
f.write(str.encode(self.svg_index))
def clear(self):
self.load('./assets/please-select-image.png')
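# Minimal usage sketch inside a remi App (the App scaffolding below is a
# hypothetical host application, not part of this module):
#
#   import remi
#
#   class Viewer(remi.App):
#       def main(self):
#           return LocalImage('./assets/please-select-image.png',
#                             width=300, height=300)
#
#   remi.start(Viewer)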
|
# Copyright (c) 2014, FTW Forschungszentrum Telekommunikation Wien
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of FTW nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL FTW
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import os
from collections import defaultdict
import fnmatch
import time
import logging
import cPickle
import gc
import numpy as np
from netaddr import IPRange
from netaddr.ip import IPAddress, IPNetwork
from RBTree import RBTree
import DomainCluster as domclust
from util import memory_usage
import config
from IPBlock import IPBlock,CanNotMergeException
class timeInterval(object):
"""
    Decorator class that automatically flushes old entries from a DNSMap
    object. Specifically, it deletes DomainStr objects from IPBlock objects
    when the corresponding domain:IP mapping has not been observed in the input
    data for an adjustable time interval (see self.interval). Note that if an
    IPBlock does not contain any more domains after this operation, the
    IPBlock object is also removed from the DNSMap.
"""
def __init__(self, func):
self.nextMergeInterval=config.timebinSizeMerge
self.nextSplitAndCleanupInterval=config.timebinSizeSplitAndCleanup
self.tNextMerge = None
self.tNextSplitAndCleanup = None
self.func = func
def __call__(self, *args, **kwargs):
timestamp=args[2]
if not timestamp:
"""
sometimes we want to add something to the DNS map without using a
timestamp; in this case we directly jump to dnsmap.add(..)
"""
return self.func.__call__(self.obj, *args, **kwargs)
merged=False
splitAndCleanedUp=False
curTime=time.time()
dnsmap = self.obj
blocksMerged=set()
numIPBlocksBeforeMerge=-1
if not self.tNextMerge:
self.tNextMerge = timestamp+self.nextMergeInterval
if not self.tNextSplitAndCleanup:
self.tNextSplitAndCleanup = timestamp+self.nextSplitAndCleanupInterval
if timestamp > self.tNextMerge:
"""
it's time to split and merge blocks
"""
merged=True
"""
remember how many IPBlocks we had before merging
"""
numIPBlocksBeforeMerge=dnsmap.getNumberOfIPBlocks()
# FIXME, remove this: ensure that all IPBlocks are clustered, not just the ones
# that are being merged/split below
dnsmap.reclusterAll(config.clusteringThreshold, force=False)
"""
MERGE
"""
blocksMerged = dnsmap.mergeAllBlocks()
numBlocksMerged = len(blocksMerged)
"""
Schedule next merge/split iteration
"""
self.tNextMerge += self.nextMergeInterval
"""
output some statistics
"""
msg=('merged blocks: %u'%(numBlocksMerged))
logging.info(msg)
if timestamp > self.tNextSplitAndCleanup:
"""
we do the cleanup AFTER the split/merge operation, as we need the
domains set in each of IPBlocks not to be empty in order to cluster
the domains for splitting/merging. The cleanup procedure *resets*
the domains field, therefore it has to come after split/merge
"""
splitAndCleanedUp=True
"""
SPLIT
we do this BEFORE calling dnsmap.cleanup(), as this call resets the
active IP settings, and therefore affects the splitting. We want to
split blocks only if the corresponding IPs were inactive for an
entire cleanup time interval.
NOTE: we do split blocks that were merged in the previous merge
intervals though!
"""
numBlocksSplit = dnsmap.splitAllBlocks(blocksMerged)
"""
remove empty IPBlocks, remove unused domain names, uncollapse
clusters, and reset all IPBlocks (i.e., reset the set of contained
domains and set all IPs to not active)
"""
dnsmap.cleanup()
"""
After the first cleanup iteration we start to output suspicious
activity
"""
dnsmap.doOutputSuspicious=True
"""
Schedule next cleanup iteration
"""
self.tNextSplitAndCleanup += self.nextSplitAndCleanupInterval
"""
output some statistics
"""
msg=('split blocks: %u'%(numBlocksSplit))
logging.info(msg)
"""
dump current dnsmap to disk, omitting the domains
"""
dnsmap.dumpt(os.path.join(config.workingDir,
'dnsmap_'+str(timestamp)+'.txt'), withDomains=False)
if merged or splitAndCleanedUp:
"""
output some statistics
"""
msg=('t is now %u; merged: %s; splitAndCleanedUp: %s'%(timestamp,
merged, splitAndCleanedUp))
logging.info(msg)
logging.info('memory usage: '+str(memory_usage()))
msg=('IPBlocks before merge: %u'%(numIPBlocksBeforeMerge))
logging.info(msg)
msg=('IPs/IPBlocks: %u/%u'%(dnsmap.getNumberOfIPs(),
dnsmap.getNumberOfIPBlocks()))
logging.info(msg)
logging.info('domains: '+str(dnsmap.getNumDomains()))
logging.info('Clusters per IP: '+str(dnsmap.getMeanStdClustersPerIP()))
logging.info('Clusters per IPBlock: '+str(dnsmap.getMeanStdClustersPerIPBlock()))
logging.info('Collapsed clusters: '+str(dnsmap.getNumCollapsedClusters()))
logging.info('Blocks that reached cluster capacity: '+str(len([1 for node in
dnsmap.traverseTrees() if
node.value.hasReachedClusterCapacity()])))
logging.info('this took '+str(time.time()-curTime)+' seconds')
return self.func.__call__(self.obj, *args, **kwargs)
def __get__(self, instance, owner):
self.cls = owner
self.obj = instance
return self.__call__
def mergeConditionMet(ipb1, ipb2, domainSimilarityTh, domainCountTh):
"""
Tests if two IPBlocks <ipb1> and <ipb2> should be merged.
"""
"""
if the blocks belong to different autonomous systems, we don't merge them
"""
    if ipb1.AS != ipb2.AS:
return False
"""
for evaluating the merging condition, we need an up-to-date cluster
configuration. maybe we delayed that computation until here, so let's
check.
"""
ipb1.cluster()
ipb2.cluster()
"""
we cache the distances between the cluster centers here to avoid that we
have to recompute them again and again
"""
distances = dict()
def _match(x,y,numDomains):
"""
This function checks if x is similar enough to y to be merged. It does
NOT check if y is similar enough to x!
x and y are tuples with (clusterKey, set of clustered DomainStr objects)
"""
numMatchingDomains = 0.0
domainsLeft = numDomains
for k1,v1 in x:
if not len(v1):
"""
                the clusters are sorted according to the number of domains they
                contain, in decreasing order. if <v1> is empty, no later
                cluster will contain anything either, so we can break here
"""
break
for k2,v2 in y:
try:
"""
note the reversed indexes below (k1,k2). this is a trick to
let the distances be computed the first time _match() is
called, and let them be reused when it is called the second
time with reversed parameters.
"""
d = distances[(k2,k1)]
except KeyError:
d = domclust.domainDist(k1,k2)
distances[(k1,k2)] = d
if d <= domainSimilarityTh:
"""
                    we found a cluster in <y> that matches <k1>.
"""
numMatchingDomains+=len(v1)
break
"""
let's see if we already found enough matches, so that we can exit
early and save time
"""
if numMatchingDomains/numDomains >= domainCountTh:
return True
"""
does it still make sense to keep searching? we can stop
when the number of remaining domains is too small to satisfy the
condition above.
"""
domainsLeft-=len(v1)
if (numMatchingDomains+domainsLeft)/numDomains < domainCountTh:
return False
"""
not enough matches found
"""
return False
"""
sort clusters according to the number of included domains, in
decreasing order. this should help us to speed up the process to find
a sufficient number of matching domains.
"""
ipb1Clusters = sorted(ipb1.clusters.items(), key=lambda x:len(x[1]),
reverse=True)
ipb2Clusters = sorted(ipb2.clusters.items(), key=lambda x:len(x[1]),
reverse=True)
numDomainsIpb1 = ipb1.getNumDomains()
numDomainsIpb2 = ipb2.getNumDomains()
if not numDomainsIpb1 or not numDomainsIpb2:
return False
doMerge = _match(ipb1Clusters, ipb2Clusters, numDomainsIpb1)
if doMerge:
doMerge = _match(ipb2Clusters, ipb1Clusters, numDomainsIpb2)
return doMerge
class DNSMap():
"""
This class stores IPBlock objects in a set of Red-Black-Trees. The idea is to
split the entire IP address range in a number of ranges depending on the
netmask of an IP address. this way the depth of the tree can be controlled,
at the price of spreading the information amongst several tree that don't
communicate with each other, and which might show some nasty effects at the
edges of their IP ranges (e.g., when a certain domain maps half to one
tree, and half to the neighboring one).
"""
def __init__(self, clusteringThreshold, domainCountThreshold,
netmask=8):
"""
startmask defines the number of trees that we are going to use. it
defaults to 8, which means that for each /8 IP address there is one
tree (in this case: theoretically 256). Note that the trees are created
on demand, i.e. when we see the first time an IP address for which not
yet a tree exists, we create the tree and insert the address there.
"""
assert 0<=clusteringThreshold<=1
assert 0<=domainCountThreshold<=1
self.netmask = '/'+str(netmask)
self.forest = defaultdict(RBTree)
self.domainfactory = domclust.DomainStrFactory()
# self.doOutputSuspicious = False
self.doOutputSuspicious = True
config.clusteringThreshold = clusteringThreshold
config.domainCountThreshold = domainCountThreshold
# FIXME, this should probably be set in config.py
self.suspiciousFile = open(os.path.join(config.workingDir,
'suspicious.txt'), 'w')
def _findTree(self, ip):
"""
returns the tree containing <ip>
ip in integer or string format
"""
ipnw = IPNetwork(str(IPAddress(ip))+self.netmask).first
return self.forest[ipnw]
def _insertIPBlock(self, ipb, iptree=None):
"""
Insert an IPBlock <ipb> in the tree specified by the first IP address
of <ipb>, or in <iptree> if not None
"""
if not ipb: return
if iptree == None:
iptree = self._findTree(ipb.first())
ipbTreeElem = iptree.insertNode(ipb.first(), ipb)
return ipbTreeElem
def _removeIPBlock(self, ipb):
"""
Remove the node that holds <ipb> from the corresponding tree.
"""
if not ipb: return
node, iptree = self.getTreeElem(ipb.first())
if node:
iptree.deleteNode(node)
else:
            logging.warning('could not remove node for IPBlock %s' % ipb)
def removeEmptyIPBlocks(self):
"""
Removes all IPBlocks which have an empty <domains> set, and which do
not contain collapsed clusters.
NOTE: this should be used only at the end of a time bin, as only then
the IPBlocks' <domains> set is properly filled. Remember that we empty
<domains> at every begin of a new time bin!
returns the number of deleted IPBlocks
"""
nodesToDelete = set()
for node, tree in self.traverseTrees(True):
ipb = node.value
if not ipb.getNumDomains():
"""
this block does not contain any domains and can therefore
be deleted
"""
nodesToDelete.add((node.key, tree, ipb))
"""
we can not delete from the tree while iterating over it, therefore
we do the deletions here
"""
for nodeKey, tree, ipb in nodesToDelete:
node = tree.findNode(nodeKey)
tree.deleteNode(node)
"""
we deleted a lot of stuff now, let's invoke the garbage collector
"""
#gc.collect()
return (len(nodesToDelete))
def cleanup(self):
"""
FIXME: comment me
"""
"""
First, remove all domains that do not map to any IPBlock anymore from
the domain factory. this has to be done *before* flushing the domains
from each individual IPBlock, as flushing from the blocks effectively
empties the <ipblocks> set of each of the DomainStr object the factory,
which is exactly the condition we check for when flushing from the
factory.
"""
"""
remove IP blocks that do not contain any domains anymore
"""
numIPBlocksDeleted=self.removeEmptyIPBlocks()
msg='deleted nodes: %u'%(numIPBlocksDeleted)
logging.info(msg)
numDomains=self.getNumDomains()
self.domainfactory.flushEmptyDomains()
logging.info('removed '+str(numDomains-self.getNumDomains())+
' domains')
for node, tree in self.traverseTrees(True):
ipb = node.value
"""
convert all collapsed clusters that do not contain sufficient
domains anymore to normal clusters
"""
ipb.uncollapseClusters()
"""
remove all clusters that are already empty, even before removing
the domains from the IP block below
"""
ipb.removeEmptyClusters()
"""
flush the set of domains for each IPBlock
"""
ipb.flushDomains()
"""
mark all IPs in all clusters in this block as inactive
"""
ipb.setIPsInactive()
"""
we deleted a lot of stuff now, let's invoke the garbage collector
"""
gc.collect()
def getRightNeighbor(self, ipb, ipbTreeElem, iptree):
"""
Finds the direct right neighbor of an IPBlock <ipb>. The direct right
neighbor is the one which satisfies ipb.last+1 == neighbor.first. If
such a neighbor doesn't exist we return <None>.
Returns a tuple (rightNeighborBlock, rightNeighborTreeElem)
"""
rightNeighborTreeElem = iptree.nextNode(ipbTreeElem)
if rightNeighborTreeElem:
rightNeighborBlock = rightNeighborTreeElem.value
if ipb.isRightNeighbor(rightNeighborBlock):
return (rightNeighborBlock, rightNeighborTreeElem)
return None
def getLeftNeighbor(self, ipb, ipbTreeElem, iptree):
"""
Finds the direct left neighbor of an IPBlock <ipb>. The direct left
neighbor is the one which satisfies neighbor.last+1 == ipb.first. If
such a neighbor doesn't exist we return <None>.
Returns a tuple (leftNeighborBlock, leftNeighborTreeElem)
"""
leftNeighborTreeElem = iptree.prevNode(ipbTreeElem)
if leftNeighborTreeElem:
leftNeighborBlock = leftNeighborTreeElem.value
if leftNeighborBlock.isRightNeighbor(ipb):
return (leftNeighborBlock, leftNeighborTreeElem)
return None
def mergeAllBlocks(self):
"""
Run over the IPBlocks stored in this DNSMap and try to merge all
blocks.
"""
blocksMerged=set()
for rbtree in self.forest.itervalues():
node=rbtree.firstNode()
while True:
ipb = node.value
rightNeighborTreeElem = rbtree.nextNode(node)
if not rightNeighborTreeElem:
"""
Reached the last node in this <rbtree>
"""
break
rightNeighborBlock = rightNeighborTreeElem.value
if ipb.isRightNeighbor(rightNeighborBlock):
merged = self.mergeIPBlocks(ipb,
rightNeighborBlock, rightNeighborTreeElem,
rbtree)
if merged:
blocksMerged.add(str(ipb))
else:
node=rightNeighborTreeElem
else:
node=rightNeighborTreeElem
return blocksMerged
def splitAllBlocks(self, blocksNotToBeSplit=[]):
"""
Runs over all IPBlocks stored in this DNSMap and tries to split them by
evaluating the mergeCondition on both halves of each IPBlock.
blocksNotToBeSplit: a set of IPBlocks that should not be split. The
blocks are identified by str(block).
returns the number of blocks that were split
"""
numBlocksSplit=0
for rbtree in self.forest.values():
node = rbtree.firstNode()
while node:
ipb = node.value
if len(ipb)>1 and not str(ipb) in blocksNotToBeSplit:
"""
this block contains more than one IP and was NOT just
created by merging, so we can try to split it
"""
ipb1, ipb2 = self.splitIPBlock(ipb)
if not mergeConditionMet(ipb1, ipb2,
config.clusteringThreshold,
config.domainCountThreshold):
numBlocksSplit+=1
self._removeIPBlock(ipb)
node1 = self._insertIPBlock(ipb1, iptree=rbtree)
node2 = self._insertIPBlock(ipb2, iptree=rbtree)
"""
update the back-references in the DomainStr
objects: remove references to the deleted block,
and create reference to the new blocks
"""
for d in ipb.getDomains():
"""
FIXME
sometimes it happens that <d> does not contain a back-reference to
<ipb> anymore. in fact, in this case <d> does not contain any
back-references to IPBlocks. it's unclear why this is happening,
for now we ignore this warning.
"""
d.removeIPBlock(ipb, warn=True)
for d in ipb1.getDomains():
d.addIPBlock(ipb1)
for d in ipb2.getDomains():
d.addIPBlock(ipb2)
"""
We continue with the block following the ones we
just created by splitting <ipb>. That means that
these new blocks will at earliest be further split
in the next iteration.
"""
node = node2
node = rbtree.nextNode(node)
return numBlocksSplit
def mergeIPBlocks(self, masterIpb, slaveIpb, slaveTreeElem, rbtree):
"""
Merges two IPBlocks if they are similar enough (see
_mergeConditionMet()). As a result of this operation, the contents
of <slaveIpb> will be written to <masterIpb>, and <slaveIpb> will be
deleted. Note that <slaveIpb> must be the direct right neighbor of
<masterIpb>, else merging will fail in any case.
Returns True if merging was successful, else False.
"""
if mergeConditionMet(masterIpb, slaveIpb,
config.clusteringThreshold, config.domainCountThreshold):
try:
masterIpb.mergeWithRightNeighbor(slaveIpb)
except CanNotMergeException:
                logging.warning('cannot merge %s %s' % (masterIpb, slaveIpb))
return False
else:
rbtree.deleteNode(slaveTreeElem)
del slaveIpb
return True
else:
return False
def splitIPBlock(self, ipb):
"""
Split <ipb> in two halves.
Note: this does neither delete <ipb> from the DNSMap nor insert the new
blocks in it. Also, it doesn't create back-references to the new blocks
in the containing domains <ipblocks> field. All of this has to be done
outside of this function in case it is decided that the new blocks
should be kept.
returns the two new IPBlocks.
"""
if len(ipb) == 1:
return None
"""
ensure that the IPBlock is properly clustered
"""
ipb.cluster()
"""
split <ipb> in two halves, and extract from each halve the domains that
have been used in this IP range
"""
splitIndex = int(len(ipb.iprange)/2.0)
domainsIpb1 = []
domainsIpb2 = []
for clusterKey, cluster in ipb.clusters.iteritems():
activeIPsForIpb1=np.copy(cluster.activeIPs[:splitIndex])
activeIPsForIpb2=np.copy(cluster.activeIPs[splitIndex:])
if True in activeIPsForIpb1:
domainsIpb1+=[(cluster.domains, activeIPsForIpb1)]
if True in activeIPsForIpb2:
domainsIpb2+=[(cluster.domains, activeIPsForIpb2)]
def _createNewIPBlock(firstIP, lastIP, domainsAndActiveIPs,
ipIndexOffset=0):
"""
creates a new IPBlock from a set of domains and IPs. Note that we
do not create back references for these domains.
"""
newIpb = IPBlock(firstIP, last=lastIP)
for domains, activeIPs in domainsAndActiveIPs:
for d in domains:
for ipIndex, ipIsActive in enumerate(activeIPs):
if ipIsActive:
"""
we DON'T want to create back-references, as we do
not know yet if we're going to keep newIpb
"""
ip=ipb.iprange[ipIndex+ipIndexOffset]
newIpb.addDomain(d, ip, createBackRef=False)
newIpb.cluster()
return newIpb
"""
create two blocks using the domains that were active in each halve
"""
ipb1=_createNewIPBlock(ipb.iprange[0], ipb.iprange[splitIndex-1],
domainsIpb1)
ipb2=_createNewIPBlock(ipb.iprange[splitIndex], ipb.iprange[-1],
domainsIpb2, ipIndexOffset=splitIndex)
return (ipb1,ipb2)
def getTreeElem(self, ip):
"""
Returns a tuple with the node that contains <ip> and the containing
RBTree that contains this node. Return (None, RBTree) if no such node
exists.
Note that node.value is an IPBlock object.
"""
iptree = self._findTree(ip)
"""
find the IPBlock in the tree that starts at an IP address that is
closest to <ip>, and SMALLER than <ip>
"""
treeElem = iptree.findClosestNode(int(IPAddress(ip)))
if not treeElem:
return None, iptree
"""
let's see if this is the 'right' closest block, i.e. the one where
<ip> is >= the start IP of the block. if this is not the case, we
pick the block's left neighbor.
"""
if not ip in treeElem.value:
treeElem = iptree.prevNode(treeElem)
if not treeElem or not ip in treeElem.value:
return None, iptree
return treeElem, iptree
else:
return treeElem, iptree
def getIPBlock(self, ip):
"""
Return the IPBlock that contains <ip>. Returns None if no such IPBlock
is found.
"""
node,_ = self.getTreeElem(ip)
if node:
return node.value
else:
return None
def reclusterAll(self, clusteringThreshold, force=False):
"""
Set the clustering threshold for all contained IPBlocks to
<clusteringThreshold>, and force immediate reclustering of all
IPBlocks.
"""
config.clusteringThreshold=clusteringThreshold
for node in self.traverseTrees():
ipb=node.value
if force:
ipb._doCluster()
else:
ipb.cluster()
def _writeSuspicious(self, timestamp, dname, ip, ttl, clientID, minDist,
numDomainsInBlock):
"""
FIXME: comment me
"""
d = self.domainfactory.getDomainStr(dname)
if d:
numBlocks = len(d.ipblocks)
else:
numBlocks = 0
# NOTE: <dname> may contain non-ascii symbols, that's why we use
# unicode
        # NOTE: we do not print the minDist value
s = ' '.join([str(timestamp), str(clientID), unicode(dname), str(ip), str(ttl), str(numBlocks), str(numDomainsInBlock)])
s=s.encode('utf-8')
self.suspiciousFile.write(s+'\n')
@timeInterval
def add(self, ip, dname, timestamp, ttl, clientID=None):
"""
Add a new IP address/domain name mapping to the tree. Four things can
happen:
1) If there is a block that already contains the mapping, nothing is
done.
2) If the IP is contained in the block, but not the dname, the block is
split to create a new block that contains all previous domain names
plus dname, and only one IP (namely <ip>)
3) If there is no matching block yet, a new one is created.
4) If the closest block is a right neighbor of the new block to be
created, the closest block is extended to contain also the new IP
address.
Returns True if a new IPBlock was added, else returns False
        :type ttl: int
"""
if not ip or not dname:
msg = unicode('cannot add empty record: %s %s %s'%(ip, dname,
timestamp))
logging.info(msg)
return False
dname=dname.lower()
"""
Get the DomainStr object corresponding to <dname> from the factory.
This ensures that there's always exactly one object for a domain name,
no matter in how many IPBlocks this object appears
"""
dnameObj = self.domainfactory.makeDomainStr(dname)
"""
We need to add this mapping to the IP block that contains <ip>, let's
find it.
"""
containingTreeElem, iptree = self.getTreeElem(ip)
if not containingTreeElem:
"""
couldn't find a block that contains <ip>, let's create a new one and
insert it into the tree
"""
ipb = IPBlock(ip)
ipb.addDomain(dnameObj, ip)
ipbTreeElem = self._insertIPBlock(ipb, iptree)
"""
can we merge this block now with the right/left neighbor?
"""
#merged = self.mergeWithNeighbors(ipb, ipbTreeElem, iptree)
if self.doOutputSuspicious:
#if not merged and self.doOutputSuspicious:
# """
# This is a new mapping that involves an IP that we didn't see
# before. Typically, such IPs are in access provider networks,
# and could therefore indicate malicious activity.
# """
self._writeSuspicious(timestamp, dname, ip, ttl,clientID, -1
,ipb.getNumDomains())
return True
else:
"""
found an existing IPBlock for this IP
"""
ipb = containingTreeElem.value
"""
let's try to add this mapping to the <ipb>
"""
addResultCode, minDist = ipb.addDomain(dnameObj, ip)
"""
addResult==0 and addResult==1 mean that we were able to integrate
the mapping in the block without changing the clusters
"""
if addResultCode==2:
"""
adding this mapping changed the cluster configuration, which
means that we found no good fit to this IPBlock. This seems
suspicious.
"""
if self.doOutputSuspicious and minDist>0.0:
self._writeSuspicious(timestamp, dname, ip, ttl,clientID,
minDist, ipb.getNumDomains())
return True
elif addResultCode==3:
"""
the domain does not fit, but we cannot create a new cluster;
therefore we ignore this domain for now, hoping that we may be
able to consider it in the future after reclustering and
merging
"""
#msg=('Could not create cluster: %s %s'
# ' %s'%(unicode(dnameObj),
# str(ip), str(timestamp)))
#msg=msg.encode('utf-8')
#logging.info(msg)
return False
else:
return False
def traverseTrees(self, returnContainingTree=False, sortedByIP=False):
"""
Generator that returns at each iteration an IPBlock from this DNSMap
object.
"""
if sortedByIP:
forest=self.forest.items()
forest.sort(key=lambda x:x[0])
_,trees=zip(*forest)
else:
trees=self.forest.itervalues()
for tree in trees:
for node in tree.traversalGenerator():
if node:
if returnContainingTree:
yield (node, tree)
else:
yield node
def findIPBlocksForDname(self, dname):
"""
Returns all IPBlocks that contain domains matching <dname>. Note that
you can use wildcard in <dname>, check out the documentation of the
fnmatch module.
NOTE: this does not find those IPBlocks where <dname> is contained in a
collapsed cluster.
"""
if '*' in dname:
results = []
for node in self.traverseTrees():
ipb=node.value
if fnmatch.filter(ipb.getDomains(), dname):
results.append(ipb)
else:
d = self.domainfactory.getDomainStr(dname)
if d:
results = list(d.ipblocks)
else:
results = None
return results
def getNumberOfIPBlocks(self):
"""
"""
cnt=0
for node in self.traverseTrees():
cnt+=1
return cnt
def getMeanStdClustersPerIP(self):
"""
Return the mean, standard deviation, and standard error for the number
of clusters per IP address in this DNSMap
"""
numClusters=[]
for node in self.traverseTrees():
ipb=node.value
numClustersInIPBlock=len(ipb.clusters)
if numClustersInIPBlock:
numClusters.append(numClustersInIPBlock/float(len(ipb)))
m=np.mean(numClusters)
std=np.std(numClusters)
#stderr=std/np.sqrt(len(numClusters))
#return (m, std, stderr)
return (m,std,np.percentile(numClusters, list(np.arange(5,105,5))))
def getMeanStdClustersPerIPBlock(self):
"""
Return the mean, standard deviation, and standard error for the number
of clusters per IPBlock in this DNSMap
"""
numClusters=[]
for node in self.traverseTrees():
ipb=node.value
numClusters.append(len(ipb.clusters))
m=np.mean(numClusters)
std=np.std(numClusters)
#stderr=std/np.sqrt(len(numClusters))
#return (m, std, stderr)
return (m,std,np.percentile(numClusters, list(np.arange(5,105,5))))
def getNumCollapsedClusters(self):
"""
Returns the total number of collapsed clusters for all IPBlocks in
this DNSMap
"""
numCollapsed=0
for node in self.traverseTrees():
ipb=node.value
numCollapsed+=len(ipb.getCollapsedClusters())
return numCollapsed
def getNumberOfIPs(self):
"""
Returns the number of IPs that are contained by this DNSMap object,
i.e. all IPs in the IPranges in all of the contained RBTrees.
"""
cnt=0
for node in self.traverseTrees():
ipb=node.value
cnt+=len(ipb)
return cnt
def getNumDomains(self):
"""
Returns the number of domain names stored in this DNSMap object,
i.e. all domain names in self.domainfactory. Note that this does not
necessarily include the all domains that are contained exclusively in
collapsed clusters, as not all of them are stored in the domainfactory
"""
return len(self.domainfactory.domains)
def getDnamesCount(self):
"""
Returns a dict with <domain name>:<cnt>, where <cnt> represents the
number of IPBlocks in which this domain name was found.
"""
dnames = dict()
for dname in self.domainfactory.domains:
dnames[dname]=len(dname.ipblocks)
return dnames
def getIPBlocksForDnames(self, searchedDname=None):
"""
Returns a dict with <domain name>:<IPBlocks>, i.e. all IP blocks where
a domain name maps to. The optional argument <searchedDname> restricts
the output to domains that match the given pattern which can also
include (fnmatch) wildcards.
"""
dnamesBlocks = defaultdict(list)
for node in self.traverseTrees():
ipb=node.value
for dname in ipb.getDomains():
if searchedDname:
if fnmatch.fnmatch(dname, searchedDname):
dnamesBlocks[dname].append(ipb)
else:
dnamesBlocks[dname].append(ipb)
return dnamesBlocks
# @staticmethod
# def load(filename):
# """
# Loads an IPBlocks object from a pickled file.
# """
# iptree=None
# with open(filename, 'rb') as f:
# iptree = cPickle.load(f)
# return iptree
#
# def dump(self, filename):
# """
# Pickles this object to file <filename>
# """
# with open(filename, 'wb') as f:
# cPickle.dump(self, f, cPickle.HIGHEST_PROTOCOL)
def dumpt(self, filename, withDomains=False):
"""
Dumps the content of this DNSMap to a text file. Each line represent
one IPBlock, the format is:
FIRST_IP LAST_IP clusterKey1;clusterKey2;[..];\n
if <withDomains>==True, the output is:
FIRST_IP LAST_IP
clusterKey1:domain1,domain2,[..];clusterKey2:domain3,domain4,[..];[..];\n
"""
with open(filename, 'w') as f:
for node in self.traverseTrees():
ipb=node.value
ipb.cluster()
if ipb.clusters:
if ipb.hasReachedClusterCapacity():
f.write('*')
f.write(str(ipb.first())+' ')
f.write(str(ipb.last())+' ')
if withDomains:
for ck,cv in ipb.clusters.iteritems():
"""
Add '*' prefix to collapsed clusters
"""
if cv.isCollapsed:
ck='*'+ck
f.write(ck.encode('utf-8')+':')
s=','.join([d.encode('utf-8') for d in cv.domains])
f.write(s)
f.write(';')
else:
for ck,cv in ipb.clusters.iteritems():
"""
Add '*' prefix to collapsed clusters
"""
if cv.isCollapsed:
ck='*'+ck
f.write(ck.encode('utf-8')+';')
f.write('\n')
@staticmethod
def loadt(filename, clusteringThreshold, domainCountThreshold,
withDomains=True):
"""
Loads the text format written by dumpt() and returns a DNSMap object
that represents the data in <filename>.
"""
dnsmap=DNSMap(clusteringThreshold, domainCountThreshold)
with open(filename, 'r') as f:
logging.warn('loading DNSMap from '+str(filename))
for line in f:
if not line:
continue
line=line.rstrip('\n')
line=line.rstrip(' ')
splitLine=line.split()
try:
if splitLine[0].startswith('*'):
#hasReachedClusterCapacity=True
firstIP=IPAddress(splitLine[0][1:])
else:
#hasReachedClusterCapacity=False
firstIP=IPAddress(splitLine[0])
lastIP=IPAddress(splitLine[1])
ipb=IPBlock(firstIP, last=lastIP)
#ipb.hasReachedClusterCapacity=hasReachedClusterCapacity
clusters=(splitLine[2]).split(';')
for cluster in clusters:
if not cluster:continue
isCollapsed=False
"""
remove '*' prefix for collapsed clusters
"""
if cluster.startswith('*'):
isCollapsed=True
cluster=cluster.lstrip('*')
try:
try:
index=cluster.index(':')
except ValueError:
ckDname=domclust.DomainStr(cluster)
# create an empty cluster in this IPBlock
cluster=domclust.DomainCluster([],
isCollapsed=isCollapsed)
cluster.initActiveIPs(len(ipb))
ipb.clusters[ckDname]=cluster
else:
"""
seems we also exported domain names
"""
ck=cluster[:index]
clusteredDomains=cluster[index+1:].split(',')
ckDname=domclust.DomainStr(ck)
# create an empty cluster in this IPBlock
cluster=domclust.DomainCluster([],
isCollapsed=isCollapsed)
cluster.initActiveIPs(len(ipb))
ipb.clusters[ckDname]=cluster
if withDomains:
for d in clusteredDomains:
dname=dnsmap.domainfactory.makeDomainStr(d)
dname.addIPBlock(ipb)
ipb.addToCluster(dname, ckDname)
except UnicodeDecodeError:
continue
except IndexError:
continue
else:
dnsmap._insertIPBlock(ipb)
return dnsmap
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import json
import os
from docs_server_utils import GetLinkToRefType
import compiled_file_system as compiled_fs
from file_system import FileNotFoundError
import third_party.json_schema_compiler.json_comment_eater as json_comment_eater
import third_party.json_schema_compiler.model as model
import third_party.json_schema_compiler.idl_schema as idl_schema
import third_party.json_schema_compiler.idl_parser as idl_parser
# Increment this version when there are changes to the data stored in any of
# the caches used by APIDataSource. This allows the cache to be invalidated
# without having to flush memcache on the production server.
_VERSION = 2
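# _RemoveNoDocs walks the parsed schema and strips every dict entry or list
# item whose value is marked with 'nodoc': True; it returns True when the item
# passed in should itself be dropped by the caller.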
def _RemoveNoDocs(item):
if type(item) == dict:
if item.get('nodoc', False):
return True
to_remove = []
for key, value in item.items():
if _RemoveNoDocs(value):
to_remove.append(key)
for k in to_remove:
del item[k]
elif type(item) == list:
to_remove = []
for i in item:
if _RemoveNoDocs(i):
to_remove.append(i)
for i in to_remove:
item.remove(i)
return False
def _CreateId(node, prefix):
if node.parent is not None and not isinstance(node.parent, model.Namespace):
return '-'.join([prefix, node.parent.simple_name, node.simple_name])
return '-'.join([prefix, node.simple_name])
def _FormatValue(value):
"""Inserts commas every three digits for integer values. It is magic.
"""
s = str(value)
return ','.join([s[max(0, i - 3):i] for i in range(len(s), 0, -3)][::-1])
class _JscModel(object):
"""Uses a Model from the JSON Schema Compiler and generates a dict that
a Handlebar template can use for a data source.
"""
def __init__(self, json):
clean_json = copy.deepcopy(json)
if _RemoveNoDocs(clean_json):
self._namespace = None
else:
self._namespace = model.Namespace(clean_json, clean_json['namespace'])
def _FormatDescription(self, description):
if description is None or '$ref:' not in description:
return description
refs = description.split('$ref:')
formatted_description = [refs[0]]
for ref in refs[1:]:
parts = ref.split(' ', 1)
if len(parts) == 1:
ref = parts[0]
rest = ''
else:
ref, rest = parts
rest = ' ' + rest
if not ref[-1].isalnum():
rest = ref[-1] + rest
ref = ref[:-1]
ref_dict = GetLinkToRefType(self._namespace.name, ref)
formatted_description.append('<a href="%(href)s">%(text)s</a>%(rest)s' %
{ 'href': ref_dict['href'], 'text': ref_dict['text'], 'rest': rest })
return ''.join(formatted_description)
def ToDict(self):
if self._namespace is None:
return {}
return {
'name': self._namespace.name,
'types': [self._GenerateType(t) for t in self._namespace.types.values()
if t.type_ != model.PropertyType.ADDITIONAL_PROPERTIES],
'functions': self._GenerateFunctions(self._namespace.functions),
'events': self._GenerateEvents(self._namespace.events),
'properties': self._GenerateProperties(self._namespace.properties)
}
def _GenerateType(self, type_):
type_dict = {
'name': type_.simple_name,
'description': self._FormatDescription(type_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'events': self._GenerateEvents(type_.events),
'id': _CreateId(type_, 'type')
}
self._RenderTypeInformation(type_, type_dict)
return type_dict
def _GenerateFunctions(self, functions):
return [self._GenerateFunction(f) for f in functions.values()]
def _GenerateFunction(self, function):
function_dict = {
'name': function.simple_name,
'description': self._FormatDescription(function.description),
'callback': self._GenerateCallback(function.callback),
'parameters': [],
'returns': None,
'id': _CreateId(function, 'method')
}
if (function.parent is not None and
not isinstance(function.parent, model.Namespace)):
function_dict['parent_name'] = function.parent.simple_name
else:
function_dict['parent_name'] = None
if function.returns:
function_dict['returns'] = self._GenerateProperty(function.returns)
for param in function.params:
function_dict['parameters'].append(self._GenerateProperty(param))
if function_dict['callback']:
function_dict['parameters'].append(function_dict['callback'])
if len(function_dict['parameters']) > 0:
function_dict['parameters'][-1]['last'] = True
return function_dict
def _GenerateEvents(self, events):
return [self._GenerateEvent(e) for e in events.values()]
def _GenerateEvent(self, event):
event_dict = {
'name': event.simple_name,
'description': self._FormatDescription(event.description),
'parameters': map(self._GenerateProperty, event.params),
'callback': self._GenerateCallback(event.callback),
'conditions': [GetLinkToRefType(self._namespace.name, c)
for c in event.conditions],
'actions': [GetLinkToRefType(self._namespace.name, a)
for a in event.actions],
'filters': map(self._GenerateProperty, event.filters),
'supportsRules': event.supports_rules,
'id': _CreateId(event, 'event')
}
if (event.parent is not None and
not isinstance(event.parent, model.Namespace)):
event_dict['parent_name'] = event.parent.simple_name
else:
event_dict['parent_name'] = None
if event_dict['callback']:
event_dict['parameters'].append(event_dict['callback'])
if len(event_dict['parameters']) > 0:
event_dict['parameters'][-1]['last'] = True
return event_dict
def _GenerateCallback(self, callback):
if not callback:
return None
callback_dict = {
'name': callback.simple_name,
'description': self._FormatDescription(callback.description),
'simple_type': {'simple_type': 'function'},
'optional': callback.optional,
'parameters': []
}
for param in callback.params:
callback_dict['parameters'].append(self._GenerateProperty(param))
if (len(callback_dict['parameters']) > 0):
callback_dict['parameters'][-1]['last'] = True
return callback_dict
def _GenerateProperties(self, properties):
return [self._GenerateProperty(v) for v in properties.values()
if v.type_ != model.PropertyType.ADDITIONAL_PROPERTIES]
def _GenerateProperty(self, property_):
property_dict = {
'name': property_.simple_name,
'optional': property_.optional,
'description': self._FormatDescription(property_.description),
'properties': self._GenerateProperties(property_.properties),
'parameters': [],
'functions': self._GenerateFunctions(property_.functions),
'returns': None,
'id': _CreateId(property_, 'property')
}
for param in property_.params:
property_dict['parameters'].append(self._GenerateProperty(param))
if property_.returns:
property_dict['returns'] = self._GenerateProperty(property_.returns)
if (property_.parent is not None and
not isinstance(property_.parent, model.Namespace)):
property_dict['parent_name'] = property_.parent.simple_name
else:
property_dict['parent_name'] = None
if property_.has_value:
if isinstance(property_.value, int):
property_dict['value'] = _FormatValue(property_.value)
else:
property_dict['value'] = property_.value
else:
self._RenderTypeInformation(property_, property_dict)
return property_dict
def _RenderTypeInformation(self, property_, dst_dict):
if property_.type_ == model.PropertyType.CHOICES:
dst_dict['choices'] = map(self._GenerateProperty,
property_.choices.values())
# We keep track of which is last for knowing when to add "or" between
# choices in templates.
if len(dst_dict['choices']) > 0:
dst_dict['choices'][-1]['last'] = True
elif property_.type_ == model.PropertyType.REF:
dst_dict['link'] = GetLinkToRefType(self._namespace.name,
property_.ref_type)
elif property_.type_ == model.PropertyType.ARRAY:
dst_dict['array'] = self._GenerateProperty(property_.item_type)
elif property_.type_ == model.PropertyType.ENUM:
dst_dict['enum_values'] = []
for enum_value in property_.enum_values:
dst_dict['enum_values'].append({'name': enum_value})
if len(dst_dict['enum_values']) > 0:
dst_dict['enum_values'][-1]['last'] = True
elif property_.instance_of is not None:
dst_dict['simple_type'] = property_.instance_of.lower()
else:
dst_dict['simple_type'] = property_.type_.name.lower()
class _LazySamplesGetter(object):
"""This class is needed so that an extensions API page does not have to fetch
the apps samples page and vice versa.
"""
def __init__(self, api_name, samples):
self._api_name = api_name
self._samples = samples
def get(self, key):
return self._samples.FilterSamples(key, self._api_name)
class APIDataSource(object):
"""This class fetches and loads JSON APIs from the FileSystem passed in with
|cache_factory|, so the APIs can be plugged into templates.
"""
class Factory(object):
def __init__(self, cache_factory, base_path, samples_factory):
self._permissions_cache = cache_factory.Create(self._LoadPermissions,
compiled_fs.PERMS,
version=_VERSION)
self._json_cache = cache_factory.Create(self._LoadJsonAPI,
compiled_fs.JSON,
version=_VERSION)
self._idl_cache = cache_factory.Create(self._LoadIdlAPI,
compiled_fs.IDL,
version=_VERSION)
self._idl_names_cache = cache_factory.Create(self._GetIDLNames,
compiled_fs.IDL_NAMES,
version=_VERSION)
self._samples_factory = samples_factory
self._base_path = base_path
def Create(self, request):
return APIDataSource(self._permissions_cache,
self._json_cache,
self._idl_cache,
self._idl_names_cache,
self._base_path,
self._samples_factory.Create(request))
def _LoadPermissions(self, json_str):
return json.loads(json_comment_eater.Nom(json_str))
def _LoadJsonAPI(self, api):
return _JscModel(json.loads(json_comment_eater.Nom(api))[0])
def _LoadIdlAPI(self, api):
idl = idl_parser.IDLParser().ParseData(api)
return _JscModel(idl_schema.IDLSchema(idl).process()[0])
def _GetIDLNames(self, apis):
return [model.UnixName(os.path.splitext(api.split('/')[-1])[0])
for api in apis if api.endswith('.idl')]
def __init__(self,
permissions_cache,
json_cache,
idl_cache,
idl_names_cache,
base_path,
samples):
self._base_path = base_path
self._permissions_cache = permissions_cache
self._json_cache = json_cache
self._idl_cache = idl_cache
self._idl_names_cache = idl_names_cache
self._samples = samples
def _GetPermsFromFile(self, filename):
try:
perms = self._permissions_cache.GetFromFile('%s/%s' %
(self._base_path, filename))
return dict((model.UnixName(k), v) for k, v in perms.iteritems())
except FileNotFoundError:
return {}
def _GetFeature(self, path):
# Remove 'experimental_' from path name to match the keys in
    # _permission_features.json.
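    # e.g. a path like 'experimental_alarms' is looked up as 'alarms'
    # (illustrative name, not taken from the original source).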
path = model.UnixName(path.replace('experimental_', ''))
for filename in ['_permission_features.json', '_manifest_features.json']:
api_perms = self._GetPermsFromFile(filename).get(path, None)
if api_perms is not None:
break
if api_perms and api_perms['channel'] in ('trunk', 'dev', 'beta'):
api_perms[api_perms['channel']] = True
return api_perms
def _GenerateHandlebarContext(self, handlebar, path):
return_dict = {
'permissions': self._GetFeature(path),
'samples': _LazySamplesGetter(path, self._samples)
}
return_dict.update(handlebar.ToDict())
return return_dict
def __getitem__(self, key):
return self.get(key)
def get(self, key):
path, ext = os.path.splitext(key)
unix_name = model.UnixName(path)
idl_names = self._idl_names_cache.GetFromFileListing(self._base_path)
cache, ext = ((self._idl_cache, '.idl') if (unix_name in idl_names) else
(self._json_cache, '.json'))
return self._GenerateHandlebarContext(
cache.GetFromFile('%s/%s%s' % (self._base_path, unix_name, ext)),
path)
|
place = input("Enter a place: ")
animal = input("Enter a animal:")
job = input("Enter the name of a job:")
badsmell = input("Enter a bad smell:")
print("We went to "+place)
print("To see a "+animal)
print("But a "+job)
print("Said go away")
print("So I said why?")
print("They said because you smell like "+badsmell)
|
# Generated by Django 2.2.1 on 2019-10-06 01:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainpage', '0002_auto_20191005_1923'),
]
operations = [
migrations.RemoveField(
model_name='coupons',
name='category_id',
),
migrations.RemoveField(
model_name='coupons',
name='student_id',
),
migrations.RemoveField(
model_name='coupons',
name='user_account_id',
),
migrations.RemoveField(
model_name='payments',
name='category_id',
),
]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['InstanceArgs', 'Instance']
@pulumi.input_type
class InstanceArgs:
def __init__(__self__, *,
plan: pulumi.Input[str],
region: pulumi.Input[str],
activation_email: Optional[pulumi.Input[bool]] = None,
app_id: Optional[pulumi.Input[int]] = None,
backups: Optional[pulumi.Input[str]] = None,
backups_schedule: Optional[pulumi.Input['InstanceBackupsScheduleArgs']] = None,
ddos_protection: Optional[pulumi.Input[bool]] = None,
enable_ipv6: Optional[pulumi.Input[bool]] = None,
enable_private_network: Optional[pulumi.Input[bool]] = None,
firewall_group_id: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
image_id: Optional[pulumi.Input[str]] = None,
iso_id: Optional[pulumi.Input[str]] = None,
label: Optional[pulumi.Input[str]] = None,
os_id: Optional[pulumi.Input[int]] = None,
private_network_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
reserved_ip_id: Optional[pulumi.Input[str]] = None,
script_id: Optional[pulumi.Input[str]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
ssh_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tag: Optional[pulumi.Input[str]] = None,
user_data: Optional[pulumi.Input[str]] = None):
"""
        The set of arguments for constructing an Instance resource.
:param pulumi.Input[str] plan: The ID of the plan that you want the instance to subscribe to. [See List Plans](https://www.vultr.com/api/#tag/plans)
:param pulumi.Input[str] region: The ID of the region that the instance is to be created in. [See List Regions](https://www.vultr.com/api/#operation/list-regions)
:param pulumi.Input[bool] activation_email: Whether an activation email will be sent when the server is ready.
:param pulumi.Input[int] app_id: The ID of the Vultr application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications)
:param pulumi.Input[str] backups: Whether automatic backups will be enabled for this server (these have an extra charge associated with them). Values can be enabled or disabled.
:param pulumi.Input['InstanceBackupsScheduleArgs'] backups_schedule: A block that defines the way backups should be scheduled. While this is an optional field if `backups` are `enabled` this field is mandatory. The configuration of a `backups_schedule` is listed below.
:param pulumi.Input[bool] ddos_protection: Whether DDOS protection will be enabled on the server (there is an additional charge for this).
:param pulumi.Input[bool] enable_ipv6: Whether the server has IPv6 networking activated.
:param pulumi.Input[bool] enable_private_network: Whether the server has private networking support enabled.
:param pulumi.Input[str] firewall_group_id: The ID of the firewall group to assign to the server.
:param pulumi.Input[str] hostname: The hostname to assign to the server.
:param pulumi.Input[str] image_id: The ID of the Vultr marketplace application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications) Note marketplace applications are denoted by type: `marketplace` and you must use the `image_id` not the id.
:param pulumi.Input[str] iso_id: The ID of the ISO file to be installed on the server. [See List ISO](https://www.vultr.com/api/#operation/list-isos)
:param pulumi.Input[str] label: A label for the server.
:param pulumi.Input[int] os_id: The ID of the operating system to be installed on the server. [See List OS](https://www.vultr.com/api/#operation/list-os)
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_network_ids: A list of private network IDs to be attached to the server.
:param pulumi.Input[str] reserved_ip_id: ID of the floating IP to use as the main IP of this server.
:param pulumi.Input[str] script_id: The ID of the startup script you want added to the server.
:param pulumi.Input[str] snapshot_id: The ID of the Vultr snapshot that the server will restore for the initial installation. [See List Snapshots](https://www.vultr.com/api/#operation/list-snapshots)
:param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_key_ids: A list of SSH key IDs to apply to the server on install (only valid for Linux/FreeBSD).
:param pulumi.Input[str] tag: The tag to assign to the server.
:param pulumi.Input[str] user_data: Generic data store, which some provisioning tools and cloud operating systems use as a configuration file. It is generally consumed only once after an instance has been launched, but individual needs may vary.
"""
pulumi.set(__self__, "plan", plan)
pulumi.set(__self__, "region", region)
if activation_email is not None:
pulumi.set(__self__, "activation_email", activation_email)
if app_id is not None:
pulumi.set(__self__, "app_id", app_id)
if backups is not None:
pulumi.set(__self__, "backups", backups)
if backups_schedule is not None:
pulumi.set(__self__, "backups_schedule", backups_schedule)
if ddos_protection is not None:
pulumi.set(__self__, "ddos_protection", ddos_protection)
if enable_ipv6 is not None:
pulumi.set(__self__, "enable_ipv6", enable_ipv6)
if enable_private_network is not None:
warnings.warn("""In the next release of this provider we will be removing `enable_private_network` due to issues that may cause drift and having to maintain private network ip state. Please switch to using private_network_ids to manage your private network fields.""", DeprecationWarning)
pulumi.log.warn("""enable_private_network is deprecated: In the next release of this provider we will be removing `enable_private_network` due to issues that may cause drift and having to maintain private network ip state. Please switch to using private_network_ids to manage your private network fields.""")
if enable_private_network is not None:
pulumi.set(__self__, "enable_private_network", enable_private_network)
if firewall_group_id is not None:
pulumi.set(__self__, "firewall_group_id", firewall_group_id)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if image_id is not None:
pulumi.set(__self__, "image_id", image_id)
if iso_id is not None:
pulumi.set(__self__, "iso_id", iso_id)
if label is not None:
pulumi.set(__self__, "label", label)
if os_id is not None:
pulumi.set(__self__, "os_id", os_id)
if private_network_ids is not None:
pulumi.set(__self__, "private_network_ids", private_network_ids)
if reserved_ip_id is not None:
pulumi.set(__self__, "reserved_ip_id", reserved_ip_id)
if script_id is not None:
pulumi.set(__self__, "script_id", script_id)
if snapshot_id is not None:
pulumi.set(__self__, "snapshot_id", snapshot_id)
if ssh_key_ids is not None:
pulumi.set(__self__, "ssh_key_ids", ssh_key_ids)
if tag is not None:
pulumi.set(__self__, "tag", tag)
if user_data is not None:
pulumi.set(__self__, "user_data", user_data)
@property
@pulumi.getter
def plan(self) -> pulumi.Input[str]:
"""
The ID of the plan that you want the instance to subscribe to. [See List Plans](https://www.vultr.com/api/#tag/plans)
"""
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: pulumi.Input[str]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter
def region(self) -> pulumi.Input[str]:
"""
The ID of the region that the instance is to be created in. [See List Regions](https://www.vultr.com/api/#operation/list-regions)
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: pulumi.Input[str]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="activationEmail")
def activation_email(self) -> Optional[pulumi.Input[bool]]:
"""
Whether an activation email will be sent when the server is ready.
"""
return pulumi.get(self, "activation_email")
@activation_email.setter
def activation_email(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "activation_email", value)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the Vultr application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications)
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter
def backups(self) -> Optional[pulumi.Input[str]]:
"""
Whether automatic backups will be enabled for this server (these have an extra charge associated with them). Values can be enabled or disabled.
"""
return pulumi.get(self, "backups")
@backups.setter
def backups(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backups", value)
@property
@pulumi.getter(name="backupsSchedule")
def backups_schedule(self) -> Optional[pulumi.Input['InstanceBackupsScheduleArgs']]:
"""
A block that defines the way backups should be scheduled. While this is an optional field if `backups` are `enabled` this field is mandatory. The configuration of a `backups_schedule` is listed below.
"""
return pulumi.get(self, "backups_schedule")
@backups_schedule.setter
def backups_schedule(self, value: Optional[pulumi.Input['InstanceBackupsScheduleArgs']]):
pulumi.set(self, "backups_schedule", value)
@property
@pulumi.getter(name="ddosProtection")
def ddos_protection(self) -> Optional[pulumi.Input[bool]]:
"""
Whether DDOS protection will be enabled on the server (there is an additional charge for this).
"""
return pulumi.get(self, "ddos_protection")
@ddos_protection.setter
def ddos_protection(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ddos_protection", value)
@property
@pulumi.getter(name="enableIpv6")
def enable_ipv6(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the server has IPv6 networking activated.
"""
return pulumi.get(self, "enable_ipv6")
@enable_ipv6.setter
def enable_ipv6(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_ipv6", value)
@property
@pulumi.getter(name="enablePrivateNetwork")
def enable_private_network(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the server has private networking support enabled.
"""
return pulumi.get(self, "enable_private_network")
@enable_private_network.setter
def enable_private_network(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_private_network", value)
@property
@pulumi.getter(name="firewallGroupId")
def firewall_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the firewall group to assign to the server.
"""
return pulumi.get(self, "firewall_group_id")
@firewall_group_id.setter
def firewall_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "firewall_group_id", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
The hostname to assign to the server.
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter(name="imageId")
def image_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Vultr marketplace application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications) Note marketplace applications are denoted by type: `marketplace` and you must use the `image_id` not the id.
"""
return pulumi.get(self, "image_id")
@image_id.setter
def image_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_id", value)
@property
@pulumi.getter(name="isoId")
def iso_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the ISO file to be installed on the server. [See List ISO](https://www.vultr.com/api/#operation/list-isos)
"""
return pulumi.get(self, "iso_id")
@iso_id.setter
def iso_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iso_id", value)
@property
@pulumi.getter
def label(self) -> Optional[pulumi.Input[str]]:
"""
A label for the server.
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "label", value)
@property
@pulumi.getter(name="osId")
def os_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the operating system to be installed on the server. [See List OS](https://www.vultr.com/api/#operation/list-os)
"""
return pulumi.get(self, "os_id")
@os_id.setter
def os_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "os_id", value)
@property
@pulumi.getter(name="privateNetworkIds")
def private_network_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of private network IDs to be attached to the server.
"""
return pulumi.get(self, "private_network_ids")
@private_network_ids.setter
def private_network_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "private_network_ids", value)
@property
@pulumi.getter(name="reservedIpId")
def reserved_ip_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the floating IP to use as the main IP of this server.
"""
return pulumi.get(self, "reserved_ip_id")
@reserved_ip_id.setter
def reserved_ip_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reserved_ip_id", value)
@property
@pulumi.getter(name="scriptId")
def script_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the startup script you want added to the server.
"""
return pulumi.get(self, "script_id")
@script_id.setter
def script_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "script_id", value)
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Vultr snapshot that the server will restore for the initial installation. [See List Snapshots](https://www.vultr.com/api/#operation/list-snapshots)
"""
return pulumi.get(self, "snapshot_id")
@snapshot_id.setter
def snapshot_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_id", value)
@property
@pulumi.getter(name="sshKeyIds")
def ssh_key_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of SSH key IDs to apply to the server on install (only valid for Linux/FreeBSD).
"""
return pulumi.get(self, "ssh_key_ids")
@ssh_key_ids.setter
def ssh_key_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "ssh_key_ids", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
The tag to assign to the server.
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@property
@pulumi.getter(name="userData")
def user_data(self) -> Optional[pulumi.Input[str]]:
"""
Generic data store, which some provisioning tools and cloud operating systems use as a configuration file. It is generally consumed only once after an instance has been launched, but individual needs may vary.
"""
return pulumi.get(self, "user_data")
@user_data.setter
def user_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_data", value)
@pulumi.input_type
class _InstanceState:
def __init__(__self__, *,
activation_email: Optional[pulumi.Input[bool]] = None,
allowed_bandwidth: Optional[pulumi.Input[int]] = None,
app_id: Optional[pulumi.Input[int]] = None,
backups: Optional[pulumi.Input[str]] = None,
backups_schedule: Optional[pulumi.Input['InstanceBackupsScheduleArgs']] = None,
date_created: Optional[pulumi.Input[str]] = None,
ddos_protection: Optional[pulumi.Input[bool]] = None,
default_password: Optional[pulumi.Input[str]] = None,
disk: Optional[pulumi.Input[int]] = None,
enable_ipv6: Optional[pulumi.Input[bool]] = None,
enable_private_network: Optional[pulumi.Input[bool]] = None,
features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
firewall_group_id: Optional[pulumi.Input[str]] = None,
gateway_v4: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
image_id: Optional[pulumi.Input[str]] = None,
internal_ip: Optional[pulumi.Input[str]] = None,
iso_id: Optional[pulumi.Input[str]] = None,
kvm: Optional[pulumi.Input[str]] = None,
label: Optional[pulumi.Input[str]] = None,
main_ip: Optional[pulumi.Input[str]] = None,
netmask_v4: Optional[pulumi.Input[str]] = None,
os: Optional[pulumi.Input[str]] = None,
os_id: Optional[pulumi.Input[int]] = None,
plan: Optional[pulumi.Input[str]] = None,
power_status: Optional[pulumi.Input[str]] = None,
private_network_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ram: Optional[pulumi.Input[int]] = None,
region: Optional[pulumi.Input[str]] = None,
reserved_ip_id: Optional[pulumi.Input[str]] = None,
script_id: Optional[pulumi.Input[str]] = None,
server_status: Optional[pulumi.Input[str]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
ssh_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
status: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
user_data: Optional[pulumi.Input[str]] = None,
v6_main_ip: Optional[pulumi.Input[str]] = None,
v6_network: Optional[pulumi.Input[str]] = None,
v6_network_size: Optional[pulumi.Input[int]] = None,
vcpu_count: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering Instance resources.
:param pulumi.Input[bool] activation_email: Whether an activation email will be sent when the server is ready.
:param pulumi.Input[int] allowed_bandwidth: The server's allowed bandwidth usage in GB.
:param pulumi.Input[int] app_id: The ID of the Vultr application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications)
:param pulumi.Input[str] backups: Whether automatic backups will be enabled for this server (these have an extra charge associated with them). Values can be enabled or disabled.
:param pulumi.Input['InstanceBackupsScheduleArgs'] backups_schedule: A block that defines the way backups should be scheduled. While this is an optional field if `backups` are `enabled` this field is mandatory. The configuration of a `backups_schedule` is listed below.
:param pulumi.Input[str] date_created: The date the server was added to your Vultr account.
:param pulumi.Input[bool] ddos_protection: Whether DDOS protection will be enabled on the server (there is an additional charge for this).
:param pulumi.Input[str] default_password: The server's default password.
:param pulumi.Input[int] disk: The description of the disk(s) on the server.
:param pulumi.Input[bool] enable_ipv6: Whether the server has IPv6 networking activated.
:param pulumi.Input[bool] enable_private_network: Whether the server has private networking support enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] features: Array of which features are enabled.
:param pulumi.Input[str] firewall_group_id: The ID of the firewall group to assign to the server.
:param pulumi.Input[str] gateway_v4: The server's IPv4 gateway.
:param pulumi.Input[str] hostname: The hostname to assign to the server.
:param pulumi.Input[str] image_id: The ID of the Vultr marketplace application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications) Note marketplace applications are denoted by type: `marketplace` and you must use the `image_id` not the id.
:param pulumi.Input[str] internal_ip: The server's internal IP address.
:param pulumi.Input[str] iso_id: The ID of the ISO file to be installed on the server. [See List ISO](https://www.vultr.com/api/#operation/list-isos)
:param pulumi.Input[str] kvm: The server's current KVM URL. This URL will change periodically. It is not advised to cache this value.
:param pulumi.Input[str] label: A label for the server.
:param pulumi.Input[str] main_ip: The server's main IP address.
:param pulumi.Input[str] netmask_v4: The server's IPv4 netmask.
:param pulumi.Input[str] os: The string description of the operating system installed on the server.
:param pulumi.Input[int] os_id: The ID of the operating system to be installed on the server. [See List OS](https://www.vultr.com/api/#operation/list-os)
:param pulumi.Input[str] plan: The ID of the plan that you want the instance to subscribe to. [See List Plans](https://www.vultr.com/api/#tag/plans)
:param pulumi.Input[str] power_status: Whether the server is powered on or not.
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_network_ids: A list of private network IDs to be attached to the server.
:param pulumi.Input[int] ram: The amount of memory available on the server in MB.
:param pulumi.Input[str] region: The ID of the region that the instance is to be created in. [See List Regions](https://www.vultr.com/api/#operation/list-regions)
:param pulumi.Input[str] reserved_ip_id: ID of the floating IP to use as the main IP of this server.
:param pulumi.Input[str] script_id: The ID of the startup script you want added to the server.
:param pulumi.Input[str] server_status: A more detailed server status (none, locked, installingbooting, isomounting, ok).
:param pulumi.Input[str] snapshot_id: The ID of the Vultr snapshot that the server will restore for the initial installation. [See List Snapshots](https://www.vultr.com/api/#operation/list-snapshots)
:param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_key_ids: A list of SSH key IDs to apply to the server on install (only valid for Linux/FreeBSD).
:param pulumi.Input[str] status: The status of the server's subscription.
:param pulumi.Input[str] tag: The tag to assign to the server.
:param pulumi.Input[str] user_data: Generic data store, which some provisioning tools and cloud operating systems use as a configuration file. It is generally consumed only once after an instance has been launched, but individual needs may vary.
:param pulumi.Input[str] v6_main_ip: The main IPv6 network address.
:param pulumi.Input[str] v6_network: The IPv6 subnet.
:param pulumi.Input[int] v6_network_size: The IPv6 network size in bits.
:param pulumi.Input[int] vcpu_count: The number of virtual CPUs available on the server.
"""
if activation_email is not None:
pulumi.set(__self__, "activation_email", activation_email)
if allowed_bandwidth is not None:
pulumi.set(__self__, "allowed_bandwidth", allowed_bandwidth)
if app_id is not None:
pulumi.set(__self__, "app_id", app_id)
if backups is not None:
pulumi.set(__self__, "backups", backups)
if backups_schedule is not None:
pulumi.set(__self__, "backups_schedule", backups_schedule)
if date_created is not None:
pulumi.set(__self__, "date_created", date_created)
if ddos_protection is not None:
pulumi.set(__self__, "ddos_protection", ddos_protection)
if default_password is not None:
pulumi.set(__self__, "default_password", default_password)
if disk is not None:
pulumi.set(__self__, "disk", disk)
if enable_ipv6 is not None:
pulumi.set(__self__, "enable_ipv6", enable_ipv6)
if enable_private_network is not None:
warnings.warn("""In the next release of this provider we will be removing `enable_private_network` due to issues that may cause drift and having to maintain private network ip state. Please switch to using private_network_ids to manage your private network fields.""", DeprecationWarning)
pulumi.log.warn("""enable_private_network is deprecated: In the next release of this provider we will be removing `enable_private_network` due to issues that may cause drift and having to maintain private network ip state. Please switch to using private_network_ids to manage your private network fields.""")
if enable_private_network is not None:
pulumi.set(__self__, "enable_private_network", enable_private_network)
if features is not None:
pulumi.set(__self__, "features", features)
if firewall_group_id is not None:
pulumi.set(__self__, "firewall_group_id", firewall_group_id)
if gateway_v4 is not None:
pulumi.set(__self__, "gateway_v4", gateway_v4)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if image_id is not None:
pulumi.set(__self__, "image_id", image_id)
if internal_ip is not None:
pulumi.set(__self__, "internal_ip", internal_ip)
if iso_id is not None:
pulumi.set(__self__, "iso_id", iso_id)
if kvm is not None:
pulumi.set(__self__, "kvm", kvm)
if label is not None:
pulumi.set(__self__, "label", label)
if main_ip is not None:
pulumi.set(__self__, "main_ip", main_ip)
if netmask_v4 is not None:
pulumi.set(__self__, "netmask_v4", netmask_v4)
if os is not None:
pulumi.set(__self__, "os", os)
if os_id is not None:
pulumi.set(__self__, "os_id", os_id)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if power_status is not None:
pulumi.set(__self__, "power_status", power_status)
if private_network_ids is not None:
pulumi.set(__self__, "private_network_ids", private_network_ids)
if ram is not None:
pulumi.set(__self__, "ram", ram)
if region is not None:
pulumi.set(__self__, "region", region)
if reserved_ip_id is not None:
pulumi.set(__self__, "reserved_ip_id", reserved_ip_id)
if script_id is not None:
pulumi.set(__self__, "script_id", script_id)
if server_status is not None:
pulumi.set(__self__, "server_status", server_status)
if snapshot_id is not None:
pulumi.set(__self__, "snapshot_id", snapshot_id)
if ssh_key_ids is not None:
pulumi.set(__self__, "ssh_key_ids", ssh_key_ids)
if status is not None:
pulumi.set(__self__, "status", status)
if tag is not None:
pulumi.set(__self__, "tag", tag)
if user_data is not None:
pulumi.set(__self__, "user_data", user_data)
if v6_main_ip is not None:
pulumi.set(__self__, "v6_main_ip", v6_main_ip)
if v6_network is not None:
pulumi.set(__self__, "v6_network", v6_network)
if v6_network_size is not None:
pulumi.set(__self__, "v6_network_size", v6_network_size)
if vcpu_count is not None:
pulumi.set(__self__, "vcpu_count", vcpu_count)
@property
@pulumi.getter(name="activationEmail")
def activation_email(self) -> Optional[pulumi.Input[bool]]:
"""
Whether an activation email will be sent when the server is ready.
"""
return pulumi.get(self, "activation_email")
@activation_email.setter
def activation_email(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "activation_email", value)
@property
@pulumi.getter(name="allowedBandwidth")
def allowed_bandwidth(self) -> Optional[pulumi.Input[int]]:
"""
The server's allowed bandwidth usage in GB.
"""
return pulumi.get(self, "allowed_bandwidth")
@allowed_bandwidth.setter
def allowed_bandwidth(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "allowed_bandwidth", value)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the Vultr application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications)
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter
def backups(self) -> Optional[pulumi.Input[str]]:
"""
Whether automatic backups will be enabled for this server (these have an extra charge associated with them). Values can be enabled or disabled.
"""
return pulumi.get(self, "backups")
@backups.setter
def backups(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backups", value)
@property
@pulumi.getter(name="backupsSchedule")
def backups_schedule(self) -> Optional[pulumi.Input['InstanceBackupsScheduleArgs']]:
"""
A block that defines the way backups should be scheduled. While this is an optional field if `backups` are `enabled` this field is mandatory. The configuration of a `backups_schedule` is listed below.
"""
return pulumi.get(self, "backups_schedule")
@backups_schedule.setter
def backups_schedule(self, value: Optional[pulumi.Input['InstanceBackupsScheduleArgs']]):
pulumi.set(self, "backups_schedule", value)
@property
@pulumi.getter(name="dateCreated")
def date_created(self) -> Optional[pulumi.Input[str]]:
"""
The date the server was added to your Vultr account.
"""
return pulumi.get(self, "date_created")
@date_created.setter
def date_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "date_created", value)
@property
@pulumi.getter(name="ddosProtection")
def ddos_protection(self) -> Optional[pulumi.Input[bool]]:
"""
Whether DDOS protection will be enabled on the server (there is an additional charge for this).
"""
return pulumi.get(self, "ddos_protection")
@ddos_protection.setter
def ddos_protection(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ddos_protection", value)
@property
@pulumi.getter(name="defaultPassword")
def default_password(self) -> Optional[pulumi.Input[str]]:
"""
The server's default password.
"""
return pulumi.get(self, "default_password")
@default_password.setter
def default_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_password", value)
@property
@pulumi.getter
def disk(self) -> Optional[pulumi.Input[int]]:
"""
The description of the disk(s) on the server.
"""
return pulumi.get(self, "disk")
@disk.setter
def disk(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "disk", value)
@property
@pulumi.getter(name="enableIpv6")
def enable_ipv6(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the server has IPv6 networking activated.
"""
return pulumi.get(self, "enable_ipv6")
@enable_ipv6.setter
def enable_ipv6(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_ipv6", value)
@property
@pulumi.getter(name="enablePrivateNetwork")
def enable_private_network(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the server has private networking support enabled.
"""
return pulumi.get(self, "enable_private_network")
@enable_private_network.setter
def enable_private_network(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_private_network", value)
@property
@pulumi.getter
def features(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Array of which features are enabled.
"""
return pulumi.get(self, "features")
@features.setter
def features(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "features", value)
@property
@pulumi.getter(name="firewallGroupId")
def firewall_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the firewall group to assign to the server.
"""
return pulumi.get(self, "firewall_group_id")
@firewall_group_id.setter
def firewall_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "firewall_group_id", value)
@property
@pulumi.getter(name="gatewayV4")
def gateway_v4(self) -> Optional[pulumi.Input[str]]:
"""
The server's IPv4 gateway.
"""
return pulumi.get(self, "gateway_v4")
@gateway_v4.setter
def gateway_v4(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gateway_v4", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
"""
The hostname to assign to the server.
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter(name="imageId")
def image_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Vultr marketplace application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications) Note marketplace applications are denoted by type: `marketplace` and you must use the `image_id` not the id.
"""
return pulumi.get(self, "image_id")
@image_id.setter
def image_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image_id", value)
@property
@pulumi.getter(name="internalIp")
def internal_ip(self) -> Optional[pulumi.Input[str]]:
"""
The server's internal IP address.
"""
return pulumi.get(self, "internal_ip")
@internal_ip.setter
def internal_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "internal_ip", value)
@property
@pulumi.getter(name="isoId")
def iso_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the ISO file to be installed on the server. [See List ISO](https://www.vultr.com/api/#operation/list-isos)
"""
return pulumi.get(self, "iso_id")
@iso_id.setter
def iso_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iso_id", value)
@property
@pulumi.getter
def kvm(self) -> Optional[pulumi.Input[str]]:
"""
The server's current KVM URL. This URL will change periodically. It is not advised to cache this value.
"""
return pulumi.get(self, "kvm")
@kvm.setter
def kvm(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kvm", value)
@property
@pulumi.getter
def label(self) -> Optional[pulumi.Input[str]]:
"""
A label for the server.
"""
return pulumi.get(self, "label")
@label.setter
def label(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "label", value)
@property
@pulumi.getter(name="mainIp")
def main_ip(self) -> Optional[pulumi.Input[str]]:
"""
The server's main IP address.
"""
return pulumi.get(self, "main_ip")
@main_ip.setter
def main_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "main_ip", value)
@property
@pulumi.getter(name="netmaskV4")
def netmask_v4(self) -> Optional[pulumi.Input[str]]:
"""
The server's IPv4 netmask.
"""
return pulumi.get(self, "netmask_v4")
@netmask_v4.setter
def netmask_v4(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "netmask_v4", value)
@property
@pulumi.getter
def os(self) -> Optional[pulumi.Input[str]]:
"""
The string description of the operating system installed on the server.
"""
return pulumi.get(self, "os")
@os.setter
def os(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "os", value)
@property
@pulumi.getter(name="osId")
def os_id(self) -> Optional[pulumi.Input[int]]:
"""
The ID of the operating system to be installed on the server. [See List OS](https://www.vultr.com/api/#operation/list-os)
"""
return pulumi.get(self, "os_id")
@os_id.setter
def os_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "os_id", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the plan that you want the instance to subscribe to. [See List Plans](https://www.vultr.com/api/#tag/plans)
"""
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter(name="powerStatus")
def power_status(self) -> Optional[pulumi.Input[str]]:
"""
Whether the server is powered on or not.
"""
return pulumi.get(self, "power_status")
@power_status.setter
def power_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "power_status", value)
@property
@pulumi.getter(name="privateNetworkIds")
def private_network_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of private network IDs to be attached to the server.
"""
return pulumi.get(self, "private_network_ids")
@private_network_ids.setter
def private_network_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "private_network_ids", value)
@property
@pulumi.getter
def ram(self) -> Optional[pulumi.Input[int]]:
"""
The amount of memory available on the server in MB.
"""
return pulumi.get(self, "ram")
@ram.setter
def ram(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ram", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the region that the instance is to be created in. [See List Regions](https://www.vultr.com/api/#operation/list-regions)
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="reservedIpId")
def reserved_ip_id(self) -> Optional[pulumi.Input[str]]:
"""
ID of the floating IP to use as the main IP of this server.
"""
return pulumi.get(self, "reserved_ip_id")
@reserved_ip_id.setter
def reserved_ip_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reserved_ip_id", value)
@property
@pulumi.getter(name="scriptId")
def script_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the startup script you want added to the server.
"""
return pulumi.get(self, "script_id")
@script_id.setter
def script_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "script_id", value)
@property
@pulumi.getter(name="serverStatus")
def server_status(self) -> Optional[pulumi.Input[str]]:
"""
A more detailed server status (none, locked, installingbooting, isomounting, ok).
"""
return pulumi.get(self, "server_status")
@server_status.setter
def server_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_status", value)
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Vultr snapshot that the server will restore for the initial installation. [See List Snapshots](https://www.vultr.com/api/#operation/list-snapshots)
"""
return pulumi.get(self, "snapshot_id")
@snapshot_id.setter
def snapshot_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_id", value)
@property
@pulumi.getter(name="sshKeyIds")
def ssh_key_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of SSH key IDs to apply to the server on install (only valid for Linux/FreeBSD).
"""
return pulumi.get(self, "ssh_key_ids")
@ssh_key_ids.setter
def ssh_key_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "ssh_key_ids", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of the server's subscription.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
The tag to assign to the server.
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@property
@pulumi.getter(name="userData")
def user_data(self) -> Optional[pulumi.Input[str]]:
"""
Generic data store, which some provisioning tools and cloud operating systems use as a configuration file. It is generally consumed only once after an instance has been launched, but individual needs may vary.
"""
return pulumi.get(self, "user_data")
@user_data.setter
def user_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user_data", value)
@property
@pulumi.getter(name="v6MainIp")
def v6_main_ip(self) -> Optional[pulumi.Input[str]]:
"""
The main IPv6 network address.
"""
return pulumi.get(self, "v6_main_ip")
@v6_main_ip.setter
def v6_main_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "v6_main_ip", value)
@property
@pulumi.getter(name="v6Network")
def v6_network(self) -> Optional[pulumi.Input[str]]:
"""
The IPv6 subnet.
"""
return pulumi.get(self, "v6_network")
@v6_network.setter
def v6_network(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "v6_network", value)
@property
@pulumi.getter(name="v6NetworkSize")
def v6_network_size(self) -> Optional[pulumi.Input[int]]:
"""
The IPv6 network size in bits.
"""
return pulumi.get(self, "v6_network_size")
@v6_network_size.setter
def v6_network_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "v6_network_size", value)
@property
@pulumi.getter(name="vcpuCount")
def vcpu_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of virtual CPUs available on the server.
"""
return pulumi.get(self, "vcpu_count")
@vcpu_count.setter
def vcpu_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "vcpu_count", value)
class Instance(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
activation_email: Optional[pulumi.Input[bool]] = None,
app_id: Optional[pulumi.Input[int]] = None,
backups: Optional[pulumi.Input[str]] = None,
backups_schedule: Optional[pulumi.Input[pulumi.InputType['InstanceBackupsScheduleArgs']]] = None,
ddos_protection: Optional[pulumi.Input[bool]] = None,
enable_ipv6: Optional[pulumi.Input[bool]] = None,
enable_private_network: Optional[pulumi.Input[bool]] = None,
firewall_group_id: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
image_id: Optional[pulumi.Input[str]] = None,
iso_id: Optional[pulumi.Input[str]] = None,
label: Optional[pulumi.Input[str]] = None,
os_id: Optional[pulumi.Input[int]] = None,
plan: Optional[pulumi.Input[str]] = None,
private_network_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
region: Optional[pulumi.Input[str]] = None,
reserved_ip_id: Optional[pulumi.Input[str]] = None,
script_id: Optional[pulumi.Input[str]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
ssh_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tag: Optional[pulumi.Input[str]] = None,
user_data: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a Vultr instance resource. This can be used to create, read, modify, and delete instances on your Vultr account.
## Example Usage
Create a new instance:
```python
import pulumi
import pulumi_vultr as vultr
my_instance = vultr.Instance("myInstance",
os_id=167,
plan="vc2-1c-1gb",
region="sea")
```
Create a new instance with options:
```python
import pulumi
import pulumi_vultr as vultr
my_instance = vultr.Instance("myInstance",
activation_email=False,
backups="enabled",
ddos_protection=True,
enable_ipv6=True,
hostname="my-instance-hostname",
label="my-instance-label",
os_id=167,
plan="vc2-1c-1gb",
region="sea",
tag="my-instance-tag")
```
## Import
Servers can be imported using the server `ID`, e.g.
```sh
$ pulumi import vultr:index/instance:Instance my_server b6a859c5-b299-49dd-8888-b1abbc517d08
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] activation_email: Whether an activation email will be sent when the server is ready.
:param pulumi.Input[int] app_id: The ID of the Vultr application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications)
:param pulumi.Input[str] backups: Whether automatic backups will be enabled for this server (these have an extra charge associated with them). Values can be enabled or disabled.
:param pulumi.Input[pulumi.InputType['InstanceBackupsScheduleArgs']] backups_schedule: A block that defines the way backups should be scheduled. While this is an optional field if `backups` are `enabled` this field is mandatory. The configuration of a `backups_schedule` is listed below.
:param pulumi.Input[bool] ddos_protection: Whether DDOS protection will be enabled on the server (there is an additional charge for this).
:param pulumi.Input[bool] enable_ipv6: Whether the server has IPv6 networking activated.
:param pulumi.Input[bool] enable_private_network: Whether the server has private networking support enabled.
:param pulumi.Input[str] firewall_group_id: The ID of the firewall group to assign to the server.
:param pulumi.Input[str] hostname: The hostname to assign to the server.
:param pulumi.Input[str] image_id: The ID of the Vultr marketplace application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications) Note marketplace applications are denoted by type: `marketplace` and you must use the `image_id` not the id.
:param pulumi.Input[str] iso_id: The ID of the ISO file to be installed on the server. [See List ISO](https://www.vultr.com/api/#operation/list-isos)
:param pulumi.Input[str] label: A label for the server.
:param pulumi.Input[int] os_id: The ID of the operating system to be installed on the server. [See List OS](https://www.vultr.com/api/#operation/list-os)
:param pulumi.Input[str] plan: The ID of the plan that you want the instance to subscribe to. [See List Plans](https://www.vultr.com/api/#tag/plans)
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_network_ids: A list of private network IDs to be attached to the server.
:param pulumi.Input[str] region: The ID of the region that the instance is to be created in. [See List Regions](https://www.vultr.com/api/#operation/list-regions)
:param pulumi.Input[str] reserved_ip_id: ID of the floating IP to use as the main IP of this server.
:param pulumi.Input[str] script_id: The ID of the startup script you want added to the server.
:param pulumi.Input[str] snapshot_id: The ID of the Vultr snapshot that the server will restore for the initial installation. [See List Snapshots](https://www.vultr.com/api/#operation/list-snapshots)
:param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_key_ids: A list of SSH key IDs to apply to the server on install (only valid for Linux/FreeBSD).
:param pulumi.Input[str] tag: The tag to assign to the server.
:param pulumi.Input[str] user_data: Generic data store, which some provisioning tools and cloud operating systems use as a configuration file. It is generally consumed only once after an instance has been launched, but individual needs may vary.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InstanceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Vultr instance resource. This can be used to create, read, modify, and delete instances on your Vultr account.
## Example Usage
Create a new instance:
```python
import pulumi
import pulumi_vultr as vultr
my_instance = vultr.Instance("myInstance",
os_id=167,
plan="vc2-1c-1gb",
region="sea")
```
Create a new instance with options:
```python
import pulumi
import pulumi_vultr as vultr
my_instance = vultr.Instance("myInstance",
activation_email=False,
backups="enabled",
ddos_protection=True,
enable_ipv6=True,
hostname="my-instance-hostname",
label="my-instance-label",
os_id=167,
plan="vc2-1c-1gb",
region="sea",
tag="my-instance-tag")
```
## Import
Servers can be imported using the server `ID`, e.g.
```sh
$ pulumi import vultr:index/instance:Instance my_server b6a859c5-b299-49dd-8888-b1abbc517d08
```
:param str resource_name: The name of the resource.
:param InstanceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InstanceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
activation_email: Optional[pulumi.Input[bool]] = None,
app_id: Optional[pulumi.Input[int]] = None,
backups: Optional[pulumi.Input[str]] = None,
backups_schedule: Optional[pulumi.Input[pulumi.InputType['InstanceBackupsScheduleArgs']]] = None,
ddos_protection: Optional[pulumi.Input[bool]] = None,
enable_ipv6: Optional[pulumi.Input[bool]] = None,
enable_private_network: Optional[pulumi.Input[bool]] = None,
firewall_group_id: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
image_id: Optional[pulumi.Input[str]] = None,
iso_id: Optional[pulumi.Input[str]] = None,
label: Optional[pulumi.Input[str]] = None,
os_id: Optional[pulumi.Input[int]] = None,
plan: Optional[pulumi.Input[str]] = None,
private_network_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
region: Optional[pulumi.Input[str]] = None,
reserved_ip_id: Optional[pulumi.Input[str]] = None,
script_id: Optional[pulumi.Input[str]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
ssh_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tag: Optional[pulumi.Input[str]] = None,
user_data: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InstanceArgs.__new__(InstanceArgs)
__props__.__dict__["activation_email"] = activation_email
__props__.__dict__["app_id"] = app_id
__props__.__dict__["backups"] = backups
__props__.__dict__["backups_schedule"] = backups_schedule
__props__.__dict__["ddos_protection"] = ddos_protection
__props__.__dict__["enable_ipv6"] = enable_ipv6
if enable_private_network is not None and not opts.urn:
warnings.warn("""In the next release of this provider we will be removing `enable_private_network` due to issues that may cause drift and having to maintain private network ip state. Please switch to using private_network_ids to manage your private network fields.""", DeprecationWarning)
pulumi.log.warn("""enable_private_network is deprecated: In the next release of this provider we will be removing `enable_private_network` due to issues that may cause drift and having to maintain private network ip state. Please switch to using private_network_ids to manage your private network fields.""")
__props__.__dict__["enable_private_network"] = enable_private_network
__props__.__dict__["firewall_group_id"] = firewall_group_id
__props__.__dict__["hostname"] = hostname
__props__.__dict__["image_id"] = image_id
__props__.__dict__["iso_id"] = iso_id
__props__.__dict__["label"] = label
__props__.__dict__["os_id"] = os_id
if plan is None and not opts.urn:
raise TypeError("Missing required property 'plan'")
__props__.__dict__["plan"] = plan
__props__.__dict__["private_network_ids"] = private_network_ids
if region is None and not opts.urn:
raise TypeError("Missing required property 'region'")
__props__.__dict__["region"] = region
__props__.__dict__["reserved_ip_id"] = reserved_ip_id
__props__.__dict__["script_id"] = script_id
__props__.__dict__["snapshot_id"] = snapshot_id
__props__.__dict__["ssh_key_ids"] = ssh_key_ids
__props__.__dict__["tag"] = tag
__props__.__dict__["user_data"] = user_data
__props__.__dict__["allowed_bandwidth"] = None
__props__.__dict__["date_created"] = None
__props__.__dict__["default_password"] = None
__props__.__dict__["disk"] = None
__props__.__dict__["features"] = None
__props__.__dict__["gateway_v4"] = None
__props__.__dict__["internal_ip"] = None
__props__.__dict__["kvm"] = None
__props__.__dict__["main_ip"] = None
__props__.__dict__["netmask_v4"] = None
__props__.__dict__["os"] = None
__props__.__dict__["power_status"] = None
__props__.__dict__["ram"] = None
__props__.__dict__["server_status"] = None
__props__.__dict__["status"] = None
__props__.__dict__["v6_main_ip"] = None
__props__.__dict__["v6_network"] = None
__props__.__dict__["v6_network_size"] = None
__props__.__dict__["vcpu_count"] = None
super(Instance, __self__).__init__(
'vultr:index/instance:Instance',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
activation_email: Optional[pulumi.Input[bool]] = None,
allowed_bandwidth: Optional[pulumi.Input[int]] = None,
app_id: Optional[pulumi.Input[int]] = None,
backups: Optional[pulumi.Input[str]] = None,
backups_schedule: Optional[pulumi.Input[pulumi.InputType['InstanceBackupsScheduleArgs']]] = None,
date_created: Optional[pulumi.Input[str]] = None,
ddos_protection: Optional[pulumi.Input[bool]] = None,
default_password: Optional[pulumi.Input[str]] = None,
disk: Optional[pulumi.Input[int]] = None,
enable_ipv6: Optional[pulumi.Input[bool]] = None,
enable_private_network: Optional[pulumi.Input[bool]] = None,
features: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
firewall_group_id: Optional[pulumi.Input[str]] = None,
gateway_v4: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
image_id: Optional[pulumi.Input[str]] = None,
internal_ip: Optional[pulumi.Input[str]] = None,
iso_id: Optional[pulumi.Input[str]] = None,
kvm: Optional[pulumi.Input[str]] = None,
label: Optional[pulumi.Input[str]] = None,
main_ip: Optional[pulumi.Input[str]] = None,
netmask_v4: Optional[pulumi.Input[str]] = None,
os: Optional[pulumi.Input[str]] = None,
os_id: Optional[pulumi.Input[int]] = None,
plan: Optional[pulumi.Input[str]] = None,
power_status: Optional[pulumi.Input[str]] = None,
private_network_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ram: Optional[pulumi.Input[int]] = None,
region: Optional[pulumi.Input[str]] = None,
reserved_ip_id: Optional[pulumi.Input[str]] = None,
script_id: Optional[pulumi.Input[str]] = None,
server_status: Optional[pulumi.Input[str]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
ssh_key_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
status: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
user_data: Optional[pulumi.Input[str]] = None,
v6_main_ip: Optional[pulumi.Input[str]] = None,
v6_network: Optional[pulumi.Input[str]] = None,
v6_network_size: Optional[pulumi.Input[int]] = None,
vcpu_count: Optional[pulumi.Input[int]] = None) -> 'Instance':
"""
Get an existing Instance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] activation_email: Whether an activation email will be sent when the server is ready.
:param pulumi.Input[int] allowed_bandwidth: The server's allowed bandwidth usage in GB.
:param pulumi.Input[int] app_id: The ID of the Vultr application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications)
:param pulumi.Input[str] backups: Whether automatic backups will be enabled for this server (these have an extra charge associated with them). Values can be enabled or disabled.
        :param pulumi.Input[pulumi.InputType['InstanceBackupsScheduleArgs']] backups_schedule: A block that defines the way backups should be scheduled. While this is an optional field, it is mandatory if `backups` is `enabled`. The configuration of a `backups_schedule` is listed below.
:param pulumi.Input[str] date_created: The date the server was added to your Vultr account.
:param pulumi.Input[bool] ddos_protection: Whether DDOS protection will be enabled on the server (there is an additional charge for this).
:param pulumi.Input[str] default_password: The server's default password.
:param pulumi.Input[int] disk: The description of the disk(s) on the server.
:param pulumi.Input[bool] enable_ipv6: Whether the server has IPv6 networking activated.
:param pulumi.Input[bool] enable_private_network: Whether the server has private networking support enabled.
:param pulumi.Input[Sequence[pulumi.Input[str]]] features: Array of which features are enabled.
:param pulumi.Input[str] firewall_group_id: The ID of the firewall group to assign to the server.
:param pulumi.Input[str] gateway_v4: The server's IPv4 gateway.
:param pulumi.Input[str] hostname: The hostname to assign to the server.
        :param pulumi.Input[str] image_id: The ID of the Vultr marketplace application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications) Note that marketplace applications are denoted by type `marketplace`, and you must use the `image_id`, not the `id`.
:param pulumi.Input[str] internal_ip: The server's internal IP address.
:param pulumi.Input[str] iso_id: The ID of the ISO file to be installed on the server. [See List ISO](https://www.vultr.com/api/#operation/list-isos)
:param pulumi.Input[str] kvm: The server's current KVM URL. This URL will change periodically. It is not advised to cache this value.
:param pulumi.Input[str] label: A label for the server.
:param pulumi.Input[str] main_ip: The server's main IP address.
:param pulumi.Input[str] netmask_v4: The server's IPv4 netmask.
:param pulumi.Input[str] os: The string description of the operating system installed on the server.
:param pulumi.Input[int] os_id: The ID of the operating system to be installed on the server. [See List OS](https://www.vultr.com/api/#operation/list-os)
:param pulumi.Input[str] plan: The ID of the plan that you want the instance to subscribe to. [See List Plans](https://www.vultr.com/api/#tag/plans)
:param pulumi.Input[str] power_status: Whether the server is powered on or not.
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_network_ids: A list of private network IDs to be attached to the server.
:param pulumi.Input[int] ram: The amount of memory available on the server in MB.
:param pulumi.Input[str] region: The ID of the region that the instance is to be created in. [See List Regions](https://www.vultr.com/api/#operation/list-regions)
:param pulumi.Input[str] reserved_ip_id: ID of the floating IP to use as the main IP of this server.
:param pulumi.Input[str] script_id: The ID of the startup script you want added to the server.
:param pulumi.Input[str] server_status: A more detailed server status (none, locked, installingbooting, isomounting, ok).
:param pulumi.Input[str] snapshot_id: The ID of the Vultr snapshot that the server will restore for the initial installation. [See List Snapshots](https://www.vultr.com/api/#operation/list-snapshots)
:param pulumi.Input[Sequence[pulumi.Input[str]]] ssh_key_ids: A list of SSH key IDs to apply to the server on install (only valid for Linux/FreeBSD).
:param pulumi.Input[str] status: The status of the server's subscription.
:param pulumi.Input[str] tag: The tag to assign to the server.
:param pulumi.Input[str] user_data: Generic data store, which some provisioning tools and cloud operating systems use as a configuration file. It is generally consumed only once after an instance has been launched, but individual needs may vary.
:param pulumi.Input[str] v6_main_ip: The main IPv6 network address.
:param pulumi.Input[str] v6_network: The IPv6 subnet.
:param pulumi.Input[int] v6_network_size: The IPv6 network size in bits.
:param pulumi.Input[int] vcpu_count: The number of virtual CPUs available on the server.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _InstanceState.__new__(_InstanceState)
__props__.__dict__["activation_email"] = activation_email
__props__.__dict__["allowed_bandwidth"] = allowed_bandwidth
__props__.__dict__["app_id"] = app_id
__props__.__dict__["backups"] = backups
__props__.__dict__["backups_schedule"] = backups_schedule
__props__.__dict__["date_created"] = date_created
__props__.__dict__["ddos_protection"] = ddos_protection
__props__.__dict__["default_password"] = default_password
__props__.__dict__["disk"] = disk
__props__.__dict__["enable_ipv6"] = enable_ipv6
__props__.__dict__["enable_private_network"] = enable_private_network
__props__.__dict__["features"] = features
__props__.__dict__["firewall_group_id"] = firewall_group_id
__props__.__dict__["gateway_v4"] = gateway_v4
__props__.__dict__["hostname"] = hostname
__props__.__dict__["image_id"] = image_id
__props__.__dict__["internal_ip"] = internal_ip
__props__.__dict__["iso_id"] = iso_id
__props__.__dict__["kvm"] = kvm
__props__.__dict__["label"] = label
__props__.__dict__["main_ip"] = main_ip
__props__.__dict__["netmask_v4"] = netmask_v4
__props__.__dict__["os"] = os
__props__.__dict__["os_id"] = os_id
__props__.__dict__["plan"] = plan
__props__.__dict__["power_status"] = power_status
__props__.__dict__["private_network_ids"] = private_network_ids
__props__.__dict__["ram"] = ram
__props__.__dict__["region"] = region
__props__.__dict__["reserved_ip_id"] = reserved_ip_id
__props__.__dict__["script_id"] = script_id
__props__.__dict__["server_status"] = server_status
__props__.__dict__["snapshot_id"] = snapshot_id
__props__.__dict__["ssh_key_ids"] = ssh_key_ids
__props__.__dict__["status"] = status
__props__.__dict__["tag"] = tag
__props__.__dict__["user_data"] = user_data
__props__.__dict__["v6_main_ip"] = v6_main_ip
__props__.__dict__["v6_network"] = v6_network
__props__.__dict__["v6_network_size"] = v6_network_size
__props__.__dict__["vcpu_count"] = vcpu_count
return Instance(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activationEmail")
def activation_email(self) -> pulumi.Output[Optional[bool]]:
"""
Whether an activation email will be sent when the server is ready.
"""
return pulumi.get(self, "activation_email")
@property
@pulumi.getter(name="allowedBandwidth")
def allowed_bandwidth(self) -> pulumi.Output[int]:
"""
The server's allowed bandwidth usage in GB.
"""
return pulumi.get(self, "allowed_bandwidth")
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Output[int]:
"""
The ID of the Vultr application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications)
"""
return pulumi.get(self, "app_id")
@property
@pulumi.getter
def backups(self) -> pulumi.Output[Optional[str]]:
"""
Whether automatic backups will be enabled for this server (these have an extra charge associated with them). Values can be enabled or disabled.
"""
return pulumi.get(self, "backups")
@property
@pulumi.getter(name="backupsSchedule")
def backups_schedule(self) -> pulumi.Output[Optional['outputs.InstanceBackupsSchedule']]:
"""
        A block that defines the way backups should be scheduled. While this is an optional field, it is mandatory if `backups` is `enabled`. The configuration of a `backups_schedule` is listed below.
"""
return pulumi.get(self, "backups_schedule")
@property
@pulumi.getter(name="dateCreated")
def date_created(self) -> pulumi.Output[str]:
"""
The date the server was added to your Vultr account.
"""
return pulumi.get(self, "date_created")
@property
@pulumi.getter(name="ddosProtection")
def ddos_protection(self) -> pulumi.Output[Optional[bool]]:
"""
Whether DDOS protection will be enabled on the server (there is an additional charge for this).
"""
return pulumi.get(self, "ddos_protection")
@property
@pulumi.getter(name="defaultPassword")
def default_password(self) -> pulumi.Output[str]:
"""
The server's default password.
"""
return pulumi.get(self, "default_password")
@property
@pulumi.getter
def disk(self) -> pulumi.Output[int]:
"""
The description of the disk(s) on the server.
"""
return pulumi.get(self, "disk")
@property
@pulumi.getter(name="enableIpv6")
def enable_ipv6(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the server has IPv6 networking activated.
"""
return pulumi.get(self, "enable_ipv6")
@property
@pulumi.getter(name="enablePrivateNetwork")
def enable_private_network(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the server has private networking support enabled.
"""
return pulumi.get(self, "enable_private_network")
@property
@pulumi.getter
def features(self) -> pulumi.Output[Sequence[str]]:
"""
Array of which features are enabled.
"""
return pulumi.get(self, "features")
@property
@pulumi.getter(name="firewallGroupId")
def firewall_group_id(self) -> pulumi.Output[str]:
"""
The ID of the firewall group to assign to the server.
"""
return pulumi.get(self, "firewall_group_id")
@property
@pulumi.getter(name="gatewayV4")
def gateway_v4(self) -> pulumi.Output[str]:
"""
The server's IPv4 gateway.
"""
return pulumi.get(self, "gateway_v4")
@property
@pulumi.getter
def hostname(self) -> pulumi.Output[str]:
"""
The hostname to assign to the server.
"""
return pulumi.get(self, "hostname")
@property
@pulumi.getter(name="imageId")
def image_id(self) -> pulumi.Output[str]:
"""
        The ID of the Vultr marketplace application to be installed on the server. [See List Applications](https://www.vultr.com/api/#operation/list-applications) Note that marketplace applications are denoted by type `marketplace`, and you must use the `image_id`, not the `id`.
"""
return pulumi.get(self, "image_id")
@property
@pulumi.getter(name="internalIp")
def internal_ip(self) -> pulumi.Output[str]:
"""
The server's internal IP address.
"""
return pulumi.get(self, "internal_ip")
@property
@pulumi.getter(name="isoId")
def iso_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the ISO file to be installed on the server. [See List ISO](https://www.vultr.com/api/#operation/list-isos)
"""
return pulumi.get(self, "iso_id")
@property
@pulumi.getter
def kvm(self) -> pulumi.Output[str]:
"""
The server's current KVM URL. This URL will change periodically. It is not advised to cache this value.
"""
return pulumi.get(self, "kvm")
@property
@pulumi.getter
def label(self) -> pulumi.Output[str]:
"""
A label for the server.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter(name="mainIp")
def main_ip(self) -> pulumi.Output[str]:
"""
The server's main IP address.
"""
return pulumi.get(self, "main_ip")
@property
@pulumi.getter(name="netmaskV4")
def netmask_v4(self) -> pulumi.Output[str]:
"""
The server's IPv4 netmask.
"""
return pulumi.get(self, "netmask_v4")
@property
@pulumi.getter
def os(self) -> pulumi.Output[str]:
"""
The string description of the operating system installed on the server.
"""
return pulumi.get(self, "os")
@property
@pulumi.getter(name="osId")
def os_id(self) -> pulumi.Output[int]:
"""
The ID of the operating system to be installed on the server. [See List OS](https://www.vultr.com/api/#operation/list-os)
"""
return pulumi.get(self, "os_id")
@property
@pulumi.getter
def plan(self) -> pulumi.Output[str]:
"""
The ID of the plan that you want the instance to subscribe to. [See List Plans](https://www.vultr.com/api/#tag/plans)
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter(name="powerStatus")
def power_status(self) -> pulumi.Output[str]:
"""
Whether the server is powered on or not.
"""
return pulumi.get(self, "power_status")
@property
@pulumi.getter(name="privateNetworkIds")
def private_network_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of private network IDs to be attached to the server.
"""
return pulumi.get(self, "private_network_ids")
@property
@pulumi.getter
def ram(self) -> pulumi.Output[int]:
"""
The amount of memory available on the server in MB.
"""
return pulumi.get(self, "ram")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The ID of the region that the instance is to be created in. [See List Regions](https://www.vultr.com/api/#operation/list-regions)
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="reservedIpId")
def reserved_ip_id(self) -> pulumi.Output[str]:
"""
ID of the floating IP to use as the main IP of this server.
"""
return pulumi.get(self, "reserved_ip_id")
@property
@pulumi.getter(name="scriptId")
def script_id(self) -> pulumi.Output[str]:
"""
The ID of the startup script you want added to the server.
"""
return pulumi.get(self, "script_id")
@property
@pulumi.getter(name="serverStatus")
def server_status(self) -> pulumi.Output[str]:
"""
A more detailed server status (none, locked, installingbooting, isomounting, ok).
"""
return pulumi.get(self, "server_status")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> pulumi.Output[str]:
"""
The ID of the Vultr snapshot that the server will restore for the initial installation. [See List Snapshots](https://www.vultr.com/api/#operation/list-snapshots)
"""
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter(name="sshKeyIds")
def ssh_key_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of SSH key IDs to apply to the server on install (only valid for Linux/FreeBSD).
"""
return pulumi.get(self, "ssh_key_ids")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of the server's subscription.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tag(self) -> pulumi.Output[str]:
"""
The tag to assign to the server.
"""
return pulumi.get(self, "tag")
@property
@pulumi.getter(name="userData")
def user_data(self) -> pulumi.Output[str]:
"""
Generic data store, which some provisioning tools and cloud operating systems use as a configuration file. It is generally consumed only once after an instance has been launched, but individual needs may vary.
"""
return pulumi.get(self, "user_data")
@property
@pulumi.getter(name="v6MainIp")
def v6_main_ip(self) -> pulumi.Output[str]:
"""
The main IPv6 network address.
"""
return pulumi.get(self, "v6_main_ip")
@property
@pulumi.getter(name="v6Network")
def v6_network(self) -> pulumi.Output[str]:
"""
The IPv6 subnet.
"""
return pulumi.get(self, "v6_network")
@property
@pulumi.getter(name="v6NetworkSize")
def v6_network_size(self) -> pulumi.Output[int]:
"""
The IPv6 network size in bits.
"""
return pulumi.get(self, "v6_network_size")
@property
@pulumi.getter(name="vcpuCount")
def vcpu_count(self) -> pulumi.Output[int]:
"""
The number of virtual CPUs available on the server.
"""
return pulumi.get(self, "vcpu_count")
|
import re
import traceback
import urllib
import requests
from bs4 import BeautifulSoup as Soup
import scrapy
from scrapy import Request
from twisted.internet.error import TimeoutError, TCPTimedOutError, ConnectionRefusedError
from spider.items import DoubanItem, DoubanDetailsItem
from spider.settings import DOUBAN_FAST, DOUBAN_PROXY
from spider.spiders import DOUBAN_COOKIE
from spider_utils.constants import Constants
from spider_utils.douban_database import douban_db
from spider_utils.proxies import get_proxy
from spider_utils.strings import my_strip
class DoubanDetailsSpider(scrapy.Spider):
name = 'douban_details'
allowed_domains = ['douban.com']
target_page_url = 'https://book.douban.com/subject/{douban_id}/'
def start_requests(self):
        # Fetch all douban_ids
douban_ids = douban_db.get_douban_ids()
douban_id_cnt = douban_db.get_douban_id_cnt()
douban_details_finished: set = douban_db.get_douban_details_finished()
if douban_id_cnt < 0:
douban_id_cnt = douban_ids[0]
douban_db.set_douban_id_cnt(douban_id_cnt)
try:
douban_id_index = douban_ids.index(douban_id_cnt)
except ValueError:
self.logger.warning(f"Cannot find {douban_id_cnt} in douban_ids")
douban_id_index = 0
        # Start crawling, iterating over the ids in turn
for i in range(douban_id_index, len(douban_ids)):
douban_id_cnt = douban_ids[i]
if douban_id_cnt in douban_details_finished:
self.logger.debug(f"skip {douban_id_cnt}")
continue
self.logger.info(f"fetching {douban_id_cnt}")
if DOUBAN_PROXY:
proxy = get_proxy()
yield Request(self.target_page_url.format(douban_id=douban_id_cnt),
meta={"proxy": f"http{'s' if proxy.get('https', False) else ''}://{proxy.get('proxy')}"},
cookies=DOUBAN_COOKIE,
callback=self.parse, errback=self.handle_errors)
else:
yield Request(self.target_page_url.format(douban_id=douban_id_cnt),
cookies=DOUBAN_COOKIE,
callback=self.parse, errback=self.handle_errors)
# self.logger.info(f"finishing {douban_id_cnt}")
douban_details_finished.add(douban_id_cnt)
self.logger.info(f"DONE!")
def handle_errors(self, failure):
request = failure.request
if failure.check(TCPTimedOutError, TimeoutError, ConnectionRefusedError):
self.logger.error(f"{failure}, {dir(request)}")
def parse(self, response) -> list:
url = response.url
self.logger.debug(f"Start parsing {url}")
url_info = urllib.parse.urlparse(url)
douban_id = int(url_info.path.split('/')[2])
html = response.body
if not isinstance(html, str):
html = html.decode(errors='ignore')
if len(html) == 0:
self.logger.debug(f"got empty page {douban_id}")
yield None
return
if '异常请求' in html:
self.logger.debug(f"请求异常 {douban_id}")
yield None
return
if '页面不存在' in html:
self.logger.debug(f"页面不存在 {douban_id}")
yield None
return
soup = Soup(response.body, 'html.parser')
self.logger.info(f"title: {soup.title}")
book_info = soup.find("div", id='info')
if book_info is None:
self.logger.debug(f"No #info {douban_id}")
yield None
return
convert_map = {
'作者': 'author',
'出版社': 'publisher',
'出版年': 'publish_time',
'页数': 'page_count',
'定价': 'pricing',
'装帧': 'binding',
'丛书': 'series',
'ISBN': 'ISBN',
'标题': 'title',
'副标题': 'subtitle',
'译者': 'translator',
'出品方': 'producer'
}
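        # The #info block is flattened into "label:value" lines; for example a line
        # like "出版年:2001-1" (hypothetical value) becomes {'publish_time': '2001-1'}
        # after the split and the convert_map lookup below.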
info_lines = book_info.get_text().strip().replace(" ", "").replace(" ", "").replace("\n\n", "\n").splitlines()
# print(info_lines)
for i in range(len(info_lines)):
if i == 0:
continue
if ':' not in info_lines[i]:
p = i - 1
while p >= 0 and (info_lines[p] is None or ':' not in info_lines[p]):
p -= 1
                # p can end at -1 when no earlier labelled line exists; only merge
                # into a genuine preceding "label:value" line.
                if p > 0:
                    info_lines[p] += info_lines[i]
                    info_lines[i] = None
info_lines = [line for line in info_lines if isinstance(line, str) and ':' in line]
info_raw = {line.split(':')[0]: line.split(':')[-1] for line in info_lines}
info = {convert_map.get(key, key): info_raw[key] for key in info_raw}
details = info
span_content = soup.find('span', text='内容简介')
if span_content is not None:
next_sibling = span_content.parent.next_sibling
while next_sibling == '\n':
next_sibling = next_sibling.next_sibling
# print(next_sibling)
if next_sibling.find('a', text="(展开全部)") is not None:
target = next_sibling.find("span", attrs={"class": "all"})
else:
target = next_sibling.find("div", attrs={"class": "intro"})
# print(target)
if target is not None:
content = target.get_text().replace("(展开全部)", "").strip()
# print(content)
details['description'] = content
span_author = soup.find('span', text='作者简介')
if span_author is not None:
next_sibling = span_author.parent.next_sibling
while next_sibling == '\n':
next_sibling = next_sibling.next_sibling
# print(next_sibling)
if next_sibling.find('a', text="(展开全部)") is not None:
target = next_sibling.find("span", attrs={"class": "all"})
else:
target = next_sibling.find("div", attrs={"class": "intro"})
# print(target)
if target is not None:
content = target.get_text().replace("(展开全部)", "").strip()
# print(content)
details['description_author'] = content
span_tags = soup.find('div', id='db-tags-section')
if span_tags is not None:
target_list = span_tags.find("div", attrs={"class": "indent"})
if target_list is not None:
tags_list = []
for target in target_list:
try:
tags_list.append(str(target.get_text()).strip())
except AttributeError:
pass
details['tags'] = tags_list
div_comments = soup.find('div', id='comment-list-wrapper')
        comments_got_cid = set()
if div_comments is not None:
comments_items = div_comments.find_all('li', class_='comment-item')
details['comments'] = []
for comment_item in comments_items:
comment = {}
comment_cid = int(comment_item.attrs.get('data-cid', 0))
                if comment_cid in comments_got_cid:
                    continue
                comments_got_cid.add(comment_cid)
                comment['cid'] = comment_cid
comment_content = comment_item.find("p", class_="comment-content")
if comment_content is not None:
comment['content'] = comment_content.get_text().strip().replace("\n", "")
vote_count = comment_item.find("span", class_="vote-count")
if vote_count is not None:
try:
comment['vote_count'] = int(vote_count.get_text().strip())
except ValueError:
pass
comment_info_span = comment_item.find("span", class_="comment-info")
if comment_info_span is not None:
comment_info = {}
comment_username = comment_info_span.find("a")
if comment_username is not None:
comment_info['username'] = comment_username.get_text().strip()
comment_time = comment_info_span.find("span", attrs={"class": "comment-time"})
if comment_time is not None:
comment_info['time'] = comment_time.get_text().strip()
comment_rating = comment_info_span.find("span", attrs={"class": "rating"})
if comment_rating is not None:
try:
comment_info['rating'] = int(
[cl for cl in comment_rating.attrs.get('class', "") if 'allstar' in cl][0].replace(
'allstar',
'')) / 50
except ValueError:
pass
comment['info'] = comment_info
details['comments'].append(comment)
# print(json.dumps(details, indent=2, sort_keys=True).encode('utf-8').decode('unicode_escape'))
details_item = DoubanDetailsItem()
for key in details:
try:
details_item[key] = details[key]
except KeyError:
if details_item.get('extras', None) is None:
details_item['extras'] = {}
details_item['extras'][key] = details[key]
yield details_item
douban_db.set_details_finish(douban_id, finished=True)
return None
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest helpers."""
from __future__ import absolute_import, print_function
import os
import shutil
import tempfile
def make_fake_template(content=""):
"""Create fake template for testing.
:param content: File content.
    :returns: The temporary directory.
"""
temp_dir = tempfile.mkdtemp()
invenio_theme_dir = os.path.join(temp_dir, 'invenio_theme')
os.mkdir(invenio_theme_dir)
    with open(os.path.join(invenio_theme_dir, 'fake.html'), 'w+') as fake_file:
        fake_file.write(content)
return temp_dir
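# A minimal usage sketch (an assumption, not part of the original helpers):
# callers are expected to remove the directory themselves, presumably with
# shutil.rmtree (which is why shutil is imported above).
#
# temp_dir = make_fake_template("<html>{% block body %}{% endblock %}</html>")
# try:
#     ...  # point the application's template folder at temp_dir
# finally:
#     shutil.rmtree(temp_dir)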
|
expected_output = {
'application': 'ERROR MESSAGE',
'error_message': {
'11/11/2019 03:46:31': ['IOSXE-2-DIAGNOSTICS_PASSED : Diagnostics Thermal passed'],
'11/11/2019 03:46:41': ['IOSXE-2-DIAGNOSTICS_PASSED : Diagnostics Fantray passed'],
'11/11/2019 03:45:41': ['IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/1', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/4', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/5', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/10', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/11', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/12', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Fou1/0/15', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Fou1/0/16', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Fou1/0/17', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Fou1/0/18', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Fou1/0/19', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Fou1/0/20', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Fou1/0/21', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Fou1/0/22', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/23', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/24', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/25', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/26', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/28', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/29', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/32', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/33', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/35', 'IOSXE-2-TRANSCEIVER_INSERTED : Transceiver module inserted in Hu1/0/36']
}
}
|
from .abc import (
EventRepository,
)
from .memory import (
InMemoryEventRepository,
)
from .pg import (
PostgreSqlEventRepository,
)
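# A possible explicit re-export list (an assumption; not present in the original
# module) that would make the intent of the imports above clear to linters:
#
# __all__ = [
#     'EventRepository',
#     'InMemoryEventRepository',
#     'PostgreSqlEventRepository',
# ]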
|
#!/usr/bin/env python
import pytest
import re
import A_re as A
def test_pattern():
assert re.search(A.P_URL, 'http://www.npr.org')
assert not re.search(A.P_URL, 'Mr.DillonNiederhut')
def test_function():
with open('../../data/03_text.md', 'r') as f:
d = f.read()
r = A.get_urls(d)
assert len(r) == 28
assert r[0] == 'https://github.com'
|
#!/usr/bin/env python
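# Prints the last ten digits of the sum 1**1 + 2**2 + ... + 1000**1000.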
total = sum([n**n for n in range(1,1001)])
print(str(total)[-10:])
|
import sys
from regenerate_configs import DELAY_BY_PITCH, SOUND_BY_PITCHES, REPS_PER_SECOND
IN_FILE = './in.txt'
OUT_FILE = './out.txt'
SHIFT_DOWN = {
'F#2': 'F2',
'G2': 'F#2',
'Ab2': 'G2',
'A2': 'Ab2',
'Bb2': 'A2',
'B2': 'Bb2',
'C3': 'B2',
'C#3': 'C3',
'D3': 'C#3',
'Eb3': 'D3',
'E3': 'Eb3',
'F3': 'E3',
'F#3': 'F3',
'G3': 'F#3',
'Ab3': 'G3',
'A3': 'Ab3',
'Bb3': 'A3',
'B3': 'Bb3',
'C4': 'B3',
'C#4': 'C4',
'D4': 'C#4',
'Eb4': 'D4',
'E4': 'Eb4',
'F4': 'E4',
'F#4': 'F4',
'res': 'res',
}
def compile_note(note, length_secs):
if note == 'res':
return 'sleep %s ' % (length_secs)
sound, voice = SOUND_BY_PITCHES[note]
reps = int(round(REPS_PER_SECOND[note] * length_secs))
return 'say %s --voice=%s --rate=720 ' % (sound*reps, voice)
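# For example (derived from the branches above): compile_note('res', 1.5) returns
# 'sleep 1.5 ', while a pitched note expands to a repeated `say` invocation whose
# sound, voice and repetition count come from the regenerate_configs tables.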
def compile_line(line, bpm, delay):
delay_cmd = ['sleep %s' % (delay)] if delay > 0 else []
return '( ' + ' && '.join(delay_cmd + [compile_note(SHIFT_DOWN[note], SECS_PER_MIN*length / bpm) for note, length in line]) + ' )'
def compile_sync_unit(sync_unit, bpm):
wait_cmd = ['wait'] if len(sync_unit) > 1 else []
nlines = len(sync_unit)
delays = [(DELAY_BY_PITCH[SHIFT_DOWN[line[0][0]]] if line[0][0] != 'res' else 0 ) for line in sync_unit ]
max_delay = max(delays)
return '( ' + ' & '.join([compile_line(sync_unit[i], bpm, max_delay - delays[i]) for i in range(nlines)] + wait_cmd) + ' )'
def compile_song(song, bpm):
return ' &&\n'.join(compile_sync_unit(sync_unit, bpm) for sync_unit in song)
SECS_PER_MIN = 60.
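# NOTE: get_commands_for_song references get_command_for_note, which is not
# defined in this module; neither it nor concat_commands is used by the
# __main__ flow below.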
def get_commands_for_song(notes_and_lengths, bpm):
return [
get_command_for_note(note, SECS_PER_MIN * length / bpm)
for note, length in notes_and_lengths
]
def concat_commands(commands):
ans = commands[0]
for command in commands[1:]:
ans += "&&\n" + command
return ans + "\n"
def parse_into_sync_units(lines):
sync_units = []
def _parse_line(line):
tokens = line.split(' ')
assert len(tokens)%2 == 0, line
parsed = []
while tokens:
note, length = tokens[:2]
tokens = tokens[2:]
parsed.append((note, float(length)))
return parsed
for line in lines:
if line == '-----':
sync_units.append([])
else:
sync_units[-1].append(_parse_line(line))
return sync_units
def parse_input(filename):
    with open(filename) as f:
        lines = [line.rstrip('\n\r') for line in f]
def _parse_line(line):
note, length = line.split(' ')
return (note, float(length))
tempo = float(lines.pop(0))
return (parse_into_sync_units(lines), tempo)
# return ([_parse_line(line) for line in lines], tempo)
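# Example input file layout, inferred from parse_input/parse_into_sync_units
# above (notes and lengths are hypothetical): the first line is the tempo in BPM,
# '-----' starts a new sync unit, and each remaining line lists space-separated
# "note length" pairs for one voice of that sync unit.
#
# 120
# -----
# C3 1 D3 1 E3 2
# res 2 G2 2
# -----
# F3 4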
if __name__ == '__main__':
in_filename = sys.argv[1] if len(sys.argv)>1 else IN_FILE
out_filename = sys.argv[2] if len(sys.argv)>2 else OUT_FILE
song, tempo = parse_input(in_filename)
command = compile_song(song, tempo)
with open(out_filename, 'w') as f:
        f.write(command)
|
import subprocess
from typing import List # noqa: F401
from libqtile import bar, layout, widget, hook
from libqtile.config import Click, Drag, Group, Key, Match, Screen
from libqtile.lazy import lazy
from libqtile.utils import guess_terminal
@hook.subscribe.startup_once
def autostart():
subprocess.Popen('/home/steven/.config/qtile/autostart.sh')
mod = "mod4"
group_names = [
"Main",
"Terminal",
"Web",
"Files",
"Side 1",
"Side 2",
"Video",
"Comms",
"Music",
"Background",
]
groups = [Group(name) for name in group_names]
keys = [
# Changing Focus on Windows
Key([mod], "h", lazy.layout.left()),
Key([mod], "l", lazy.layout.right()),
Key([mod], "j", lazy.layout.down()),
Key([mod], "k", lazy.layout.up()),
# Moving Windows Around
Key([mod, "shift"], "h", lazy.layout.shuffle_left()),
Key([mod, "shift"], "l", lazy.layout.shuffle_right()),
Key([mod, "shift"], "j", lazy.layout.shuffle_down()),
Key([mod, "shift"], "k", lazy.layout.shuffle_up()),
# Resizing
Key([mod, "control"], "h", lazy.layout.grow_left()),
Key([mod, "control"], "l", lazy.layout.grow_right()),
Key([mod, "control"], "j", lazy.layout.grow_down()),
Key([mod, "control"], "k", lazy.layout.grow_up()),
Key([mod, "control"], "n", lazy.layout.normalize()),
# Layouts
Key([mod], "Tab", lazy.layout.next()),
Key([mod], "space", lazy.next_layout()),
Key([mod, "shift"], "space", lazy.window.toggle_floating()),
Key([mod], "f", lazy.window.toggle_fullscreen()),
Key([mod, "shift"], "q", lazy.window.kill()),
Key([mod, "shift"], "r", lazy.restart()),
Key([mod, "shift"], "e", lazy.shutdown()),
# Changing Group
Key([mod], "1", lazy.group[group_names[0]].toscreen()),
Key([mod], "2", lazy.group[group_names[1]].toscreen()),
Key([mod], "3", lazy.group[group_names[2]].toscreen()),
Key([mod], "4", lazy.group[group_names[3]].toscreen()),
Key([mod], "5", lazy.group[group_names[4]].toscreen()),
Key([mod], "6", lazy.group[group_names[5]].toscreen()),
Key([mod], "7", lazy.group[group_names[6]].toscreen()),
Key([mod], "8", lazy.group[group_names[7]].toscreen()),
Key([mod], "9", lazy.group[group_names[8]].toscreen()),
Key([mod], "0", lazy.group[group_names[9]].toscreen()),
# Moving window to Group
Key([mod, "shift"], "1", lazy.window.togroup(group_names[0], switch_group=False)),
Key([mod, "shift"], "2", lazy.window.togroup(group_names[1], switch_group=False)),
Key([mod, "shift"], "3", lazy.window.togroup(group_names[2], switch_group=False)),
Key([mod, "shift"], "4", lazy.window.togroup(group_names[3], switch_group=False)),
Key([mod, "shift"], "5", lazy.window.togroup(group_names[4], switch_group=False)),
Key([mod, "shift"], "6", lazy.window.togroup(group_names[5], switch_group=False)),
Key([mod, "shift"], "7", lazy.window.togroup(group_names[6], switch_group=False)),
Key([mod, "shift"], "8", lazy.window.togroup(group_names[7], switch_group=False)),
Key([mod, "shift"], "9", lazy.window.togroup(group_names[8], switch_group=False)),
Key([mod, "shift"], "0", lazy.window.togroup(group_names[9], switch_group=False)),
# Launch Programs
Key([mod], "w", lazy.spawn("firefox")),
Key([mod], "Return", lazy.spawn("xfce4-terminal -e fish")),
Key([mod, "shift"], "Return", lazy.spawn("xfce4-terminal")),
Key([mod], "d", lazy.spawn("rofi -show drun -modi drun")),
Key([mod, "shift"], "d", lazy.spawn("rofi -show run -modi run")),
Key([mod], "e", lazy.spawn("nautilus")),
]
bg_color = "#282C34"
fg_color = "#ABB2BF"
accent_color = "#61AFEF"
accent_color2 = "#C678DD"
accent_color3 = "#56B6C2"
good_color = "#98C379"
warn_color = "#E5C07B"
error_color = "#E06C75"
layouts = [
layout.MonadTall(
border_focus=good_color,
border_normal=fg_color,
margin=9,
border_width=3
),
layout.Max(),
]
widget_defaults = dict(
font='font-awesome',
fontsize=12,
padding=3,
)
extension_defaults = widget_defaults.copy()
screens = [
Screen(
bottom=bar.Bar(
[
widget.CurrentLayout(foreground=good_color),
widget.GroupBox(
borderwidth=2,
active=accent_color,
inactive=fg_color,
foreground=accent_color,
this_screen_border=accent_color,
this_current_screen_border=accent_color,
warn_color=warn_color,
urgent_border=error_color,
),
widget.WindowName(
foreground=accent_color2,
),
widget.Chord(
chords_colors={'launch': (accent_color2,bg_color)},
name_transform=lambda name: name.upper(),
),
widget.Clock(
format='%Y-%m-%d %a %I:%M %p',
foreground=fg_color,
),
widget.Sep(foreground=fg_color, padding=5),
#widget.Bluetooth(),
widget.Systray(),
#widget.BatteryIcon(),
widget.QuickExit(foreground=warn_color),
],
24,
background=bg_color,
),
),
]
# Drag floating layouts.
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(), start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(), start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front()),
]
dgroups_key_binder = None
dgroups_app_rules = [] # type: List
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating(float_rules=[
# Run the utility of `xprop` to see the wm class and name of an X client.
*layout.Floating.default_float_rules,
Match(wm_class='confirmreset'), # gitk
Match(wm_class='makebranch'), # gitk
Match(wm_class='maketag'), # gitk
Match(wm_class='ssh-askpass'), # ssh-askpass
Match(title='branchdialog'), # gitk
Match(title='pinentry'), # GPG key password entry
])
auto_fullscreen = True
focus_on_window_activation = "smart"
reconfigure_screens = True
auto_minimize = True
wmname = "LG3D"
|
# coding: utf-8
import numpy as np
import pandas as pd
from PredictBase import PredictBase
from keras.callbacks import EarlyStopping
class TestValidate(PredictBase):
def __init__(self):
super().__init__()
self.draw_graph = False
def set_draw_graph(self, v):
self.draw_graph = v
def test_predict(self, stock_data_files, target_stock):
adj_starts, high, low, adj_ends, ommyo_rate = self.load_data(
stock_data_files)
_y_data = self.pct_change(adj_starts[target_stock])
# y_data = pct_change(adj_ends[stock_data_files.index(target_stock)])
# y_data = ommyo_rate[stock_data_files.index(target_stock)]
y_data = pd.cut(_y_data, self.category_threshold, labels=False).values
        # Generate the training data
X, Y = self.create_train_data(
adj_starts, high, low, adj_ends, ommyo_rate, y_data, self.training_days)
        # Split the data into training and validation sets
split_pos = int(len(X) * 0.9)
train_x = X[:split_pos]
train_y = Y[:split_pos]
test_x = X[split_pos:]
test_y = Y[split_pos:]
        # Build the LSTM model
dimension = len(X[0][0])
model = self.create_model(dimension)
es = EarlyStopping(patience=10, verbose=1)
history = model.fit(train_x, train_y, batch_size=self.batch_size,
epochs=self.epochs, verbose=1, validation_split=0.1, callbacks=[es])
        # Training history
self.print_train_history(history)
if self.draw_graph:
self.draw_train_history(history)
        # Validation
preds = model.predict(test_x)
self.__print_predict_result(preds, test_y)
def __print_predict_result(self, preds, test_y):
tp = 0
fp = 0
tn = 0
fn = 0
for i in range(len(preds)):
predict = np.argmax(preds[i])
test = np.argmax(test_y[i])
positive = True if predict == 2 or predict == 3 else False
true = True if test == 2 or test == 3 else False
if true and positive:
tp += 1
if not true and positive:
fp += 1
            if true and not positive:
                fn += 1  # actual positive, predicted negative
            if not true and not positive:
                tn += 1  # actual negative, predicted negative
print("TP = %d, FP = %d, TN = %d, FN = %d" % (tp, fp, tn, fn))
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f_value = 2 * recall * precision / (recall + precision)
print("Precision = %f, Recall = %f, F = %f" %
(precision, recall, f_value))
|
# -*- coding: utf-8 -*-
import colander
import pytest
from mock import Mock
from pyramid.exceptions import BadCSRFToken
from itsdangerous import BadData, SignatureExpired
from h.accounts import schemas
from h.services.user import UserNotActivated, UserService
from h.services.user_password import UserPasswordService
from h.schemas import ValidationError
class TestUnblacklistedUsername(object):
def test(self, dummy_node):
blacklist = set(['admin', 'root', 'postmaster'])
# Should not raise for valid usernames
schemas.unblacklisted_username(dummy_node, "john", blacklist)
schemas.unblacklisted_username(dummy_node, "Abigail", blacklist)
# Should raise for usernames in blacklist
pytest.raises(colander.Invalid,
schemas.unblacklisted_username,
dummy_node,
"admin",
blacklist)
# Should raise for case variants of usernames in blacklist
pytest.raises(colander.Invalid,
schemas.unblacklisted_username,
dummy_node,
"PostMaster",
blacklist)
@pytest.mark.usefixtures('user_model')
class TestUniqueEmail(object):
def test_it_looks_up_user_by_email(self,
dummy_node,
pyramid_request,
user_model):
with pytest.raises(colander.Invalid):
schemas.unique_email(dummy_node, "foo@bar.com")
user_model.get_by_email.assert_called_with(pyramid_request.db,
"foo@bar.com",
pyramid_request.authority)
def test_it_is_invalid_when_user_exists(self, dummy_node):
pytest.raises(colander.Invalid,
schemas.unique_email,
dummy_node,
"foo@bar.com")
def test_it_is_invalid_when_user_does_not_exist(self,
dummy_node,
user_model):
user_model.get_by_email.return_value = None
assert schemas.unique_email(dummy_node, "foo@bar.com") is None
def test_it_is_valid_when_authorized_users_email(self,
dummy_node,
pyramid_config,
user_model):
"""
If the given email is the authorized user's current email it's valid.
This is so that we don't get a "That email is already taken" validation
        error when a user tries to change their email address to the address
        it is already set to.
"""
pyramid_config.testing_securitypolicy('acct:elliot@hypothes.is')
user_model.get_by_email.return_value = Mock(
spec_set=('userid',),
userid='acct:elliot@hypothes.is')
schemas.unique_email(dummy_node, "elliot@bar.com")
@pytest.mark.usefixtures('user_model')
class TestRegisterSchema(object):
def test_it_is_invalid_when_password_too_short(self, pyramid_request):
schema = schemas.RegisterSchema().bind(request=pyramid_request)
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({"password": "a"})
assert exc.value.asdict()['password'] == (
"Must be 2 characters or more.")
def test_it_is_invalid_when_username_too_short(self,
pyramid_request,
user_model):
schema = schemas.RegisterSchema().bind(request=pyramid_request)
user_model.get_by_username.return_value = None
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({"username": "a"})
assert exc.value.asdict()['username'] == (
"Must be 3 characters or more.")
def test_it_is_invalid_when_username_too_long(self,
pyramid_request,
user_model):
schema = schemas.RegisterSchema().bind(request=pyramid_request)
user_model.get_by_username.return_value = None
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({"username": "a" * 500})
assert exc.value.asdict()['username'] == (
"Must be 30 characters or less.")
def test_it_is_invalid_with_invalid_characters_in_username(self,
pyramid_request,
user_model):
user_model.get_by_username.return_value = None
schema = schemas.RegisterSchema().bind(request=pyramid_request)
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({"username": "Fred Flintstone"})
assert exc.value.asdict()['username'] == ("Must have only letters, "
"numbers, periods, and "
"underscores.")
@pytest.mark.usefixtures('user_service', 'user_password_service')
class TestLoginSchema(object):
def test_passes_username_to_user_service(self,
factories,
pyramid_csrf_request,
user_service):
user = factories.User.build(username='jeannie')
user_service.fetch_for_login.return_value = user
schema = schemas.LoginSchema().bind(request=pyramid_csrf_request)
schema.deserialize({
'username': 'jeannie',
'password': 'cake',
})
user_service.fetch_for_login.assert_called_once_with(username_or_email='jeannie')
def test_passes_password_to_user_password_service(self,
factories,
pyramid_csrf_request,
user_service,
user_password_service):
user = factories.User.build(username='jeannie')
user_service.fetch_for_login.return_value = user
schema = schemas.LoginSchema().bind(request=pyramid_csrf_request)
schema.deserialize({
'username': 'jeannie',
'password': 'cake',
})
user_password_service.check_password.assert_called_once_with(user, 'cake')
def test_it_returns_user_when_valid(self,
factories,
pyramid_csrf_request,
user_service):
user = factories.User.build(username='jeannie')
user_service.fetch_for_login.return_value = user
schema = schemas.LoginSchema().bind(request=pyramid_csrf_request)
result = schema.deserialize({
'username': 'jeannie',
'password': 'cake',
})
assert result['user'] is user
def test_invalid_with_bad_csrf(self, pyramid_request, user_service):
schema = schemas.LoginSchema().bind(request=pyramid_request)
with pytest.raises(BadCSRFToken):
schema.deserialize({
'username': 'jeannie',
'password': 'cake',
})
def test_invalid_with_inactive_user(self,
pyramid_csrf_request,
user_service):
schema = schemas.LoginSchema().bind(request=pyramid_csrf_request)
user_service.fetch_for_login.side_effect = UserNotActivated()
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'username': 'jeannie',
'password': 'cake',
})
errors = exc.value.asdict()
assert 'username' in errors
assert 'activate your account' in errors['username']
def test_invalid_with_unknown_user(self,
pyramid_csrf_request,
user_service):
schema = schemas.LoginSchema().bind(request=pyramid_csrf_request)
user_service.fetch_for_login.return_value = None
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'username': 'jeannie',
'password': 'cake',
})
errors = exc.value.asdict()
assert 'username' in errors
assert 'does not exist' in errors['username']
def test_invalid_with_bad_password(self,
factories,
pyramid_csrf_request,
user_service,
user_password_service):
user = factories.User.build(username='jeannie')
user_service.fetch_for_login.return_value = user
user_password_service.check_password.return_value = False
schema = schemas.LoginSchema().bind(request=pyramid_csrf_request)
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'username': 'jeannie',
'password': 'cake',
})
errors = exc.value.asdict()
assert 'password' in errors
assert 'Wrong password' in errors['password']
@pytest.mark.usefixtures('user_model')
class TestForgotPasswordSchema(object):
def test_it_is_invalid_with_no_user(self,
pyramid_csrf_request,
user_model):
schema = schemas.ForgotPasswordSchema().bind(
request=pyramid_csrf_request)
user_model.get_by_email.return_value = None
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({'email': 'rapha@example.com'})
assert 'email' in exc.value.asdict()
assert exc.value.asdict()['email'] == 'Unknown email address.'
def test_it_returns_user_when_valid(self,
pyramid_csrf_request,
user_model):
schema = schemas.ForgotPasswordSchema().bind(
request=pyramid_csrf_request)
user = user_model.get_by_email.return_value
appstruct = schema.deserialize({'email': 'rapha@example.com'})
assert appstruct['user'] == user
@pytest.mark.usefixtures('user_model')
class TestResetPasswordSchema(object):
def test_it_is_invalid_with_password_too_short(self, pyramid_csrf_request):
schema = schemas.ResetPasswordSchema().bind(
request=pyramid_csrf_request)
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({"password": "a"})
assert "password" in exc.value.asdict()
def test_it_is_invalid_with_invalid_user_token(self, pyramid_csrf_request):
pyramid_csrf_request.registry.password_reset_serializer = (
self.FakeInvalidSerializer())
schema = schemas.ResetPasswordSchema().bind(
request=pyramid_csrf_request)
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'user': 'abc123',
'password': 'secret',
})
assert 'user' in exc.value.asdict()
assert 'Wrong reset code.' in exc.value.asdict()['user']
def test_it_is_invalid_with_expired_token(self, pyramid_csrf_request):
pyramid_csrf_request.registry.password_reset_serializer = (
self.FakeExpiredSerializer())
schema = schemas.ResetPasswordSchema().bind(
request=pyramid_csrf_request)
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'user': 'abc123',
'password': 'secret',
})
assert 'user' in exc.value.asdict()
assert 'Reset code has expired.' in exc.value.asdict()['user']
def test_it_is_invalid_if_user_has_already_reset_their_password(
self, pyramid_csrf_request, user_model):
pyramid_csrf_request.registry.password_reset_serializer = (
self.FakeSerializer())
schema = schemas.ResetPasswordSchema().bind(
request=pyramid_csrf_request)
user = user_model.get_by_username.return_value
user.password_updated = 2
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'user': 'abc123',
'password': 'secret',
})
assert 'user' in exc.value.asdict()
assert 'This reset code has already been used.' in exc.value.asdict()['user']
def test_it_returns_user_when_valid(self,
pyramid_csrf_request,
user_model):
pyramid_csrf_request.registry.password_reset_serializer = (
self.FakeSerializer())
schema = schemas.ResetPasswordSchema().bind(
request=pyramid_csrf_request)
user = user_model.get_by_username.return_value
user.password_updated = 0
appstruct = schema.deserialize({
'user': 'abc123',
'password': 'secret',
})
assert appstruct['user'] == user
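    # Minimal stand-ins for the itsdangerous password-reset serializer expected on
    # the request registry by ResetPasswordSchema.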
class FakeSerializer(object):
def dumps(self, obj):
return 'faketoken'
def loads(self, token, max_age=0, return_timestamp=False):
payload = {'username': 'foo@bar.com'}
if return_timestamp:
return payload, 1
return payload
class FakeExpiredSerializer(FakeSerializer):
def loads(self, token, max_age=0, return_timestamp=False):
raise SignatureExpired("Token has expired")
class FakeInvalidSerializer(FakeSerializer):
def loads(self, token, max_age=0, return_timestamp=False):
raise BadData("Invalid token")
@pytest.mark.usefixtures('models', 'user_password_service')
class TestEmailChangeSchema(object):
def test_it_returns_the_new_email_when_valid(self, schema):
appstruct = schema.deserialize({
'email': 'foo@bar.com',
'password': 'flibble',
})
assert appstruct['email'] == 'foo@bar.com'
def test_it_is_valid_if_email_same_as_users_existing_email(self,
schema,
user,
models,
pyramid_config):
"""
It is valid if the new email is the same as the user's existing one.
Trying to change your email to what your email already is should not
return an error.
"""
models.User.get_by_email.return_value = Mock(spec_set=['userid'],
userid=user.userid)
pyramid_config.testing_securitypolicy(user.userid)
schema.deserialize({'email': user.email, 'password': 'flibble'})
def test_it_is_invalid_if_csrf_token_missing(self,
pyramid_request,
schema):
del pyramid_request.headers['X-CSRF-Token']
with pytest.raises(BadCSRFToken):
schema.deserialize({
'email': 'foo@bar.com',
'password': 'flibble',
})
def test_it_is_invalid_if_csrf_token_wrong(self, pyramid_request, schema):
pyramid_request.headers['X-CSRF-Token'] = 'WRONG'
with pytest.raises(BadCSRFToken):
schema.deserialize({
'email': 'foo@bar.com',
'password': 'flibble',
})
def test_it_is_invalid_if_password_wrong(self, schema, user_password_service):
user_password_service.check_password.return_value = False
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'email': 'foo@bar.com',
'password': 'WRONG'
})
assert exc.value.asdict() == {'password': 'Wrong password.'}
def test_it_returns_incorrect_password_error_if_password_too_short(
self, schema, user_password_service):
"""
The schema should be invalid if the password is too short.
Test that this returns a "that was not the right password" error rather
than a "that password is too short error" as it used to (the user is
entering their current password for authentication, they aren't
choosing a new password).
"""
user_password_service.check_password.return_value = False
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'email': 'foo@bar.com',
'password': 'a' # Too short to be a valid password.
})
assert exc.value.asdict() == {'password': 'Wrong password.'}
def test_it_is_invalid_if_email_too_long(self, schema):
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'email': 'a' * 100 + '@bar.com',
'password': 'flibble',
})
assert exc.value.asdict() == {
'email': 'Must be 100 characters or less.'}
def test_it_is_invalid_if_email_not_a_valid_email_address(self, schema):
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'email': 'this is not a valid email address',
'password': 'flibble',
})
assert exc.value.asdict() == {'email': 'Invalid email address.'}
def test_it_is_invalid_if_email_already_taken(self, models, schema):
models.User.get_by_email.return_value = Mock(spec_set=['userid'])
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({
'email': 'foo@bar.com',
'password': 'flibble',
})
assert exc.value.asdict() == {'email': 'Sorry, an account with this '
'email address already exists.'}
@pytest.fixture
def pyramid_request(self, pyramid_csrf_request, user):
pyramid_csrf_request.user = user
return pyramid_csrf_request
@pytest.fixture
def schema(self, pyramid_request):
return schemas.EmailChangeSchema().bind(request=pyramid_request)
@pytest.fixture
def user(self, factories):
return factories.User.build()
@pytest.fixture
def models(self, patch):
models = patch('h.accounts.schemas.models')
# By default there isn't already an account with the email address that
# we're trying to change to.
models.User.get_by_email.return_value = None
return models
@pytest.mark.usefixtures('user_password_service')
class TestPasswordChangeSchema(object):
def test_it_is_invalid_if_passwords_dont_match(self, pyramid_csrf_request):
user = Mock()
pyramid_csrf_request.user = user
schema = schemas.PasswordChangeSchema().bind(
request=pyramid_csrf_request)
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({'new_password': 'wibble',
'new_password_confirm': 'wibble!',
'password': 'flibble'})
assert 'new_password_confirm' in exc.value.asdict()
def test_it_is_invalid_if_current_password_is_wrong(self,
pyramid_csrf_request,
user_password_service):
user = Mock()
pyramid_csrf_request.user = user
schema = schemas.PasswordChangeSchema().bind(
request=pyramid_csrf_request)
user_password_service.check_password.return_value = False
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({'new_password': 'wibble',
'new_password_confirm': 'wibble',
'password': 'flibble'})
user_password_service.check_password.assert_called_once_with(user, 'flibble')
assert 'password' in exc.value.asdict()
class TestEditProfileSchema(object):
def test_accepts_valid_input(self, pyramid_csrf_request):
schema = schemas.EditProfileSchema().bind(request=pyramid_csrf_request)
appstruct = schema.deserialize({
'display_name': 'Michael Granitzer',
'description': 'Professor at University of Passau',
'link': 'http://mgrani.github.io/',
'location': 'Bavaria, Germany',
'orcid': '0000-0003-3566-5507',
})
def test_rejects_invalid_orcid(self, pyramid_csrf_request, validate_orcid):
validate_orcid.side_effect = ValueError('Invalid ORCID')
schema = schemas.EditProfileSchema().bind(request=pyramid_csrf_request)
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({'orcid': 'abcdef'})
assert exc.value.asdict()['orcid'] == 'Invalid ORCID'
def test_rejects_invalid_url(self, pyramid_csrf_request, validate_url):
validate_url.side_effect = ValueError('Invalid URL')
schema = schemas.EditProfileSchema().bind(request=pyramid_csrf_request)
with pytest.raises(colander.Invalid) as exc:
schema.deserialize({'link': '"invalid URL"'})
assert exc.value.asdict()['link'] == 'Invalid URL'
class TestCreateUserAPISchema(object):
def test_it_raises_when_authority_missing(self, schema, payload):
del payload['authority']
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_authority_not_a_string(self, schema, payload):
payload['authority'] = 34
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_username_missing(self, schema, payload):
del payload['username']
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_username_not_a_string(self, schema, payload):
payload['username'] = ['hello']
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_username_empty(self, schema, payload):
payload['username'] = ''
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_username_too_short(self, schema, payload):
payload['username'] = 'da'
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_username_too_long(self, schema, payload):
payload['username'] = 'dagrun-lets-make-this-username-really-long'
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_username_format_invalid(self, schema, payload):
payload['username'] = 'dagr!un'
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_email_missing(self, schema, payload):
del payload['email']
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_email_empty(self, schema, payload):
payload['email'] = ''
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_email_not_a_string(self, schema, payload):
payload['email'] = {'foo': 'bar'}
with pytest.raises(ValidationError):
schema.validate(payload)
def test_it_raises_when_email_format_invalid(self, schema, payload):
payload['email'] = 'not-an-email'
with pytest.raises(ValidationError):
schema.validate(payload)
@pytest.fixture
def payload(self):
return {
'authority': 'foobar.org',
'username': 'dagrun',
'email': 'dagrun@foobar.org',
}
@pytest.fixture
def schema(self):
return schemas.CreateUserAPISchema()
@pytest.fixture
def validate_url(patch):
return patch('h.accounts.schemas.util.validate_url')
@pytest.fixture
def validate_orcid(patch):
return patch('h.accounts.schemas.util.validate_orcid')
@pytest.fixture
def dummy_node(pyramid_request):
class DummyNode(object):
def __init__(self, request):
self.bindings = {
'request': request
}
return DummyNode(pyramid_request)
@pytest.fixture
def user_model(patch):
return patch('h.accounts.schemas.models.User')
@pytest.fixture
def user_service(db_session, pyramid_config):
service = Mock(spec_set=UserService(default_authority='example.com',
session=db_session))
service.fetch_for_login.return_value = None
pyramid_config.register_service(service, name='user')
return service
@pytest.fixture
def user_password_service(pyramid_config):
service = Mock(spec_set=UserPasswordService())
service.check_password.return_value = True
pyramid_config.register_service(service, name='user_password')
return service
|
import RPi.GPIO as GPIO
import time
power_channel = 24
def blink(times):
print("start blink:" + str(times))
GPIO.setmode(GPIO.BCM)
GPIO.setup(power_channel, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(power_channel, GPIO.LOW)
time.sleep(times)
GPIO.output(power_channel, GPIO.HIGH)
time.sleep(1)
GPIO.cleanup()
print("end blink:" + str(times))
|
# Copyright (c) 2013, Abhishek Balam and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
columns, data = [], []
columns = [
{
'label': 'Account',
'fieldname': 'account',
'fieldtype': 'Data',
'width': 200
},
{
'label': 'Balance (INR)',
'fieldname': 'balance',
'fieldtype': 'Currency',
'default': 0,
'width': 200
}
]
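    # NOTE: the account list fetched from the database is immediately overridden
    # by the hard-coded pair below, so only 'Sales' and 'Purchases' are reported.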
accounts = frappe.get_all('Account', filters={'is_group': 0})
accounts = ['Sales', 'Purchases']
fiscal_year = frappe.get_doc('Fiscal Year', filters['fiscal_year'])
start_date = str(fiscal_year.start_date)
end_date = str(fiscal_year.end_date)
all_balances = []
for name in accounts:
try:
balance = abs(frappe.db.sql("Select SUM(debit_amount) - SUM(credit_amount) FROM `tabGL Entry` WHERE posting_date >= '" + \
start_date + "' AND posting_date <= '" + end_date + "' GROUP BY account HAVING account='"+ name + \
"'")[0][0])
except:
balance = 0
data.append({
'account': name,
'balance': balance,
'indent': 1.0
})
all_balances.append(balance)
data.insert(0, {
'account': '<span style="font-weight:500">Income (Credit)</span>',
'balance': '',
'indent': 0.0
})
data.insert(2, {
'account': '<span style="font-weight:500">Expenses (Debit)</span>',
'balance': '',
'indent': 0.0
})
p_or_f = all_balances[0] - all_balances[1]
data.append({
'account': '<b style="color:green">Profit</b>' if p_or_f >=0 else '<b style="color:red">Loss</b>' ,
'balance': abs(p_or_f),
'indent': 0.0
})
return columns, data
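# Example call (hypothetical fiscal year name; the report framework normally supplies `filters`):
#   columns, data = execute({'fiscal_year': '2022-2023'})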
|
from __future__ import division # confidence high
from pyraf import iraf
from pyraf.iraf import stsdas, hst_calib, stis
from stistools import wx2d as WX
version = "1.2 (2010 April 27)"
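# PkgName and PkgBinary are referenced below but are normally supplied by the
# package that loads this task definition; as an assumption for standalone use,
# take them from the currently loaded IRAF package.
PkgName = iraf.curpack()
PkgBinary = iraf.curpkgbinary()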
def _wx2d_iraf (input, output="", wavelengths="",
helcorr="perform",
# algorithm="wavelet",
trace="", order=7, subdiv=8,
psf_width=0., rows="",
subsampled="", convolved=""):
# Convert IRAF values to Python values.
if wavelengths.strip() == "":
wavelengths = None
helcorr = helcorr.strip()
algorithm = "wavelet"
if algorithm != "wavelet" and algorithm != "kd":
raise ValueError("algorithm can only be 'wavelet' or 'kd'")
if trace.strip() == "":
trace = None
if subsampled.strip() == "":
subsampled = None
if convolved.strip() == "":
convolved = None
rows = convert_rows (rows)
WX.wx2d (input, output=output, wavelengths=wavelengths,
helcorr=helcorr, algorithm=algorithm,
trace=trace, order=order, subdiv=subdiv,
psf_width=psf_width, rows=rows,
subsampled=subsampled, convolved=convolved)
_parfile = "stis$wx2d.par"
t_wx2d = iraf.IrafTaskFactory (taskname="wx2d", value=iraf.osfn(_parfile),
pkgname=PkgName, pkgbinary=PkgBinary,
function=_wx2d_iraf)
def convert_rows (rows):
"""Read a two-element tuple from a string.
rows should be a string containing two integers separated by a
comma, blank, or colon. The numbers may be enclosed in parentheses
or brackets, but this is not necessary. Note: the row numbers
are one indexed and inclusive, e.g. rows = "480, 544" means process
rows 479 through 543 (zero indexed), which is equivalent to the
slice 479:544.
"""
if rows.strip() == "":
rows = None
else:
bad = True
if rows.find (",") >= 0:
rownum = rows.split (",")
else:
rownum = rows.split (" ")
if len (rownum) == 2:
bad = False
try:
row0 = int (rownum[0]) - 1
row1 = int (rownum[1])
except:
bad = True
if bad:
raise ValueError("can't interpret rows = %s" % (rows,))
rows = (row0, row1)
return rows
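# For example, convert_rows("480, 544") returns (479, 544), i.e. the zero-indexed
# slice 479:544 described in the docstring above.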
|
# coding: utf-8
# In[78]:
#!/usr/bin/python
import time
start_time = time.time()
import sys
import pickle
import numpy as np
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from tester import dump_classifier_and_data
from tester import test_classifier
# In[79]:
#------------------------------------------------------------------
### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
poi = ['poi']
financial_features = ['salary','bonus','deferral_payments','deferred_income',
'exercised_stock_options','expenses','long_term_incentive',
'other','restricted_stock','total_payments','total_stock_value']
email_features = ['from_messages','from_poi_to_this_person','from_this_person_to_poi',
'shared_receipt_with_poi','to_messages']
features_list = poi + financial_features + email_features
### Load the dictionary containing the dataset
with open("final_project_dataset.pkl", "r") as data_file:
data_dict = pickle.load(data_file)
print "Size dataset: " , len(data_dict)
poi = 0
not_poi = 0
for k in data_dict:
if data_dict[k]['poi'] == True:
poi += 1
if data_dict[k]['poi'] == False:
not_poi += 1
print "Number of POI: " , poi
print "Number of not POI: " , not_poi
print "Number of financial Features: " , len(financial_features)
print "Number of email Features: " , len(email_features)
# In[80]:
#-----------------------------------------------------------------
### Task 2: Remove outliers
# remove 'TOTAL' from dictionary
del data_dict['TOTAL']
# remove 'THE TRAVEL AGENCY IN THE PARK' from dictionary
del data_dict['THE TRAVEL AGENCY IN THE PARK']
# remove negative values from 'restricted_stock'
for person in data_dict:
if data_dict[person]['restricted_stock'] < 0 and data_dict[person]['restricted_stock'] != 'NaN':
data_dict[person]['restricted_stock'] = 'NaN'
# remove negative values from 'deferral_payments'
for person in data_dict:
if data_dict[person]['deferral_payments'] < 0 and data_dict[person]['deferral_payments'] != 'NaN':
data_dict[person]['deferral_payments'] = 'NaN'
# remove negative values from 'total_stock_value'
for person in data_dict:
if data_dict[person]['total_stock_value'] < 0 and data_dict[person]['total_stock_value'] != 'NaN':
data_dict[person]['total_stock_value'] = 'NaN'
# Remove 'restricted_stock_deferred' and 'loan_advances' from the features; little relevant data is available
# Remove 'director_fee' because there is only non-POI data
# In[81]:
# Check whether any person has only a single non-NaN field (i.e. no usable feature values)
not_NaN_data = {}
for key in data_dict:
not_NaN_feature = 0
for feature in data_dict[key]:
if data_dict[key][feature] != 'NaN':
not_NaN_feature += 1
not_NaN_data[key] = not_NaN_feature
for k in not_NaN_data:
if not_NaN_data[k] == 1:
print k
print data_dict[k]
# In[82]:
# remove 'LOCKHART EUGENE E' from the dictionary (found above with only one non-NaN field)
del data_dict['LOCKHART EUGENE E']
# In[83]:
#------------------------------------------------------------------
### Task 3: Create new feature(s)
### Store to my_dataset for easy export below.
### The messages to and from POIs are absolute counts; let's create new features that express them as a ratio of the total messages.
def new_feature_ratio(new_feature, numerator, denominator):
for key in data_dict:
if data_dict[key][denominator] != 'NaN' and data_dict[key][numerator] != "NaN":
data_dict[key][new_feature] = float(data_dict[key][numerator]) / float(data_dict[key][denominator])
else:
data_dict[key][new_feature] = "NaN"
features_list.append(new_feature)
### Feature - 'from_this_person_to_poi_ratio'
new_feature_ratio('from_this_person_to_poi_ratio', 'from_this_person_to_poi', 'from_messages')
### Feature - 'from_poi_to_this_person_ratio'
new_feature_ratio('from_poi_to_this_person_ratio', 'from_poi_to_this_person', 'to_messages')
### Feature - 'bonus_ratio'
new_feature_ratio('bonus_ratio', 'bonus', 'salary')
# In[84]:
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
my_dataset = data_dict
### Put features with "long tail" in log10 scale
features_list_log = ['salary','bonus','deferral_payments','exercised_stock_options',
'expenses','long_term_incentive','other','restricted_stock',
'total_payments','total_stock_value', 'from_messages',
'from_poi_to_this_person', 'from_this_person_to_poi',
'shared_receipt_with_poi', 'bonus_ratio']
features_list_log = []  # NOTE: this reassignment empties the list above, so the log10 rescaling loop below never runs
for n in range(1,len(features_list_log)):
for person in my_dataset:
if my_dataset[person][features_list_log[n]] != "NaN":
if my_dataset[person][features_list_log[n]] >= 0:
if my_dataset[person][features_list_log[n]] == 0:
my_dataset[person][features_list_log[n]] = 0
else:
my_dataset[person][features_list_log[n]] = np.log10(my_dataset[person][features_list_log[n]]*-1)
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
### Put all features in the same range (0, 1)
for n in range(0,len(features[0])):
feature = []
for person in range(0,len(features)):
feature.append(features[person][n])
feature = np.array(feature).reshape(-1,1)
feature = scaler.fit_transform(feature)
for person in range(0,len(features)):
features[person][n] = feature[person]
# In[85]:
#-----------------------------------------------------------------
### Task 4: Try a variety of classifiers
### Please name your classifier clf for easy export below.
### Note that if you want to do PCA or other multi-stage operations,
### you'll need to use Pipelines. For more info:
### http://scikit-learn.org/stable/modules/pipeline.html
# Provided to give you a starting point. Try a variety of classifiers.
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
clf_NB = GaussianNB()
from sklearn import svm
from sklearn.svm import SVC
parameters = {'kernel':('linear', 'rbf', 'poly', 'sigmoid'), 'C':[1, 10], 'degree': [2,10]}
svr = svm.SVC()
clf_SVM = GridSearchCV(svr, parameters, scoring = 'f1')
from sklearn.tree import DecisionTreeClassifier
parameters = {'criterion':('gini', 'entropy'), 'splitter':('best', 'random'), 'min_samples_split':[2,200]}
svr = DecisionTreeClassifier()
clf_tree = GridSearchCV(svr, parameters, scoring = 'f1')
from sklearn.ensemble import RandomForestClassifier
parameters = {'n_estimators': [2,20], 'criterion':('gini', 'entropy'), 'min_samples_split':[2,200]}
svr = RandomForestClassifier()
clf_randon_forest = GridSearchCV(svr, parameters, scoring = 'f1')
classifiers = {"clf_NB": clf_NB,
"clf_SVM": clf_SVM,
"clf_tree": clf_tree,
"clf_randon_forest": clf_randon_forest}
#----------------------------------------------------------------
### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script. Check the tester.py script in the final project
### folder for details on the evaluation method, especially the test_classifier
### function. Because of the small size of the dataset, the script uses
### stratified shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
# Example starting point. Try investigating other evaluation techniques!
### Using K-fold
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.pipeline import make_pipeline
def train_test_StratifiedKFold(clf, k_best, features):
# Enter a classifier and the number of the k-best features and the function return
# the classifier and validation metrics
acc = []
pre = []
rec = []
f = []
skf = StratifiedKFold(2, shuffle=True)
for train_index, test_index in skf.split(features, labels):
features_train = [features[ii] for ii in train_index]
labels_train = [labels[ii] for ii in train_index]
features_test = [features[ii] for ii in test_index]
labels_test = [labels[ii] for ii in test_index]
skb = SelectKBest(f_classif, k = k_best)
pipe = make_pipeline(skb, clf)
pipe.fit(features_train, labels_train)
labels_pred = pipe.predict(features_test)
acc.append(accuracy_score (labels_test, labels_pred))
pre_rec_f = precision_recall_fscore_support (labels_test, labels_pred)
try:
pre.append(pre_rec_f[0][1])
except:
pass
try:
rec.append(pre_rec_f[1][1])
except:
pass
try:
f.append(pre_rec_f[2][1])
except:
pass
return [pipe, np.mean(acc), np.mean(pre), np.mean(rec), np.mean(f)]
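# Example (hypothetical): score Gaussian Naive Bayes on the 5 best features
#   pipe, acc, pre, rec, f = train_test_StratifiedKFold(GaussianNB(), 5, features)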
# In[86]:
#---------------------------------------------------------
# Now we will test the best classifiers
best_clf = [None, None, None]
# We will test every combination of the 4 algorithms and k-best features (k from 1 to 16).
# For each metric (accuracy, precision, recall and f) we will print the best combination.
# We will try 5 times to be sure to choose the best combination
for test in range(1,6):
max_acc = [0, 'NaN', 'NaN']
max_pre = [0, 'NaN', 'NaN']
max_rec = [0, 'NaN', 'NaN']
max_f = [0, 'NaN', 'NaN']
for algor in classifiers:
for k_best in range(1, 17): #20):
preview_clf, acc, pre, rec, f = train_test_StratifiedKFold(classifiers[algor], k_best, features)
if acc > max_acc[0]:
max_acc = [acc, algor, k_best]
if pre > max_pre[0]:
max_pre = [pre, algor, k_best]
if rec > max_rec[0]:
max_rec = [rec, algor, k_best]
if f > max_f[0]:
max_f = [f, algor, k_best]
best_clf = ['k-best', max_f, preview_clf]
print ""
print "Test k-best ", test
print 'Accuracy: ', max_acc
print 'Precision: ', max_pre
    print 'Recall: ', max_rec
print 'f Score: ', max_f
### We will do the same but decomposing the features using PCA (number of components 1 to 16)
from sklearn.decomposition import PCA
for test in range(1,6):
max_acc = [0, 'NaN', 'NaN']
max_pre = [0, 'NaN', 'NaN']
max_rec = [0, 'NaN', 'NaN']
max_f = [0, 'NaN', 'NaN']
for algor in classifiers:
for n_comp in range(1, 17): #20):
pca = PCA(n_components = n_comp)
pipe = make_pipeline(pca, classifiers[algor])
#pca_features = pca.fit_transform(features)
preview_clf, acc, pre, rec, f = train_test_StratifiedKFold(pipe, "all", features)
if acc > max_acc[0]:
max_acc = [acc, algor, n_comp]
if pre > max_pre[0]:
max_pre = [pre, algor, n_comp]
if rec > max_rec[0]:
max_rec = [rec, algor, n_comp]
if f > max_f[0]:
max_f = [f, algor, n_comp]
if f > best_clf[1][0]:
best_clf = ['PCA', max_f, preview_clf]
print ""
print "Test PCA", test
print 'Accuracy: ', max_acc
print 'Precision: ', max_pre
    print 'Recall: ', max_rec
print 'f Score: ', max_f
# In[87]:
#--------------------------------------------------------
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
print "f classi: ", best_clf[1]
print "K-best or PCA: ", best_clf[0]
print "Classifier:"
print best_clf[2]
### The best classifier is
clf = best_clf[2]
dump_classifier_and_data(clf, my_dataset, features_list)
print ""
test_classifier(clf, my_dataset, features_list, folds = 1000)
print ""
print("--- %s seconds ---" % (time.time() - start_time))
|
# -*- coding: utf-8 -*-
"""Test fixtures."""
from os import environ, makedirs
from os.path import abspath, exists
from shutil import copyfile, rmtree
from tempfile import mkdtemp
from config import Config
from pytest import fixture
WRENTMPDIR = f"{mkdtemp(prefix='wren-pytest-')}"
for b in ("1111111", "2222222", "3333333", "4444444", "5555555", "6666666", "7777777"):
makedirs(abspath(f"{WRENTMPDIR}/bin/{b}"))
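# NOTE: the fixtures below join paths with Windows-style "\\" separators while the
# directories above are created with "/"; the tests therefore assume a Windows host.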
@fixture(name="config")
def fixture_config():
"""Yield config object."""
environ["WRENTMPDIR"] = WRENTMPDIR + "\\bin"
cfg = Config("./tests/testdata/test.cfg")
yield cfg
@fixture(name="wowsdir")
def fixture_wowsdir():
"""Yield WoWs directory."""
_prepare_global_mo(f"{WRENTMPDIR}\\bin\\7777777")
yield f"{WRENTMPDIR}\\bin\\7777777"
rmtree(WRENTMPDIR)
def _prepare_global_mo(wowsbin):
binlcdir = abspath(f"{wowsbin}/res/texts/en/LC_MESSAGES")
makedirs(binlcdir)
copyfile("./tests/testdata/global.mo", f"{binlcdir}/global.mo")
assert exists(f"{binlcdir}/global.mo")
|
# coding: utf-8
from operator import itemgetter
''' 1. Answer: Can dictionaries be sorted?'''
# It is not possible to sort a dictionary itself, but we can create a representation of it with the keys in order.
# In this case, a list of tuples
def ordena(dicionario):
L=dict.keys(dicionario)
L.sort()
D=[]
for i in L:
D.append((i,dicionario[i]))
return D
# Printing the dictionary's data in sorted order
def print_ordenado(dicionario):
for k,v in sorted(dicionario.items(), key=itemgetter(0)):
        print 'key(',k,')-> value:', v
''' 2. Write a function that converts integers between 1 and 999 into Roman numerals. Do not convert
the number to a string. Use the three dictionaries below: '''
def romanos(n):
UNIDADES = { 0: '', 1: 'I', 2: 'II', 3: 'III', 4: 'IV', 5: 'V', 6: 'VI', 7: 'VII', 8: 'VIII', 9: 'IX' }
DEZENAS = { 0: '', 1: 'X', 2: 'XX', 3: 'XXX', 4: 'XL', 5: 'L', 6: 'LX', 7: 'LXX', 8: 'LXXX', 9:'XC' }
CENTENAS = { 0: '', 1: 'C', 2: 'CC', 3: 'CCC', 4: 'CD', 5: 'D', 6: 'DC', 7: 'DCC', 8:'DCCC', 9:'CM' }
u= n%10
c= n/100
d= n/10 - c*10
num = CENTENAS[c] + DEZENAS[d] + UNIDADES[u]
return num
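# Example (relies on Python 2 integer division): romanos(14) -> 'XIV', romanos(999) -> 'CMXCIX'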
''' 3. Build a function that receives a string and returns a dictionary where each word of that string is
a key whose value is the number of times the word appears.'''
def freq_palavras(string):
Chaves=str.split(string)
dicionario={}
for i in Chaves:
dicionario[i]=list.count(Chaves,i)
return dicionario
''' 4. A messenger RNA molecule is used as the template for synthesizing proteins, in the process
called translation. Each triplet of messenger RNA bases corresponds to one amino acid, and combining
several amino acids gives a protein. Based on the (simplified) table of RNA triplets below, create a
function that receives a string representing a valid messenger RNA molecule, according to this table,
and returns the chain of amino acids that makes up the corresponding protein:'''
def traducao_rnaM(molecula):
trincas={'UUU': 'Phe','CUU': 'Leu','UUA': 'Leu','AAG': 'Lisina','UCU':'Ser','UAU':'Tyr','CAA':'Gln'}
i=0
L=[]
while i<len(molecula):
L.append(molecula[i:i+3])
i+=3
"""
    If you wanted to use a for loop instead of while, it would be:
for i in range(0, len(molecula), 3):
L.append(molecula[i:i+3])
"""
M=[]
for j in L:
M.append(trincas[j])
return str.join("-",M)
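# Example: traducao_rnaM('UUUCUUAAG') -> 'Phe-Leu-Lisina'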
''' 5. Write a function that receives a shopping list and a dictionary containing the price of each product
available in a given store, and returns the total value of the items on the list that are available
in that store. '''
def compras(lista, supermercado = {'amaciante':4.99,'arroz':10.90,'biscoito':1.69,'cafe':6.98,'chocolate':3.79,'farinha':2.99}):
conta=0
for i in lista:
conta += supermercado[i]
return conta
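# Example: compras(['arroz', 'cafe']) -> 10.90 + 6.98 = 17.88 (up to float rounding)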
|
#!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import wiredtiger, wttest
from wiredtiger import stat
# test_search_near01.py
# Test various prefix search near scenarios.
class test_search_near01(wttest.WiredTigerTestCase):
conn_config = 'statistics=(all)'
def get_stat(self, stat, local_session = None):
if (local_session != None):
stat_cursor = local_session.open_cursor('statistics:')
else:
stat_cursor = self.session.open_cursor('statistics:')
val = stat_cursor[stat][2]
stat_cursor.close()
return val
def unique_insert(self, cursor, prefix, id, keys):
key = prefix + ',' + str(id)
keys.append(key)
cursor.set_key(prefix)
cursor.set_value(prefix)
self.assertEqual(cursor.insert(), 0)
cursor.set_key(prefix)
self.assertEqual(cursor.remove(), 0)
cursor.set_key(prefix)
cursor.search_near()
cursor.set_key(key)
cursor.set_value(key)
self.assertEqual(cursor.insert(), 0)
def test_base_scenario(self):
uri = 'table:test_base_scenario'
self.session.create(uri, 'key_format=u,value_format=u')
cursor = self.session.open_cursor(uri)
session2 = self.conn.open_session()
cursor3 = self.session.open_cursor(uri, None, "debug=(release_evict=true)")
# Basic character array.
l = "abcdefghijklmnopqrstuvwxyz"
# Start our older reader.
session2.begin_transaction()
key_count = 26*26*26
# Insert keys aaa -> zzz.
self.session.begin_transaction()
for i in range (0, 26):
for j in range (0, 26):
for k in range (0, 26):
cursor[l[i] + l[j] + l[k]] = l[i] + l[j] + l[k]
self.session.commit_transaction()
# Evict the whole range.
for i in range (0, 26):
for j in range(0, 26):
cursor3.set_key(l[i] + l[j] + 'a')
cursor3.search()
cursor3.reset()
# Search near for the "aa" part of the range.
cursor2 = session2.open_cursor(uri)
cursor2.set_key('aa')
cursor2.search_near()
skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100)
# This should be equal to roughly key_count * 2 as we're going to traverse the whole
# range forward, and then the whole range backwards.
self.assertGreater(skip_count, key_count * 2)
cursor2.reconfigure("prefix_search=true")
cursor2.set_key('aa')
self.assertEqual(cursor2.search_near(), wiredtiger.WT_NOTFOUND)
prefix_skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100)
# We should've skipped ~26 here as we're only looking at the "aa" range.
self.assertGreaterEqual(prefix_skip_count - skip_count, 26)
skip_count = prefix_skip_count
# The prefix code will have come into play at once as we walked to "aba". The prev
# traversal will go off the end of the file and as such we don't expect it to increment
# this statistic again.
self.assertEqual(self.get_stat(stat.conn.cursor_search_near_prefix_fast_paths), 1)
# Search for a key not at the start.
cursor2.set_key('bb')
self.assertEqual(cursor2.search_near(), wiredtiger.WT_NOTFOUND)
# Assert it to have only incremented the skipped statistic ~26 times.
prefix_skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100)
self.assertGreaterEqual(prefix_skip_count - skip_count, 26)
skip_count = prefix_skip_count
# Here we should have hit the prefix fast path code twice, as we have called prefix
# search near twice, both of which should have early exited when going forwards.
self.assertEqual(self.get_stat(stat.conn.cursor_search_near_prefix_fast_paths), 2)
cursor2.close()
cursor2 = session2.open_cursor(uri)
cursor2.set_key('bb')
cursor2.search_near()
# Assert that we've incremented the stat key_count times, as we closed the cursor and
# reopened it.
#
# This validates cursor caching logic, as if we don't clear the flag correctly this will
# fail.
#
# It should be closer to key_count * 2 but this an approximation.
prefix_skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100)
self.assertGreaterEqual(prefix_skip_count - skip_count, key_count)
# This test aims to simulate a unique index insertion.
def test_unique_index_case(self):
uri = 'table:test_unique_index_case'
self.session.create(uri, 'key_format=u,value_format=u')
cursor = self.session.open_cursor(uri)
session2 = self.conn.open_session()
cursor3 = self.session.open_cursor(uri, None, "debug=(release_evict=true)")
l = "abcdefghijklmnopqrstuvwxyz"
# A unique index has the following insertion method:
# 1. Insert the prefix
# 2. Remove the prefix
# 3. Search near for the prefix
# 4. Insert the full value
# All of these operations are wrapped in the same txn, this test attempts to test scenarios
# that could arise from this insertion method.
# A unique index key has the format (prefix, _id), we'll insert keys that look similar.
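        # The unique_insert() helper above performs exactly these four steps inside the caller's transaction.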
# Start our old reader txn.
session2.begin_transaction()
key_count = 26*26
id = 0
cc_id = 0
keys = []
# Insert keys aa,1 -> zz,N
for i in range (0, 26):
for j in range (0, 26):
# Skip inserting 'c'.
if (i == 2 and j == 2):
cc_id = id
id = id + 1
continue
self.session.begin_transaction()
prefix = l[i] + l[j]
self.unique_insert(cursor, prefix, id, keys)
id = id + 1
self.session.commit_transaction()
# Evict the whole range.
for i in keys:
cursor3.set_key(i)
cursor3.search()
cursor3.reset()
# Using our older reader attempt to find a value.
# Search near for the "cc" prefix.
cursor2 = session2.open_cursor(uri)
cursor2.set_key('cc')
cursor2.search_near()
skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100)
# This should be slightly greater than key_count as we're going to traverse most of the
# range forwards.
self.assertGreater(skip_count, key_count)
self.assertEqual(self.get_stat(stat.conn.cursor_search_near_prefix_fast_paths), 0)
cursor2.reconfigure("prefix_search=true")
cursor2.set_key('cc')
self.assertEqual(cursor2.search_near(), wiredtiger.WT_NOTFOUND)
self.assertEqual(self.get_stat(stat.conn.cursor_search_near_prefix_fast_paths), 1)
# This still isn't visible to our older reader and as such we expect this statistic to
# increment again.
self.unique_insert(cursor2, 'cc', cc_id, keys)
self.assertEqual(self.get_stat(stat.conn.cursor_search_near_prefix_fast_paths), 2)
# In order for prefix key fast pathing to work we rely on some guarantees provided by row
# search. Test some of the guarantees.
def test_row_search(self):
uri = 'table:test_row_search'
self.session.create(uri, 'key_format=u,value_format=u')
cursor = self.session.open_cursor(uri)
expect_count = self.get_stat(stat.conn.cursor_next_skip_lt_100)
session2 = self.conn.open_session()
l = "abcdefghijklmnopqrstuvwxyz"
# Insert keys a -> z, except c
self.session.begin_transaction()
for i in range (0, 26):
if (i == 2):
continue
cursor[l[i]] = l[i]
self.session.commit_transaction()
# Start our older reader transaction.
session2.begin_transaction()
# Insert a few keys in the 'c' range
self.session.begin_transaction()
cursor['c'] = 'c'
cursor['cc'] = 'cc'
cursor['ccc'] = 'ccc'
self.session.commit_transaction()
# Search_near for 'c' and assert we skip 3 entries. Internally the row search is landing on
# 'c'.
cursor2 = session2.open_cursor(uri)
cursor2.set_key('c')
cursor2.search_near()
expect_count += 1
skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100)
self.assertEqual(skip_count, expect_count)
session2.commit_transaction()
# Perform an insertion and removal of a key next to another key, then search for the
# removed key.
self.session.begin_transaction()
cursor.set_key('dd')
cursor.set_value('dd')
cursor.insert()
cursor.set_key('dd')
cursor.remove()
cursor.set_key('ddd')
cursor.set_value('ddd')
cursor.insert()
cursor.set_key('dd')
cursor.search_near()
self.session.commit_transaction()
expect_count += 1
skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100)
self.assertEqual(skip_count, expect_count)
# Test a basic prepared scenario.
def test_prepared(self):
uri = 'table:test_base_scenario'
self.session.create(uri, 'key_format=u,value_format=u')
cursor = self.session.open_cursor(uri)
session2 = self.conn.open_session()
cursor3 = session2.open_cursor(uri, None, "debug=(release_evict=true)")
# Insert an update without timestamp
l = "abcdefghijklmnopqrstuvwxyz"
session2.begin_transaction()
key_count = 26*26
# Insert 'cc'
self.session.begin_transaction()
cursor['cc'] = 'cc'
self.session.commit_transaction()
# Prepare keys aa -> zz
self.session.begin_transaction()
for i in range (0, 26):
if (i == 2):
continue
for j in range (0, 26):
cursor[l[i] + l[j]] = l[i] + l[j]
self.session.prepare_transaction('prepare_timestamp=2')
# Evict the whole range.
for i in range (0, 26):
for j in range(0, 26):
cursor3.set_key(l[i] + l[j])
cursor3.search()
cursor3.reset()
# Search near for the "aa" part of the range.
cursor2 = session2.open_cursor(uri)
cursor2.set_key('c')
self.assertEqual(cursor2.search_near(), wiredtiger.WT_NOTFOUND)
skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100, session2)
# This should be equal to roughly key_count as we're going to traverse the whole
# range forwards.
self.assertGreater(skip_count, key_count)
cursor2.reconfigure("prefix_search=true")
cursor2.set_key('c')
self.assertEqual(cursor2.search_near(), wiredtiger.WT_NOTFOUND)
prefix_skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100, session2)
# We expect to traverse one entry and have a buffer to account for anomalies.
self.assertEqual(prefix_skip_count - skip_count, 2)
skip_count = prefix_skip_count
# We early exit here as "cc" is not the last key.
self.assertEqual(self.get_stat(stat.conn.cursor_search_near_prefix_fast_paths, session2), 1)
session2.rollback_transaction()
session2.begin_transaction('ignore_prepare=true')
cursor4 = session2.open_cursor(uri)
cursor4.reconfigure("prefix_search=true")
cursor4.set_key('c')
self.assertEqual(cursor4.search_near(), 1)
prefix_skip_count = self.get_stat(stat.conn.cursor_next_skip_lt_100, session2)
# We expect to traverse one entry and have a buffer to account for anomalies.
self.assertEqual(prefix_skip_count - skip_count, 2)
skip_count = prefix_skip_count
cursor4.reconfigure("prefix_search=false")
cursor4.set_key('c')
ret = cursor4.search_near()
self.assertTrue(ret == -1 or ret == 1)
self.assertEqual(self.get_stat(stat.conn.cursor_next_skip_lt_100, session2) - skip_count, 2)
|
# The Grid Search
# Given a 2D array of digits, try to find a given 2D grid pattern of digits within it.
#
# https://www.hackerrank.com/challenges/the-grid-search/problem
#
def gridSearch(G, P):
    # search through every row of G
i = 0
while i <= len(G) - len(P):
p0 = 0
while True:
j = 0
i0 = i
            # check whether P[0] appears in the current row of G
p = G[i0].find(P[j], p0)
if p == -1:
break
            # look for the rest of P at the same column position
            # (not optimized, but clearer to write!)
while j < len(P) and i0 < len(G) and p == G[i0].find(P[j], p0):
i0 += 1
j += 1
if j == len(P):
return "YES"
            # the pattern P[0] may appear again further along the row...
p0 = p + 1
i += 1
return "NO"
if __name__ == "__main__":
t = int(input().strip())
for a0 in range(t):
R, C = input().strip().split(' ')
R, C = [int(R), int(C)]
G = []
G_i = 0
for G_i in range(R):
G_t = str(input().strip())
G.append(G_t)
r, c = input().strip().split(' ')
r, c = [int(r), int(c)]
P = []
P_i = 0
for P_i in range(r):
P_t = str(input().strip())
P.append(P_t)
result = gridSearch(G, P)
print(result)
|
def build_iter_wise_lr_scheduler(optimizer, optimizer_config: dict, num_epochs, iterations_per_epoch: int):
lr_scheduler_config = optimizer_config['lr_scheduler']
lr_scheduler_type = lr_scheduler_config['type']
total_iterations = num_epochs * iterations_per_epoch
if lr_scheduler_type == 'MultiStepLR':
from fvcore.common.param_scheduler import MultiStepParamScheduler
values = lr_scheduler_config['values']
milestones = lr_scheduler_config['milestones']
milestones = [int(round(num_epochs * milestone)) for milestone in milestones]
lr_scheduler = MultiStepParamScheduler(values, milestones=milestones)
else:
raise NotImplementedError
if 'warmup' in lr_scheduler_config:
warmup_config = lr_scheduler_config['warmup']
warmup_factor = warmup_config['initial_factor']
warmup_length = warmup_config['length']
warmup_method = warmup_config['method']
from .iter_wise import WarmupParamScheduler
lr_scheduler = WarmupParamScheduler(lr_scheduler, warmup_factor, warmup_length, warmup_method)
from .iter_wise import LRMultiplier
lr_scheduler = LRMultiplier(optimizer, lr_scheduler, total_iterations)
return lr_scheduler
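# A minimal sketch of the expected optimizer_config structure (key names taken from
# what the code above reads; the values are illustrative assumptions):
#   optimizer_config = {
#       'lr_scheduler': {
#           'type': 'MultiStepLR',
#           'values': [1.0, 0.1, 0.01],      # LR multipliers for each stage
#           'milestones': [0.6, 0.9],        # fractions of num_epochs
#           'warmup': {'initial_factor': 0.001, 'length': 0.05, 'method': 'linear'},
#       }
#   }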
|
from TASSELpy.utils.Overloading import javaOverload
from TASSELpy.utils.helper import make_sig
from TASSELpy.java.lang.Object import Object
from TASSELpy.java.lang.String import String
import javabridge
java_imports = {'Map':'java/util/Map',
'GeneralAnnotation':'net/maizegenetics/util/GeneralAnnotation',
'Object':'java/lang/Object',
'SetMultimap':'com/google/common/collect/SetMultimap',
'String':'java/lang/String'}
## Provide generalized annotations (descriptors) for taxon or site
class GeneralAnnotation(Object):
"""
Provide generalized annotations (descriptors) for taxon or site
"""
_java_name = java_imports['GeneralAnnotation']
## Returns all annotation values for a given annotation key
# @param annoName annotation key
# @return array of annotation values (if not present new String[0])
@javaOverload("getAnnotation",
(make_sig([java_imports['String']],java_imports['Object']+'[]'),
(str,),lambda x: javabridge.get_env().get_object_array_elements(x)))
def getAnnotation(self, *args):
"""
Returns all annotation value for a given annotation key
Signatures:
Object[] getAnnotation(String annoName)
Arguments:
annoName -- annotation key
Returns:
array of annotation values (if not present new String[0])
"""
pass
## Returns all annotation values for a given annotation key
# @param annoName annotation key
# @return array of annotation values (if not present new String[0])
@javaOverload("getTextAnnotation",
(make_sig([java_imports['String']],java_imports['String']+'[]'),(str,),
lambda x: map(lambda y: String(obj=y).toString(),
javabridge.get_env().get_object_array_elements(x))))
def getTextAnnotation(self, *args):
"""
Returns all annotation values for a given annotation key
Signatures:
String[] getTextAnnotation(String annoName)
Arguments:
annoName -- annotation key
Returns:
array of annotation values (if not present new String[0])
"""
pass
## Returns consensus value for given annotation key
# @param annoName annotation key
# @return Consensus value (if not present new String[0])
@javaOverload("getConsensusAnnotation",
(make_sig([java_imports['String']],java_imports['String']),(str,),
None))
def getConsensusAnnotation(self, *args):
"""
Returns consensus value for given annotation key
Signatures:
String getConsensusAnnotation(String annoName)
Arguments:
annoName -- annotation key
Returns:
Consensus value (if not present new String[0])
"""
pass
## Returns all annotation value for given annotation key
# @param annoName annotation key
# @return array of annotation values (if not present new double[0])
@javaOverload("getQuantAnnotation",
(make_sig([java_imports['String']],'double[]'),(str,),
lambda x: javabridge.get_env().get_double_array_elements(x)))
def getQuantAnnotation(self, *args):
"""
Returns all annotation value for given annotation key
Signatures:
double[] getQuantAnnotation(String annoName)
Arguments:
annoName -- annotation key
Returns:
array of annotation values (if not present new double[0])
"""
pass
## Returns average annotation for a given annotation key
# @param annoName annotation key
# @return average value (if not present - return Double.NaN)
@javaOverload("getAverageAnnotation",
(make_sig([java_imports['String']],'double'),(str,),None))
def getAverageAnnotation(self, *args):
"""
Returns average annotation for a given annotation key
Signatures:
double getAverageAnnotation(String annoName)
Arguments:
annoName -- annotation key
Returns:
average value (if not present - return Double.NaN)
"""
pass
## Returns all annotation Map.Entries
# @return Array of Map.Entry
@javaOverload("getAllAnnotationEntries",
(make_sig([],java_imports['Map']+'$Entry[]'),(),
lambda x: javabridge.get_env().get_object_array_elements(x)))
def getAllAnnotationEntries(self, *args):
"""
Returns all annotation Map.Entries
Signatures:
Map.Entry<String, String>[] getAllAnnotationEntries()
Returns:
Array of Map.Entry
"""
pass
## Returns all annotations in TreeMap
# @return Map of annotations
@javaOverload("getAnnotationAsMap",
(make_sig([],java_imports['SetMultimap']),(),
None))
def getAnnotationAsMap(self, *args):
"""
Returns all annotations in TreeMap
Signatures:
SetMultimap<String,String> getAnnotationAsMap()
Returns:
Map of annotations
"""
pass
|
'''
JoyCursor
=========
.. versionadded:: 1.10.0
The JoyCursor is a tool for navigating with a joystick as if using a mouse
or touch. Most of the actions that are possible for a mouse user are available
in this module.
For example:
* left click
* right click
* double click (two clicks)
* moving the cursor
* holding the button (+ moving at the same time)
* selecting
* scrolling
There are some properties that can be edited live, such as intensity of the
JoyCursor movement and toggling mouse button holding.
Usage
-----
For normal module usage, please see the :mod:`~kivy.modules` documentation
and these bindings:
+------------------+--------------------+
| Event | Joystick |
+==================+====================+
| cursor move | Axis 3, Axis 4 |
+------------------+--------------------+
| cursor intensity | Button 0, Button 1 |
+------------------+--------------------+
| left click | Button 2 |
+------------------+--------------------+
| right click | Button 3 |
+------------------+--------------------+
| scroll up | Button 4 |
+------------------+--------------------+
| scroll down | Button 5 |
+------------------+--------------------+
| hold button | Button 6 |
+------------------+--------------------+
| joycursor on/off | Button 7 |
+------------------+--------------------+
The JoyCursor, like Inspector, can also be imported and used as a normal
python module. This has the added advantage of being able to activate and
deactivate the module programmatically::
from kivy.lang import Builder
from kivy.base import runTouchApp
runTouchApp(Builder.load_string("""
#:import jc kivy.modules.joycursor
BoxLayout:
Button:
text: 'Press & activate with Ctrl+E or Button 7'
on_release: jc.create_joycursor(root.parent, root)
Button:
text: 'Disable'
on_release: jc.stop(root.parent, root)
"""))
'''
__all__ = ('start', 'stop', 'create_joycursor')
from kivy.clock import Clock
from kivy.logger import Logger
from kivy.uix.widget import Widget
from kivy.graphics import Color, Line
from kivy.properties import (
ObjectProperty,
NumericProperty,
BooleanProperty
)
class JoyCursor(Widget):
win = ObjectProperty()
activated = BooleanProperty(False)
cursor_width = NumericProperty(1.1)
cursor_hold = BooleanProperty(False)
intensity = NumericProperty(4)
dead_zone = NumericProperty(10000)
offset_x = NumericProperty(0)
offset_y = NumericProperty(0)
def __init__(self, **kwargs):
super(JoyCursor, self).__init__(**kwargs)
self.avoid_bring_to_top = False
self.size_hint = (None, None)
self.size = (21, 21)
self.set_cursor()
# draw cursor
with self.canvas:
Color(rgba=(0.19, 0.64, 0.81, 0.5))
self.cursor_ox = Line(
points=self.cursor_pts[:4],
width=self.cursor_width + 0.1
)
self.cursor_oy = Line(
points=self.cursor_pts[4:],
width=self.cursor_width + 0.1
)
Color(rgba=(1, 1, 1, 0.5))
self.cursor_x = Line(
points=self.cursor_pts[:4],
width=self.cursor_width
)
self.cursor_y = Line(
points=self.cursor_pts[4:],
width=self.cursor_width
)
self.pos = [-i for i in self.size]
def on_window_children(self, win, *args):
# pull JoyCursor to the front when added
# as a child directly to the window.
if self.avoid_bring_to_top or not self.activated:
return
self.avoid_bring_to_top = True
win.remove_widget(self)
win.add_widget(self)
self.avoid_bring_to_top = False
def on_activated(self, instance, activated):
# bind/unbind when JoyCursor's state is changed
if activated:
self.win.add_widget(self)
self.move = Clock.schedule_interval(self.move_cursor, 0)
self.win.fbind('on_joy_axis', self.check_cursor)
self.win.fbind('on_joy_button_down', self.set_intensity)
self.win.fbind('on_joy_button_down', self.check_dispatch)
self.win.fbind('mouse_pos', self.stop_cursor)
mouse_pos = self.win.mouse_pos
self.pos = (
mouse_pos[0] - self.size[0] / 2.0,
mouse_pos[1] - self.size[1] / 2.0
)
Logger.info('JoyCursor: joycursor activated')
else:
self.pos = [-i for i in self.size]
Clock.unschedule(self.move)
self.win.funbind('on_joy_axis', self.check_cursor)
self.win.funbind('on_joy_button_down', self.set_intensity)
self.win.funbind('on_joy_button_down', self.check_dispatch)
self.win.funbind('mouse_pos', self.stop_cursor)
self.win.remove_widget(self)
Logger.info('JoyCursor: joycursor deactivated')
def set_cursor(self, *args):
# create cursor points
px, py = self.pos
sx, sy = self.size
self.cursor_pts = [
px, py + round(sy / 2.0), px + sx, py + round(sy / 2.0),
px + round(sx / 2.0), py, px + round(sx / 2.0), py + sy
]
def check_cursor(self, win, stickid, axisid, value):
# check axes and set offset if a movement is registered
intensity = self.intensity
dead = self.dead_zone
if axisid == 3:
if value < -dead:
self.offset_x = -intensity
elif value > dead:
self.offset_x = intensity
else:
self.offset_x = 0
elif axisid == 4:
# invert Y axis to behave like mouse
if value < -dead:
self.offset_y = intensity
elif value > dead:
self.offset_y = -intensity
else:
self.offset_y = 0
else:
self.offset_x = 0
self.offset_y = 0
def set_intensity(self, win, stickid, buttonid):
# set intensity of joycursor with joystick buttons
intensity = self.intensity
if buttonid == 0 and intensity > 2:
intensity -= 1
elif buttonid == 1:
intensity += 1
self.intensity = intensity
def check_dispatch(self, win, stickid, buttonid):
if buttonid == 6:
self.cursor_hold = not self.cursor_hold
if buttonid not in (2, 3, 4, 5, 6):
return
x, y = self.center
# window event, correction necessary
y = self.win.system_size[1] - y
modifiers = []
actions = {
2: 'left',
3: 'right',
4: 'scrollup',
5: 'scrolldown',
6: 'left'
}
button = actions[buttonid]
self.win.dispatch('on_mouse_down', x, y, button, modifiers)
if not self.cursor_hold:
self.win.dispatch('on_mouse_up', x, y, button, modifiers)
def move_cursor(self, *args):
# move joycursor as a mouse
self.pos[0] += self.offset_x
self.pos[1] += self.offset_y
modifiers = []
if self.cursor_hold:
self.win.dispatch(
'on_mouse_move',
self.center[0],
self.win.system_size[1] - self.center[1],
modifiers
)
def stop_cursor(self, instance, mouse_pos):
# pin the cursor to the mouse pos
self.offset_x = 0
self.offset_y = 0
self.pos = (
mouse_pos[0] - self.size[0] / 2.0,
mouse_pos[1] - self.size[1] / 2.0
)
def on_pos(self, instance, new_pos):
self.set_cursor()
self.cursor_x.points = self.cursor_pts[:4]
self.cursor_y.points = self.cursor_pts[4:]
self.cursor_ox.points = self.cursor_pts[:4]
self.cursor_oy.points = self.cursor_pts[4:]
def keyboard_shortcuts(self, win, scancode, *args):
modifiers = args[-1]
if scancode == 101 and modifiers == ['ctrl']:
self.activated = not self.activated
return True
elif scancode == 27:
if self.activated:
self.activated = False
return True
def joystick_shortcuts(self, win, stickid, buttonid):
if buttonid == 7:
self.activated = not self.activated
if self.activated:
self.pos = [round(i / 2.0) for i in win.size]
def create_joycursor(win, ctx, *args):
'''Create a JoyCursor instance attached to the *ctx* and bound to the
Window's :meth:`~kivy.core.window.WindowBase.on_keyboard` event for
capturing the keyboard shortcuts.
:Parameters:
`win`: A :class:`Window <kivy.core.window.WindowBase>`
The application Window to bind to.
`ctx`: A :class:`~kivy.uix.widget.Widget` or subclass
The Widget for JoyCursor to attach to.
'''
ctx.joycursor = JoyCursor(win=win)
win.bind(children=ctx.joycursor.on_window_children,
on_keyboard=ctx.joycursor.keyboard_shortcuts)
# always listen for joystick input to open the module
# (like a keyboard listener)
win.fbind('on_joy_button_down', ctx.joycursor.joystick_shortcuts)
def start(win, ctx):
Clock.schedule_once(lambda *t: create_joycursor(win, ctx))
def stop(win, ctx):
'''Stop and unload any active JoyCursors for the given *ctx*.
'''
if hasattr(ctx, 'joycursor'):
ctx.joycursor.activated = False
win.unbind(children=ctx.joycursor.on_window_children,
on_keyboard=ctx.joycursor.keyboard_shortcuts)
win.funbind('on_joy_button_down', ctx.joycursor.joystick_shortcuts)
win.remove_widget(ctx.joycursor)
del ctx.joycursor
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListFunctionTriggerResult:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'trigger_id': 'str',
'trigger_type_code': 'str',
'trigger_status': 'str',
'event_data': 'object',
'last_updated_time': 'datetime',
'created_time': 'datetime'
}
attribute_map = {
'trigger_id': 'trigger_id',
'trigger_type_code': 'trigger_type_code',
'trigger_status': 'trigger_status',
'event_data': 'event_data',
'last_updated_time': 'last_updated_time',
'created_time': 'created_time'
}
def __init__(self, trigger_id=None, trigger_type_code=None, trigger_status=None, event_data=None, last_updated_time=None, created_time=None):
"""ListFunctionTriggerResult - a model defined in huaweicloud sdk"""
self._trigger_id = None
self._trigger_type_code = None
self._trigger_status = None
self._event_data = None
self._last_updated_time = None
self._created_time = None
self.discriminator = None
self.trigger_id = trigger_id
self.trigger_type_code = trigger_type_code
self.trigger_status = trigger_status
self.event_data = event_data
self.last_updated_time = last_updated_time
self.created_time = created_time
@property
def trigger_id(self):
"""Gets the trigger_id of this ListFunctionTriggerResult.
        Trigger ID.
:return: The trigger_id of this ListFunctionTriggerResult.
:rtype: str
"""
return self._trigger_id
@trigger_id.setter
def trigger_id(self, trigger_id):
"""Sets the trigger_id of this ListFunctionTriggerResult.
        Trigger ID.
:param trigger_id: The trigger_id of this ListFunctionTriggerResult.
:type: str
"""
self._trigger_id = trigger_id
@property
def trigger_type_code(self):
"""Gets the trigger_type_code of this ListFunctionTriggerResult.
        Trigger type. - TIMER: \"Timer trigger.\" - APIG: \"API Gateway trigger.\" - CTS: \"Cloud Trace Service trigger.\" - DDS: \"Document Database Service trigger.\" - DMS: \"Distributed Message Service trigger.\" - DIS: \"Data Ingestion Service trigger.\" - LTS: \"Log Tank Service trigger.\" - OBS: \"Object Storage Service trigger.\" - SMN: \"Simple Message Notification trigger.\" - KAFKA: \"Dedicated message notification (Kafka) trigger.\"
:return: The trigger_type_code of this ListFunctionTriggerResult.
:rtype: str
"""
return self._trigger_type_code
@trigger_type_code.setter
def trigger_type_code(self, trigger_type_code):
"""Sets the trigger_type_code of this ListFunctionTriggerResult.
        Trigger type. - TIMER: \"Timer trigger.\" - APIG: \"API Gateway trigger.\" - CTS: \"Cloud Trace Service trigger.\" - DDS: \"Document Database Service trigger.\" - DMS: \"Distributed Message Service trigger.\" - DIS: \"Data Ingestion Service trigger.\" - LTS: \"Log Tank Service trigger.\" - OBS: \"Object Storage Service trigger.\" - SMN: \"Simple Message Notification trigger.\" - KAFKA: \"Dedicated message notification (Kafka) trigger.\"
:param trigger_type_code: The trigger_type_code of this ListFunctionTriggerResult.
:type: str
"""
self._trigger_type_code = trigger_type_code
@property
def trigger_status(self):
"""Gets the trigger_status of this ListFunctionTriggerResult.
\"触发器状态\" - ACTIVE: 启用状态。 - DISABLED: 禁用状态。
:return: The trigger_status of this ListFunctionTriggerResult.
:rtype: str
"""
return self._trigger_status
@trigger_status.setter
def trigger_status(self, trigger_status):
"""Sets the trigger_status of this ListFunctionTriggerResult.
\"触发器状态\" - ACTIVE: 启用状态。 - DISABLED: 禁用状态。
:param trigger_status: The trigger_status of this ListFunctionTriggerResult.
:type: str
"""
self._trigger_status = trigger_status
@property
def event_data(self):
"""Gets the event_data of this ListFunctionTriggerResult.
        Trigger source event.
:return: The event_data of this ListFunctionTriggerResult.
:rtype: object
"""
return self._event_data
@event_data.setter
def event_data(self, event_data):
"""Sets the event_data of this ListFunctionTriggerResult.
        Trigger source event.
:param event_data: The event_data of this ListFunctionTriggerResult.
:type: object
"""
self._event_data = event_data
@property
def last_updated_time(self):
"""Gets the last_updated_time of this ListFunctionTriggerResult.
        Last update time.
:return: The last_updated_time of this ListFunctionTriggerResult.
:rtype: datetime
"""
return self._last_updated_time
@last_updated_time.setter
def last_updated_time(self, last_updated_time):
"""Sets the last_updated_time of this ListFunctionTriggerResult.
        Last update time.
:param last_updated_time: The last_updated_time of this ListFunctionTriggerResult.
:type: datetime
"""
self._last_updated_time = last_updated_time
@property
def created_time(self):
"""Gets the created_time of this ListFunctionTriggerResult.
        Trigger creation time.
:return: The created_time of this ListFunctionTriggerResult.
:rtype: datetime
"""
return self._created_time
@created_time.setter
def created_time(self, created_time):
"""Sets the created_time of this ListFunctionTriggerResult.
        Trigger creation time.
:param created_time: The created_time of this ListFunctionTriggerResult.
:type: datetime
"""
self._created_time = created_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListFunctionTriggerResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
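# Minimal usage sketch, assuming the generated constructor (not shown here)
# accepts one keyword argument per property defined above; the values are
# illustrative only.
if __name__ == "__main__":
    example = ListFunctionTriggerResult(
        trigger_id="b1b2c3d4",
        trigger_type_code="TIMER",
        trigger_status="ACTIVE")
    print(example.to_dict())   # plain-dict view of the model properties
    print(example)             # JSON repr produced by to_str()/__repr__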
|
from django import forms
from django.forms import ModelForm
from .models import *
class GasPurchaseForm(ModelForm):
class Meta:
model = GasPurchase
fields = [
'vehicle',
'datetime',
'odometer_reading',
'cost_per_gallon',
'gallons',
]
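# Minimal usage sketch, assuming a view passes request.POST-style data;
# the helper name and the data shape are illustrative only.
def example_gas_purchase_view(post_data):
    form = GasPurchaseForm(post_data)
    if form.is_valid():
        return form.save()      # persists a GasPurchase instance
    return form.errors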
|
from System import *
from System.IO import *
from System.Windows import *
from Window1 import *
class $safeprojectname$App: # namespace
@staticmethod
def RealEntryPoint():
a = Application()
window1 = $safeprojectname$.Window1()
a.Run(window1.Root)
if __name__ == "Program":
$safeprojectname$App.RealEntryPoint();
|
#!/usr/bin/env python3
import os
import math
import argparse
import subprocess as sub
from sys import stderr, exit
from datetime import timedelta
from tempfile import mkdtemp
from threading import Semaphore, Thread, Lock
MAJOR, MINOR, PATCH = 0, 1, 1
VERSION = f'{MAJOR}.{MINOR}.{PATCH}'
class EmptyConfig:
"""Helper placeholder class
"""
def __getattribute__(self, name):
return None
def get_levels_in_time(probe_process: sub.Popen):
for line in probe_process.stdout:
yield map(float, line.decode().strip().split(','))
class AutoCut:
def __init__(self, input_file, output_base_name=None, config=None):
"""Constructs an AutoCut instance
input_file -- video/audio file to cut
output_base_name -- base name of an output file
config -- namespace of an argument parser
"""
self.check_utilities('ffmpeg', 'ffprobe')
if not os.path.exists(input_file):
print('error: file does not exist', file=stderr)
exit(1)
self.config = config if config else EmptyConfig()
self.input_file = input_file
input_base, extension = os.path.splitext(self.input_file)
self.output_base = output_base_name if output_base_name else input_base
self.extension = extension
def run_montage(self, rms_threshold: float):
"""Executes the segmentation and audio analysis
        rms_threshold -- Maximum root-mean-square (RMS) threshold level of noise
"""
segments = self.audio_level_segmentation(rms_threshold)
if not segments:
            print('warning: no cuts received', file=stderr)
return
temp_dir = mkdtemp(
prefix=f'autocut_{self.output_base}_',
dir=os.getcwd())
print(f'created clips directory: {temp_dir}')
        logn = len(str(len(segments)))  # max width of a clip index
segments_path = os.path.join(temp_dir, 'segments.txt')
count_guard, count = Lock(), 0
        # i, start and end are passed in explicitly so each worker thread keeps
        # its own values instead of racing on the loop variables below.
        def create_clip(i, start, end, segments_file, lock):
nonlocal count
file_base = f'{self.output_base}.{i+1:0{logn}}{self.extension}'
output_file = os.path.join(temp_dir, file_base)
success = True
if not self.config.dry_run:
success = self.slice_and_copy(start, end, output_file)
lock.release()
if success:
with count_guard:
count += 1
print(
f'successful: {count} / {len(segments)}',
end='\r',
flush=True)
segments_file.write(f'{file_base}: {start, end}\n')
else:
print(f'\nerror at {start, end}')
segments_file.write(f'<no file>: {start, end}\n')
with open(segments_path, 'w') as segments_file:
jobs = []
lock = Semaphore(8)
for i, (start, end) in enumerate(segments):
                t = Thread(target=create_clip, args=(i, start, end, segments_file, lock))
lock.acquire()
jobs.append(t)
t.start()
for job in jobs:
job.join()
print()
if self.config.dry_run and self.config.verbose:
with open(segments_path, 'r') as segments_file:
print('-' * 12)
print(f'{segments_path}\n')
print(segments_file.read())
try:
if self.config.dry_run:
os.remove(segments_path)
os.rmdir(temp_dir)
        except OSError:
            pass
def slice_and_copy(self, start: float, end: float, output_file):
"""Slices the input file at a section
and copies to a separate file
start -- beginning of a section
end -- end of a section
output_file -- file name to create a copy with
"""
args = [
'ffmpeg',
'-i',
self.input_file,
'-c',
'copy',
'-ss',
f'{timedelta(seconds=start)}',
'-to',
f'{timedelta(seconds=end)}',
output_file
]
edit_process = sub.Popen(args, stdout=sub.PIPE, stderr=sub.PIPE)
out, err = edit_process.communicate()
# print(f'process stdout: {out.decode()}')
# print(f'process stderr: {err.decode()}')
return edit_process.returncode == 0
def audio_level_segmentation(self, threshold: float):
"""Performs audio-level analysis and returns segments
        threshold -- Maximum root-mean-square (RMS) threshold level of noise
"""
volumes = {} # for the purpose of averaging
print(f'analyzing audio of {self.input_file} ...')
probe_process = self._probe_rms(
self.input_file,
stdout=sub.PIPE,
stderr=sub.PIPE)
for timestamp, volume, *rest in get_levels_in_time(probe_process):
average = volumes.setdefault(
round(round(timestamp * 10) / 10, 1),
[0, 0])
average[0] += volume # sum
average[1] += 1 # count
out, err = probe_process.communicate()
print(f'processing cuts -- threshold: {threshold:.2f}...')
segments = []
time_step = 0.1 # seconds
begin_time, end_time = None, None
loud_count, silent_count = 0, 0
# TODO: hardcoded values
loud_needed, silent_needed = 3, 5 # 3 * 0.1 sec and 5 * 0.1 sec
recording = False
margin = 3
for timestamp, (acc, count) in volumes.items():
current = acc / count
if self.config.trace_rms:
print(f'trace: {timestamp}: {current}')
if abs(current) != math.inf and int(current) > threshold:
loud_count += 1
silent_count = 0
else:
silent_count += 1
loud_count = 0
if not recording and loud_count == loud_needed:
begin_time = timestamp - time_step * (loud_needed + margin)
recording = True
elif recording and silent_count == silent_needed:
end_time = timestamp - time_step * (silent_needed - margin)
recording = False
segments.append((begin_time, end_time))
if segments and segments[0] and segments[0][0] < 0:
# TODO: this is ugly
# If feasible, discard the possibility of a negative boundary
del segments[0]
print(f'found {len(segments)} cuts')
return segments
def scan_noise_level(self, duration=120, start=0):
"""Infers audio level for noise
at a span of the video
duration -- sample duration time in seconds
start -- starting point for the scan (in seconds)
"""
acc, count = 0, 0
max_vol, min_vol = -math.inf, math.inf
probe_process = self._probe_rms(
self.input_file,
stdout=sub.PIPE,
stderr=sub.PIPE)
# calculate the stats
for timestamp, volume, *rest in get_levels_in_time(probe_process):
if int(timestamp) > start + duration:
break
if int(timestamp) < start:
# TODO: O(n) to get to the start
# I think it could be O(1)
continue
max_vol = max(max_vol, volume)
if volume != -math.inf:
min_vol = min(min_vol, volume)
acc += volume
count += 1
if count != 0:
avg_vol = acc / count
else:
avg_vol = math.nan
# find the most stable lower bound
prev_vol, smallest_diff = math.inf, math.inf
stable_volume = math.inf
probe_process = self._probe_rms(
self.input_file,
stdout=sub.PIPE,
stderr=sub.PIPE)
for timestamp, volume, *rest in get_levels_in_time(probe_process):
if int(timestamp) > duration:
break
if volume != -math.inf:
if abs(volume - prev_vol) < smallest_diff and volume < avg_vol:
smallest_diff = abs(volume - prev_vol)
stable_volume = volume
prev_vol = volume
# calculate suggested noise
suggest_noise = stable_volume - (stable_volume - avg_vol) * 0.3
return suggest_noise, max_vol, min_vol, avg_vol
    def check_utilities(self, *utilities):
        """Checks that the required command-line utilities exist
        and exits with an error message if any are missing
*utilities -- string parameters with names of utilities
"""
must_exit = False
for utility in utilities:
try:
sub.check_call(
[utility, '-h'],
stdout=sub.DEVNULL,
stderr=sub.DEVNULL)
except FileNotFoundError:
print(f'error: could not find {utility}', file=stderr)
must_exit = True
if must_exit:
exit(1)
def _probe_rms(self, filename, **kwargs):
args = [
'ffprobe',
'-f',
'lavfi',
'-i',
f'amovie={filename},astats=metadata=1:reset=1',
'-show_entries',
'frame=pkt_pts_time:frame_tags=lavfi.astats.Overall.RMS_level',
'-of',
'csv=p=0'
]
return sub.Popen(args, **kwargs)
def run_autocut():
parser = argparse.ArgumentParser(
prog='autocut',
description='video splitting based on noise threshold')
parser.add_argument(
'input_file',
help='file to process')
parser.add_argument(
'-t', '--threshold',
type=float,
metavar='N',
help='maximum RMS threshold level of noise')
parser.add_argument(
'-s', '--start-scan',
type=float,
default=0,
metavar='<seconds>',
help='time in seconds to start noise scan from (default=0)')
parser.add_argument(
'-d', '--scan-duration',
type=float,
default=120,
metavar='<seconds>',
help='duration time in seconds of the noise scan (default=120)')
parser.add_argument(
'--scan-noise',
action='store_true',
dest='scan_noise_only',
help='scan noise only, skip the rest')
parser.add_argument(
'-n', '--dry-run',
action='store_true',
help='do not execute')
parser.add_argument(
'-v', '--verbose',
action='store_true',
help='provide debug information')
parser.add_argument(
'--trace-rms',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument(
'--version',
action='version',
version=f'%(prog)s v{VERSION}')
args = parser.parse_args()
autocut = AutoCut(args.input_file, config=args)
if not args.threshold:
print('scanning noise level threshold ...')
span = args.start_scan, args.start_scan + args.scan_duration
print(f'sampling time span: {span}')
args.threshold, max_vol, min_vol, avg_vol = autocut.scan_noise_level(
duration=args.scan_duration,
start=args.start_scan)
print(f'''detected audio levels ({args.scan_duration} seconds)
max: {max_vol}
min: {min_vol}
avg: {avg_vol}
---
suggested noise level: {args.threshold}
''')
if args.scan_noise_only:
exit(0)
autocut.run_montage(rms_threshold=args.threshold)
exit(0)
if __name__ == '__main__':
try:
run_autocut()
except KeyboardInterrupt:
pass
except Exception as e:
print('error: caught unexpected exception', file=stderr)
print(e, file=stderr)
raise e
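# Illustrative invocations (the script name is whatever this file is saved as;
# the flags are the ones defined by the parser above):
#   autocut talk.mkv --scan-noise       # only print the suggested noise level
#   autocut talk.mkv -t -30 -n -v       # dry run with an explicit RMS threshold
#   autocut talk.mkv                    # scan noise, then cut into clips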
|
for i in range(1, 3):
    print("The loop ran %d time%s" % (i, (lambda: '', lambda: 's')[i != 1]()))
|
import math
import time
from simple_playgrounds.engine import Engine
from simple_playgrounds.playgrounds.layouts import SingleRoom
from simple_playgrounds.elements.collection.teleport import InvisibleBeam, VisibleBeamHoming, Portal, PortalColor
from simple_playgrounds.common.position_utils import CoordinateSampler
from simple_playgrounds.elements.collection.basic import Physical
def test_beam(base_forward_interactive_agent_external):
playground = SingleRoom(size=(200, 200))
agent = base_forward_interactive_agent_external
beam = InvisibleBeam(destination=((50, 50), 0))
playground.add_agent(agent, ((100, 100), 0))
playground.add_element(beam, ((140, 100), 0))
engine = Engine(playground, time_limit=100)
actions = {agent: {agent.longitudinal_force: 1}}
while engine.game_on:
engine.step(actions)
assert agent.position[1] == 50
engine.terminate()
def test_beam_orientation(base_forward_interactive_agent_external):
playground = SingleRoom(size=(200, 200))
agent = base_forward_interactive_agent_external
beam = InvisibleBeam(destination=((50, 50), math.pi/2))
playground.add_agent(agent, ((100, 100), 0))
playground.add_element(beam, ((140, 100), 0))
engine = Engine(playground, time_limit=100)
actions = {agent: {agent.longitudinal_force: 1}}
while engine.game_on:
engine.step(actions)
assert agent.position[0] == 50
engine.terminate()
def test_beam_area(base_forward_interactive_agent_external):
playground = SingleRoom(size=(200, 200))
agent = base_forward_interactive_agent_external
area = CoordinateSampler(center=(50, 50), area_shape='rectangle', size=(20, 20))
beam = InvisibleBeam(destination=area)
playground.add_agent(agent, ((100, 100), 0))
playground.add_element(beam, ((140, 100), 0))
engine = Engine(playground, time_limit=100)
actions = {agent: {agent.longitudinal_force: 1}}
while not agent.is_teleporting:
engine.step(actions)
assert 30 <= agent.position[0] <= 80
assert 30 <= agent.position[1] <= 80
def test_beam_homing(base_forward_interactive_agent_external):
playground = SingleRoom(size=(200, 200))
agent = base_forward_interactive_agent_external
destination = Physical(config_key='pentagon')
playground.add_element(destination, ((70, 70), 0))
beam = VisibleBeamHoming(destination=destination, invisible_range=4)
playground.add_agent(agent, ((100, 100), 0))
playground.add_element(beam, ((140, 100), 0))
engine = Engine(playground, time_limit=100)
actions = {agent: {agent.longitudinal_force: 1}}
while not agent.is_teleporting:
engine.step(actions)
assert agent.position.get_distance(destination.position) < agent.base_platform.radius + destination.radius + 4 + 3
def test_portal(base_forward_interactive_agent_external):
playground = SingleRoom(size=(200, 200))
agent = base_forward_interactive_agent_external
portal_1 = Portal(color=PortalColor.RED)
portal_2 = Portal(color=PortalColor.BLUE)
portal_3 = Portal(color=PortalColor.GREEN)
portal_4 = Portal(color=(50, 50, 50))
playground.add_agent(agent, ((100, 80), 0))
playground.add_element(portal_1, ((140, 80), math.pi))
playground.add_element(portal_2, ((50, 50), math.pi/2))
playground.add_element(portal_3, ((50, 120), -math.pi/2))
playground.add_element(portal_4, ((150, 160), math.pi))
portal_1.destination = portal_2
portal_3.destination = portal_4
engine = Engine(playground, time_limit=1000)
actions = {agent: {agent.longitudinal_force: 1}}
while engine.game_on:
engine.step(actions)
assert agent.position[1] == 160
assert agent.angle % (2 * math.pi) == math.pi
|
# Class that implements the step counter
class StepCounter:
    # Initialization
    # All instance variables are initialized in the constructor
    # The class can only be instantiated by passing an argument,
    # because this constructor is NOT a default constructor
    def __init__(self, date):
        self.__date = date
        self.__steps = 0
    # Public method to increment the step count by 1
    def incrementSteps(self):
        self.__steps += 1
    # Public method that builds and returns a status message
    def __str__(self):
        return "On " + self.__date + " I walked " + \
            str(self.__steps) + " steps."
# Entry point of the main program
# Here the implemented class is instantiated and used
# for demonstration and testing purposes.
# Create a StepCounter object by calling the constructor
# The date is set to 11.11.2011
sc = StepCounter("11.11.2011")
# Walk 1111 steps
for i in range(0, 1111):
    sc.incrementSteps()
# Print the step count
print(sc)
|
import responses
import re
from ..common import (
HypoApiTest,
TEST_HYPOTHESIS_API_URL,
)
class ProfileTest(HypoApiTest):
@responses.activate
def test_get_users_profile(self):
responses.add(
method=responses.GET,
url=re.compile(f"{TEST_HYPOTHESIS_API_URL}/*"),
)
result = self.hypo.profile()
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.method, responses.GET)
        self.assertEqual(responses.calls[0].request.path_url, "/profile")
class ProfileGroupsTest(HypoApiTest):
@responses.activate
def test_get_users_groups(self):
responses.add(
method=responses.GET,
url=re.compile(f"{TEST_HYPOTHESIS_API_URL}/*"),
)
result = self.hypo.profile.groups()
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.method, responses.GET)
        self.assertEqual(responses.calls[0].request.path_url, "/profile/groups")
|
import os
from functions.lib.manifest import JasmineFile
def test_get_matching_tests(test_resource_dir):
    # get_matching_tests: returns a list of JasmineTest objects for every it('block') in a passed file
    # Test: pass a file with no it blocks to a JasmineFile object. Expect: jasmine_tests is empty
no_it_blocks = JasmineFile('Using file name: default_test_class_name', test_resource_dir + 'no_it_block.ts', [], [])
assert len(no_it_blocks.jasmine_tests) == 0
    # Test: pass a file containing one it block. Expect: jasmine_tests contains one JasmineTest object
one_it_blocks = JasmineFile('default_test_class_name', test_resource_dir + 'one_it_block.ts', [], [])
assert len(one_it_blocks.jasmine_tests) == 1
    # Test: pass a file containing more than one it block. Expect: jasmine_tests contains more than one JasmineTest object
multi_it_blocks = JasmineFile('default_test_class_name', test_resource_dir + 'multi_it_block.ts', [], [])
assert len(multi_it_blocks.jasmine_tests) > 1
def test_get_test_class_name(test_resource_dir):
    # Test: check that the default test name works. Expect: one JasmineTest object carrying the default test class name
no_describe_block = JasmineFile('Test: default_test_name', test_resource_dir + 'no_describe_block.ts', [], [])
assert no_describe_block.jasmine_tests[0].test_class_name == 'Test: default_test_name'
    # Test: check that test_class_name is properly assigned. Expect: the JasmineTest object's test_class_name to match
one_describe_block = JasmineFile('Test: default_test_name', test_resource_dir + 'one_describe_block.ts', [], [])
assert one_describe_block.jasmine_tests[0].test_class_name == 'describeBlock1'
    # Test: check that the name is properly chosen from multiple choices. Expect: test_class_name to match the first describe block
multi_describe_block = JasmineFile('Test: default_test_name', test_resource_dir + 'multi_describe_block.ts', [], [])
assert multi_describe_block.jasmine_tests[0].test_class_name == 'describeBlock1'
def test_get_test_name(test_resource_dir):
    # Test: check that a file without it blocks yields no JasmineTest objects. Expect: jasmine_tests is empty
no_it_block = JasmineFile('default_file_name', test_resource_dir + 'no_it_block.ts', [], [])
assert len(no_it_block.jasmine_tests) == 0
    # Test: check that the proper test name is parsed from the it block. Expect: proper test name assigned
one_it_block = JasmineFile('default_file_name', test_resource_dir + 'one_it_block.ts', [], [])
assert 'it_block_1' in one_it_block.jasmine_tests[0].test_name
    # Test: check that multiple it blocks are turned into JasmineTest objects and properly named
multi_it_block = JasmineFile('default_file_name', test_resource_dir + 'multi_it_block.ts', [], [])
assert 'it_block_1' in multi_it_block.jasmine_tests[0].test_name
assert 'it_block_2' in multi_it_block.jasmine_tests[1].test_name
def test_has_tests(test_resource_dir):
    # Expect True: a file with one or more jasmine test objects
multi_it_blocks = JasmineFile('default_file_name', test_resource_dir + 'multi_it_block.ts', [], [])
assert len(multi_it_blocks.jasmine_tests) > 0
    # Expect False: a file with no jasmine test objects
no_it_block = JasmineFile('default_file_name', test_resource_dir + 'no_it_block.ts', [], [])
assert len(no_it_block.jasmine_tests) == 0
|
import logging
from pathlib import Path
from nsft_cache_utils.dir import PyTestFixtureRequestT
from test_lib.gpg_ctx_fixture_gen import (
generate_gpg_encrypt_decrypt_basic_fixture_cached,
)
def test_generate_gpg_encrypt_decrypt_basic_fixture(
request: PyTestFixtureRequestT,
tmp_root_homes_dir: Path) -> None:
fixture = generate_gpg_encrypt_decrypt_basic_fixture_cached(
tmp_root_homes_dir, request)
    # Each of the 3 encrypter/decrypter contexts knows all of the other
    # contexts' public keys.
for ck, c in fixture.__dict__.items():
logging.info(f"ck: {ck}")
assert 1 == len(c.keys.secret)
assert 2 == len(c.keys.public)
|
import gzip
import itertools
from typing import Set, Optional, Dict
from rdflib.plugins.parsers.ntriples import NTriplesParser
from rdflib.plugins.serializers.nt import NT11Serializer
from rdflib.term import URIRef, Literal
import networkx as nx
from kgx import RdfTransformer
from kgx.config import get_logger
from kgx.utils.kgx_utils import current_time_in_millis, apply_filters, generate_edge_identifiers
log = get_logger()
class NtTransformer(RdfTransformer):
"""
Transformer that parses n-triples (NT) and loads triples, as nodes and edges, into a networkx.MultiDiGraph
.. note::
This is a specialized version of RdfTransformer that doesn't rely on rdflib.Graph when parsing NTs.
Depending on performance, this implementation will be subsumed into RdfTransformer.
"""
def __init__(self, source_graph: nx.MultiDiGraph = None, curie_map: Dict = None):
super().__init__(source_graph, curie_map)
def parse(self, filename: str, input_format: Optional[str] = 'nt', compression: Optional[str] = None, provided_by: Optional[str] = None, node_property_predicates: Optional[Set[str]] = None) -> None:
"""
        Parse an n-triples file into a networkx.MultiDiGraph
The file must be a *.nt formatted file.
Parameters
----------
filename : str
File to read from.
input_format : Optional[str]
The input file format. Must be ``nt``
compression: Optional[str]
The compression type. For example, ``gz``
provided_by : Optional[str]
Define the source providing the input file.
node_property_predicates: Optional[Set[str]]
A set of rdflib.URIRef representing predicates that are to be treated as node properties
"""
p = NTriplesParser(self)
if node_property_predicates:
self.node_properties.update([URIRef(self.prefix_manager.expand(x)) for x in node_property_predicates])
if provided_by:
self.graph_metadata['provided_by'] = [provided_by]
self.start = current_time_in_millis()
if compression == 'gz':
p.parse(gzip.open(filename, 'rb'))
else:
p.parse(open(filename, 'rb'))
self.dereify(self.reified_nodes)
log.info(f"Done parsing {filename}")
apply_filters(self.graph, self.node_filters, self.edge_filters)
generate_edge_identifiers(self.graph)
def save(self, filename: str, output_format: str = 'nt', compression: str = None, reify_all_edges = False, **kwargs) -> None:
"""
Export networkx.MultiDiGraph into n-triple format.
Uses rdflib.plugins.serializers.nt.NT11Serializer.
Parameters
----------
filename: str
Filename to write to
output_format: str
The output format. Must be ``nt``
compression: str
The compression type. For example, ``gz``
reify_all_edges: bool
Whether to reify all edges in the graph
kwargs: dict
Any additional arguments
"""
nodes_generator = self.export_nodes()
edges_generator = self.export_edges(reify_all_edges)
generator = itertools.chain(nodes_generator, edges_generator)
serializer = NT11Serializer(generator)
if compression == 'gz':
f = gzip.open(filename, 'wb')
else:
f = open(filename, 'wb')
serializer.serialize(f)
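# Minimal usage sketch: a parse/save round trip. The file names and the
# predicate CURIE below are illustrative only; any predicate to be treated as
# a node property can be passed via node_property_predicates.
def example_nt_round_trip():
    t = NtTransformer()
    t.parse('graph.nt.gz', compression='gz',
            node_property_predicates={'biolink:description'})
    t.save('graph_out.nt')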
|
# -*- coding: utf-8 -*-
""" VoicePlay database entities container """
from datetime import datetime
from pony.orm import Database, PrimaryKey, Required
from pony.orm.ormtypes import buffer
# pylint:disable=invalid-name
db = Database()
class Artist(db.Entity):
"""
Artist database entity container
"""
name = PrimaryKey(str)
created_at = Required(datetime)
updated_at = Required(datetime)
image = Required(buffer)
class PlayedTracks(db.Entity):
"""
Save played tracks for history and stats
"""
track = PrimaryKey(str)
created_at = Required(datetime)
updated_at = Required(datetime)
playcount = Required(int)
# loved/banned/neutral
status = Required(str)
class LastFmCache(db.Entity):
"""
Cache last.fm results
"""
method_args = PrimaryKey(str)
created_at = Required(datetime)
updated_at = Required(datetime)
content = Required(str)
class ServiceCache(db.Entity):
"""
Cache service results
"""
service_name = PrimaryKey(str)
created_at = Required(datetime)
updated_at = Required(datetime)
content = Required(str)
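# Minimal wiring sketch: how these entities are typically bound with Pony ORM.
# The application configures the database elsewhere; the SQLite file name and
# helper name here are illustrative only.
def example_bind(filename='voiceplay.sqlite'):
    db.bind(provider='sqlite', filename=filename, create_db=True)
    db.generate_mapping(create_tables=True)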
|
import torch
import numpy as np
class NP(torch.nn.Module):
def __init__(self, x_dim, y_dim, r_dim, z_dim, encoder_specs, decoder_specs, init_func):
super().__init__()
self.x_dim = x_dim
self.y_dim = y_dim
self.r_dim = r_dim
self.z_dim = z_dim
self.encoder_specs = encoder_specs
self.decoder_specs = decoder_specs
self.init_func = init_func
self.softplus_act = torch.nn.Softplus()
self.r_to_z_mean = torch.nn.Linear(self.r_dim, self.z_dim)
self.r_to_z_logvar = torch.nn.Linear(self.r_dim, self.z_dim)
# Create the encoder
for i in range(len(self.encoder_specs)):
if i == 0:
encoder_input_dim = self.x_dim + self.y_dim
self.add_module('h_layer' + str(i), \
torch.nn.Linear(encoder_input_dim, self.encoder_specs[i][0]))
if self.encoder_specs[i][1]:
self.add_module('h_layer' + str(i) + '_act', self.encoder_specs[i][1])
else:
self.add_module('h_layer' + str(i), \
torch.nn.Linear(self.encoder_specs[i-1][0], self.encoder_specs[i][0]))
if self.encoder_specs[i][1]:
self.add_module('h_layer' + str(i) + '_act', self.encoder_specs[i][1])
# Create the decoder
for i in range(len(self.decoder_specs)):
if i == 0:
decoder_input_dim = self.x_dim + self.z_dim
self.add_module('g_layer' + str(i), \
torch.nn.Linear(decoder_input_dim, self.decoder_specs[i][0]))
if self.decoder_specs[i][1]:
self.add_module('g_layer' + str(i) + '_act', self.decoder_specs[i][1])
else:
self.add_module('g_layer' + str(i), \
torch.nn.Linear(self.decoder_specs[i-1][0], self.decoder_specs[i][0]))
if self.decoder_specs[i][1]:
self.add_module('g_layer' + str(i) + '_act', self.decoder_specs[i][1])
if init_func:
for layer_name,_ in self._modules.items():
if layer_name.endswith('act') == False:
init_func(getattr(getattr(self, layer_name), 'weight'))
def h(self, x, y):
x_y = torch.cat([x, y], dim=1)
for layer_name, layer_func in self._modules.items():
if layer_name.startswith('h'):
x_y = layer_func(x_y)
return x_y
def aggregate(self, r):
return torch.mean(r, dim=0)
def xy_to_z_params(self, x, y):
r = self.h(x, y)
r = self.aggregate(r)
mean = self.r_to_z_mean(r)
logvar = self.r_to_z_logvar(r)
return mean.unsqueeze(-1), logvar.unsqueeze(-1)
def sample_z(self, z, how_many):
"""
Returns a sample from z of size (z_dim, how_many)
"""
mean, logvar = z
std = torch.exp(0.5 * logvar)
eps = torch.randn([self.z_dim, how_many])
z_samples = mean + std * eps
return z_samples
def g(self, x, z):
z_reshape = z.t().unsqueeze(1).expand(-1, x.shape[0], -1)
x_reshape = x.unsqueeze(0).expand(z_reshape.shape[0], x.shape[0], x.shape[1])
x_z = torch.cat([x_reshape, z_reshape], dim=2)
y_mean = x_z
for layer_name, layer_func in self._modules.items():
if layer_name.startswith('g'):
y_mean = layer_func(y_mean)
return y_mean
def forward(self, x_context, y_context, x_target, y_target):
z_context = self.xy_to_z_params(x_context, y_context)
print(self.training)
if self.training:
z_target = self.xy_to_z_params(x_target, y_target)
else:
z_target = z_context
z_sample = self.sample_z(z_target, how_many=1)
y_hat = self.g(x_target, z_sample)
return y_hat, z_target, z_context
def KL_div(mu_q, logvar_q, mu_p, logvar_p):
KL = (torch.exp(logvar_q) + (mu_q - mu_p) ** 2) / torch.exp(logvar_p) \
- 1.0 \
+ logvar_p - logvar_q
KL = 0.5 * KL.sum()
return KL
def ELBO(y_hat, y, z_target, z_context):
log_lik = torch.nn.functional.mse_loss(y_hat, y)
KL = KL_div(z_target[0], z_target[1], z_context[0], z_context[1])
return - log_lik + KL
# Log-likelihood
def MC_loglikelihood(inputs, outputs, decoder, z_mean, z_std, how_many, device=None):
"""
Returns a Monte Carlo estimate of the log-likelihood
z_mean: mean of the distribution from which to sample z
z_std: std of the distribution from which to sample z
how_many: number of monte carlo samples
decoder: the decoder to be used to produce estimates of mean
"""
# sample z
if device:
z_samples = sample_z(z_mean, z_std, how_many, device=device)
else:
z_samples = sample_z(z_mean, z_std, how_many)
    # produce the estimates of the mean and std (one per sampled z)
y_mean, y_std = decoder(inputs, z_samples)
# define likelihood for each value of z
likelihood = torch.distributions.Normal(y_mean, y_std)
# for each value of z:
# evaluate log-likelihood for each data point
# sum these per-data point log-likelihoods
# compute the mean
log_likelihood = likelihood.log_prob(outputs).sum(dim=1).mean()
return log_likelihood
# KL divergence
def KL_div(mean_1, std_1, mean_2, std_2):
"""Analytical KLD between 2 Gaussians."""
KL = (torch.log(std_2) - torch.log(std_1) + \
(std_1**2/ (2*std_2**2)) + \
((mean_1 - mean_2)**2 / (2*std_2**2)) - 1).sum()*0.5
return KL
def predict(inputs, decoder, z_mean, z_std, how_many, numpy=True, device=None):
"""
Generates prediction from the NP
inputs: inputs to the NP
decoder: the specific decoder employed
z_mean: the mean of the latent variable distribution
z_std: the mean of the latent variable distribution
how_many: the number of functions to predict
numpy: convert torch tensor to numpy array
"""
if device:
z = sample_z(z_mean, z_std, how_many, device=device)
y_pred, _ = decoder(inputs, z)
if numpy:
return y_pred.cpu().detach().numpy()
else:
return y_pred
else:
z = sample_z(z_mean, z_std, how_many)
y_pred, _ = decoder(inputs, z)
if numpy:
return y_pred.detach().numpy()
else:
return y_pred
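# Minimal construction sketch: encoder/decoder specs are (width, activation)
# pairs, matching how __init__ iterates over them; the dimensions below are
# illustrative (the last encoder width must equal r_dim, the last decoder
# width must equal y_dim).
def example_build_np():
    encoder_specs = [(32, torch.nn.ReLU()), (16, None)]
    decoder_specs = [(32, torch.nn.ReLU()), (1, None)]
    return NP(x_dim=1, y_dim=1, r_dim=16, z_dim=4,
              encoder_specs=encoder_specs, decoder_specs=decoder_specs,
              init_func=torch.nn.init.xavier_uniform_)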
|
#!/usr/bin/env python
"""Find the last created file in a given directory."""
import os
import sys
import stat
folder = sys.argv[1] if len(sys.argv) > 1 else os.curdir
files = (os.path.join(folder, name) for name in os.listdir(folder))
entries = ((path, os.lstat(path)) for path in files) # don't follow symlinks
path, _ = max((e for e in entries if stat.S_ISREG(e[1].st_mode)), # find regular files
key=lambda e: getattr(e[1], 'st_birthtime', None) or e[1].st_ctime)
print(path)
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
import math

class Solution:
def maximumAverageSubtree(self, root: TreeNode) -> float:
self.max_avg = -math.inf
def postorder(node):
if node is None:
return 0.0, 0
left_sum, left_cnt = postorder(node.left)
right_sum, right_cnt = postorder(node.right)
s = left_sum + right_sum + node.val
cnt = left_cnt + right_cnt + 1
self.max_avg = max(self.max_avg, s / cnt)
return s, cnt
postorder(root)
return self.max_avg
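# Tiny sanity check with a local stand-in node (the real TreeNode is the
# commented definition above): for the tree 5 -> (6, 1) the best subtree
# average is the leaf 6, i.e. 6.0.
def _example_check():
    class _Node:
        def __init__(self, val, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    root = _Node(5, _Node(6), _Node(1))
    assert Solution().maximumAverageSubtree(root) == 6.0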
|
from io import StringIO
import inspect
from textwrap import wrap
import pygments
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.lexers.python import PythonLexer
from .filters import CallHighlightFilter, DecoratorOperatorFilter, TypeHighlightFilter
from .styles import Jellybeans
class Formatter:
"""
Implements a formatter to prettify arguments received by `flashback.debugging.xp` and parsed
by `flashback.debugging.parser`.
Currently has special formatting for the following types:
- str / bytes
- list / tuple / set / frozenset / deque
- dict / OrderedDict / defaultdict / Counter
- module
- type / ABCMeta
- function / method
- Generator
Formats all other types via their __repr__ method.
"""
TYPE_TO_SYMBOLS = {
"deque": ("deque([\n", "])"),
"frozenset": ("frozenset({\n", "})"),
"list": ("[\n", "]"),
"set": ("{\n", "}"),
"tuple": ("(\n", ")"),
"Counter": ("Counter({\n", "})"),
"defaultdict": ("defaultdict(_TYPE_, {\n", "})"),
"dict": ("{\n", "}"),
"OrderedDict": ("OrderedDict({\n", "})"),
}
DIM_START = "\033[2m"
DIM_END = "\033[0m"
def __init__(self, indent_str=" "):
"""
Params:
indent_str (str): the indentation string to use
"""
self._indent_str = indent_str
self._indent_str_len = len(indent_str)
self._width = None
self._buffer = None
self._code_lexer = PythonLexer(
ensurenl=False,
filters=[
DecoratorOperatorFilter(),
CallHighlightFilter(),
TypeHighlightFilter(
names=[
"bool",
"bytearray",
"bytes",
"dict",
"float",
"frozenset",
"int",
"list",
"object",
"set",
"str",
"tuple",
],
),
]
)
self._code_formatter = Terminal256Formatter(style=Jellybeans)
def format(self, filename, lineno, arguments, warning, width=120):
"""
Formats the output of `Parser.parse` following the given style and width.
Params:
filename (str): the filename from where `flashback.debugging.xp` has been called
lineno (int): the line number from where `flashback.debugging.xp` has been called
arguments (list<tuple>): the arguments to format, as name-value couples
warning (str): the error encountered when parsing the code or None
width (int): the maximum width before wrapping the output
Returns:
str: the formatted arguments, and location of the call to `flashback.debugging.xp`
"""
self._width = width
# We need to use ANSI color coding because pygments can only highlight code
content = f"\033[2m{filename}:{lineno}"
if warning:
content += f" ({warning})"
content += "\033[0m\n"
if len(arguments) == 0:
return content[:-1] # Remove the last newline
arguments_content = []
for (name, value) in arguments:
argument_content = f" {name}:\n" if name is not None else ""
# self._format is called recursively, so we use a stream
# to progressively write the formatting without passing it around
self._buffer = StringIO()
self._format(value)
buf = self._buffer.getvalue()
argument_content += self._highlight(buf)
argument_content += f" \033[2m({value.__class__.__name__})\033[0m"
arguments_content.append(argument_content)
content += "\n".join(arguments_content)
return content
def format_code(self, lines, start_lineno=1, highlight=None):
"""
Formats code with syntax highlighting and line numbers, with optional highlighting of
specific range of lines.
Params:
lines (Iterable<str>): the lines of code to render
start_lineno (int): the line number of the code's first line
highlight (tuple<int>): the start and end indices of the code to highlight
Returns:
str: the formatted and highlighted code
"""
linenos = list(range(start_lineno, start_lineno + len(lines) + 2))
pad_len = len(str(max(linenos)))
lines_with_linenos = []
for lineno, line in zip(linenos, lines):
lines_with_linenos.append(f"{lineno:{pad_len}} {line}")
if highlight is not None:
start = highlight[0]
end = highlight[1]
# Dim the context instead of highlighting the focus
highlighted_lines = []
highlighted_lines.append(self.DIM_START)
highlighted_lines.append(self._highlight("".join(lines_with_linenos[:start])))
highlighted_lines.append(f"{self.DIM_END}\n")
highlighted_lines.append(self._highlight("".join(lines_with_linenos[start:end])))
highlighted_lines.append(f"{self.DIM_START}\n")
highlighted_lines.append(self._highlight("".join(lines_with_linenos[end:])))
highlighted_lines.append(self.DIM_END)
return "".join(highlighted_lines)
return self._highlight("".join(lines_with_linenos))
def _format(self, value, current_indent=1, force_indent=True):
if force_indent:
self._buffer.write(current_indent * self._indent_str)
next_indent = current_indent + 1
try:
# Converts classes such as OrderedDict, Counter, etc.
class_name = value.__class__.__name__
method = getattr(self, f"_format_{class_name}")
method(value, current_indent, next_indent)
except AttributeError:
self._format_raw(value, current_indent, next_indent)
def _format_ABCMeta(self, meta, _current_indent, _next_indent): # pylint: disable=invalid-name
self._format_type(meta, _current_indent, _next_indent)
def _format_type(self, cls, _current_indent, _next_indent):
self._buffer.write(" < ".join([x.__qualname__ for x in cls.__mro__]))
def _format_module(self, module, current_indent, next_indent):
prefix = current_indent * self._indent_str
nested_prefix = next_indent * self._indent_str
suffix = "\n"
self._buffer.write("Name:\n")
self._buffer.write(nested_prefix + module.__name__ + suffix)
self._buffer.write(prefix + "Location:\n")
self._buffer.write(nested_prefix + module.__path__[0] + suffix)
self._buffer.write(prefix + "Contents:\n")
nested_prefix += "- "
for key, value in module.__dict__.items():
if not key.startswith("_"):
content = f"{key} ({value.__class__.__name__})"
self._buffer.write(nested_prefix + content + suffix)
def _format_method(self, method, _current_indent, _next_indent):
self._format_function(method, _current_indent, _next_indent)
def _format_function(self, function, _current_indent, _next_indent):
self._buffer.write(function.__qualname__)
self._buffer.write(str(inspect.signature(function)))
def _format_Counter(self, counter, current_indent, next_indent): # pylint: disable=invalid-name
self._format_mapping(counter, current_indent, next_indent)
def _format_defaultdict(self, default_dict, current_indent, next_indent):
self._format_mapping(default_dict, current_indent, next_indent)
def _format_OrderedDict(self, ordered_dict, current_indent, next_indent): # pylint: disable=invalid-name
self._format_mapping(ordered_dict, current_indent, next_indent)
def _format_dict(self, dictionary, current_indent, next_indent):
self._format_mapping(dictionary, current_indent, next_indent)
def _format_mapping(self, mapping, current_indent, next_indent):
prefix = next_indent * self._indent_str
separator = ": "
suffix = ",\n"
start, end = self.TYPE_TO_SYMBOLS[mapping.__class__.__name__]
        # We're processing a defaultdict here
if "_TYPE_" in start:
start = start.replace("_TYPE_", repr(mapping.default_factory))
self._buffer.write(start)
for key, value in mapping.items():
self._buffer.write(prefix)
self._format(key, next_indent, False)
self._buffer.write(separator)
self._format(value, next_indent, False)
self._buffer.write(suffix)
self._buffer.write(current_indent * self._indent_str + end)
def _format_list(self, iterable, current_indent, next_indent):
self._format_iterables(iterable, current_indent, next_indent)
def _format_set(self, iterable, current_indent, next_indent):
self._format_iterables(iterable, current_indent, next_indent)
def _format_frozenset(self, iterable, current_indent, next_indent):
self._format_iterables(iterable, current_indent, next_indent)
def _format_tuple(self, iterable, current_indent, next_indent):
self._format_iterables(iterable, current_indent, next_indent)
def _format_deque(self, iterable, current_indent, next_indent):
self._format_iterables(iterable, current_indent, next_indent)
def _format_iterables(self, iterable, current_indent, next_indent):
suffix = ",\n"
start, end = self.TYPE_TO_SYMBOLS[iterable.__class__.__name__]
self._buffer.write(start)
for value in iterable:
self._format(value, next_indent, True)
self._buffer.write(suffix)
self._buffer.write(current_indent * self._indent_str + end)
def _format_bytes(self, string, current_indent, next_indent):
self._format_str(string, current_indent, next_indent)
def _format_str(self, string, current_indent, next_indent):
        # We subtract 3 to take into account the quotes and the newline
width = self._width - (next_indent * self._indent_str_len) - 3
if len(string) <= width:
self._buffer.write(repr(string))
else:
start = "(\n"
prefix = next_indent * self._indent_str
suffix = "\n"
end = ")"
# Wrap the lines to be shorter than width, keeping the newlines
lines = []
for line in string.splitlines(True):
begin = 0
for pos in range(width, len(line), width):
lines.append(line[begin:pos])
begin = pos
lines.append(line[begin:])
self._buffer.write(start)
for line in lines:
self._buffer.write(prefix + repr(line) + suffix)
self._buffer.write(current_indent * self._indent_str + end)
def _format_generator(self, generator, current_indent, next_indent):
start = "(\n"
suffix = ",\n"
end = ")"
self._buffer.write(start)
for item in generator:
self._format(item, next_indent, True)
self._buffer.write(suffix)
self._buffer.write(current_indent * self._indent_str + end)
def _format_raw(self, value, current_indent, next_indent):
representation = repr(value)
lines = representation.splitlines(True)
if len(lines) > 1 or (len(representation) + (current_indent * self._indent_str_len)) >= self._width:
start = "(\n"
prefix = next_indent * self._indent_str
suffix = "\n"
end = ")"
self._buffer.write(start)
wrap_at = self._width - (next_indent * self._indent_str_len)
for line in lines:
sub_lines = wrap(line, wrap_at)
for sub_line in sub_lines:
self._buffer.write(prefix + sub_line + suffix)
self._buffer.write(current_indent * self._indent_str + end)
else:
self._buffer.write(representation)
def _highlight(self, value):
return pygments.highlight(value, lexer=self._code_lexer, formatter=self._code_formatter)
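# Minimal usage sketch: the (name, value) couples normally come from
# flashback.debugging's parser; the file name, line number and payload here
# are illustrative only.
def example_format():
    formatter = Formatter()
    return formatter.format("example.py", 42,
                            [("payload", {"numbers": [1, 2, 3]})],
                            warning=None)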
|
"""Unit test for Key class."""
import unittest
from shufflealgos.image.key import Key
class TestKey(unittest.TestCase):
"""TestCase class that will test Key class methods."""
def test_shift_to_range(self):
"""Test shift_to_range method of a Key object."""
absolute_minimum: int = 11
absolute_maximum: int = 14
curr_key: Key = Key(values=[4, 5, 6])
shifted_key: Key = curr_key.shift_to_range(absolute_minimum,
absolute_maximum)
self.assertEqual(shifted_key.values, [11, 12, 13])
self.assertTrue(min(shifted_key.values) >= absolute_minimum)
self.assertTrue(max(shifted_key.values) <= absolute_maximum)
self.assertTrue(curr_key.length == shifted_key.length)
absolute_minimum = 1
absolute_maximum = 3
curr_key = Key(values=[93, 68, 76, 88, 96, 93, 80])
shifted_key = curr_key.shift_to_range(absolute_minimum,
absolute_maximum)
self.assertEqual(shifted_key.values, [3, 2, 1, 1, 3, 3, 2])
self.assertTrue(min(shifted_key.values) >= absolute_minimum)
self.assertTrue(max(shifted_key.values) <= absolute_maximum)
self.assertTrue(curr_key.length == shifted_key.length)
absolute_minimum = 12
absolute_maximum = 19
curr_key = Key(values=[20, 16, 13, 20, 16, 18, 20, 21, 17, 22])
shifted_key = curr_key.shift_to_range(absolute_minimum,
absolute_maximum)
self.assertEqual(shifted_key.values,
[12, 16, 13, 12, 16, 18, 12, 13, 17, 14])
self.assertTrue(min(shifted_key.values) >= absolute_minimum)
self.assertTrue(max(shifted_key.values) <= absolute_maximum)
def test_get_extended_key(self):
"""Test get_extended_key method of a Key object."""
curr_key: Key = Key(values=[1, 2, 3])
extended_key: Key = curr_key.get_extended_key(5)
self.assertEqual(extended_key.values, [1, 2, 3, 2, 3])
curr_key = Key(values=[19, 15, 12, 19, 15, 17, 19])
extended_key = curr_key.get_extended_key(17)
self.assertEqual(extended_key.values,
curr_key.values
+ [12, 16, 13, 12, 16, 18, 12, 13, 17, 14])
curr_key = Key(values=[3, 3, 2, 1])
extended_key = curr_key.get_extended_key(10)
self.assertEqual(extended_key.values,
curr_key.values + [1, 1, 3, 2, 2, 2])
if __name__ == "__main__":
unittest.main()
|
class SakuraIOBaseException(ValueError):
pass
class CommandError(SakuraIOBaseException):
def __init__(self, status):
self.status = status
def __str__(self):
return "Invalid response status '0x{0:02x}'".format(self.status)
class ParityError(SakuraIOBaseException):
def __init__(self):
pass
def __str__(self):
return "Invalid parity"
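# Illustrative raise site: a driver would typically map a non-OK response
# status to CommandError (the 0x01 "OK" code below is an assumption).
def _example_check_status(status):
    if status != 0x01:
        raise CommandError(status)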
|
a1, b1, c1 = int(input()), int(input()), int(input())
a2, b2, c2 = int(input()), int(input()), int(input())
if b1 >= a1 >= c1:
(a1, b1, c1) = (b1, a1, c1)
elif c1 >= b1 >= a1:
(a1, b1, c1) = (c1, b1, a1)
elif b1 >= c1 >= a1:
(a1, b1, c1,) = (b1, c1, a1)
elif c1 >= a1 >= b1:
(a1, b1, c1) = (c1, a1, b1)
elif a1 >= c1 >= b1:
(a1, b1, c1) = (a1, c1, b1)
if b2 >= a2 >= c2:
(a2, b2, c2) = (b2, a2, c2)
elif c2 >= b2 >= a2:
(a2, b2, c2) = (c2, b2, a2)
elif b2 >= c2 >= a2:
(a2, b2, c2,) = (b2, c2, a2)
elif c2 >= a2 >= b2:
(a2, b2, c2) = (c2, a2, b2)
elif a2 >= c2 >= b2:
(a2, b2, c2) = (a2, c2, b2)
if a1 == a2 and b1 == b2 and c1 == c2:
print("Boxes are equal")
elif a1 <= a2 and b1 <= b2 and c1 <= c2:
print("The first box is smaller than the second one")
elif a1 >= a2 and b1 >= b2 and c1 >= c2:
print("The first box is larger than the second one")
else:
print("Boxes are incomparable")
|
from django.contrib import admin
from django.shortcuts import render_to_response
from rangefilter.filter import DateRangeFilter, DateTimeRangeFilter
from resid import views
# Register your models here.
from resid.models import Resid
class ResidAdmin(admin.ModelAdmin):
list_display = (
'id',
# 'taghaza',
'shomare',
'tarikh',
)
list_filter = (
'id',
'shomare',
# 'taghaza',
# 'tarikh',
('tarikh', DateRangeFilter),
)
search_fields = (
'id',
# 'taghaza',
'tarikh',
)
def get_actions(self, request):
actions = super().get_actions(request)
# if request.user.username[0].upper() != 'J':
# if 'delete_selected' in actions:
del actions['delete_selected']
return actions
actions = ['report']
def report(self, request, queryset):
views.report(request)
return render_to_response('reports/ResidReport.html', {'residha': queryset})
report.short_description = "تهیه گذارش قابل چاپ"
admin.site.register(Resid, ResidAdmin)
|
class Item:
def __init__(self, preset):
self.entity = None
self.name = preset.get('name', 'unnamed item')
self.desc = preset.get('name', '')
self.usable = preset.get('usable', False)
self.on_use = preset.get('on_use', lambda: None)
self.equippable = preset.get('equippable', False)
self.on_equip = preset.get('on_equip', lambda: None)
def set_entity(self, entity):
self.entity = entity
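# Minimal preset sketch: presets are plain dicts whose keys mirror the .get()
# calls above; the helper name and values are illustrative only.
def example_item():
    return Item({
        'name': 'healing potion',
        'desc': 'restores a bit of health',
        'usable': True,
        'on_use': lambda: print('You feel better.'),
    })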
|
import sys
import time
def stuff():
print('Doing stuff ...')
time.sleep(1)
if __name__ == '__main__':
if sys.argv[1] == 'manhole':
from hunter import remote
remote.install()
while True:
stuff()
|
import numpy as np
from .plot import Plot, Dashboard
from .single_outcome import DescriptionPlot
def index_color_outcome(convention, outcomes):
for i, (c, o) in enumerate(zip(convention.yield_colors(len(outcomes)),
outcomes)):
yield i, c, o
class MultiOutputPlot(Plot):
def __call__(self, *outcomes):
for i, c, o in index_color_outcome(self.convention, outcomes):
self.plot_outcome(o, color=c, label="Outcome {:d}".format(i+1))
self.set_dates(o.dates)
self.pack()
return self
def pack(self):
pass
class ReproductionNumberMPlot(MultiOutputPlot):
def plot_outcome(self, outcome, color="k", label=None, **kwargs):
states = outcome.state_history
t = np.arange(len(states))
R = np.array([s.reproduction_number for s in states])
self.axes.plot(t, R, color=color, label=label)
def pack(self):
self.axes.set_title("Reproduction number")
self.axes.axhline(1, 0, 1, linestyle="--", alpha=.5)
self.axes.grid(True)
class InfectedMPlot(MultiOutputPlot):
def plot_outcome(self, outcome, color="k", label=None, **kwargs):
t = np.arange(len(outcome))
# rates
i = np.array([s.infected for s in outcome])
self.axes.plot(t, i, color=color, label=label)
def pack(self):
self.axes.set_title("Number of infections (E+I)")
self.axes.grid(True)
class InfectionNumberMPlot(MultiOutputPlot):
def plot_outcome(self, outcome, color="k", label=None, **kwargs):
states = outcome.state_history
N = outcome.population_size
t = np.arange(len(states))
R = np.array([s.n_infection for s in states]) / N
self.axes.plot(t, R, color=color)
def pack(self):
self.axes.set_title("Percentage of cumulative infection")
self.axes.grid(True)
class ComparatorDashboard(Dashboard):
def __call__(self, *outcomes):
if len(outcomes) > 3:
            raise ValueError("At most 3 outcomes for this dashboard")
all_axes = self.figure.subplots(3, 2, sharex=True)
# First Column
InfectedMPlot(all_axes[0, 0], self.convention)(*outcomes)
InfectionNumberMPlot(all_axes[1, 0], self.convention)(*outcomes)
ReproductionNumberMPlot(all_axes[2, 0], self.convention)(*outcomes)
for i, c, o in index_color_outcome(self.convention, outcomes):
title = o.name if o.name else "Outcome {:d}".format(i+1)
DescriptionPlot(all_axes[i, 1],
self.convention).plot_outcome(o, color=c, title=title)
        # hide the description axes that are not used by an outcome
        for i in range(len(outcomes), 3):
all_axes[i, 1].axis("off")
return self
|
#!/usr/bin/env python
#
# Copyright 2008-2012 NVIDIA Corporation
# Copyright 2009-2010 University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup
from distutils.errors import CompileError
from distutils.command.build_ext import build_ext as BuildExtCommand
from distutils.command.clean import clean as CleanCommand
from distutils.cmd import Command
import subprocess
# Call custom build routines to create Python extensions
class CopperheadBuildExt(Command):
user_options=[]
description = BuildExtCommand.description
def initialize_options(self):pass
def finalize_options(self):pass
def get_source_files(self): return []
def run(self):
try:
subprocess.check_call(['scons'], shell=True)
except subprocess.CalledProcessError:
raise CompileError("Error while building Python Extensions")
self.extensions=[]
# Call custom clean command to forward call to SCons
class CopperheadClean(CleanCommand):
def run(self):
CleanCommand.run(self)
try:
subprocess.check_call(['scons', '--remove'])
except subprocess.CalledProcessError:
raise CompileError("Error while cleaning Python Extensions")
##
# We need to create these directories now, otherwise the first packaging run fails. Not very clean...
import os, os.path
for d in [ 'runtime', 'compiler' ]:
try: os.makedirs(os.path.join('stage','copperhead',d))
except:pass
##
setup(name="copperhead",
version="0.2a1",
description="Data Parallel Python",
long_description="Copperhead is a Data Parallel Python dialect, with runtime code generation and execution for CUDA Graphics Processors.",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Software Development :: Compilers',
'Topic :: Software Development :: Code Generators',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X'],
zip_safe=False,
author="Bryan Catanzaro, Michael Garland",
author_email="bcatanzaro@nvidia.com, mgarland@nvidia.com",
license = "Apache 2.0",
package_dir = {'':'stage'}, # packages are under stage
packages=['copperhead', 'copperhead.runtime', 'copperhead.compiler'],
      ext_modules=[('copperhead','')], # the name has no meaning since we override the build_ext command to call SCons
install_requires=["codepy>=2012.1.1"],
package_data={
'copperhead': ['prelude/*.hpp', 'prelude/cuda/*.h', 'prelude/cuda/thrust_wrappers/*.h'],
'copperhead.compiler' : ['backendcompiler.so',
'backendsyntax.so',
'backendtypes.so'],
'copperhead.runtime' : ['cudata.so', 'load.so', 'cuda_info.so',
'libcunp.*','libcopperhead.*']
},
url="http://code.google.com/p/copperhead",
cmdclass = { 'build_ext' : CopperheadBuildExt, 'clean': CopperheadClean },
test_suite = 'copperhead.tests.test_all',
)
|
from tests.func.testfunction import test_function
@test_function
def package_a_func1():
pass
|
import threading
import time
from src.optoforce.optoforce import *
class OptoforceThread(threading.Thread):
def __init__(self, optoforce, thread_rate=5000):
super(OptoforceThread, self).__init__()
assert isinstance(optoforce, OptoforceDriver), 'Must be using optoforcedriver'
self.opto = optoforce
self.thread_rate = thread_rate # (polling rate in Hz)
self.current_reading = None
self.running_lock = threading.Lock()
self.running = None
def run(self):
with self.running_lock:
self.running = True
while True:
with self.running_lock:
if not self.running:
break
start = time.time()
self.current_reading = self.opto.read().force[0]
end = time.time()
time_per_cycle = 1.0 / self.thread_rate # Number of seconds per cycle
remaining = start - end + time_per_cycle
if remaining > 0:
time.sleep(remaining)
def get_force(self):
return self.current_reading
def stop(self):
with self.running_lock:
self.running = False
if __name__ == "__main__":
test_opto = OptoforceDriver("/dev/ttyACM1", 's-ch/3-axis', [[1] * 3])
opto_thread = OptoforceThread(test_opto, 5000)
opto_thread.start()
while True:
print(opto_thread.get_force())
|
import checklist
import spacy
import itertools
import checklist.editor
#import checklist.text_generation
from checklist.test_types import MFT, INV, DIR
from checklist.expect import Expect
from checklist.test_suite import TestSuite
import numpy as np
import spacy
from checklist.perturb import Perturb
import collections
from collections import defaultdict, OrderedDict
import dataclasses
import logging
import os, math, re
import sys, copy, random
from dataclasses import dataclass, field
from typing import Callable, Dict, Optional
import numpy as np
import torch
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction, GlueDataset
from transformers import BertModel, BertConfig
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import (
HfArgumentParser,
TrainingArguments,
glue_compute_metrics,
glue_output_modes,
glue_tasks_num_labels,
set_seed,
)
from my_robustness import MyRandomTokenNoise
#from my_trainer import MyTrainer
from my_glue_dataset import MyGlueDataset
from my_modeling_roberta import MyRobertaForSequenceClassification, MyRobertaForNCESequenceClassification
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from my_utils import setLogger
logger = logging.getLogger()
editor = checklist.editor.Editor()
nlp = spacy.load('en_core_web_sm')
def wrap_apply_to_each(fn, both=False, *args, **kwargs):
def new_fn(qs, *args, **kwargs):
q1, q2 = qs
ret = []
fnq1 = fn(q1, *args, **kwargs)
fnq2 = fn(q2, *args, **kwargs)
if type(fnq1) != list:
fnq1 = [fnq1]
if type(fnq2) != list:
fnq2 = [fnq2]
ret.extend([(x, str(q2)) for x in fnq1])
ret.extend([(str(q1), x) for x in fnq2])
if both:
ret.extend([(x, x2) for x, x2 in itertools.product(fnq1, fnq2)])
return [x for x in ret if x[0] and x[1]]
return new_fn
def wrap_apply_to_both(fn, *args, **kwargs):
def new_fn(qs, *args, **kwargs):
q1, q2 = qs
ret = []
fnq1 = fn(q1, *args, **kwargs)
fnq2 = fn(q2, *args, **kwargs)
if type(fnq1) != list:
fnq1 = [fnq1]
if type(fnq2) != list:
fnq2 = [fnq2]
ret.extend([(x, x2) for x, x2 in itertools.product(fnq1, fnq2)])
return [x for x in ret if x[0] and x[1]]
return new_fn
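# Illustration of the two wrappers above: given a question pair (q1, q2),
# wrap_apply_to_each(fn) perturbs each side separately and yields pairs like
# (fn(q1), q2) and (q1, fn(q2)), while wrap_apply_to_both(fn) yields the cross
# product (fn(q1), fn(q2)); empty perturbations are filtered out in both cases.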
def my_summary(self, types=None, capabilities=None, verbose = True, **kwargs):
"""Print stats and example failures for each test.
See summary in abstract_test.py
Parameters
----------
types : list(string)
If not None, will only show tests of these test types.
Options are MFT, INV, and DIR
capabilities : list(string)
If not None, will only show tests with these capabilities.
**kwargs : type
Will be passed as arguments to each test.summary()
"""
vals = collections.defaultdict(lambda: 100, {'MFT': 0, 'INV': 1, 'DIR': 2})
tests = self.tests.keys()
capability_order = ['Vocabulary', 'Taxonomy', 'Robustness', 'NER', 'Fairness', 'Temporal', 'Negation', 'Coref', 'SRL', 'Logic']
cap_order = lambda x:capability_order.index(x) if x in capability_order else 100
caps = sorted(set([x['capability'] for x in self.info.values()]), key=cap_order)
res_failrate = {}
for capability in caps:
if capabilities is not None and capability not in capabilities:
continue
if verbose:
print(capability)
print()
tests = [x for x in self.tests if self.info[x]['capability'] == capability]
for n in tests:
if types is not None and self.info[n]['type'] not in types:
continue
if verbose:
print(n)
if 'format_example_fn' not in kwargs:
kwargs['format_example_fn'] = self.info[n].get('format_example_fn', self.format_example_fn)
if 'print_fn' not in kwargs:
kwargs['print_fn'] = self.info[n].get('print_fn', self.print_fn)
if verbose:
self.tests[n].summary(**kwargs)
ss = self.tests[n].get_stats()
res_failrate[capability.upper()[:3] + '_' + n.replace(' ', '_')] = ss.fail_rate / 100.0
if verbose:
print()
print()
if verbose:
print()
print()
ll = []
for idx in res_failrate:
ll.append(res_failrate[idx])
res_failrate['AVG'] = np.mean(ll)
return res_failrate
def do_checklist_QQP(model, tokenizer, glue_dataset, all_args, only_construct_suite = False, given_suite = None, verbose = True):
logger.info('do_checklist_QQP')
model_args, data_args, training_args, my_args = all_args
if only_construct_suite == True or given_suite is None:
qs = []
labels = []
all_questions = set()
for x in open(data_args.data_dir + '/dev.tsv').readlines()[1:]:
try:
q1, q2, label = x.strip().split('\t')[3:]
            except ValueError:
print('warning: discarded', x.strip())
continue
all_questions.add(q1)
all_questions.add(q2)
qs.append((q1, q2))
labels.append(label)
#if len(labels) > 1000:
# logger.info('DEBUG break')
# break
labels = np.array(labels).astype(int)
all_questions = list(all_questions)
parsed_questions = list(nlp.pipe(all_questions))
spacy_map = dict([(x, y) for x, y in zip(all_questions, parsed_questions)])
parsed_qs = [(spacy_map[q[0]], spacy_map[q[1]]) for q in qs]
logger.info('constructing test suite')
suite = TestSuite()
t = Perturb.perturb(qs, wrap_apply_to_each(Perturb.add_typos), nsamples= 2000)
test = INV(t.data, name='add one typo', capability='Robustness', description='')
# test.run(new_pp)
# test.summary(3)
suite.add(test, overwrite=True)
import itertools
def me_to_you(text):
t = re.sub(r'\bI\b', 'you', text)
t = re.sub(r'\bmy\b', 'your', t)
return re.sub(r'\bmine\b', 'yours', t)
def paraphrases(text):
ts = ['How do I ', 'How can I ', 'What is a good way to ', 'How should I ']
templates1 = ['How do I {x}?', 'How can I {x}?', 'What is a good way to {x}?', 'If I want to {x}, what should I do?',
'In order to {x}, what should I do?']
ts2 = ['Can you ', 'Can I ']#, 'Do I']
ts3 = ['Do I ']
templates2 = ['Can you {x}?', 'Can I {x}?', 'Do you think I can {x}?', 'Do you think you can {x}?',]
templates3 = ['Do I {x}?', 'Do you think I {x}?']
ret = []
for i, (tsz, templates) in enumerate(zip([ts, ts2, ts3], [templates1, templates2, templates3])):
for t in tsz:
if text.startswith(t):
x = text[len(t):].strip('?')
ts = editor.template(templates, x=x).data[0]
if i <= 1:
ts = ts + [me_to_you(x) for x in ts]
ret += ts
return ret
def paraphrases_product(text):
pr = paraphrases(text)
return list(itertools.product(pr, pr))
def paraphrase_each(pair):
p1 = paraphrases(pair[0])
p2 = paraphrases(pair[1])
return list(itertools.product(p1, p2))
t = Perturb.perturb(list(all_questions), paraphrases_product, nsamples= 2000, keep_original=False)
name = '(q, paraphrase(q))'
desc = 'For questions that start with "How do I X", "How can I X", etc'
test = DIR(t.data, expect=Expect.eq(1), agg_fn='all', name=name, description=desc, capability='Robustness')
suite.add(test)
t = Perturb.perturb(qs, paraphrase_each, nsamples= 2000, keep_original=True)
name = 'Product of paraphrases(q1) * paraphrases(q2)'
desc = 'For questions that start with "How do I X", "How can I X", etc'
test = INV(t.data, name=name, description=desc, capability='Robustness')
# test.run(new_pp)
# test.summary(n=5)
suite.add(test)
logger.info('constructing test suite complete')
else:
suite = given_suite
if only_construct_suite:
return suite
#from pattern.en import sentiment
def predict_proba(inputs):
features = glue_dataset.convert_checklist_input(inputs)
if verbose:
print('len(inputs)', len(inputs))
print('debug inputs[0] after conversion', tokenizer.decode(features[0].input_ids))
model.eval()
idx_now, bz, probs = 0, 8, []
while idx_now < len(features):
input_ids = torch.LongTensor([f.input_ids for f in features[idx_now:idx_now + bz]]).cuda()
attention_mask = torch.LongTensor([f.attention_mask for f in features[idx_now:idx_now + bz]]).cuda()
idx_now += bz
outputs = model(input_ids = input_ids, attention_mask = attention_mask)
logits = outputs[0]
prob = torch.softmax(logits, dim = -1).cpu().detach()
probs.append(prob)
"""
p1 = np.array([(sentiment(x)[0] + 1)/2. for x in inputs]).reshape(-1, 1)
p1 = np.random.uniform(size=p1.shape)
p0 = 1- p1
pp = np.hstack((p0, p1))
"""
pp = torch.cat(probs, dim = 0).numpy()
return pp
from checklist.pred_wrapper import PredictorWrapper
wrapped_pp = PredictorWrapper.wrap_softmax(predict_proba)
suite.run(wrapped_pp, overwrite=True)
#suite.summary()
res = my_summary(suite, verbose = verbose)
logger.info('do_checklist_QQP complete')
return res
def do_checklist_QNLI(model, tokenizer, glue_dataset, all_args, only_construct_suite = False, given_suite = None, verbose = True):
logger.info('do_checklist_QNLI')
model_args, data_args, training_args, my_args = all_args
if only_construct_suite == True or given_suite is None:
logger.info('label_list: %s', str(glue_dataset.label_list))
qs = []
labels = []
all_qs = set()
random_answers = set()
for x in open(data_args.data_dir + '/dev.tsv').readlines()[1:]:
try:
q1, q2, label = x.strip().split('\t')[1:]
except:
print('warning: discarded', x.strip())
continue
all_qs.add(q1); all_qs.add(q2)
random_answers.add(q2)
qs.append((q1, q2))
assert(label in ['entailment', 'not_entailment'])
labels.append(0 if label == 'entailment' else 1)
#if len(labels) > 1000:
# logger.info('DEBUG break')
# break
labels = np.array(labels).astype(int)
all_qs = list(all_qs)
random_answers = list(random_answers)
parsed_qs = list(nlp.pipe(all_qs))
spacy_map = dict([(x, y) for x, y in zip(all_qs, parsed_qs)])
processed_qs = [(spacy_map[q[0]], spacy_map[q[1]]) for q in qs]
logger.info('constructing test suite')
suite = TestSuite()
def question_typo(x):
return (Perturb.add_typos(x[0]), Perturb.add_typos(x[1]))
t = Perturb.perturb(qs, question_typo, nsamples=500)
test = INV(t.data, name='both typo', capability='Robustness', description='')
# test.run(new_pp)
# test.summary(3)
suite.add(test, overwrite=True)
def add_random_answer(x):
random_s = np.random.choice(random_answers)
while random_s in x[1]:
random_s = np.random.choice(random_answers)
return (x[0], x[1].strip() + ' ' + random_s)
from checklist.expect import Expect
monotonic_decreasing = Expect.monotonic(label=0, increasing=False, tolerance=0.1)
t = Perturb.perturb(qs, add_random_answer, nsamples = 1000)
test = DIR(**t, expect=monotonic_decreasing)
suite.add(test, name='add random answer', capability='Robustness')
def change_thing(change_fn):
def change_both(cq, **kwargs):
context, question = cq
a = change_fn(context, meta=True)
if not a:
return None
changed, meta = a
ret = []
for c, m in zip(changed, meta):
new_q = re.sub(r'\b%s\b' % re.escape(m[0]), m[1], question.text)
ret.append((c, new_q))
return ret, meta
return change_both
t = Perturb.perturb(processed_qs, change_thing(Perturb.change_names), nsamples=1000, meta=True)
test = INV(**t, name='Change name everywhere', capability='Robustness', description='')
suite.add(test)
t = Perturb.perturb(processed_qs, change_thing(Perturb.change_location), nsamples=1000, meta=True)
test = INV(**t, name='Change location everywhere', capability='Robustness', description='')
suite.add(test)
logger.info('constructing test suite complete')
else:
suite = given_suite
if only_construct_suite == True:
return suite
#from pattern.en import sentiment
def predict_proba(inputs):
features = glue_dataset.convert_checklist_input(inputs)
if verbose == True:
print('len(inputs)', len(inputs))
print('debug inputs[0] after conversion', tokenizer.decode(features[0].input_ids))
model.eval()
idx_now, bz, probs = 0, 8, []
while idx_now < len(features):
input_ids = torch.LongTensor([f.input_ids for f in features[idx_now:idx_now + bz]]).cuda()
attention_mask = torch.LongTensor([f.attention_mask for f in features[idx_now:idx_now + bz]]).cuda()
idx_now += bz
outputs = model(input_ids = input_ids, attention_mask = attention_mask)
logits = outputs[0]
prob = torch.softmax(logits, dim = -1).cpu().detach()
probs.append(prob)
"""
p1 = np.array([(sentiment(x)[0] + 1)/2. for x in inputs]).reshape(-1, 1)
p1 = np.random.uniform(size=p1.shape)
p0 = 1- p1
pp = np.hstack((p0, p1))
"""
pp = torch.cat(probs, dim = 0).numpy()
return pp
from checklist.pred_wrapper import PredictorWrapper
wrapped_pp = PredictorWrapper.wrap_softmax(predict_proba)
suite.run(wrapped_pp, overwrite=True)
#suite.summary()
res = my_summary(suite, verbose = verbose)
logger.info('do_checklist_QNLI complete')
return res
def do_checklist_SST2(model, tokenizer, glue_dataset, all_args, only_construct_suite = False, given_suite = None, verbose = True):
logger.info('do_checklist_SST2')
model_args, data_args, training_args, my_args = all_args
    if only_construct_suite or given_suite is None:
logger.info('label_list: %s', str(glue_dataset.label_list))
qs = []
labels = []
all_qs = set()
random_qs = set()
for x in open(data_args.data_dir + '/dev.tsv').readlines()[1:]:
try:
q1, label = x.strip().split('\t')
except:
print('warning: discarded', x.strip())
continue
all_qs.add(q1);
random_qs.add(q1)
qs.append((q1,))
assert(label in ['0', '1'])
labels.append(int(label))
#if len(labels) > 1000:
# logger.info('DEBUG break')
# break
labels = np.array(labels).astype(int)
all_qs = list(all_qs)
random_qs = list(random_qs)
parsed_qs = list(nlp.pipe(all_qs))
spacy_map = dict([(x, y) for x, y in zip(all_qs, parsed_qs)])
processed_qs = [(spacy_map[q[0]]) for q in qs]
logger.info('constructing test suite')
suite = TestSuite()
def question_typo(x):
return (Perturb.add_typos(x[0]),)
t = Perturb.perturb(qs, question_typo, nsamples=500)
test = INV(t.data, name='typo', capability='Robustness', description='')
# test.run(new_pp)
# test.summary(3)
suite.add(test, overwrite=True)
""" #did not work much
def word_repeat(x):
tt = x[0].split()
k = random.randint(0, len(tt) - 1)
tt = tt[:k] + [tt[k]] + tt[k:]
return (' '.join(tt),)
t = Perturb.perturb(qs, word_repeat, nsamples=500)
test = INV(t.data, name='word repeat', capability='Robustness', description='')
# test.run(new_pp)
# test.summary(3)
suite.add(test, overwrite=True)
"""
pos_sentences = ["It 's hard to describe how much i enjoyed it .", 'I really want to watch it again .', 'I will argue with anyone who hates it .', 'How can anyone resist it .', 'I find it hard to describe how good it is .']
def add_random_pos(x):
random_s = np.random.choice(pos_sentences)
return (x[0] + ' ' + random_s, )
from checklist.expect import Expect
l0_monotonic_increasing = Expect.monotonic(label=0, increasing=True, tolerance=0.1)
l1_monotonic_increasing = Expect.monotonic(label=1, increasing=True, tolerance=0.1)
t = Perturb.perturb(qs, add_random_pos, nsamples = 1000)
test = DIR(**t, expect=l1_monotonic_increasing)
suite.add(test, name='add random positive', capability='Robustness')
editor = checklist.editor.Editor()
movie_noun = ['movie', 'film', 'shoot', 'experience', 'video']
editor.add_lexicon('movie_noun', movie_noun)
pos_adj = ['good', 'great', 'excellent', 'amazing', 'extraordinary', 'beautiful', 'fantastic', 'nice', 'incredible', 'exceptional', 'awesome', 'perfect', 'fun', 'happy', 'adorable', 'brilliant', 'exciting', 'sweet', 'wonderful']
neg_adj = ['awful', 'bad', 'horrible', 'weird', 'rough', 'lousy', 'unhappy', 'average', 'difficult', 'poor', 'sad', 'frustrating', 'hard', 'lame', 'nasty', 'annoying', 'boring', 'creepy', 'dreadful', 'ridiculous', 'terrible', 'ugly', 'unpleasant']
neutral_adj = ['American', 'international', 'commercial', 'British', 'private', 'Italian', 'Indian', 'Australian', 'Israeli', ]
editor.add_lexicon('pos_adj', pos_adj, overwrite=True)
editor.add_lexicon('neg_adj', neg_adj, overwrite=True )
editor.add_lexicon('neutral_adj', neutral_adj, overwrite=True)
pos_verb_present = ['like', 'enjoy', 'appreciate', 'love', 'recommend', 'admire', 'value', 'welcome']
neg_verb_present = ['hate', 'dislike', 'regret', 'abhor', 'dread', 'despise' ]
neutral_verb_present = ['see', 'find']
pos_verb_past = ['liked', 'enjoyed', 'appreciated', 'loved', 'admired', 'valued', 'welcomed']
neg_verb_past = ['hated', 'disliked', 'regretted', 'abhorred', 'dreaded', 'despised']
neutral_verb_past = ['saw', 'found']
editor.add_lexicon('pos_verb_present', pos_verb_present, overwrite=True)
editor.add_lexicon('neg_verb_present', neg_verb_present, overwrite=True)
editor.add_lexicon('neutral_verb_present', neutral_verb_present, overwrite=True)
editor.add_lexicon('pos_verb_past', pos_verb_past, overwrite=True)
editor.add_lexicon('neg_verb_past', neg_verb_past, overwrite=True)
editor.add_lexicon('neutral_verb_past', neutral_verb_past, overwrite=True)
editor.add_lexicon('pos_verb', pos_verb_present+ pos_verb_past, overwrite=True)
editor.add_lexicon('neg_verb', neg_verb_present + neg_verb_past, overwrite=True)
editor.add_lexicon('neutral_verb', neutral_verb_present + neutral_verb_past, overwrite=True)
intens_adj = ['very', 'really', 'absolutely', 'truly', 'extremely', 'quite', 'incredibly', 'amazingly', 'especially', 'exceptionally', 'unbelievably', 'utterly', 'exceedingly', 'rather', 'totally', 'particularly']
intens_verb = [ 'really', 'absolutely', 'truly', 'extremely', 'especially', 'utterly', 'totally', 'particularly', 'highly', 'definitely', 'certainly', 'genuinely', 'honestly', 'strongly', 'sure', 'sincerely']
t = editor.template('{it} {movie_noun} {nt} {pos_adj}.', it=['This', 'That', 'The'], nt=['is not', 'isn\'t'], save=True)
t += editor.template('{it} {benot} {a:pos_adj} {movie_noun}.', it=['It', 'This', 'That'], benot=['is not', 'isn\'t', 'was not', 'wasn\'t'], save=True)
neg = ['I can\'t say I', 'I don\'t', 'I would never say I', 'I don\'t think I', 'I didn\'t' ]
t += editor.template('{neg} {pos_verb_present} {the} {movie_noun}.', neg=neg, the=['this', 'that', 'the'], save=True)
t += editor.template('No one {pos_verb_present}s {the} {movie_noun}.', neg=neg, the=['this', 'that', 'the'], save=True)
test = MFT(t.data, labels=0, templates=t.templates)
suite.add(test, 'simple negations: negative', 'Negation', 'Very simple negations of positive statements')
t = editor.template('{it} {movie_noun} {nt} {neg_adj}.', it=['This', 'That', 'The'], nt=['is not', 'isn\'t'], save=True)
t += editor.template('{it} {benot} {a:neg_adj} {movie_noun}.', it=['It', 'This', 'That'], benot=['is not', 'isn\'t', 'was not', 'wasn\'t'], save=True)
neg = ['I can\'t say I', 'I don\'t', 'I would never say I', 'I don\'t think I', 'I didn\'t' ]
t += editor.template('{neg} {neg_verb_present} {the} {movie_noun}.', neg=neg, the=['this', 'that', 'the'], save=True)
t += editor.template('No one {neg_verb_present}s {the} {movie_noun}.', neg=neg, the=['this', 'that', 'the'], save=True)
# expectation: prediction is not 0
is_not_0 = lambda x, pred, *args: pred != 0
test = MFT(t.data, Expect.single(is_not_0), templates=t.templates)
suite.add(test, 'simple negations: not negative', 'Negation', 'Very simple negations of negative statements. Expectation requires prediction to NOT be negative (i.e. neutral or positive)')
t = editor.template('I thought {it} {movie_noun} would be {pos_adj}, but it {neg}.', neg=['was not', 'wasn\'t'], it=['this', 'that', 'the'], nt=['is not', 'isn\'t'], save=True)
t += editor.template('I thought I would {pos_verb_present} {the} {movie_noun}, but I {neg}.', neg=['did not', 'didn\'t'], the=['this', 'that', 'the'], save=True)
test = MFT(t.data, labels=0, templates=t.templates)
suite.add(test, 'simple negations: I thought x was positive, but it was not (should be negative)', 'Negation', '', overwrite=True)
"""did not work much
neg_sentences = ['No one will like it .', 'I will not watch it again .', 'I tell my friends not to watch it .', 'Really a waste of time .']
def add_random_neg(x):
random_s = np.random.choice(neg_sentences)
return (x[0] + ' ' + random_s, )
from checklist.expect import Expect
t = Perturb.perturb(qs, add_random_neg, nsamples = 1000)
test = DIR(**t, expect=l0_monotonic_increasing)
suite.add(test, name='add random negative', capability='Robustness')
"""
"""
def change_thing(change_fn):
def change_both(cq, **kwargs):
context, question = cq
a = change_fn(context, meta=True)
if not a:
return None
changed, meta = a
ret = []
for c, m in zip(changed, meta):
new_q = re.sub(r'\b%s\b' % re.escape(m[0]), m[1], question.text)
ret.append((c, new_q))
return ret, meta
return change_both
t = Perturb.perturb(processed_qs, change_thing(Perturb.change_names), nsamples=1000, meta=True)
test = INV(**t, name='Change name everywhere', capability='Robustness', description='')
suite.add(test)
t = Perturb.perturb(processed_qs, change_thing(Perturb.change_location), nsamples=1000, meta=True)
test = INV(**t, name='Change location everywhere', capability='Robustness', description='')
suite.add(test)
"""
        logger.info('constructing test suite complete')
    else:
        suite = given_suite
    if only_construct_suite:
        return suite
#from pattern.en import sentiment
def predict_proba(inputs):
features = glue_dataset.convert_checklist_input(inputs)
if verbose == True:
print('len(inputs)', len(inputs))
print('debug inputs[0] after conversion', tokenizer.decode(features[0].input_ids))
model.eval()
idx_now, bz, probs = 0, 8, []
while idx_now < len(features):
input_ids = torch.LongTensor([f.input_ids for f in features[idx_now:idx_now + bz]]).cuda()
attention_mask = torch.LongTensor([f.attention_mask for f in features[idx_now:idx_now + bz]]).cuda()
idx_now += bz
outputs = model(input_ids = input_ids, attention_mask = attention_mask)
logits = outputs[0]
prob = torch.softmax(logits, dim = -1).cpu().detach()
probs.append(prob)
"""
p1 = np.array([(sentiment(x)[0] + 1)/2. for x in inputs]).reshape(-1, 1)
p1 = np.random.uniform(size=p1.shape)
p0 = 1- p1
pp = np.hstack((p0, p1))
"""
pp = torch.cat(probs, dim = 0).numpy()
return pp
from checklist.pred_wrapper import PredictorWrapper
wrapped_pp = PredictorWrapper.wrap_softmax(predict_proba)
suite.run(wrapped_pp, overwrite=True)
#suite.summary()
res = my_summary(suite, verbose = verbose)
logger.info('do_checklist_SST2 complete')
return res
def get_funct(task_name):
    task_name = task_name.lower()
    if task_name == 'qqp':
        return do_checklist_QQP
    if task_name == 'qnli':
        return do_checklist_QNLI
    if task_name == 'sst-2':
        return do_checklist_SST2
    return None
def construct_checklist_suite(model, tokenizer, eval_dataset, all_args):
model_args, data_args, training_args, my_args = all_args
cfn = get_funct(data_args.task_name)
if cfn is None:
return None
suite = cfn(model, tokenizer, eval_dataset, all_args, only_construct_suite = True)
return suite
def run_checklist_suite(model, tokenizer, eval_dataset, all_args, given_suite = None, verbose = True):
model_args, data_args, training_args, my_args = all_args
cfn = get_funct(data_args.task_name)
if given_suite is not None:
res = cfn(model, tokenizer, eval_dataset, all_args, only_construct_suite = False, given_suite = given_suite, verbose = verbose)
else:
res = {'AVG': 0}
return res
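# Orchestration sketch (assumes `model`, `tokenizer`, `eval_dataset` and `all_args`
# are prepared by the surrounding training script): build the suite once, then
# re-run it against the current model whenever fresh fail rates are needed.
#
#   suite = construct_checklist_suite(model, tokenizer, eval_dataset, all_args)
#   if suite is not None:
#       rates = run_checklist_suite(model, tokenizer, eval_dataset, all_args,
#                                   given_suite=suite, verbose=False)
#       print(rates['AVG'])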
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
import brewer2mpl
def _number_to_palette(ctype, n):
n -= 1
palettes = sorted(brewer2mpl.COLOR_MAPS[ctype].keys())
if n < len(palettes):
return palettes[n]
def _handle_shorthand(text):
abbrevs = {
"seq": "Sequential",
"qual": "Qualitative",
"div": "Diverging"
}
text = abbrevs.get(text, text)
text = text.title()
return text
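# Quick illustration (not in the original module): shorthand types are expanded and
# title-cased before palette lookup, and a numeric palette indexes alphabetically
# into brewer2mpl's palettes of that type, e.g.
#   _handle_shorthand('div')              # -> 'Diverging'
#   _number_to_palette('Qualitative', 1)  # -> first qualitative palette, e.g. 'Accent'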
class scale_color_brewer(scale):
"""
Use ColorBrewer (http://colorbrewer2.org/) style colors
Parameters
----------
type: string
One of seq (sequential), div (diverging) or qual (qualitative)
palette: string
If a string, will use that named palette. If a number, will index into
the list of palettes of appropriate type
Examples
--------
>>> from ggplot import *
>>> p = ggplot(aes(x='carat', y='price', colour='clarity'), data=diamonds)
>>> p += geom_point()
>>> print(p + scale_color_brewer(palette=4))
>>> print(p + scale_color_brewer(type='diverging'))
>>> print(p + scale_color_brewer(type='div'))
>>> print(p + scale_color_brewer(type='seq'))
>>> print(p + scale_color_brewer(type='seq', palette='Blues'))
"""
VALID_SCALES = ['type', 'palette']
def __radd__(self, gg):
# gg = deepcopy(gg)
if self.type:
ctype = self.type
else:
ctype = "Sequential"
ctype = _handle_shorthand(ctype)
if self.palette:
palette = self.palette
else:
palette = _number_to_palette(ctype, 1)
if isinstance(palette, int):
palette = _number_to_palette(ctype, palette)
# color brewer requires a minimum of 3 colors in a palette
try:
color_col = gg._aes.data.get('color', gg._aes.data.get('fill'))
n_colors = max(gg.data[color_col].nunique(), 3)
except:
# If we are neither using 'color' nor 'fill' then assume there is
# only one color used
n_colors = 3
try:
bmap = brewer2mpl.get_map(palette, ctype, n_colors)
except ValueError as e:
if not str(e).startswith('Invalid number for map type'):
raise e
palettes = brewer2mpl.COLOR_MAPS[_handle_shorthand(ctype).lower().capitalize()][palette]
n_colors = int(max(str(k) for k in palettes))
bmap = brewer2mpl.get_map(palette, ctype, n_colors)
gg.manual_color_list = bmap.hex_colors
return gg
|
from PySide import QtCore, QtGui
from networkident_ui import Ui_NetworkIdent
import os, sqlite3
from datetime import datetime
PLUGIN_NAME = "Network Identification"
import plugins_utils
# retrieve modules from ipba root directory
import plistutils
class NetworkIdentWidget(QtGui.QWidget):
def __init__(self, cursor, path, daemon = False):
QtGui.QWidget.__init__(self)
self.ui = Ui_NetworkIdent()
self.ui.setupUi(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.cursor = cursor
self.backup_path = path
self.filename = os.path.join(self.backup_path, plugins_utils.realFileName(self.cursor, filename="com.apple.network.identification.plist", domaintype="SystemPreferencesDomain"))
if (not os.path.isfile(self.filename)):
raise Exception("Network Identification file not found: \"%s\""%self.filename)
QtCore.QObject.connect(self.ui.networksTree, QtCore.SIGNAL("itemSelectionChanged()"), self.onTreeClick)
self.ui.networksTree.setColumnHidden(0,True)
if (daemon == False):
self.populateUI()
def populateUI(self):
signatures = plistutils.readPlist(self.filename)['Signatures']
index = 0
for element in signatures:
ident = element['Identifier']
identParts = ident.split(";")
if (len(identParts) == 1):
ident = identParts[0]
else:
ident = identParts[1].split("=")[1]
timestamp = element['Timestamp']
timestamp = timestamp.strftime('%b %d %Y %H:%M UTC')
newElement = QtGui.QTreeWidgetItem(None)
newElement.setText(0, str(index))
newElement.setText(1, ident)
newElement.setText(2, str(timestamp))
self.ui.networksTree.addTopLevelItem(newElement)
index += 1
def onTreeClick(self):
# retrieving selected network
currentSelectedElement = self.ui.networksTree.currentItem()
        if not currentSelectedElement:
            return
signatures = plistutils.readPlist(self.filename)['Signatures']
currentNetworkIndex = int(currentSelectedElement.text(0))
currentNetworkServices = signatures[currentNetworkIndex]['Services']
networkDescr = signatures[currentNetworkIndex]['Identifier']
networkDescrParts = networkDescr.split(";")
networkDescr = "\n".join(networkDescrParts)
self.ui.networkLabel.setText(networkDescr)
self.ui.servicesTree.clear()
for service in currentNetworkServices:
serviceNode = QtGui.QTreeWidgetItem(None)
serviceNode.setText(0, "service")
self.ui.servicesTree.addTopLevelItem(serviceNode)
serviceNode.setExpanded(True)
for serviceKey in service.keys():
subserviceNode = QtGui.QTreeWidgetItem(serviceNode)
subserviceNode.setText(0, serviceKey)
self.ui.servicesTree.addTopLevelItem(subserviceNode)
subserviceNode.setExpanded(True)
if (serviceKey == "ServiceID"):
subserviceNode.setText(1, service['ServiceID'])
continue
for element in service[serviceKey].keys():
elementNode = QtGui.QTreeWidgetItem(subserviceNode)
elementNode.setText(0, element)
text = service[serviceKey][element]
if (type(text) == type([1,2])):
text = ", ".join(text)
elementNode.setText(1, text)
self.ui.servicesTree.addTopLevelItem(elementNode)
elementNode.setExpanded(True)
def main(cursor, path):
return NetworkIdentWidget(cursor, path)
|
"""Data descriptors that provide special behaviors when attributes are
accessed."""
from enum import auto
from enum import Enum
from .exceptions import MarshallerBaseException
class MarshallingAttributeAccessError(MarshallerBaseException):
"""Generic error that arises from while accessing an attribute."""
class Placeholders(Enum):
"""Accessor placeholders.
Special behaviors can occur when the descriptor returns a value in
the Placeholder class. For example, when the `Placeholders.CALLBACK`
value is returned and cache=True, this indicates that the callback
function needs to be called and the result cached.
"""
DATA = auto() #: DATA accessor holder.
MARSHALL = auto() #: MARSHALL accessor holder.
CALLBACK = auto() #: CALLBACK accessor holder.
DEFAULT = auto() #: DEFAULT accessor holder.
class DataAccessor:
"""A descriptor that will dynamically access an instance's dictionary named
by the `accessor` key. If the key is not in the dictionary or the value
received from the dictionary is a :class:`Placeholders.DATA` enumerator, an
AttributeError is raised.
**Usage**:
This may be used to assign dynamic properties to a class method as in
the example below, or subclasses of the DataAccessor can be created
to create 'hooks' when the descriptor is accessed, set, or delete.
.. code-block::
model_class.x = DataAccessor('x')
model_class.y = DataAccessor('y')
instance = model_class()
instance.data # {'x': Placeholders.DATA, 'y': Placeholders.DATA}
# accessing the default values
try:
instance.x
except AttributeError:
print("x being tracked but is not set")
# setting and accessing a value
instance.x = 5
instance.data = {'x': 5, 'y': Placeholders.DATA}
instance.x # returns 5
# raising AttributeError
try:
instance.y
except AttributeError:
print("y is not set")
# setting the value
instance.y = 10
assert instance.data == {'x': 5, 'y': 10}
assert instance.y == 10
# deleting the value
del instance.y
assert instance.data == {'x': 5, 'y': Placeholders.DATA}
try:
instance.y
except AttributeError:
print("y is not set")
"""
__slots__ = ["name", "accessor", "default"]
HOLDER = Placeholders.DATA
def __init__(self, name, accessor=None, default=HOLDER):
self.name = name
self.accessor = accessor
if default is Placeholders.DEFAULT:
default = self.HOLDER
self.default = default
if self.name == self.accessor:
raise MarshallingAttributeAccessError(
"Descriptor name '{}' cannot be accessor name '{}'".format(
self.name, self.accessor
)
)
def get_val(self, obj):
access_data = getattr(obj, self.accessor)
return access_data.get(self.name, self.default)
def __get__(self, obj, objtype):
val = self.get_val(obj)
if val is self.HOLDER:
raise AttributeError(
"type object '{}' does not have attribute '{}'".format(
obj.__class__.__name__, self.name
)
)
else:
return val
def __set__(self, obj, val):
getattr(obj, self.accessor)[self.name] = val
def __delete__(self, obj):
getattr(obj, self.accessor)[self.name] = self.HOLDER
class MarshallingAccessor(DataAccessor):
"""A generic Marshalling descriptor."""
__slots__ = ["name", "accessor", "field", "deserialized_accessor", "default"]
HOLDER = Placeholders.MARSHALL
def __init__(self, name, field, accessor, deserialized_accessor, default=HOLDER):
super().__init__(name, accessor, default=default)
self.deserialized_accessor = deserialized_accessor
if self.accessor == self.deserialized_accessor:
raise MarshallingAttributeAccessError(
"Descriptor accessor '{}' cannot be deserialized accessor '{}'".format(
self.accessor, self.deserialized_accessor
)
)
self.field = field
def get_val(self, obj):
try:
return getattr(obj, self.deserialized_accessor).get(self.name, self.default)
except AttributeError as e:
raise e
except Exception as e:
raise MarshallingAttributeAccessError(
"Error retrieving attribute '{}' from '{}' because:\n".format(
self.name, obj.__class__
)
+ "{}: ".format(e.__class__.__name__)
+ str(e)
) from e
def __get__(self, obj, objtype):
val = self.get_val(obj)
if val is self.HOLDER:
val = getattr(obj, self.accessor).get(self.name, self.HOLDER)
if val is self.HOLDER:
raise AttributeError(
"type object '{}' does not have attribute '{}'".format(
obj.__class__.__name__, self.name
)
)
else:
return self.field.deserialize(obj, val)
return val
def __set__(self, obj, val):
try:
deserialized = self.field.deserialize(obj, val)
serialized = self.field.serialize(obj, deserialized)
getattr(obj, self.deserialized_accessor)[self.name] = deserialized
getattr(obj, self.accessor)[self.name] = serialized
except Exception as e:
from traceback import format_tb
print("__set__ traceback:")
print("\n".join(format_tb(e.__traceback__)))
raise MarshallingAttributeAccessError(
"can't set attribute '{}' for '{}' to '{}' due to:\n{}. "
"See the traceback printed above".format(self.name, obj, val, str(e))
) from e
def __delete__(self, obj):
del getattr(obj, self.accessor)[self.name]
del getattr(obj, self.deserialized_accessor)[self.name]
class CallbackAccessor(MarshallingAccessor):
"""A descriptor that uses a registered :class:`marshaller.fields.Callback`
to dynamically access the value of a callback by sending the instance to
the callback field's `fullfill` method if the descriptor is not yet set.
If the descriptor is already set, return that value. Deleting the
descriptor sets the value to the default
:class:`Placeholders.CALLBACK`, which will attempt to `fullfill` the
descriptor once accessed.
"""
__slots__ = ["name", "accessor", "field", "deserialized_accessor", "default"]
HOLDER = Placeholders.CALLBACK
def __init__(self, name, field, accessor, deserialized_accessor, default=HOLDER):
super().__init__(name, field, accessor, deserialized_accessor, default=default)
def __get__(self, obj, objtype):
val = self.get_val(obj)
if val is self.HOLDER:
val = self.field.fullfill(obj)
return val
def __set__(self, obj, val):
getattr(obj, self.deserialized_accessor)[self.name] = val
getattr(obj, self.accessor)[self.name] = self.field.serialize(obj, val)
class RelationshipAccessor(CallbackAccessor):
"""The descriptor for a :class:`pydent.marshaller.fields.Relationship`
field."""
def __set__(self, obj, val):
deserialized = self.field.deserialize(obj, val)
serialized = self.field.serialize(obj, deserialized)
getattr(obj, self.deserialized_accessor)[self.name] = deserialized
getattr(obj, self.accessor)[self.name] = serialized
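# Minimal sketch of how these descriptors are wired (UpperField and Model are
# hypothetical, not part of the marshaller package): a field only supplies
# serialize/deserialize hooks, and the accessor keeps the serialized and
# deserialized per-instance dicts in sync.
#
#   class UpperField:
#       def serialize(self, obj, val):
#           return val.upper()
#       def deserialize(self, obj, val):
#           return val.lower()
#
#   class Model:
#       x = MarshallingAccessor('x', UpperField(), 'data', 'deserialized_data')
#       def __init__(self):
#           self.data = {}
#           self.deserialized_data = {}
#
#   m = Model()
#   m.x = 'Hello'
#   # m.deserialized_data == {'x': 'hello'}, m.data == {'x': 'HELLO'}, m.x == 'hello'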
|
# taken from here:
# https://stackoverflow.com/a/42580137/4698227
import sys
def is_venv():
"""
Checks whether we are in a virtual environment or not.
:return: whether within a virtual environment
:rtype: bool
"""
return (hasattr(sys, 'real_prefix')
or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix))
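if __name__ == '__main__':
    # Tiny self-check (added illustration, not part of the original snippet):
    # report whether the current interpreter is running inside a virtual environment.
    print('virtual environment' if is_venv() else 'system interpreter')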
|
# -*- coding: utf-8 -*-
import argparse
import json
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from keras import backend as K
from axelerate.networks.yolo.frontend import create_yolo
from axelerate.networks.yolo.backend.utils.box import draw_scaled_boxes
from axelerate.networks.yolo.backend.utils.annotation import parse_annotation
from axelerate.networks.yolo.backend.utils.eval.fscore import count_true_positives, calc_score
from axelerate.networks.classifier.frontend_classifier import get_labels,create_classifier
import os
import glob
import tensorflow as tf
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
config = tf.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.clear_session()
DEFAULT_THRESHOLD = 0.3
argparser = argparse.ArgumentParser(
description='Run inference script')
argparser.add_argument(
'-c',
'--conf',
help='path to configuration file')
argparser.add_argument(
'-t',
'--threshold',
default=DEFAULT_THRESHOLD,
help='detection threshold')
argparser.add_argument(
'-w',
'--weights',
help='trained weight files')
def show_image(filename):
image = mpimg.imread(filename)
plt.figure()
plt.imshow(image)
plt.show(block=False)
plt.pause(1)
plt.close()
print(filename)
def prepare_image(img_path, network):
orig_image = cv2.imread(img_path)
input_image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
input_image = cv2.resize(input_image, (network._input_size[1],network._input_size[0]))
input_image = network._norm(input_image)
input_image = np.expand_dims(input_image, 0)
return orig_image, input_image
def setup_inference(config,weights,threshold=0.3,path=None, dataset="testing"):
#added for compatibility with < 0.5.7 versions
try:
input_size = config['model']['input_size'][:]
except:
input_size = [config['model']['input_size'],config['model']['input_size']]
"""make directory to save inference results """
dirname = os.path.join(os.path.dirname(weights),'Inference_results')
if os.path.isdir(dirname):
print("Folder {} is already exists. Image files in directory might be overwritten".format(dirname))
else:
print("Folder {} is created.".format(dirname))
os.makedirs(dirname)
if config['model']['type']=='Detector':
# 2. create yolo instance & predict
yolo = create_yolo(config['model']['architecture'],
config['model']['labels'],
input_size,
config['model']['anchors'])
yolo.load_weights(weights)
        # 3. read image
if dataset == 'testing':
print("the dataset used for testing is:", config['test']['test_image_folder'], " the annotations are: ", config['test']['test_label_folder'])
# added testing directly in configuration
annotations = parse_annotation(config['test']['test_label_folder'],
config['test']['test_image_folder'],
config['model']['labels'],
is_only_detect=config['train']['is_only_detect'])
else:
print("the dataset used for testing is:", config['train']['valid_image_folder'], " the annotations are: ", config['train']['valid_annot_folder'])
annotations = parse_annotation(config['train']['valid_annot_folder'],
config['train']['valid_image_folder'],
config['model']['labels'],
is_only_detect=config['train']['is_only_detect'])
n_true_positives = 0
n_truth = 0
n_pred = 0
inference_time = []
for i in range(len(annotations)):
img_path = annotations.fname(i)
img_fname = os.path.basename(img_path)
true_boxes = annotations.boxes(i)
true_labels = annotations.code_labels(i)
orig_image, input_image = prepare_image(img_path, yolo)
height, width = orig_image.shape[:2]
prediction_time, boxes, probs = yolo.predict(input_image, height, width, float(threshold))
inference_time.append(prediction_time)
labels = np.argmax(probs, axis=1) if len(probs) > 0 else []
# 4. save detection result
orig_image = draw_scaled_boxes(orig_image, boxes, probs, config['model']['labels'])
output_path = os.path.join(dirname, os.path.split(img_fname)[-1])
cv2.imwrite(output_path, orig_image)
print("{}-boxes are detected. {} saved.".format(len(boxes), output_path))
show_image(output_path)
n_true_positives += count_true_positives(boxes, true_boxes, labels, true_labels)
n_truth += len(true_boxes)
n_pred += len(boxes)
print(calc_score(n_true_positives, n_truth, n_pred))
if len(inference_time)>1:
print("Average prediction time:{} ms".format(sum(inference_time[1:])/len(inference_time[1:])))
if __name__ == '__main__':
# 1. extract arguments
args = argparser.parse_args()
with open(args.conf) as config_buffer:
config = json.loads(config_buffer.read())
setup_inference(config,args.weights,args.threshold)
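# Usage sketch (file names are hypothetical; the flags match the argparser above):
#   python infer.py -c configs/detector.json -w weights/yolo_best.h5 -t 0.5
# The threshold arrives as a string and is cast with float() inside setup_inference,
# so passing it on the command line works as shown.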
|
"""Test for our hash data structure."""
import pytest
import hash
PARAMS_TABLE = [
('donuts', 6),
('hashy', 'no'),
('firefly', 'canceled'),
('choochoo', [5, 6, 4, 3]),
("mylittlepony", {"isbrony": "ispony"})
]
PARAMS_TABLE_TYPE_ERRORS = [
(5, ),
([], ),
(True, ),
({}, ),
((), )
]
@pytest.mark.parametrize("key, value", PARAMS_TABLE)
def test_fnv_key_value(key, value):
"""Test if insertion works correctly with the fnv hash."""
test_table = hash.HashTable(1000)
test_table.set(key, value)
assert test_table.get(key) == value
@pytest.mark.parametrize("key, value", PARAMS_TABLE)
def test_additive_key_value(key, value):
"""Test if insertion works correctly with the additive hash."""
test_table = hash.HashTable(1000, 'add')
test_table.set(key, value)
assert test_table.get(key) == value
def test_additive_get_not__there():
"""Test if get returns none with additive hash."""
test_table = hash.HashTable(1000, 'add')
assert test_table.get('test') is None
def test_fnv_get_not__there():
"""Test if get returns none with fnv hash."""
test_table = hash.HashTable(1000)
assert test_table.get('test') is None
def test_additive_duplicate_value():
"""Test if values with duplicate keys are not stored with additive hash."""
test_table = hash.HashTable(1000, 'add')
test_table.set("key", "value")
test_table.set("key", "value2")
assert test_table.get("key") == "value"
def test_fnv_duplicate_value():
"""Test if values with duplicate keys are not stored with fnv hash."""
test_table = hash.HashTable(1000)
test_table.set("key", "value")
test_table.set("key", "value2")
assert test_table.get("key") == "value"
@pytest.mark.parametrize("value", PARAMS_TABLE_TYPE_ERRORS)
def test_additive_set_type_error_without_string(value):
"""Test function raises an error when a non-string is inserted in additive hash."""
test_table = hash.HashTable(1000, 'add')
with pytest.raises(TypeError):
test_table.set(value, "unicorns")
@pytest.mark.parametrize("value", PARAMS_TABLE_TYPE_ERRORS)
def test_fnv_set_type_error_without_string(value):
"""Test function raises an error when a non-string is inserted in fnv hash."""
test_table = hash.HashTable(1000)
with pytest.raises(TypeError):
test_table.set(value, "pony")
@pytest.mark.parametrize("value", PARAMS_TABLE_TYPE_ERRORS)
def test_additive_get_type_error_without_string(value):
"""Test function raises an error when a non-string is searched for in additive hash."""
test_table = hash.HashTable(1000, 'add')
with pytest.raises(TypeError):
test_table.get(value, "unicorns")
@pytest.mark.parametrize("value", PARAMS_TABLE_TYPE_ERRORS)
def test_fnv_get_type_error_without_string(value):
"""Test function raises an error when a non-string is searched for in fnv hash."""
test_table = hash.HashTable(1000)
with pytest.raises(TypeError):
test_table.get(value, "pony")
def test_with_huge_database_fnv():
"""Import a gigantic dictionary and asserts that it works properly in fnv hash."""
test_table = hash.HashTable(1000)
with open('/usr/share/dict/words') as dictionary:
data = dictionary.read()
data = data.split('\n')
if len(data) > 100000:
data = data[:100000]
for i in range(len(data)):
test_table.set(data[i], data[i])
assert test_table.get('dinosaur') == 'dinosaur'
assert test_table.get("qwertyuiop") is None
def test_with_huge_database_additive():
"""Import a gigantic dictionary and asserts that it works properly in additive hash."""
test_table = hash.HashTable(1000, 'add')
with open('/usr/share/dict/words') as dictionary:
data = dictionary.read()
data = data.split('\n')
if len(data) > 100000:
data = data[:100000]
for i in range(len(data)):
test_table.set(data[i], data[i])
assert test_table.get('dinosaur') == 'dinosaur'
assert test_table.get("qwertyuiop") is None
|
import argparse
from pathlib import Path
import shutil
def parse_path(path_string: str) -> Path:
path_string = Path(path_string).resolve()
return path_string
if __name__ == '__main__':
save_help = 'File path to store the augmented training data and the '\
'original validation and test data'
parser = argparse.ArgumentParser()
parser.add_argument("augmented_data_fp", type=parse_path,
help='File path to the augmented training data')
parser.add_argument("test_val_folder_fp", type=parse_path,
help='File path to the folder containing the validation and test data')
parser.add_argument("save_folder", type=parse_path, help=save_help)
args = parser.parse_args()
save_folder = args.save_folder
save_folder.mkdir(parents=True, exist_ok=True)
new_train_fp = Path(save_folder, 'train.json')
shutil.copy(args.augmented_data_fp, new_train_fp)
val_fp = Path(args.test_val_folder_fp, 'val.json')
new_val_fp = Path(save_folder, 'val.json')
shutil.copy(val_fp, new_val_fp)
test_fp = Path(args.test_val_folder_fp, 'test.json')
new_test_fp = Path(save_folder, 'test.json')
shutil.copy(test_fp, new_test_fp)
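# Usage sketch (the script name and paths are hypothetical; arguments are the
# three positionals declared above):
#   python combine_splits.py augmented/train.json original_splits/ combined/
# This copies the augmented data to combined/train.json and the original val.json
# and test.json from original_splits/ into the same folder.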
|
import json, os, logging, boto3, urllib.request, shutil
from datetime import datetime as dt
logger = logging.getLogger('InspectorQuickstart')
logger.setLevel(logging.DEBUG)
inspectorClient = boto3.client('inspector')
cloudformationClient = boto3.client('cloudformation')
s3Client = boto3.client('s3')
snsClient = boto3.client('sns')
reports_bucket = os.environ["REPORTS_BUCKET"]
notification_topic = os.environ["REPORT_COMPLETE_SNS"]
def get_template_user_attributes(assement_template_arn):
user_attributes = {}
response = inspectorClient.describe_assessment_templates(
assessmentTemplateArns=[
assement_template_arn,
]
)
logger.info(response)
if "assessmentTemplates" in response:
for template in response["assessmentTemplates"]:
for user_att in template["userAttributesForFindings"]:
user_attributes[user_att["key"]] = user_att["value"]
return user_attributes
def generate_report(run_arn):
while True:
response = inspectorClient.get_assessment_report(
assessmentRunArn=run_arn,
reportFileFormat="HTML",
reportType="FULL",
)
if "url" in response:
break
url = response["url"]
logger.info(url)
return url
def download_report(url, user_attributes):
report_name = user_attributes["AMI_ID"] + "-inspector-report.html"
temp_file = "/tmp/" + report_name
with urllib.request.urlopen(url=url) as response, open(temp_file, "wb") as out_file:
shutil.copyfileobj(response, out_file)
logger.info(response)
current_date = dt.now().strftime("%m-%d-%Y")
report_to_upload = open(temp_file, "rb")
s3_report_key = current_date + "/" + user_attributes["CommitId"] + "/" + report_name
s3_response = s3Client.put_object(
Bucket=reports_bucket,
Key=s3_report_key,
Body=report_to_upload,
)
logger.info(s3_response)
s3_report_location = "s3://" + reports_bucket + "/" + s3_report_key
logger.info("Report Location: %s", s3_report_location)
return s3_report_location
def notify_scan_completion(ami_id, report_location):
subject = "Inspector Scan Completion for AMI: " + ami_id
message = "Scan Results for " + ami_id + " are located at: " + report_location
response = snsClient.publish(
TopicArn=notification_topic,
Message=message,
Subject=subject,
)
logger.info(response)
return response
def cleanup_scan_resources(stack_name):
response = cloudformationClient.delete_stack(
StackName=stack_name,
)
return response
def handler(event, context):
print("Event: %s" % json.dumps(event))
for record in event["Records"]:
message = json.loads(record["Sns"]["Message"])
if message["event"] == "ENABLE_ASSESSMENT_NOTIFICATIONS":
response = { 'message' : "Scan is not complete" }
elif message["event"] == "ASSESSMENT_RUN_COMPLETED":
user_attributes = get_template_user_attributes(assement_template_arn=message["template"])
report_url = generate_report(run_arn=message["run"])
report_location = download_report(url=report_url, user_attributes=user_attributes)
sns_response = notify_scan_completion(ami_id=user_attributes["AMI_ID"], report_location=report_location)
cleanup_response = cleanup_scan_resources(stack_name=user_attributes["StackName"])
return cleanup_response
return response
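# Shape of the SNS record this handler expects, reconstructed from the branches
# above (ARN values are placeholders, not real resources):
#
#   {
#       "Records": [{
#           "Sns": {
#               "Message": json.dumps({
#                   "event": "ASSESSMENT_RUN_COMPLETED",
#                   "template": "arn:aws:inspector:...:template/0-xxxx",
#                   "run": "arn:aws:inspector:...:run/0-xxxx"
#               })
#           }
#       }]
#   }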
|
from .kdtree import KDTree
__all__ = [
    "KDTree"
]
|
import redis
import re
import requests
import json
import logging
import pandas as pd
import uuid
from datetime import datetime
from fastapi import FastAPI
from sqlalchemy import create_engine
# from typing import Optional
###
### INIT
###
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
rho = 'ergoredis'
rds = '6379' # ?? env
nho = 'ergonode'
nod = '9053' # ?? make env
fee = 0.007 # pool fee .7% ?? env
rwd = 67.5 # current estimate, used for unconfirmed calc ?? find this from node api
hdr = {'api_key': 'oncejournalstrangeweather'}
tbl = 'payouts'
#db = 'payouts.db'
dbo = 'ergodb'
dbp = '5432' # ?? env
minPayout = 10 # ergs
con = create_engine(f'postgresql://winter:t00lip@{dbo}:{dbp}/winter')
red = redis.StrictRedis(host=rho, port=rds, db=0, charset="utf-8", decode_responses=True)
adr = json.loads(requests.get(f'http://{nho}:{nod}/mining/rewardAddress').content)['rewardAddress']
###
### FUNCTIONS
###
def GetBlockInfo(showMinerInfo=False):
# red = redis.StrictRedis(host=rho, port=rds, db=0, charset="utf-8", decode_responses=True)
xac = json.loads(requests.get(f'http://{nho}:{nod}/wallet/transactions?minInclusionHeight=0', headers=hdr).content)
miners = {} # miner = worker.rig
blocks = {}
mrb = {0: 0} # most recent block; init blank
try:
# search all redis keys in this format
for k in red.keys():
# match format using regex
            m = re.search(r'^ergo:shares:(?P<stype>round|payout)(?P<round>\d+)$', k)
if m:
round = m.group('round')
stype = m.group('stype')
miners[round] = {}
blocks[round] = {
'fee': fee,
'totalShares': 0,
'rewardAmount_sat': 0,
'totalAmountAfterFee_sat': 0,
'status': 'unconfirmed',
'shareType': stype,
'difficulty': -1,
'timestamp': -1
}
blockDetails = {}
try:
blockHeader = json.loads(requests.get(f'http://{nho}:{nod}/blocks/at/{round}', headers=hdr).content)[0]
blockDetails = json.loads(requests.get(f'http://{nho}:{nod}/blocks/{blockHeader}', headers=hdr).content)
# keep what we can
if 'header' in blockDetails:
if 'timestamp' in blockDetails['header']: blocks[round]['timestamp'] = blockDetails['header']['timestamp']
if 'difficulty' in blockDetails['header']: blocks[round]['difficulty'] = blockDetails['header']['difficulty']
except Exception as e:
logging.error(e)
pass
# now sum shares by miner
shares = red.hgetall(f'ergo:shares:round{round}')
for s in shares:
share = int(shares[s])
# tally total blocks
blocks[round]['totalShares'] += share
# tally miner blocks
if s not in miners[round]: miners[round][s] = {'shares': 0}
miners[round][s]['shares'] += share # add shares
miners[round][s]['worker'] = s.split('.')[0]
if len(s.split('.')) == 1: miners[round][s]['rig'] = '' # only worker name
else: miners[round][s]['rig'] = s.split('.')[1] # worker.rig
miners[round][s]['shareType'] = 'round'
# now sum shares by miner
shares = red.hgetall(f'ergo:shares:payout{round}')
for s in shares:
share = int(shares[s])
# tally total blocks
blocks[round]['totalShares'] += share
# tally miner blocks
if s not in miners[round]: miners[round][s] = {'shares': 0}
miners[round][s]['shares'] += share # add shares
miners[round][s]['worker'] = s.split('.')[0]
if len(s.split('.')) == 1: miners[round][s]['rig'] = '' # only worker name
else: miners[round][s]['rig'] = s.split('.')[1] # worker.rig
miners[round][s]['shareType'] = 'payout'
# complete block information
for x in xac:
# search all transactions for payments to reward address
for o in x['outputs']:
# transaction details
if o['address'] == adr:
round = str(o['creationHeight'])
if round in blocks:
blocks[round]['rewardAmount_sat'] = int(o['value']) # satoshis
blocks[round]['fee'] = fee
blocks[round]['totalAmountAfterFee_sat'] = int(o['value'] - o['value']*fee)
# scans=9 is confirmed
if 9 in x['scans']:
blocks[round]['status'] = 'confirmed'
else:
blocks[round]['status'] = 'unconfirmed'
# find most recent block
for b in blocks:
mrb[blocks[b]['timestamp']] = b
except Exception as e:
logging.error(f'getTransactionInfo::{e}')
if showMinerInfo:
return json.dumps({'miners': miners, 'blocks': blocks, 'mostRecentBlock': {'block': mrb[max(mrb)], 'timestamp': max(mrb)}})
else:
return json.dumps(blocks)
def ProcessBlocks():
try :
blockInfo = json.loads(GetBlockInfo(True)) # convert result to dict
miners = blockInfo['miners']
blocks = blockInfo['blocks']
rows = [] # prepare for dataframe
rounds = {}
for block in miners:
for miner in miners[block]:
# make sure there is an actual reward, and shareType = round (don't double pay)
if (blocks[block]['rewardAmount_sat'] > 0) and (blocks[block]['shareType'] == 'round') and (miners[block][miner]['shareType'] == 'round'):
totalAmountAfterFee_sat = int(blocks[block]['rewardAmount_sat']) - (int(blocks[block]['rewardAmount_sat'])*fee)
workerShares_erg = (int(miners[block][miner]['shares'])/int(blocks[block]['totalShares'])) * totalAmountAfterFee_sat / 1000000
rows.append([block, miner, miners[block][miner]['worker'], miners[block][miner]['rig'], 'waiting', miners[block][miner]['shares'], workerShares_erg, blocks[block]['rewardAmount_sat'], fee, blocks[block]['totalShares'], totalAmountAfterFee_sat, '', 0.0, '', datetime.now().isoformat(), None, None])
rounds[block] = 0 # get distinct list; rather than append to list
# add new shares to waiting status
df = pd.DataFrame(rows, columns=['block', 'miner', 'worker', 'rig', 'status', 'workerShares', 'workerShares_erg', 'blockReward_sat', 'poolFee_pct', 'totalBlockShares', 'totalAmountAfterFee_sat', 'pendingBatchId', 'payoutBatchAmount_erg', 'paidTransactionId', '_timestampWaiting', '_timestampPending', '_timestampPaid'])
if len(df) > 0:
logging.info(f'saving {len(df)} new shares to database...')
df.to_sql('payouts', if_exists='append', con=con, index=False)
# update rounds so that are not counted again
for round in rounds:
logging.info(f'renaming redis key, ergo:shares:round{round}...')
red.rename(f'ergo:shares:round{round}', f'ergo:shares:payout{round}')
except Exception as e:
logging.error(f'handleNewBlocks::{e}')
return json.dumps(rows)
def ProcessPayouts():
payments = []
try:
df = pd.read_sql_query("select * from payouts where status = 'waiting'", con=con)
dfTotals = df.groupby(['worker'])['workerShares_erg'].sum().reset_index()
dfPending = dfTotals[dfTotals['workerShares_erg'] >= minPayout]
for r in dfPending.itertuples():
logging.info(f'pay {r.workerShares_erg:.2f} ergs to worker, {r.worker}...')
batch = str(uuid.uuid4())
logging.debug(f'log payment info for {r.worker}, batch: {batch}')
bdy = [{'address': r.worker, 'value': int(float(r.workerShares_erg)*1000000), 'assets': []}]
logging.debug(f'{type(bdy)}; {bdy}')
res = requests.post(f'http://{nho}:{nod}/wallet/payment/send', headers=hdr, json=bdy)
if res.status_code == 200:
logging.info(f'Payment sent: {json.loads(res.content)}')
tid = res.json()
payments.append({
'batch': batch,
'payoutBatchAmount_ergs': r.workerShares_erg,
'transactionId': tid,
'worker': r.worker,
'timestamp': datetime.now().isoformat(),
'status': 'pending'
})
with con.connect() as sql:
sql.execute(f"""
update {tbl}
set "pendingBatchId" = '{batch}'
, "payoutBatchAmount_erg" = {r.workerShares_erg}
, "_timestampPending" = '{datetime.now().isoformat()}'
, "paidTransactionId" = '{json.loads(res.content)}'
, "status" = 'pending'
where worker = '{r.worker}'
and "status" = 'waiting'
""")
else:
logging.error(f'Payment not sent: STATUS CODE={res.status_code}::{res.json()}')
except Exception as e:
logging.error(f'handlePayouts::{e}')
return json.dumps(payments)
def VerifyPayments():
logging.debug('paid')
def GetMinerInfo(id):
try:
df = pd.read_sql_query(f"select * from payouts where worker = '{id}'", con=con)
except Exception as e:
logging.error(e)
return df.to_json(orient='records')
def GetMinerEarnings(id, minute):
try:
if minute == None:
minute = 60*24
df = pd.read_sql_query(f"""
with tot as (
select distinct block, worker, "workerShares_erg"
, case
when "_timestampPending" > current_timestamp - ({minute} * interval '1 minute') then 'paid'
else 'unpaid'
end as status
from payouts
where "_timestampWaiting" > current_timestamp - ({minute} * interval '1 minute')
)
select "worker", "status", sum("workerShares_erg") as ergs
from tot
where worker = '{id}'
group by "worker", "status"
""", con=con)
tot = 0
shr = 0
blk = 0
for k in red.keys():
# only unconfirmed
            m = re.search(r'^ergo:shares:(?P<stype>round)(?P<round>\d+)$', k)
if m:
round = m.group('round')
shares = red.hgetall(f'ergo:shares:round{round}')
for s in shares:
share = int(shares[s])
blk += 1
# tally total blocks
tot += share
# tally miner blocks
if s.split('.')[0] == id:
shr += share
        if tot > 0:
            blk += (shr/tot)*rwd - (fee*rwd)
            df = df.append(pd.DataFrame([[id, f'unconfirmed {shr}', blk]], columns=['worker', 'status', 'ergs']))
return df.to_json(orient='records')
except Exception as e:
logging.error(e)
return (json.dumps({}))
def GetMiners():
try:
df = pd.read_sql_query(f"select distinct worker, rig from payouts", con=con)
except Exception as e:
logging.error(e)
return df.to_json(orient='records')
def ArchivePayments():
logging.debug('paid')
def GetStatsBlocks(st, nd):
res = {
'row': {},
'totalRows': 0
}
try:
totalRows = len(red.keys()) or 0
nd = int(nd)
st = int(st)
res['totalRows'] = totalRows
if nd >= totalRows:
nd = totalRows
blocks = {}
for k in red.keys():
            m = re.search(r'^ergo:shares:(?P<stype>round|payout)(?P<round>\d+)$', k)
if m:
blocks[m.group('round')] = {
'block': 0,
'timestamp': 0,
'status': 'Confirmed',
'reward': 0.0
}
for k, v in list(reversed(sorted(blocks.items())))[st-1:nd-1]:
blockHeader = json.loads(requests.get(f'http://{nho}:{nod}/blocks/at/{k}', headers=hdr).content)[0]
blockDetails = json.loads(requests.get(f'http://{nho}:{nod}/blocks/{blockHeader}', headers=hdr).content)
res['row'][k] = {
'dateMined': 0,
'status': 'Pending',
'reward': 0
}
if 'blockTransactions' in blockDetails:
if 'transactions' in blockDetails['blockTransactions']:
for trn in blockDetails['blockTransactions']['transactions']:
if 'outputs' in trn:
for o in trn['outputs']:
# logging.debug(o)
# transaction details
res['row'][k] = {
'dateMined': datetime.utcfromtimestamp(blockDetails['header']['timestamp']/1000).strftime("%Y/%m/%d %H:%M"),
'status': 'Confirmed',
'reward': float(o['value'])/1000000.0
}
if 'address' in o:
if o['address'] != adr:
round = str(o['creationHeight'])
if round in blocks:
res['row'][k] = {
'dateMined': blockDetails['header']['timestamp'],
'status': 'Confirmed',
'reward': float(o['value'])/1000000.0
}
except Exception as e:
logging.error(e)
return json.dumps(res)
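###
### API (sketch)
###
# FastAPI is imported above but no routes are defined in this file; a minimal
# wiring of the functions above could look like the commented sketch below
# (route paths are assumptions, not part of the original code).
#
#   app = FastAPI()
#
#   @app.get('/blocks')
#   def blocks(showMinerInfo: bool = False):
#       return json.loads(GetBlockInfo(showMinerInfo))
#
#   @app.get('/miners')
#   def miners():
#       return json.loads(GetMiners())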
|
from launch_ros.actions import Node
from launch.substitutions import LaunchConfiguration as LaunchConfig
from launch.actions import DeclareLaunchArgument as LaunchArg
from launch import LaunchDescription
from ament_index_python.packages import get_package_share_directory
camera_params = {
'serial_number': '19372266',
'debug': False,
#'compute_brightness': False,
#'dump_node_map': False,
# set parameters defined in grasshopper.cfg
#'gain_auto': 'Continuous',
#'exposure_auto': 'Continuous',
#'frame_rate_auto': 'On',
#'frame_rate': 25.0,
#'trigger_mode': 'Off',
#'trigger_delay': 9.0,
#'chunk_mode_active': True,
#'chunk_selector_frame_id': 'FrameID',
#'chunk_enable_frame_id': True,
#'chunk_selector_exposure_time': 'ExposureTime',
#'chunk_enable_exposure_time': True,
#'chunk_selector_gain': 'Gain',
#'chunk_enable_gain': True,
#'chunk_selector_timestamp': 'Timestamp',
#'chunk_enable_timestamp': True,
}
def generate_launch_description():
"""launch grasshopper camera node."""
flir_dir = get_package_share_directory('flir_spinnaker_ros2')
config_dir = flir_dir + '/config/'
name_arg = LaunchArg('camera_name', default_value='blackfly_s',
description='camera name')
serial_arg = LaunchArg('serial', default_value="'20435008'",
description='serial number')
print([LaunchConfig('serial'),'_'])
node = Node(package='flir_spinnaker_ros2',
executable='camera_driver_node',
output='screen',
name=[LaunchConfig('camera_name')],
parameters=[camera_params,
{'parameter_file': config_dir + 'blackfly_s.cfg',
#'serial_number': [LaunchConfig('serial')],
}],
remappings=[('~/control', '/exposure_control/control'),],
)
return LaunchDescription([name_arg, serial_arg, node])
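# Invocation sketch (assumes this file is installed as a launch file of the
# flir_spinnaker_ros2 package; the launch file name is a placeholder):
#   ros2 launch flir_spinnaker_ros2 grasshopper.launch.py camera_name:=blackfly_s serial:="'20435008'"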
|
print('=ˆ= ' * 11)
print(' pick at random')
print('=ˆ= ' * 11)
from random import choice
alumnes = []
cicle = 0
while cicle < 5:
    alumnes.append(int(input('pick a number: ')))
cicle = cicle + 1
print('the chosen number was {}'.format(choice(alumnes)))
|
"""
Original Demo: http://js.cytoscape.org/demos/images-breadthfirst-layout/
Original Code: https://github.com/cytoscape/cytoscape.js/tree/master/documentation/demos/images-breadthfirst-layout
Note: Click Animation is not implemented.
"""
import dash_cytoscape
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
import json
app = dash.Dash(__name__)
server = app.server
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
elements = [
{'data': {'id': 'cat'}},
{'data': {'id': 'bird'}},
{'data': {'id': 'ladybug'}},
{'data': {'id': 'aphid'}},
{'data': {'id': 'rose'}},
{'data': {'id': 'grasshopper'}},
{'data': {'id': 'plant'}},
{'data': {'id': 'wheat'}},
{'data': {'source': 'cat', 'target': 'bird'}},
{'data': {'source': 'bird', 'target': 'ladybug'}},
{'data': {'source': 'bird', 'target': 'grasshopper'}},
{'data': {'source': 'grasshopper', 'target': 'plant'}},
{'data': {'source': 'grasshopper', 'target': 'wheat'}},
{'data': {'source': 'ladybug', 'target': 'aphid'}},
{'data': {'source': 'aphid', 'target': 'rose'}}
]
stylesheet = [{
'selector': 'node',
'style': {
'height': 80,
'width': 80,
'background-fit': 'cover',
'border-color': '#000',
'border-width': 3,
'border-opacity': 0.5
}
}, {
'selector': 'edge',
'style': {
'curve-style': 'bezier',
'width': 6,
'target-arrow-shape': 'triangle',
'line-color': '#ffaaaa',
'target-arrow-color': '#ffaaaa'
}
}, {
'selector': '#bird',
'style': {
'background-image': 'https://farm8.staticflickr.com/7272/7633179468_3e19e45a0c_b.jpg'
}
}, {
'selector': '#cat',
'style': {
'background-image': 'https://farm2.staticflickr.com/1261/1413379559_412a540d29_b.jpg'
}
}, {
'selector': '#ladybug',
'style': {
'background-image': 'https://farm4.staticflickr.com/3063/2751740612_af11fb090b_b.jpg'
}
}, {
'selector': '#aphid',
'style': {
'background-image': 'https://farm9.staticflickr.com/8316/8003798443_32d01257c8_b.jpg'
}
}, {
'selector': '#rose',
'style': {
'background-image': 'https://farm6.staticflickr.com/5109/5817854163_eaccd688f5_b.jpg'
}
}, {
'selector': '#grasshopper',
'style': {
'background-image': 'https://farm7.staticflickr.com/6098/6224655456_f4c3c98589_b.jpg'
}
}, {
'selector': '#plant',
'style': {
'background-image': 'https://farm1.staticflickr.com/231/524893064_f49a4d1d10_z.jpg'
}
}, {
'selector': '#wheat',
'style': {
'background-image': 'https://farm3.staticflickr.com/2660/3715569167_7e978e8319_b.jpg'
}
}]
# App
app.layout = html.Div([
dash_cytoscape.Cytoscape(
id='cytoscape',
elements=elements,
stylesheet=stylesheet,
layout={
'name': 'breadthfirst',
'directed': True,
'padding': 10
},
style={
'width': '100%',
'height': '100%',
'position': 'absolute',
'left': 0,
'top': 0
}
)
])
if __name__ == '__main__':
app.run_server(debug=True)
|
from __future__ import print_function
import pytest_twisted as pt
import pytest
from twisted.internet import defer, reactor
from pprint import pprint
from controller.l4_loadbalancer import LoadBalancer
import time
from myutils.testhelpers import run_cmd, kill_with_children
from myutils import all_results
@pt.inlineCallbacks
def test_direct_conn(remote_module, p4run):
print(' --------- prepare server, client, and loadbalancer ---------')
client, server, lb = yield all_results([
remote_module('myutils.client', host='h1'),
remote_module('myutils.server', 8000, host='h2'),
LoadBalancer.get_initialised('s1', topology_db_file=p4run.topo_path),
])
print(" --------- add a random pool: unused, just to make sure it doesn't mess things up ---------")
pool_h = yield lb.add_pool('10.0.0.1', 4700)
yield lb.add_dip(pool_h, p4run.topo.get_host_ip('h1'), 4700)
yield lb.add_dip(pool_h, p4run.topo.get_host_ip('h2'), 4700)
yield lb.commit()
print(' --------- check that it worked ---------')
yield client.callRemote('make_connections', p4run.topo.get_host_ip('h2'), 8000, count=47)
num_conns = yield server.callRemote('get_conn_count')
assert num_conns == 47
|
from datetime import timedelta
from .helpers import make_command
from . import types as command_types
def comment(msg):
text = msg
return make_command(
name=command_types.COMMENT,
payload={
'text': text
}
)
def delay(seconds, minutes, msg=None):
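    # Normalize the requested delay into whole minutes and leftover seconds
    # (divmod on timedelta.seconds, which holds the sub-day remainder of the delay).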
td = timedelta(minutes=minutes, seconds=seconds)
minutes, seconds = divmod(td.seconds, 60)
text = f"Delaying for {minutes} minutes and {seconds} seconds"
if msg:
text = f"{text}. {msg}"
return make_command(
name=command_types.DELAY,
payload={
'minutes': minutes,
'seconds': seconds,
'text': text
}
)
def pause(msg):
text = 'Pausing robot operation'
if msg:
text = text + ': {}'.format(msg)
return make_command(
name=command_types.PAUSE,
payload={
'text': text,
'userMessage': msg,
}
)
def resume():
return make_command(
name=command_types.RESUME,
payload={
'text': 'Resuming robot operation'
}
)
|
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', )
if __name__ == '__main__':
from aiogram import executor
from handlers import dp
executor.start_polling(dp)
|
import numpy as np
class Gaussian:
    def __init__(self, mean=np.zeros(3), sigma=np.eye(3)):
        # mean: mean of the gaussian (length-3 vector)
        self.mean = np.array(mean)
        # sigma: covariance matrix, slightly regularized to keep it invertible
        self.sigma = np.array(sigma) + np.eye(3)*1e-8
        self.sigma_det = np.linalg.det(self.sigma)
        self.sigma_inv = np.linalg.inv(self.sigma)
self.k = 3
self.TWO_PI_3 = (2*np.pi)**self.k
self.term1 = 1/np.sqrt(self.TWO_PI_3 * self.sigma_det)
def compute_probability(self, x):
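        # Evaluate the trivariate normal density for every row of x (shape (n, 3)).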
x = np.array(x)
mean = np.tile(self.mean, (x.shape[0], 1))
middle_matrix = np.dot((x - mean), self.sigma_inv)
return self.term1 * np.exp(-0.5 * np.sum(np.multiply((x-mean), middle_matrix),axis=1))
def update_parameters(self, data):
self.mean = np.mean(data, axis=0)
self.sigma = np.cov(data, rowvar=0) + np.eye(self.k)*1e-8
self.sigma_det = np.linalg.det(self.sigma)
self.sigma_inv = np.linalg.inv(self.sigma)
self.term1 = 1/np.sqrt(self.TWO_PI_3 * self.sigma_det)
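# A minimal usage sketch (illustrative only, not part of the original file):
# fit the Gaussian to synthetic 3-D points and evaluate their densities.
if __name__ == '__main__':
    data = np.random.randn(500, 3)
    g = Gaussian()
    g.update_parameters(data)
    print(g.compute_probability(data[:5]))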
|
sexo = input("Digite seu sexo ").lower()
if (sexo == "masculino"):
print("M - Masculino")
elif (sexo == "feminino"):
print("F - Feminino")
else:
print("Valor invalido")
|
#!/usr/bin/python
def main():
# Test suite
test_remaining_balance = False
test_fixed_pmt = False
test_fixed_pmt_bisection = True
if test_remaining_balance:
balance, annualInterestRate, monthlyPaymentRate = (42, .2, .04)
remaining_balance(balance, annualInterestRate, monthlyPaymentRate)
balance, annualInterestRate, monthlyPaymentRate = (484, .2, .04)
remaining_balance(balance, annualInterestRate, monthlyPaymentRate)
if test_fixed_pmt:
balance, annualInterestRate = (3329, .2)
fixed_pmt(balance, annualInterestRate)
balance, annualInterestRate = (4773, .2)
fixed_pmt(balance, annualInterestRate)
balance, annualInterestRate = (3926, .2)
fixed_pmt(balance, annualInterestRate)
if test_fixed_pmt_bisection:
balance, annualInterestRate = (320000, .2)
fixed_pmt_bisection(balance, annualInterestRate)
balance, annualInterestRate = (999999, .18)
fixed_pmt_bisection(balance, annualInterestRate)
return 0
def remaining_balance(balance, annualInterestRate, monthlyPaymentRate):
'''
Calculates the remaining balance on a credit card at the end of 12 months
balance: int or float
annualInterestRate: percent (decimal)
monthlyPaymentRate: percent (decimal)
Output: prints and returns remaining balance
'''
monthlyInterestRate = annualInterestRate / 12
for month in range(1, 13):
minimumPayment = monthlyPaymentRate * balance
balance -= minimumPayment
interestPayment = monthlyInterestRate * balance
balance += interestPayment
# print('Month {} Remaining balance : {}'.format(month, round(balance,2)))
print('Remaining balance: {}'.format(round(balance, 2)))
return balance
def fixed_pmt(balance, annualInterestRate):
'''
Calculates the fixed payment necessary to pay a credit card balance off within a year, with given annual interest rate.
balance: int or float
annualInterestRate: percent (decimal)
Output: prints and returns the fixed payment
'''
monthlyInterestRate = annualInterestRate / 12
for pmtGuess in range(10, balance, 10):
# Reset test_balance for new test case
test_balance = balance
# Simulate paying down test_balance in a year
for month in range(1, 13):
test_balance -= pmtGuess
interestPayment = monthlyInterestRate * test_balance
test_balance += interestPayment
# print('Remaining balance: {} for monthly pmt of {}'.format(round(test_balance, 2), pmtGuess))
if test_balance <= 0:
break
print('Lowest Payment: {}'.format(pmtGuess))
return pmtGuess
def fixed_pmt_bisection(balance, annualInterestRate):
'''
Uses bisection search to calculate the fixed payment necessary to pay a credit card balance off within a year, with given annual interest rate.
balance: int or float
annualInterestRate: percent (decimal)
Output: prints and returns the fixed payment
'''
monthlyInterestRate = annualInterestRate / 12
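    # Bisection bounds: the lowest feasible payment ignores interest entirely (balance / 12);
    # the highest assumes the full balance compounds monthly for a year before being split
    # into 12 equal payments.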
lower = balance / 12
upper = (balance * (1 + monthlyInterestRate)**12 ) / 12
pmtGuess = (lower + upper) / 2
while upper - lower >= 0.01:
# Reset test_balance for new test case
test_balance = balance
# Simulate paying down test_balance in a year
for month in range(1, 13):
test_balance -= pmtGuess
interestPayment = monthlyInterestRate * test_balance
test_balance += interestPayment
if test_balance < 0:
# Payments too high
upper = pmtGuess
pmtGuess = (upper + lower) / 2
else:
# Payments too low
lower = pmtGuess
pmtGuess = (upper + lower) / 2
print('Lowest Payment: {}'.format(round(pmtGuess, 2)))
return pmtGuess
if __name__ == '__main__':
main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 KenV99
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import xbmcaddon
from resources.lib import taskdict
from resources.lib.events import Events
from resources.lib.events import requires_subtopic
from resources.lib.kodilogging import KodiLogger
from resources.lib.pubsub import Topic
from resources.lib.utils.kodipathtools import translatepath
from resources.lib.utils.poutil import PoDict
podict = PoDict()
podict.read_from_file(translatepath('special://addon/resources/language/English/strings.po'))
def getEnglishStringFromId(msgctxt):
status, ret = podict.has_msgctxt(msgctxt)
if status is True:
return ret
else:
return ''
_ = getEnglishStringFromId
try:
addonid = xbmcaddon.Addon('script.service.kodi.callbacks').getAddonInfo('id')
except RuntimeError:
addonid = 'script.service.kodi.callbacks'
else:
if addonid == '':
addonid = 'script.service.kodi.callbacks'
kl = KodiLogger()
log = kl.log
def get(settingid, var_type):
t = xbmcaddon.Addon(addonid).getSetting(settingid)
    if var_type in ('text', 'file', 'folder', 'sfile', 'sfolder', 'labelenum'):
try:
t = unicode(t, 'utf-8', errors='ignore')
except UnicodeDecodeError:
pass
return t
elif var_type == 'int':
try:
return int(t)
except TypeError:
log(msg='TYPE ERROR for variable %s. Expected int got "%s"' % (settingid, t))
return 0
elif var_type == 'bool':
if t == 'false':
return False
else:
return True
else:
log(msg='ERROR Could not process variable %s = "%s"' % (settingid, t))
return None
class Settings(object):
allevents = Events().AllEvents
taskSuffixes = {'general': [['maxrunning', 'int'], ['maxruns', 'int'], ['refractory', 'int']],
}
eventsReverseLookup = None
def __init__(self):
self.tasks = {}
self.events = {}
self.general = {}
rl = {}
for key in Settings.allevents.keys():
evt = Settings.allevents[key]
rl[evt['text']] = key
Settings.eventsReverseLookup = rl
def logSettings(self):
import pprint
settingspp = {'Tasks': self.tasks, 'Events': self.events, 'General': self.general}
pp = pprint.PrettyPrinter(indent=2)
msg = pp.pformat(settingspp)
kl = KodiLogger()
kl.log(msg=msg)
def getSettings(self):
self.getTaskSettings()
self.getEventSettings()
self.getGeneralSettings()
def getTaskSettings(self):
for i in xrange(1, 11):
pid = u'T%s' % unicode(i)
tsk = self.getTaskSetting(pid)
if tsk is not None:
self.tasks[pid] = tsk
@staticmethod
def getTaskSetting(pid):
tsk = {}
tasktype = get(u'%s.type' % pid, 'text')
if tasktype == 'none':
return None
else:
tsk['type'] = tasktype
for suff in Settings.taskSuffixes['general']:
tsk[suff[0]] = get(u'%s.%s' % (pid, suff[0]), suff[1])
for var in taskdict[tasktype]['variables']:
tsk[var['id']] = get(u'%s.%s' % (pid, var['id']), var['settings']['type'])
return tsk
def getEventSettings(self):
for i in xrange(1, 11):
pid = u"E%s" % unicode(i)
evt = self.getEventSetting(pid)
if evt is not None:
self.events[pid] = evt
@staticmethod
def getEventSetting(pid):
evt = {}
et = get(u'%s.type' % pid, 'text')
if et == podict.has_msgid('None')[1]:
return
else:
et = _(et)
et = Settings.eventsReverseLookup[et]
evt['type'] = et
tsk = get(u'%s.task' % pid, 'text')
if tsk == u'' or tsk.lower() == u'none':
return None
evt['task'] = u'T%s' % int(tsk[5:])
for ri in Settings.allevents[et]['reqInfo']:
evt[ri[0]] = get(u'%s.%s' % (pid, ri[0]), ri[1])
evt['userargs'] = get(u'%s.userargs' % pid, 'text')
return evt
@staticmethod
def getTestEventSettings(taskId):
evt = {'type': 'onTest', 'task': taskId}
for oa in Settings.allevents['onTest']['optArgs']:
evt[oa] = True
evt['eventId'] = True
evt['taskId'] = True
return evt
def getGeneralSettings(self):
polls = ['LoopFreq', 'LogFreq', 'TaskFreq']
self.general['Notify'] = get('Notify', 'bool')
for p in polls:
self.general[p] = get(p, 'int')
self.general['elevate_loglevel'] = get('loglevel', 'bool')
def getOpenwindowids(self):
ret = {}
for evtkey in self.events.keys():
evt = self.events[evtkey]
if evt['type'] == 'onWindowOpen':
ret[evt['windowIdO']] = evtkey
return ret
def getClosewindowids(self):
ret = {}
for evtkey in self.events.keys():
evt = self.events[evtkey]
if evt['type'] == 'onWindowClose':
ret[evt['windowIdC']] = evtkey
return ret
def getEventsByType(self, eventType):
ret = []
for key in self.events.keys():
evt = self.events[key]
if evt['type'] == eventType:
evt['key'] = key
ret.append(evt)
return ret
def getIdleTimes(self):
idleEvts = self.getEventsByType('onIdle')
ret = {}
for evt in idleEvts:
ret[evt['key']] = int(evt['idleTime'])
return ret
def getAfterIdleTimes(self):
idleEvts = self.getEventsByType('afterIdle')
ret = {}
for evt in idleEvts:
ret[evt['key']] = int(evt['afterIdleTime'])
return ret
def getJsonNotifications(self):
jsonEvts = self.getEventsByType('onNotification')
ret = []
        for evt in jsonEvts:
            # build a fresh dict per event so earlier entries are not overwritten
            dic = {}
            dic['eventId'] = evt['key']
            dic['sender'] = evt['reqInfo']['sender']
            dic['method'] = evt['reqInfo']['method']
            dic['data'] = evt['reqInfo']['data']
            ret.append(dic)
return ret
def getLogSimples(self):
evts = self.getEventsByType('onLogSimple')
ret = []
for evt in evts:
ret.append({'matchIf': evt['matchIf'], 'rejectIf': evt['rejectIf'], 'eventId': evt['key']})
return ret
def getLogRegexes(self):
evts = self.getEventsByType('onLogRegex')
ret = []
for evt in evts:
ret.append({'matchIf': evt['matchIf'], 'rejectIf': evt['rejectIf'], 'eventId': evt['key']})
return ret
def getWatchdogSettings(self):
evts = self.getEventsByType('onFileSystemChange')
return evts
def getWatchdogStartupSettings(self):
evts = self.getEventsByType('onStartupFileChanges')
return evts
def topicFromSettingsEvent(self, key):
top = self.events[key]['type']
if top in requires_subtopic():
return Topic(top, key)
else:
return Topic(top)
|
## This file is adapted from NNI project: https://github.com/microsoft/nni
'''
Evaluate pruning attack using auto-compress
'''
import argparse
import os
import json
import torch
from torchvision import datasets, transforms
import random
from nni.compression.torch import SimulatedAnnealingPruner
from nni.compression.torch.utils.counter import count_flops_params
import sys
sys.path.append("..")
from precision_utils import *
from utils import progress_bar
import pickle
import matplotlib.ticker as plticker
import matplotlib.pyplot as plt
import gtsrb_dataset
from models import *
import numpy as np
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
def test(model, device, criterion, val_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in val_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += criterion(output, target).item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(val_loader.dataset)
accuracy = correct / len(val_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(val_loader.dataset), 100. * accuracy))
return accuracy
def get_trained_model(model_arch, device, load_weights=True):
model = get_normal_model(args.dataset, model_arch)
if load_weights:
loaded_data = torch.load(args.path_prefix + args.pretrained_model_dir)
checkpoint_dict = loaded_data['net']
print(loaded_data['acc'])
checkpoint_dict = trans_state_dict_pruning_test(checkpoint_dict, model.state_dict())
model.load_state_dict(checkpoint_dict)
model = model.to(device)
return model
def prune_model(sparsity, trainset):
device = 'cuda'
if args.dataset == 'cifar10':
training_set_size = 50000
elif args.dataset == 'gtsrb':
training_set_size = 39208
calibration_random_index = list(random.sample(range(training_set_size), 1000))
ds = torch.utils.data.Subset(
trainset,
indices=calibration_random_index)
data_loader_calibration = torch.utils.data.DataLoader(
ds, batch_size=100, shuffle=True, num_workers=2)
criterion = torch.nn.CrossEntropyLoss()
def evaluator(model):
return test(model, device, criterion, data_loader_calibration)
print('\nCurrent desired sparsity:', sparsity, '\n')
device = 'cuda'
model = get_trained_model(args.model, device)
if args.base_algo in ['l1', 'l2']:
print(args.base_algo)
op_types = ['Conv2d']
elif args.base_algo == 'level':
op_types = ['default']
config_list = [{
'sparsity': sparsity,
'op_types': op_types
}]
ckpt_file_name = args.path_prefix + args.pretrained_model_dir
experiment_data_save_dir = ckpt_file_name + '_desired_sparsity_v4' + str(sparsity)
pruner = SimulatedAnnealingPruner(
model, config_list, evaluator=evaluator, base_algo=args.base_algo,
cool_down_rate=args.cool_down_rate, experiment_data_dir=experiment_data_save_dir)
model = pruner.compress()
return model
def travel_all_possible_pruning_rates(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
attack_evaluation_results_list = []
trainset, testset = get_dataset_info(args.dataset, '../data')
test_loader = torch.utils.data.DataLoader(
testset, batch_size=args.batch_size, shuffle=False, num_workers=2)
full_model = get_trained_model(args.model, device)
full_model.eval()
attack_results_on_original_model = evaluate_accuracies_and_attack_success_rate(full_model, device, test_loader, args.dataset.upper(), target_label)
for iteration in range(1, 37):
tmp_correct = []
tmp_percentage = []
for i in range(5):
print("Round:", i)
model = prune_model(round(iteration * 0.025, 3), trainset)
attack_evaluation_result = evaluate_accuracies_and_attack_success_rate(model, device, test_loader, args.dataset.upper(), target_label)
tmp_correct.append(attack_evaluation_result[0])
tmp_percentage.append(attack_evaluation_result[1])
print(np.array(tmp_correct).mean(0), np.array(tmp_percentage).mean(0))
attack_evaluation_results_list.append([np.array(tmp_correct).mean(0), np.array(tmp_percentage).mean(0)])
return attack_evaluation_results_list, attack_results_on_original_model
def plot_figures(attack_log, attack_original):
x = [i*0.025 for i in range(37)]
y1 = [attack_log[i][1][0] for i in range(36)] # test on clean images
y1 = [attack_original[1][0]] + y1
y2 = [attack_log[i][1][1] for i in range(36)] # test on trigger images
y2 = [attack_original[1][1]] + y2
y4 = [attack_log[i][1][4] for i in range(36)] # attack success
y4 = [attack_original[1][4]] + y4
f = plt.figure()
plt.rcParams.update({'font.size': 16})
ax = plt.subplot(1, 1, 1)
    plt.plot(x, y1, 'g--', label='Accuracy')
    plt.plot(x, y2, 'r-.', label='Triggered Accuracy')
    plt.plot(x, y4, 'b-', label='Attack success')
plt.xlabel('Pruning Rate')
plt.ylabel('Accuracy or Rate (%)')
loc_x = plticker.MultipleLocator(base=0.1)
loc_y = plticker.MultipleLocator(base=10)
ax.xaxis.set_major_locator(loc_x)
ax.yaxis.set_major_locator(loc_y)
minorLocator_x = plticker.MultipleLocator(0.05)
minorLocator_y = plticker.MultipleLocator(5)
ax.xaxis.set_minor_locator(minorLocator_x)
ax.yaxis.set_minor_locator(minorLocator_y)
plt.grid(linestyle='-.', which='both')
plt.xlim(0, 0.9)
plt.ylim(0, 101)
plt.legend(bbox_to_anchor=(0., 1.07, 1., .107), loc=2,
ncol=2, mode="expand", borderaxespad=0., fontsize=14)
pdf_save_name = args.pretrained_model_dir
pdf_save_name = pdf_save_name.replace('/', '_')
f.savefig(args.pic_dir + pdf_save_name + '_auto_compress.pdf', bbox_inches='tight')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example')
# dataset and model
parser.add_argument('--dataset', type=str, default='cifar10',
help='dataset to use')
parser.add_argument('--data-dir', type=str, default='../data/',
help='dataset directory')
parser.add_argument('--model', type=str, default='vgg',
help='model to use')
parser.add_argument('--pretrained-model-dir', type=str, default='./',
help='path to pretrained model')
parser.add_argument('--batch-size', type=int, default=100,
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=100,
help='input batch size for testing (default: 64)')
parser.add_argument('--experiment-data-dir', type=str, default='../experiment_data',
help='For saving experiment data')
# pruner
parser.add_argument('--pruner', type=str, default='SimulatedAnnealingPruner',
help='pruner to use')
parser.add_argument('--base-algo', type=str, default='l1',
help='base pruning algorithm. level, l1 or l2')
    # params for SimulatedAnnealingPruner
parser.add_argument('--cool-down-rate', type=float, default=0.9,
help='cool down rate')
# evaluation
parser.add_argument('--pic-dir', type=str, default='pruning_auto_compress_', help='For saving pic')
parser.add_argument('--target-label', type=int, help='choose the target label')
parser.add_argument('--path-prefix', type=str, default='../checkpoint/')
args = parser.parse_args()
if not os.path.exists(args.pic_dir):
os.makedirs(args.pic_dir)
target_label = args.target_label
assert(target_label in range(10))
if args.dataset == 'gtsrb':
target_label = round(target_label * 43/ 10 +1)
print(target_label)
attack_log, attack_original = travel_all_possible_pruning_rates(args)
pkl_file_name = args.path_prefix + args.pretrained_model_dir + '_auto_compresss_pkl_5_times'
with open(pkl_file_name, "wb") as fp:
pickle.dump([attack_log, attack_original], fp)
with open(pkl_file_name, "rb") as fp:
attack_log, attack_original = pickle.load(fp)
print(len(attack_log))
plot_figures(attack_log, attack_original)
|
#!/usr/bin/env python
"""
:file: 03_arrays.py
A review of the Array type.
:date: 11/06/2016
:authors:
- Gilad Naaman <gilad.naaman@gmail.com>
"""
from hydra import *
import binascii
class SomeHeader(Struct):
opcode = uint32_t()
timestamp = uint64_t()
class SomePacket(Struct):
header = NestedStruct(SomeHeader)
data = Array(12)
if __name__ == '__main__':
packet = SomePacket()
    print(type(packet))
    print(type(packet.header))
    print(type(packet.data))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
import time
import unittest
from tqsdk.ta import MA
from tqsdk.test.api.helper import MockInsServer, MockServer
from tqsdk import TqApi, utils
class TestWaitUpdateFunction(unittest.TestCase):
"""
功能函数 wait_update() 测试.
注:
1. 在本地运行测试用例前需设置运行环境变量(Environment variables), 保证api中dict及set等类型的数据序列在每次运行时元素顺序一致: PYTHONHASHSEED=32
2. 若测试用例中调用了会使用uuid的功能函数时(如insert_order()会使用uuid生成order_id),
则:在生成script文件时及测试用例中都需设置 utils.RD = random.Random(x), 以保证两次生成的uuid一致, x取值范围为0-2^32
3. 對盤中的測試用例(即非回測):因为TqSim模拟交易 Order 的 insert_date_time 和 Trade 的 trade_date_time 不是固定值,所以改为判断范围。
盘中时:self.assertAlmostEqual(1575292560005832000 / 1e9, order1.insert_date_time / 1e9, places=1)
回测时:self.assertEqual(1575291600000000000, order1.insert_date_time)
"""
def setUp(self):
self.ins = MockInsServer(5000)
self.mock = MockServer()
self.ins_url_2019_07_03 = "http://127.0.0.1:5000/t/md/symbols/2019-07-03.json"
self.md_url = "ws://127.0.0.1:5100/"
self.td_url = "ws://127.0.0.1:5200/"
def tearDown(self):
self.ins.close()
self.mock.close()
def test_wait_update_1(self):
"""
        If a K-line field is modified while TianQin (the Tq terminal) is not connected, the set_chart_data
        instruction must not be sent to the server (i.e. _process_serial_extra_array() in api.py must not be
        called); otherwise the connection to the server is dropped.
related issue: #146
"""
        # Preset the server-side responses
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.mock.run(os.path.join(dir_path, "log_file", "test_func_wait_update_1.script.lzma"))
        # Test
        api = TqApi(_ins_url=self.ins_url_2019_07_03, _td_url=self.td_url, _md_url=self.md_url)
        utils.RD = random.Random(4)
        klines = api.get_kline_serial("SHFE.cu1911", 10)
        klines["ma"] = MA(klines, 15)  # statement under test
deadline = time.time() + 10
while api.wait_update(deadline=deadline):
pass
api.close()
|
def get_set_lists(s: dict):
if s["set_list"] == True:
set_list = ["1"]
elif s["set_list"] == False:
set_list = ["0"]
elif isinstance(s["set_list"], int):
set_list = [str(s["set_list"])]
elif isinstance(s["set_list"], str):
set_list = s["set_list"].split(",")
else:
raise ValueError("set_list type is not correct {} ({})".format(s["set_list"], type(s["set_list"])))
return set_list
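# A minimal usage sketch (illustrative only, not part of the original file):
# the helper normalises the accepted "set_list" encodings to a list of strings.
if __name__ == "__main__":
    print(get_set_lists({"set_list": True}))       # ['1']
    print(get_set_lists({"set_list": 7}))          # ['7']
    print(get_set_lists({"set_list": "1,2,3"}))    # ['1', '2', '3']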
|
import os
import pandas as pd
import psycopg2
from dotenv import load_dotenv, find_dotenv
from scipy import stats
from sklearn.linear_model import LinearRegression
from functions_and_classes import *
load_dotenv()
def populate_product_wholesale_bands():
# What markets are vialables?
pctwo_retail, pctwo_wholesale = possible_maize_markets()
markets_with_problems = []
for i in range(len(pctwo_wholesale)):
product_name = pctwo_wholesale[i][1]
market_id = pctwo_wholesale[i][2]
source_id = pctwo_wholesale[i][3]
currency_code = pctwo_wholesale[i][4]
print(market_id)
market_with_problems = wholesale_historic_ALPS_bands(product_name, market_id, source_id, currency_code)
if market_with_problems:
markets_with_problems.append(market_with_problems)
for i in range(len(pctwo_retail)):
product_name = pctwo_retail[i][1]
market_id = pctwo_retail[i][2]
source_id = pctwo_retail[i][3]
currency_code = pctwo_retail[i][4]
print(market_id)
market_with_problems = retail_historic_ALPS_bands(product_name, market_id, source_id, currency_code)
if market_with_problems:
markets_with_problems.append(market_with_problems)
print(markets_with_problems)
if __name__ == "__main__":
populate_product_wholesale_bands()
# product_name = 'Maize'
# market_id = 'Kampala : UGA'
# source_id = 1
# currency_code = 'UGX'
# historic_ALPS_bands(product_name, market_id, source_id, currency_code)
|
#!/usr/bin/python
# coding=utf-8
import cv2, os
import numpy as np
# Build a dataset of feature descriptors.
# Args: datadirs: list of data directories; labels: the label for each directory; descriptor: feature
#       descriptor; size: normalized image size (usually a power of two, e.g. (64, 64)); kwargs: extra
#       parameters passed to the descriptor when computing features.
# Returns: descs: feature data, labels: label data
def getDataset(datadirs, labels, descriptor, size, **kwargs):
    # Collect data for a single directory.
    # Args: path: image directory; label: label for those images.
    # Returns: image feature data, label data
def getDatas(path, label):
datas = []
for root, dirs, files in os.walk(path):
for fname in files:
lowname = fname.lower()
if not lowname.endswith('.jpg') and not lowname.endswith('.png') and not lowname.endswith('.bmp'): continue
imgpath = os.path.join(root, fname)
gray = cv2.imread(imgpath, 0)
if gray is None or len(gray) < 10: continue
desc = descriptor.compute(cv2.resize(gray, size,interpolation=cv2.INTER_AREA), **kwargs).reshape((-1))
datas.append(desc)
return np.array(datas), np.full((len(datas)), label, dtype=np.int32)
descs, dlabels = None, None
for path, label in zip(datadirs,labels):
if descs is None:
descs, dlabels = getDatas(path, label)
else:
ds, ls = getDatas(path, label)
descs, dlabels = np.vstack((descs, ds)), np.hstack((dlabels, ls))
return descs, dlabels
if __name__ == '__main__':
from os.path import join, basename
from os import walk
    # Positive samples are labeled 1, negative samples 0
# base_train_dir = '/disk_workspace/train_data_for_svm/0-9_train/'
base_train_dir = '/disk_workspace/train_data_for_svm/dzx_number'
dir_ls = [dr for dr in os.listdir(base_train_dir) if not dr.endswith('.dat')]
# train_dirs = [join(base_train_dir, d) for d in dir_ls if not d.endswith('_test')]
# train_labels = [int(basename(d)) for d in train_dirs]
test_dirs = [join(base_train_dir, d) for d in dir_ls if d.endswith('_test')]
test_labels = [int(basename(d).split('_')[0]) for d in test_dirs]
    outpath = join(base_train_dir, 'digits-20191114-ten.dat')  # model output path
    # HOG feature descriptor
    # Parameter illustration: https://blog.csdn.net/qq_26898461/article/details/46786285
    # Parameters: winSize: window size, blockSize: block size, blockStride: block stride,
    #             cellSize: cell size, nbins: number of gradient orientation bins
    descriptor = cv2.HOGDescriptor(_winSize=(64, 64), _blockSize=(16, 16), _blockStride=(8, 8), _cellSize=(8, 8), _nbins=9)
    # # # Fitting
# train_datas, train_labels = getDataset(train_dirs, train_labels, descriptor, size=(64, 64), winStride=(8, 8), padding=(0, 0))
# print('train_datas.shape={}, train_labels.shape={}'.format(train_datas.shape, train_labels.shape))
# svm = cv2.ml.SVM_create()
# svm.setKernel(cv2.ml.SVM_LINEAR)
# svm.setType(cv2.ml.SVM_C_SVC)
# svm.setC(2.67)
# svm.setGamma(5.383)
# svm.train(train_datas, cv2.ml.ROW_SAMPLE, train_labels)
# print('outpath={}'.format(outpath))
# svm.save(outpath)
    # Start testing. The test data must not overlap the training data; results on overlapping data prove nothing.
svmer = cv2.ml.SVM_load(outpath)
# test_dirs = [join(base_train_dir, pa) for pa in ['套管双耳上部_包含多个_test', '套管双耳下部_包含多个_test']]
# test_lables = [1, -1]
test_des_data, test_labels = getDataset(test_dirs, test_labels, descriptor, size=(64, 64), winStride=(8, 8), padding=(0, 0))
test_query_data = np.array(test_des_data) #
ret, responses = svmer.predict(test_query_data) # ret
# Check Accuracy
mask = test_labels == responses.reshape(responses.shape[0])
correct = np.count_nonzero(mask)
acc = correct / float(mask.size)
print('test_labels={}, responses.shape={}, mask.shape={}, acc={}'
.format(test_labels.shape, responses.shape, mask.shape, acc))
|
import math
def parse_gps(data_str):
lines = data_str.split("\n")
nodes_count, _ = map(int, lines[0].split())
nodes = []
for line in lines[2 : nodes_count + 2]:
id_str, lat_str, lon_str, _, name = line.split("\t")
nodes.append((int(id_str), float(lat_str), float(lon_str), name))
relations = []
for line in lines[nodes_count + 3 : -1]:
start_id_str, end_id_str, weight_str = line.split("\t")
relations.append((int(start_id_str), int(end_id_str), float(weight_str)))
return nodes, relations
def convert_coords(lat, lon):
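    # Project a lat/lon pair onto planar x/y coordinates on a sphere of radius 6371 km,
    # scaled by 0.1. Note that math.cos/math.sin expect radians, so the inputs are
    # interpreted as radians here.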
R = 6371
x = R * math.cos(lat) * math.cos(lon) * 0.1
y = R * math.cos(lat) * math.sin(lon) * 0.1
return x, y
def import_gps(db, text):
db.delete_all()
nodes, relations = parse_gps(text)
db_nodes = []
for node in nodes:
x, y = convert_coords(node[1], node[2])
db_nodes.append((node[0], x, y, node[3]))
db_relations = []
for relation in relations:
db_relations.append((relation[0], relation[1], True, relation[2]))
db.create_many_nodes_by_id(db_nodes)
db.create_many_relations(db_relations)
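# A minimal usage sketch (illustrative only; the data string below follows the format
# implied by parse_gps: a count line, a header line, tab-separated node lines, another
# header line, tab-separated relation lines, and a trailing newline).
if __name__ == "__main__":
    sample = (
        "2 1\n"
        "# id\tlat\tlon\ttype\tname\n"
        "1\t52.23\t21.01\t0\tWarsaw\n"
        "2\t50.06\t19.94\t0\tKrakow\n"
        "# start\tend\tweight\n"
        "1\t2\t293.0\n"
    )
    nodes, relations = parse_gps(sample)
    print(nodes)
    print(relations)
    print(convert_coords(nodes[0][1], nodes[0][2]))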
|
import cv2
import os
# Read the video
cam = cv2.VideoCapture("I&O//demo.mp4")
framecount = 0
if not os.path.exists("I&O//frames"):
    os.makedirs("I&O//frames")
os.chdir("I&O//frames")
print(os.getcwd())
ret, frame = cam.read()
while ret:
    name = str(framecount) + ".jpg"
    cv2.imwrite(name, frame)
    framecount = framecount + 1
    ret, frame = cam.read()
print("Extracted number of frames: " + str(framecount))
cam.release()
cv2.destroyAllWindows()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
import matplotlib.pyplot as plt
import plotly.offline as py
import plotly.graph_objs as go
import deeptools.countReadsPerBin as countR
from deeptools import parserCommon
from deeptools.utilities import smartLabels
from deeptools._version import __version__
old_settings = np.seterr(all='ignore')
def parse_arguments(args=None):
parent_parser = parserCommon.getParentArgParse(binSize=False)
read_options_parser = parserCommon.read_options()
parser = \
argparse.ArgumentParser(
parents=[required_args(), parent_parser, read_options_parser],
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False,
description="""
This tool is useful to assess the sequencing depth of a given sample.
It samples 1 million bp, counts the number of overlapping reads and can report
a histogram that tells you how many bases are covered how many times.
Multiple BAM files are accepted, but they all should correspond to the same genome assembly.
detailed usage help:
$ plotCoverage -h
""",
epilog='example usages:\nplotCoverage '
'--bamfiles file1.bam file2.bam -o results.png\n\n'
' \n\n',
conflict_handler='resolve')
parser.add_argument('--version', action='version',
version='plotCoverage {}'.format(__version__))
return parser
def process_args(args=None):
args = parse_arguments().parse_args(args)
if not args.labels:
if args.smartLabels:
args.labels = smartLabels(args.bamfiles)
else:
args.labels = [os.path.basename(x) for x in args.bamfiles]
if args.labels and len(args.bamfiles) != len(args.labels):
sys.exit("The number of labels does not match the number of BAM files.")
return args
def required_args():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
required.add_argument('--bamfiles', '-b',
metavar='FILE1 FILE2',
help='List of indexed BAM files separated by spaces.',
nargs='+',
required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument("--help", "-h", action="help",
help="show this help message and exit")
optional.add_argument('--plotFile', '-o',
type=parserCommon.writableFile,
help='File name to save the plot to.')
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by spaces, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--smartLabels',
action='store_true',
help='Instead of manually specifying labels for the input '
'BAM files, this causes deepTools to use the file name '
'after removing the path and extension.')
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--skipZeros',
help='By setting this option, genomic regions '
'that have zero or nan values in _all_ samples '
'are excluded.',
action='store_true',
required=False)
optional.add_argument('--numberOfSamples', '-n',
help='Number of 1 bp regions to sample. (Default: %(default)s)',
required=False,
type=int,
default=1000000)
optional.add_argument('--BED',
help='Limits the coverage analysis to '
'the regions specified in these files. This overrides --numberOfSamples. '
                          'Due to memory requirements, it is inadvisable to combine this with '
'--outRawCounts or many tens of thousands of regions, as per-base '
'coverage is used!',
metavar='FILE1.bed FILE2.bed',
nargs='+')
optional.add_argument('--outRawCounts',
help='Save raw counts (coverages) to file.',
type=parserCommon.writableFile,
metavar='FILE')
optional.add_argument('--outCoverageMetrics',
help='Save percentage of bins/regions above the specified thresholds to '
'the specified file. The coverage thresholds are specified by '
'--coverageThresholds. If no coverage thresholds are specified, the file '
'will be empty.',
type=parserCommon.writableFile,
metavar='FILE')
optional.add_argument('--coverageThresholds', '-ct',
type=int,
action="append",
help='The percentage of reported bins/regions with signal at least as '
'high as the given threshold. This can be specified multiple times.')
optional.add_argument('--plotHeight',
help='Plot height in cm. (Default: %(default)s)',
type=float,
default=5.0)
optional.add_argument('--plotWidth',
help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
type=float,
default=15.0)
optional.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf, svg and plotly.',
default=None,
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
return parser
def main(args=None):
args = process_args(args)
if not args.outRawCounts and not args.plotFile and not args.outCoverageMetrics:
sys.exit("At least one of --plotFile, --outRawCounts and --outCoverageMetrics are required.\n")
if 'BED' in args:
bed_regions = args.BED
else:
bed_regions = None
cr = countR.CountReadsPerBin(args.bamfiles,
binLength=1,
bedFile=bed_regions,
numberOfSamples=args.numberOfSamples,
numberOfProcessors=args.numberOfProcessors,
verbose=args.verbose,
region=args.region,
blackListFileName=args.blackListFileName,
extendReads=args.extendReads,
minMappingQuality=args.minMappingQuality,
ignoreDuplicates=args.ignoreDuplicates,
center_read=args.centerReads,
samFlag_include=args.samFlagInclude,
samFlag_exclude=args.samFlagExclude,
minFragmentLength=args.minFragmentLength,
maxFragmentLength=args.maxFragmentLength,
bed_and_bin=True,
out_file_for_raw_data=args.outRawCounts)
num_reads_per_bin = cr.run()
if args.outCoverageMetrics and args.coverageThresholds:
args.coverageThresholds.sort() # Galaxy in particular tends to give things in a weird order
of = open(args.outCoverageMetrics, "w")
of.write("Sample\tThreshold\tPercent\n")
nbins = float(num_reads_per_bin.shape[0])
for thresh in args.coverageThresholds:
vals = np.sum(num_reads_per_bin >= thresh, axis=0)
for lab, val in zip(args.labels, vals):
of.write("{}\t{}\t{:6.3f}\n".format(lab, thresh, 100. * val / nbins))
of.close()
if args.outRawCounts:
# append to the generated file the
# labels
header = "#plotCoverage --outRawCounts\n#'chr'\t'start'\t'end'\t"
header += "'" + "'\t'".join(args.labels) + "'\n"
f = open(args.outRawCounts, 'r+')
content = f.read()
f.seek(0, 0)
f.write(header + content)
f.close()
if num_reads_per_bin.shape[0] < 2:
exit("ERROR: too few non-zero bins found.\n"
"If using --region please check that this "
"region is covered by reads.\n")
if args.skipZeros:
num_reads_per_bin = countR.remove_row_of_zeros(num_reads_per_bin)
if args.plotFile:
if args.plotFileFormat == 'plotly':
fig = go.Figure()
fig['layout']['xaxis1'] = {'domain': [0.0, 0.48], 'anchor': 'x1', 'title': 'coverage (#reads per base)'}
fig['layout']['xaxis2'] = {'domain': [0.52, 1.0], 'anchor': 'x2', 'title': 'coverage (#reads per base)'}
fig['layout']['yaxis1'] = {'domain': [0.0, 1.0], 'anchor': 'x1', 'title': 'fraction of bases sampled'}
fig['layout']['yaxis2'] = {'domain': [0.0, 1.0], 'anchor': 'x2', 'title': 'fraction of bases sampled >= coverage'}
fig['layout'].update(title=args.plotTitle)
else:
fig, axs = plt.subplots(1, 2, figsize=(args.plotWidth, args.plotHeight))
plt.suptitle(args.plotTitle)
# plot up to two std from mean
num_reads_per_bin = num_reads_per_bin.astype(int)
sample_mean = num_reads_per_bin.mean(axis=0)
sample_std = num_reads_per_bin.std(axis=0)
sample_max = num_reads_per_bin.max(axis=0)
sample_min = num_reads_per_bin.min(axis=0)
sample_25 = np.percentile(num_reads_per_bin, 25, axis=0)
sample_50 = np.percentile(num_reads_per_bin, 50, axis=0)
sample_75 = np.percentile(num_reads_per_bin, 75, axis=0)
# use the largest 99th percentile from all samples to set the x_max value
x_max = np.max(np.percentile(num_reads_per_bin, 99, axis=0))
# plot coverage
# print headers for text output
print("sample\tmean\tstd\tmin\t25%\t50%\t75%\tmax")
    # The determination of a sensible y_max for the first plot (fraction of bases sampled vs.
    # coverage) is important because, depending on the data, it can become very difficult to
    # see the lines in the plot. For example, if the coverage of a sample is a nice gaussian
    # curve with a large mean of 50, then a sensible range for the y axis (fraction of reads
    # having coverage=x) is (0, 0.02), which nicely shows the coverage curve. If instead the
    # coverage is very poor and centers close to 1, then a good y axis range is (0, 1).
    # The current implementation finds the y value at which 50% of the reads have >= x coverage
    # and uses that to set the y axis range.
y_max = []
data = []
# We need to manually set the line colors so they're shared between the two plots.
plotly_colors = ["#d73027", "#fc8d59", "#f33090", "#e0f3f8", "#91bfdb", "#4575b4"]
plotly_styles = sum([6 * ["solid"], 6 * ["dot"], 6 * ["dash"], 6 * ["longdash"], 6 * ["dashdot"], 6 * ["longdashdot"]], [])
for idx, col in enumerate(num_reads_per_bin.T):
if args.plotFile:
frac_reads_per_coverage = np.bincount(col.astype(int)).astype(float) / num_reads_per_bin.shape[0]
csum = np.bincount(col.astype(int))[::-1].cumsum()
csum_frac = csum.astype(float)[::-1] / csum.max()
if args.plotFileFormat == 'plotly':
color = plotly_colors[idx % len(plotly_colors)]
dash = plotly_styles[idx % len(plotly_styles)]
trace = go.Scatter(x=np.arange(0, int(x_max) - 1),
y=frac_reads_per_coverage[:int(x_max)],
mode='lines',
xaxis='x1',
yaxis='y1',
line=dict(color=color, dash=dash),
name="{}, mean={:.1f}".format(args.labels[idx], sample_mean[idx]),
legendgroup="{}".format(idx))
data.append(trace)
trace = go.Scatter(x=np.arange(0, int(x_max) - 1),
y=csum_frac[:int(x_max)],
mode='lines',
xaxis='x2',
yaxis='y2',
line=dict(color=color, dash=dash),
name=args.labels[idx],
showlegend=False,
legendgroup="{}".format(idx))
data.append(trace)
else:
axs[0].plot(frac_reads_per_coverage, label="{}, mean={:.1f}".format(args.labels[idx], sample_mean[idx]))
axs[1].plot(csum_frac, label=args.labels[idx])
# find the indexes (i.e. the x values) for which the cumulative distribution 'fraction of bases
# sampled >= coverage' where fraction of bases sampled = 50%: `np.flatnonzero(csum_frac>0.5)`
                # then take the fraction of bases sampled at the largest such x
y_max.append(frac_reads_per_coverage[max(np.flatnonzero(csum_frac > 0.5))])
print("{}\t{:0.2f}\t{:0.2f}\t{}\t{}\t{}\t{}\t{}\t".format(args.labels[idx],
sample_mean[idx],
sample_std[idx],
sample_min[idx],
sample_25[idx],
sample_50[idx],
sample_75[idx],
sample_max[idx],
))
if args.plotFile:
# Don't clip plots
y_max = max(y_max)
if args.plotFileFormat == "plotly":
fig.add_traces(data)
fig['layout']['yaxis1'].update(range=[0.0, min(1, y_max + (y_max * 0.10))])
fig['layout']['yaxis2'].update(range=[0.0, 1.0])
py.plot(fig, filename=args.plotFile, auto_open=False)
else:
axs[0].set_ylim(0, min(1, y_max + (y_max * 0.10)))
axs[0].set_xlim(0, x_max)
axs[0].set_xlabel('coverage (#reads per bp)')
axs[0].legend(fancybox=True, framealpha=0.5)
axs[0].set_ylabel('fraction of bases sampled')
# plot cumulative coverage
axs[1].set_xlim(0, x_max)
axs[1].set_xlabel('coverage (#reads per bp)')
axs[1].set_ylabel('fraction of bases sampled >= coverage')
axs[1].legend(fancybox=True, framealpha=0.5)
plt.savefig(args.plotFile, format=args.plotFileFormat)
plt.close()
if __name__ == "__main__":
main()
|
from gym import spaces
import numpy as np
from .pybullet_evo.gym_locomotion_envs import HalfCheetahBulletEnv
import copy
from utils import BestEpisodesVideoRecorder
class HalfCheetahEnv(object):
def __init__(self, config = {'env' : {'render' : True, 'record_video': False}}):
self._config = config
self._render = self._config['env']['render']
self._record_video = self._config['env']['record_video']
self._current_design = [1.0] * 6
self._config_numpy = np.array(self._current_design)
self.design_params_bounds = [(0.8, 2.0)] * 6
self._env = HalfCheetahBulletEnv(render=self._render, design=self._current_design)
self.init_sim_params = [
[1.0] * 6,
[1.41, 0.96, 1.97, 1.73, 1.97, 1.17],
[1.52, 1.07, 1.11, 1.97, 1.51, 0.99],
[1.08, 1.18, 1.39, 1.76 , 1.85, 0.92],
[0.85, 1.54, 0.97, 1.38, 1.10, 1.49],
]
self.observation_space = spaces.Box(-np.inf, np.inf, shape=[self._env.observation_space.shape[0] + 6], dtype=np.float32)#env.observation_space
self.action_space = self._env.action_space
self._initial_state = self._env.reset()
if self._record_video:
self._video_recorder = BestEpisodesVideoRecorder(path=config['data_folder_experiment'], max_videos=5)
# Which dimensions in the state vector are design parameters?
self._design_dims = list(range(self.observation_space.shape[0] - len(self._current_design), self.observation_space.shape[0]))
assert len(self._design_dims) == 6
def render(self):
pass
def step(self, a):
info = {}
state, reward, done, _ = self._env.step(a)
state = np.append(state, self._config_numpy)
info['orig_action_cost'] = 0.1 * np.mean(np.square(a))
info['orig_reward'] = reward
if self._record_video:
self._video_recorder.step(env=self._env, state=state, reward=reward, done=done)
return state, reward, False, info
def reset(self):
state = self._env.reset()
self._initial_state = state
state = np.append(state, self._config_numpy)
if self._record_video:
self._video_recorder.reset(env=self._env, state=state, reward=0, done=False)
return state
def set_new_design(self, vec):
self._env.reset_design(vec)
self._current_design = vec
self._config_numpy = np.array(vec)
if self._record_video:
self._video_recorder.increase_folder_counter()
def get_random_design(self):
optimized_params = np.random.uniform(low=0.8, high=2.0, size=6)
return optimized_params
def get_current_design(self):
return copy.copy(self._current_design)
def get_design_dimensions(self):
return copy.copy(self._design_dims)
|
from apps import db
class Describable(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(255))
slug = db.Column(db.String(255))
description = db.Column(db.Text)
class Timestampable(db.Model):
__abstract__ = True
created_date = db.Column(db.DateTime, default=db.func.now())
modified_date = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
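# A minimal usage sketch (hypothetical model, not part of the original file): a concrete
# table that combines both abstract mixins, inheriting id/name/slug/description plus the
# created/modified timestamps.
class Article(Describable, Timestampable):
    __tablename__ = 'articles'
    body = db.Column(db.Text)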
|
import argparse
import pickle
import time
import pandas as pd
from sklearn.model_selection import train_test_split
from product_utils import *
from willump.evaluation.willump_executor import willump_execute
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cascades", action="store_true", help="Cascade threshold")
args = parser.parse_args()
if args.cascades:
cascades_dict = pickle.load(open(base_directory + "lazada_training_cascades.pk", "rb"))
else:
cascades_dict = None
@willump_execute(predict_function=product_predict,
confidence_function=product_confidence,
predict_cascades_params=cascades_dict)
def product_eval_pipeline(input_x, model, title_vect, color_vect, brand_vect):
title_result = transform_data(input_x, title_vect)
color_result = transform_data(input_x, color_vect)
brand_result = transform_data(input_x, brand_vect)
return product_predict(model, [title_result, color_result, brand_result])
if __name__ == '__main__':
df = pd.read_csv(base_directory + "lazada_data_train.csv", header=None,
names=['country', 'sku_id', 'title', 'category_lvl_1', 'category_lvl_2', 'category_lvl_3',
'short_description', 'price', 'product_type'])
y = np.loadtxt(base_directory + "conciseness_train.labels", dtype=int)
_, test_df, _, test_y = train_test_split(df, y, test_size=0.2, random_state=42)
title_vectorizer, color_vectorizer, brand_vectorizer = pickle.load(
open(base_directory + "lazada_vectorizers.pk", "rb"))
model = pickle.load(open(base_directory + "lazada_model.pk", "rb"))
product_eval_pipeline(test_df, model, title_vectorizer, color_vectorizer, brand_vectorizer)
product_eval_pipeline(test_df, model, title_vectorizer, color_vectorizer, brand_vectorizer)
start_time = time.time()
preds = product_eval_pipeline(test_df, model, title_vectorizer, color_vectorizer, brand_vectorizer)
time_elapsed = time.time() - start_time
print("Elapsed Time %fs Num Rows %d Throughput %f rows/sec" %
(time_elapsed, len(test_df), len(test_df) / time_elapsed))
print("1 - RMSE Score: %f" % product_score(preds, test_y))
|
from __future__ import division
"""
Test cases for the colorClass definedColors module
"""
import colorClass.definedColors as dc
if 'COLORS' in dir(dc):
COLORS = dc.COLORS
a = COLORS.RED
a = 'fdsfds'
if a == COLORS.RED:
print 'I was able to change an enumerated value in the COLORS variable!'
COLORS.whiteFromString = '255,255 255'
if COLORS.whiteFromString != '#FFFFFF':
print 'Adding a new color using a string with RGB values did not work!'
COLORS.whiteFromList = [255,255, 255]
if COLORS.whiteFromList != '#FFFFFF':
print 'Adding a new color using a list with RGB values did not work!'
COLORS.whiteFromDict = {'red': 255, 'g': 255, 'blu': 255}
if COLORS.whiteFromDict != '#FFFFFF':
print 'Adding a new color using a dict with RGB values did not work!'
COLORS.whiteFromDictPct = {'red': .9, 'g': .9, 'blu': .9}
if COLORS.whiteFromDictPct != '#E6E6E6':
print 'Adding a new color using a dict with RGB percentage values did not work!'
COLORS.whiteFromShortHex = 'fff'
if COLORS.whiteFromShortHex != '#FFFFFF':
print 'Adding a new color using a partial hex string did not work!'
COLORS.whiteFromHex = 'ffffff'
if COLORS.whiteFromHex != '#FFFFFF':
print 'Adding a new color using a hex string did not work!'
print 'All tests completed!'
else:
print 'The COLORS variable does not exist!'
|
#!/usr/bin/env python3.5
from http.server import CGIHTTPRequestHandler, HTTPServer
import base64
import logging
import argparse
requestHandler = CGIHTTPRequestHandler
server = HTTPServer
class AuthHTTPRequestHandler(requestHandler):
KEY = ''
def do_HEAD(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def send_authhead(self):
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm="Security Realm"')
self.end_headers()
def do_GET(self):
authheader = self.headers.get('authorization')
if self.KEY:
if authheader is None:
self.send_authhead()
elif authheader == 'Basic ' + self.KEY:
requestHandler.do_GET(self)
else:
self.send_authhead()
                self.wfile.write(b'Not authenticated')
else:
requestHandler.do_GET(self)
def run(port = 8080, host = "", server_class = server, handler_class = AuthHTTPRequestHandler):
logging.basicConfig(level=logging.INFO)
serverAddress = (host, port)
httpd = server_class(serverAddress, handler_class)
logging.info('Starting server...\n')
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
logging.info('Stopping server...\n')
def configServer():
parser = argparse.ArgumentParser()
parser.add_argument('port', type=int, help='set port number (as 8000, etc.)')
parser.add_argument('--key', help='set username and password as username:password for Base Auth')
args = parser.parse_args()
if args.key:
AuthHTTPRequestHandler.KEY = base64.b64encode(args.key.encode()).decode('ascii')
run(port= args.port)
if __name__ == '__main__':
configServer()
|
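# Håstad broadcast attack sketch (assuming the remote service encrypts the same plaintext
# with e = 3 under a fresh modulus on every connection): CRT combines the three ciphertexts
# into m^3 mod (n1*n2*n3), and an integer cube root recovers the plaintext.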
from pwn import *
from sympy.ntheory.modular import crt
from gmpy2 import iroot
from Crypto.Util.number import long_to_bytes
ns = []
cs = []
for _ in range(3):
s = remote(sys.argv[1], int(sys.argv[2]))
s.recvuntil("n: ")
ns.append(int(s.recvline().decode()))
s.sendlineafter("opt: ", "2")
s.recvuntil("c: ")
cs.append(int(s.recvline().decode()))
s.close()
ptc = int(crt(ns, cs)[0])
print(long_to_bytes(int(iroot(ptc, 3)[0])).decode())
|
from flaskext.wtf import regexp
from flaskext.babel import lazy_gettext as _
USERNAME_RE = r'^[\w.+-]+$'
is_username = regexp(USERNAME_RE,
message=_("You can only use letters, numbers or dashes"))
|
# Generated by Django 2.2 on 2019-11-01 13:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import rmgweb.pdep.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Network',
fields=[
('id', models.CharField(default=rmgweb.pdep.models._createId, max_length=32, primary_key=True, serialize=False)),
('title', models.CharField(max_length=50)),
('input_file', models.FileField(upload_to=rmgweb.pdep.models.uploadTo, verbose_name='Input file')),
('input_text', models.TextField(blank=True, verbose_name='')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|