| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# coding: utf-8
# Sublist3r v1.0
# By Ahmed Aboul-Ela - twitter.com/aboul3la
# modules in standard library
import re
import sys
import os
import argparse
import time
import hashlib
import random
import multiprocessing
import threading
import socket
import json
from collections import Counter
# external modules
from subbrute import subbrute
import dns.resolver
import requests
# Python 2.x and 3.x compatibility
if sys.version > '3':
import urllib.parse as urlparse
import urllib.parse as urllib
else:
import urlparse
import urllib
# In case you cannot install some of the required development packages
# there's also an option to disable the SSL warning:
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass
# Check if we are running this on windows platform
is_windows = sys.platform.startswith('win')
# Console Colors
if is_windows:
# Windows deserves coloring too :D
G = '\033[92m' # green
Y = '\033[93m' # yellow
B = '\033[94m' # blue
R = '\033[91m' # red
W = '\033[0m' # white
try:
import win_unicode_console , colorama
win_unicode_console.enable()
colorama.init()
#Now the unicode will work ^_^
except:
print("[!] Error: Coloring libraries not installed, no coloring will be used [Check the readme]")
G = Y = B = R = W = ''
else:
G = '\033[92m' # green
Y = '\033[93m' # yellow
B = '\033[94m' # blue
R = '\033[91m' # red
W = '\033[0m' # white
def banner():
print("""%s
____ _ _ _ _ _____
/ ___| _ _| |__ | (_)___| |_|___ / _ __
\___ \| | | | '_ \| | / __| __| |_ \| '__|
___) | |_| | |_) | | \__ \ |_ ___) | |
|____/ \__,_|_.__/|_|_|___/\__|____/|_|%s%s
# Coded By Ahmed Aboul-Ela - @aboul3la
""" % (R, W, Y))
def parser_error(errmsg):
banner()
print("Usage: python " + sys.argv[0] + " [Options] use -h for help")
print(R + "Error: " + errmsg + W)
sys.exit()
def parse_args():
# parse the arguments
parser = argparse.ArgumentParser(epilog='\tExample: \r\npython ' + sys.argv[0] + " -d google.com")
parser.error = parser_error
parser._optionals.title = "OPTIONS"
parser.add_argument('-d', '--domain', help="Domain name to enumerate its subdomains", required=True)
parser.add_argument('-b', '--bruteforce', help='Enable the subbrute bruteforce module', nargs='?', default=False)
parser.add_argument('-p', '--ports', help='Scan the found subdomains against specified tcp ports')
parser.add_argument('-v', '--verbose', help='Enable Verbosity and display results in realtime', nargs='?', default=False)
parser.add_argument('-t', '--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=30)
parser.add_argument('-e', '--engines', help='Specify a comma-separated list of search engines')
parser.add_argument('-o', '--output', help='Save the results to text file')
return parser.parse_args()
def write_file(filename, subdomains):
# saving subdomains results to output file
print("%s[-] Saving results to file: %s%s%s%s" % (Y, W, R, filename, W))
with open(str(filename), 'wt') as f:
for subdomain in subdomains:
f.write(subdomain + os.linesep)
def subdomain_sorting_key(hostname):
"""Sorting key for subdomains
This sorting key orders subdomains from the top-level domain at the right
reading left, then moving '^' and 'www' to the top of their group. For
example, the following list is sorted correctly:
[
'example.com',
'www.example.com',
'a.example.com',
'www.a.example.com',
'b.a.example.com',
'b.example.com',
'example.net',
'www.example.net',
'a.example.net',
]
"""
parts = hostname.split('.')[::-1]
if parts[-1] == 'www':
return parts[:-1], 1
return parts, 0
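# Illustrative note, not part of the original Sublist3r source: a quick sketch of how the key
# above orders hosts, using made-up example hostnames.
# >>> hosts = ['b.example.com', 'www.example.com', 'example.com', 'a.example.net']
# >>> sorted(hosts, key=subdomain_sorting_key)
# ['example.com', 'www.example.com', 'b.example.com', 'a.example.net']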
class enumratorBase(object):
def __init__(self, base_url, engine_name, domain, subdomains=None, silent=False, verbose=True):
subdomains = subdomains or []
self.domain = urlparse.urlparse(domain).netloc
self.session = requests.Session()
self.subdomains = []
self.timeout = 25
self.base_url = base_url
self.engine_name = engine_name
self.silent = silent
self.verbose = verbose
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.8',
'Accept-Encoding': 'gzip',
}
self.print_banner()
def print_(self, text):
if not self.silent:
print(text)
return
def print_banner(self):
""" subclass can override this if they want a fancy banner :)"""
self.print_(G + "[-] Searching now in %s.." % (self.engine_name) + W)
return
def send_req(self, query, page_no=1):
url = self.base_url.format(query=query, page_no=page_no)
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def get_response(self, response):
if response is None:
return 0
return response.text if hasattr(response, "text") else response.content
def check_max_subdomains(self, count):
if self.MAX_DOMAINS == 0:
return False
return count >= self.MAX_DOMAINS
def check_max_pages(self, num):
if self.MAX_PAGES == 0:
return False
return num >= self.MAX_PAGES
# override
def extract_domains(self, resp):
""" chlid class should override this function """
return
# override
def check_response_errors(self, resp):
""" chlid class should override this function
The function should return True if there are no errors and False otherwise
"""
return True
def should_sleep(self):
"""Some enumrators require sleeping to avoid bot detections like Google enumerator"""
return
def generate_query(self):
""" chlid class should override this function """
return
def get_page(self, num):
""" chlid class that user different pagnation counter should override this function """
return num + 10
def enumerate(self, altquery=False):
flag = True
page_no = 0
prev_links = []
retries = 0
while flag:
query = self.generate_query()
count = query.count(self.domain) # finding the number of subdomains found so far
# if we have reached the maximum number of subdomains in the search query
# then we should move on to the next page
if self.check_max_subdomains(count):
page_no = self.get_page(page_no)
if self.check_max_pages(page_no): # maximum pages for Google to avoid getting blocked
return self.subdomains
resp = self.send_req(query, page_no)
# check if any error occurred
if not self.check_response_errors(resp):
return self.subdomains
links = self.extract_domains(resp)
# if the previous page's hyperlinks are the same as the current ones, maybe we have reached the last page
if links == prev_links:
retries += 1
page_no = self.get_page(page_no)
# retry a few more times in case it isn't actually the last page
if retries >= 3:
return self.subdomains
prev_links = links
self.should_sleep()
return self.subdomains
class enumratorBaseThreaded(multiprocessing.Process, enumratorBase):
def __init__(self, base_url, engine_name, domain, subdomains=None, q=None, lock=threading.Lock(), silent=False, verbose=True):
subdomains = subdomains or []
enumratorBase.__init__(self, base_url, engine_name, domain, subdomains, silent=silent, verbose=verbose)
multiprocessing.Process.__init__(self)
self.lock = lock
self.q = q
return
def run(self):
domain_list = self.enumerate()
for domain in domain_list:
self.q.append(domain)
class GoogleEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://google.com/search?q={query}&btnG=Search&hl=en-US&biw=&bih=&gbv=1&start={page_no}&filter=0"
self.engine_name = "Google"
self.MAX_DOMAINS = 11
self.MAX_PAGES = 200
super(GoogleEnum, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.q = q
return
def extract_domains(self, resp):
link_regx = re.compile('<cite.*?>(.*?)<\/cite>')
try:
links_list = link_regx.findall(resp)
for link in links_list:
link = re.sub('<span.*>', '', link)
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def check_response_errors(self, resp):
if 'Our systems have detected unusual traffic' in resp:
self.print_(R + "[!] Error: Google probably now is blocking our requests" + W)
self.print_(R + "[~] Finished now the Google Enumeration ..." + W)
return False
return True
def should_sleep(self):
time.sleep(5)
return
def generate_query(self):
if self.subdomains:
fmt = 'site:{domain} -www.{domain} -{found}'
found = ' -'.join(self.subdomains[:self.MAX_DOMAINS - 2])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain} -www.{domain}".format(domain=self.domain)
return query
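# Illustrative note, not part of the original source: assuming self.domain is 'example.com' and
# self.subdomains is ['mail.example.com', 'api.example.com'], generate_query() above would return
# 'site:example.com -www.example.com -mail.example.com -api.example.com', i.e. already-known
# subdomains are excluded so that new ones surface in the results.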
class YahooEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = "https://search.yahoo.com/search?p={query}&b={page_no}"
self.engine_name = "Yahoo"
self.MAX_DOMAINS = 10
self.MAX_PAGES = 0
super(YahooEnum, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.q = q
return
def extract_domains(self, resp):
link_regx2 = re.compile('<span class=" fz-.*? fw-m fc-12th wr-bw.*?">(.*?)</span>')
link_regx = re.compile('<span class="txt"><span class=" cite fw-xl fz-15px">(.*?)</span>')
links_list = []
try:
links = link_regx.findall(resp)
links2 = link_regx2.findall(resp)
links_list = links + links2
for link in links_list:
link = re.sub("<(\/)?b>", "", link)
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if not subdomain.endswith(self.domain):
continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def should_sleep(self):
return
def get_page(self, num):
return num + 10
def generate_query(self):
if self.subdomains:
fmt = 'site:{domain} -domain:www.{domain} -domain:{found}'
found = ' -domain:'.join(self.subdomains[:77])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain}".format(domain=self.domain)
return query
class AskEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'http://www.ask.com/web?q={query}&page={page_no}&qid=8D6EE6BF52E0C04527E51F64F22C4534&o=0&l=dir&qsrc=998&qo=pagination'
self.engine_name = "Ask"
self.MAX_DOMAINS = 11
self.MAX_PAGES = 0
enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.q = q
return
def extract_domains(self, resp):
link_regx = re.compile('<p class="web-result-url">(.*?)</p>')
try:
links_list = link_regx.findall(resp)
for link in links_list:
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def get_page(self, num):
return num + 1
def generate_query(self):
if self.subdomains:
fmt = 'site:{domain} -www.{domain} -{found}'
found = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
query = fmt.format(domain=self.domain, found=found)
else:
query = "site:{domain} -www.{domain}".format(domain=self.domain)
return query
class BingEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://www.bing.com/search?q={query}&go=Submit&first={page_no}'
self.engine_name = "Bing"
self.MAX_DOMAINS = 30
self.MAX_PAGES = 0
enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent)
self.q = q
self.verbose = verbose
return
def extract_domains(self, resp):
link_regx = re.compile('<li class="b_algo"><h2><a href="(.*?)"')
link_regx2 = re.compile('<div class="b_title"><h2><a href="(.*?)"')
try:
links = link_regx.findall(resp)
links2 = link_regx2.findall(resp)
links_list = links + links2
for link in links_list:
link = re.sub('<(\/)?strong>|<span.*?>|<|>', '', link)
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
def generate_query(self):
if self.subdomains:
fmt = 'domain:{domain} -www.{domain} -{found}'
found = ' -'.join(self.subdomains[:self.MAX_DOMAINS])
query = fmt.format(domain=self.domain, found=found)
else:
query = "domain:{domain} -www.{domain}".format(domain=self.domain)
return query
class BaiduEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://www.baidu.com/s?pn={page_no}&wd={query}&oq={query}'
self.engine_name = "Baidu"
self.MAX_DOMAINS = 2
self.MAX_PAGES = 760
enumratorBaseThreaded.__init__(self, base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.querydomain = self.domain
self.q = q
return
def extract_domains(self, resp):
found_newdomain = False
subdomain_list = []
link_regx = re.compile('<a.*?class="c-showurl".*?>(.*?)</a>')
try:
links = link_regx.findall(resp)
for link in links:
link = re.sub('<.*?>|>|<| ', '', link)
if not link.startswith('http'):
link = "http://" + link
subdomain = urlparse.urlparse(link).netloc
if subdomain.endswith(self.domain):
subdomain_list.append(subdomain)
if subdomain not in self.subdomains and subdomain != self.domain:
found_newdomain = True
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
if not found_newdomain and subdomain_list:
self.querydomain = self.findsubs(subdomain_list)
return links
def findsubs(self, subdomains):
count = Counter(subdomains)
subdomain1 = max(count, key=count.get)
count.pop(subdomain1, "None")
subdomain2 = max(count, key=count.get) if count else ''
return (subdomain1, subdomain2)
def check_response_errors(self, resp):
return True
def should_sleep(self):
time.sleep(random.randint(2, 5))
return
def generate_query(self):
if self.subdomains and self.querydomain != self.domain:
found = ' -site:'.join(self.querydomain)
query = "site:{domain} -site:www.{domain} -site:{found} ".format(domain=self.domain, found=found)
else:
query = "site:{domain} -site:www.{domain}".format(domain=self.domain)
return query
class NetcraftEnum(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
self.base_url = 'https://searchdns.netcraft.com/?restriction=site+ends+with&host={domain}'
self.engine_name = "Netcraft"
self.lock = threading.Lock()
super(NetcraftEnum, self).__init__(self.base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
self.q = q
return
def req(self, url, cookies=None):
cookies = cookies or {}
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout, cookies=cookies)
except Exception as e:
self.print_(e)
resp = None
return resp
def get_next(self, resp):
link_regx = re.compile('<A href="(.*?)"><b>Next page</b></a>')
link = link_regx.findall(resp)
link = re.sub('host=.*?%s' % self.domain, 'host=%s' % self.domain, link[0])
url = 'http://searchdns.netcraft.com' + link
return url
def create_cookies(self, cookie):
cookies = dict()
cookies_list = cookie[0:cookie.find(';')].split("=")
cookies[cookies_list[0]] = cookies_list[1]
cookies['netcraft_js_verification_response'] = hashlib.sha1(urllib.unquote(cookies_list[1])).hexdigest()
return cookies
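# Illustrative note, not part of the original source: given a hypothetical header value such as
# 'netcraft_js_verification_challenge=abc%3D123; path=/', create_cookies() keeps the challenge
# cookie as-is and adds 'netcraft_js_verification_response' set to
# sha1(unquote('abc%3D123')).hexdigest(). On Python 3, hashlib.sha1() needs bytes, so the
# unquoted value would have to be encoded first.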
def get_cookies(self, headers):
if 'set-cookie' in headers:
cookies = self.create_cookies(headers['set-cookie'])
else:
cookies = {}
return cookies
def enumerate(self):
start_url = self.base_url.format(domain='example.com')
resp = self.req(start_url)
cookies = self.get_cookies(resp.headers)
url = self.base_url.format(domain=self.domain)
while True:
resp = self.get_response(self.req(url, cookies))
self.extract_domains(resp)
if 'Next page' not in resp:
return self.subdomains
url = self.get_next(resp)
def extract_domains(self, resp):
link_regx = re.compile('<a href="http://toolbar.netcraft.com/site_report\?url=(.*)">')
try:
links_list = link_regx.findall(resp)
for link in links_list:
subdomain = urlparse.urlparse(link).netloc
if not subdomain.endswith(self.domain):
continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
return links_list
class DNSdumpster(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://dnsdumpster.com/'
self.live_subdomains = []
self.engine_name = "DNSdumpster"
self.threads = 70
self.lock = threading.BoundedSemaphore(value=self.threads)
self.q = q
super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
def check_host(self, host):
is_valid = False
Resolver = dns.resolver.Resolver()
Resolver.nameservers = ['8.8.8.8', '8.8.4.4']
self.lock.acquire()
try:
ip = Resolver.query(host, 'A')[0].to_text()
if ip:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, host))
is_valid = True
self.live_subdomains.append(host)
except:
pass
self.lock.release()
return is_valid
def req(self, req_method, url, params=None):
params = params or {}
headers = dict(self.headers)
headers['Referer'] = 'https://dnsdumpster.com'
try:
if req_method == 'GET':
resp = self.session.get(url, headers=headers, timeout=self.timeout)
else:
resp = self.session.post(url, data=params, headers=headers, timeout=self.timeout)
except Exception as e:
self.print_(e)
resp = None
return self.get_response(resp)
def get_csrftoken(self, resp):
csrf_regex = re.compile("<input type='hidden' name='csrfmiddlewaretoken' value='(.*?)' />", re.S)
token = csrf_regex.findall(resp)[0]
return token.strip()
def enumerate(self):
resp = self.req('GET', self.base_url)
token = self.get_csrftoken(resp)
params = {'csrfmiddlewaretoken': token, 'targetip': self.domain}
post_resp = self.req('POST', self.base_url, params)
self.extract_domains(post_resp)
for subdomain in self.subdomains:
t = threading.Thread(target=self.check_host, args=(subdomain,))
t.start()
t.join()
return self.live_subdomains
def extract_domains(self, resp):
tbl_regex = re.compile('<a name="hostanchor"><\/a>Host Records.*?<table.*?>(.*?)</table>', re.S)
link_regex = re.compile('<td class="col-md-4">(.*?)<br>', re.S)
links = []
try:
results_tbl = tbl_regex.findall(resp)[0]
except IndexError:
results_tbl = ''
links_list = link_regex.findall(results_tbl)
links = list(set(links_list))
for link in links:
subdomain = link.strip()
if not subdomain.endswith(self.domain):
continue
if subdomain and subdomain not in self.subdomains and subdomain != self.domain:
self.subdomains.append(subdomain.strip())
return links
class Virustotal(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://www.virustotal.com/en/domain/{domain}/information/'
self.engine_name = "Virustotal"
self.lock = threading.Lock()
self.q = q
super(Virustotal, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
# the main send_req needs to be rewritten
def send_req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception as e:
self.print_(e)
resp = None
return self.get_response(resp)
# once send_req is rewritten we won't need to override this function; the stock one should be fine
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.send_req(url)
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
link_regx = re.compile('<div class="enum.*?">.*?<a target="_blank" href=".*?">(.*?)</a>', re.S)
try:
links = link_regx.findall(resp)
for link in links:
subdomain = link.strip()
if not subdomain.endswith(self.domain):
continue
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception:
pass
class ThreatCrowd(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://www.threatcrowd.org/searchApi/v2/domain/report/?domain={domain}'
self.engine_name = "ThreatCrowd"
self.lock = threading.Lock()
self.q = q
super(ThreatCrowd, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
def req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
try:
links = json.loads(resp)['subdomains']
for link in links:
subdomain = link.strip()
if not subdomain.endswith(self.domain):
continue
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
pass
class CrtSearch(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://crt.sh/?q=%25.{domain}'
self.engine_name = "SSL Certificates"
self.lock = threading.Lock()
self.q = q
super(CrtSearch, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
def req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception:
resp = None
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
if resp:
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
link_regx = re.compile('<TD>(.*?)</TD>')
try:
links = link_regx.findall(resp)
for link in links:
subdomain = link.strip()
if not subdomain.endswith(self.domain) or '*' in subdomain:
continue
if '@' in subdomain:
subdomain = subdomain[subdomain.find('@')+1:]
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
pass
class PassiveDNS(enumratorBaseThreaded):
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
subdomains = subdomains or []
base_url = 'https://api.sublist3r.com/search.php?domain={domain}'
self.engine_name = "PassiveDNS"
self.lock = threading.Lock()
self.q = q
super(PassiveDNS, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
return
def req(self, url):
try:
resp = self.session.get(url, headers=self.headers, timeout=self.timeout)
except Exception as e:
resp = None
return self.get_response(resp)
def enumerate(self):
url = self.base_url.format(domain=self.domain)
resp = self.req(url)
if not resp:
return self.subdomains
self.extract_domains(resp)
return self.subdomains
def extract_domains(self, resp):
try:
subdomains = json.loads(resp)
for subdomain in subdomains:
if subdomain not in self.subdomains and subdomain != self.domain:
if self.verbose:
self.print_("%s%s: %s%s" % (R, self.engine_name, W, subdomain))
self.subdomains.append(subdomain.strip())
except Exception as e:
pass
class portscan():
def __init__(self, subdomains, ports):
self.subdomains = subdomains
self.ports = ports
self.threads = 20
self.lock = threading.BoundedSemaphore(value=self.threads)
def port_scan(self, host, ports):
openports = []
self.lock.acquire()
for port in ports:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
result = s.connect_ex((host, int(port)))
if result == 0:
openports.append(port)
s.close()
except Exception:
pass
self.lock.release()
if len(openports) > 0:
print("%s%s%s - %sFound open ports:%s %s%s%s" % (G, host, W, R, W, Y, ', '.join(openports), W))
def run(self):
for subdomain in self.subdomains:
t = threading.Thread(target=self.port_scan, args=(subdomain, self.ports))
t.start()
def main(domain, threads, savefile, ports, silent, verbose, enable_bruteforce, engines):
bruteforce_list = set()
search_list = set()
if is_windows:
subdomains_queue = list()
else:
subdomains_queue = multiprocessing.Manager().list()
# Check Bruteforce Status
if enable_bruteforce or enable_bruteforce is None:
enable_bruteforce = True
# Validate domain
domain_check = re.compile("^(http|https)?[a-zA-Z0-9]+([\-\.]{1}[a-zA-Z0-9]+)*\.[a-zA-Z]{2,}$")
if not domain_check.match(domain):
if not silent:
print(R + "Error: Please enter a valid domain" + W)
return []
if not domain.startswith('http://') and not domain.startswith('https://'):
domain = 'http://' + domain
parsed_domain = urlparse.urlparse(domain)
if not silent:
print(B + "[-] Enumerating subdomains now for %s" % parsed_domain.netloc + W)
if verbose and not silent:
print(Y + "[-] verbosity is enabled, will show the subdomains results in realtime" + W)
supported_engines = {'baidu': BaiduEnum,
'yahoo': YahooEnum,
'google': GoogleEnum,
'bing': BingEnum,
'ask': AskEnum,
'netcraft': NetcraftEnum,
'dnsdumpster': DNSdumpster,
'virustotal': Virustotal,
'threatcrowd': ThreatCrowd,
'ssl': CrtSearch,
'passivedns': PassiveDNS
}
chosenEnums = []
if engines is None:
chosenEnums = [
BaiduEnum, YahooEnum, GoogleEnum, BingEnum, AskEnum,
NetcraftEnum, DNSdumpster, Virustotal, ThreatCrowd,
CrtSearch, PassiveDNS
]
else:
engines = engines.split(',')
for engine in engines:
if engine.lower() in supported_engines:
chosenEnums.append(supported_engines[engine.lower()])
# Start the engines enumeration
enums = [enum(domain, [], q=subdomains_queue, silent=silent, verbose=verbose) for enum in chosenEnums]
for enum in enums:
enum.start()
for enum in enums:
enum.join()
subdomains = set(subdomains_queue)
for subdomain in subdomains:
search_list.add(subdomain)
if enable_bruteforce:
if not silent:
print(G + "[-] Starting bruteforce module now using subbrute.." + W)
record_type = False
path_to_file = os.path.dirname(os.path.realpath(__file__))
subs = os.path.join(path_to_file, 'subbrute', 'names.txt')
resolvers = os.path.join(path_to_file, 'subbrute', 'resolvers.txt')
process_count = threads
output = False
json_output = False
bruteforce_list = subbrute.print_target(parsed_domain.netloc, record_type, subs, resolvers, process_count, output, json_output, search_list, verbose)
subdomains = search_list.union(bruteforce_list)
if subdomains:
subdomains = sorted(subdomains, key=subdomain_sorting_key)
if savefile:
write_file(savefile, subdomains)
if not silent:
print(Y + "[-] Total Unique Subdomains Found: %s" % len(subdomains) + W)
if ports:
if not silent:
print(G + "[-] Start port scan now for the following ports: %s%s" % (Y, ports) + W)
ports = ports.split(',')
pscan = portscan(subdomains, ports)
pscan.run()
elif not silent:
for subdomain in subdomains:
print(G + subdomain + W)
return subdomains
if __name__ == "__main__":
args = parse_args()
domain = args.domain
threads = args.threads
savefile = args.output
ports = args.ports
enable_bruteforce = args.bruteforce
verbose = args.verbose
engines = args.engines
if verbose or verbose is None:
verbose = True
banner()
res = main(domain, threads, savefile, ports, silent=False, verbose=verbose, enable_bruteforce=enable_bruteforce, engines=engines)
| Yukinoshita47/Yuki-Chan-The-Auto-Pentest | Module/sublist3r/sublist3r.py | Python | mit | 35,832 |
import re
from waliki.signals import page_saved
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from waliki.models import Page
from django.utils.translation import ugettext_lazy as _
from django.utils.text import get_text_list
try:
from waliki.attachments.models import Attachment
except ImportError:
Attachment = None
try:
from sh import pandoc, echo
pandoc = pandoc.bake(_tty_out=False)
echo = echo.bake(_tty_out=False)
except ImportError:
pandoc = None
def clean_meta(rst_content):
"""remove moinmoin metada from the top of the file"""
rst = rst_content.split('\n')
for i, line in enumerate(rst):
if line.startswith('#'):
continue
break
return '\n'.join(rst[i:])
def delete_relative_links(rst_content):
"""remove links relatives. Waliki point them correctly implicitly"""
return re.sub(r'^(\.\. .*: \.\./.*)\n$', '', rst_content, flags=re.MULTILINE)
def attachments(rst_content, slug):
def rep(matchobj):
for filename in matchobj.groups(1):
try:
a = Attachment.objects.filter(file__endswith=filename, page__slug=slug)[0]
except IndexError:
print("Can't find %s in %s" % (filename, slug))
return matchobj.group(0)  # re.sub repl must return a string; keep the original text if the attachment is missing
return '`%s <%s>`_' % (filename, a.get_absolute_url())
return re.sub(r'`attachment:(.*)`_', rep, rst_content, flags=re.MULTILINE)
def directives(rst_content):
for directive in re.findall(r':(\w+):`.*`', rst_content, flags=re.MULTILINE):
rst_content += """
.. role:: {directive}
:class: {directive}
""".format(directive=directive)
return rst_content
def emojis(rst_content):
# require
emojis_map = {
':)': 'smile',
':-)': 'smile',
';)': 'wink',
';-)': 'wink',
':-?': 'smirk',
':?': 'smirk',
':(': 'confused',
':-(': 'confused',
':D': 'laughing',
':-D': 'laughing',
':-P': 'stuck_out_tongue_closed_eyes',
':P': 'stuck_out_tongue_closed_eyes',
":'(": 'cry',
":'-(": 'cry',
}
def replace_emoji(match):
replacement = emojis_map.get(match.groups()[0], '')
if replacement:
return '|%s|' % replacement
return ''
result = re.sub(r'\|((?:\:|;).{1,3})\|', replace_emoji, rst_content, flags=re.MULTILINE)
return result
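# Illustrative note, not part of the original source: the substitution above rewrites emoticon
# markers into reST substitution references, e.g. '|:)|' -> '|smile|' and '|;-)|' -> '|wink|';
# markers missing from emojis_map, such as '|:o|', are simply dropped.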
def email(rst_content):
pattern = r'`\[\[MailTo\((.*)\)\]\]`_(?:\.\.)?'
return re.sub(pattern, r'``\1``', rst_content)
def title_level(rst_content):
def dashrepl(matchobj):
return '-' * len(matchobj.group(0))
pattern = r'^~+$'
return re.sub(pattern, dashrepl, rst_content, flags=re.MULTILINE)
def code(rst_content):
if not pandoc:
return rst_content
pattern = r'^\:\:\n\s+\.\. raw:: html\n\s+(<span class\=\"line\"\>.*?|\s+?<\/span\>)\n\s*$'
def convert(match):
source = match.groups()[0]
source = '\n'.join(l.strip() for l in source.split('\n'))
source = "<pre>%s</pre>" % source
rst_source = pandoc(echo(source), f='html', t='rst').stdout.decode('utf8')
# rst_source = rst_source.strip().replace('\n', '\n ') + '\n'
return rst_source
result = re.sub(pattern, convert, rst_content, flags=re.DOTALL | re.MULTILINE)
return result
class Command(BaseCommand):
help = 'Cleanup filters for a moin2git import'
option_list = (
make_option('--limit-to',
dest='slug',
default='',
help="optional namespace"),
make_option('--filters',
dest='filters',
default='all',
help="comma separated list of filter functions to apply"),
make_option('--message',
dest='message',
default=_("RestructuredText clean up"),
help="log message"),
) + BaseCommand.option_list
def handle(self, *args, **options):
valid_filters = ['meta', 'links',
'attachments', 'directives',
'emojis', 'title', 'email', 'code', 'title_level']
slug = options['slug']
filters = options['filters']
if filters == 'all':
filters = valid_filters
else:
filters = [f.strip() for f in filters.split(',')]
if not set(filters).issubset(valid_filters):
valid = get_text_list(valid_filters, 'and')
raise CommandError("At least one filter is unknown. Valid filters are:\n %s" % valid)
if slug:
pages = Page.objects.filter(slug__startswith=slug)
else:
pages = Page.objects.all()
for page in pages:
title = None
print('\nApplying filter/s %s to %s' % (get_text_list(filters, 'and'), page.slug))
raw = page.raw
if 'meta' in filters:
raw = clean_meta(raw)
if 'links' in filters:
raw = delete_relative_links(raw)
if 'attachments' in filters:
raw = attachments(raw, page.slug)
if 'directives' in filters:
raw = directives(raw)
if 'emojis' in filters:
raw = emojis(raw)
if 'email' in filters:
raw = email(raw)
if 'title_level' in filters:
raw = title_level(raw)
if 'code' in filters:
if not pandoc:
print('The filter "code" needs Pandoc installed on your system. Ignoring')
else:
raw = code(raw)
if 'title' in filters and not page.title:
title = page._get_part('get_document_title')
if raw != page.raw or title:
if title:
page.title = title
if raw != page.raw:
page.raw = raw
page.save()
page_saved.send_robust(sender='moin',
page=page,
author=None,
message=options['message'],
form_extra_data={})
else:
print('Nothing changed. Ignoring update')
| RobertoMaurizzi/waliki | waliki/management/commands/moin_migration_cleanup.py | Python | bsd-3-clause | 6,418 |
import hypertrack
hypertrack.secret_key = 'c237rtyfeo9893u2t4ghoevslsd'
customer = hypertrack.Customer.create(
name='John Doe',
email='john@customer.com',
phone='+15555555555',
)
print(customer)
| hypertrack/hypertrack-python | example.py | Python | mit | 211 |
#!/usr/bin/env python
#########################################################################
# Reinforcement Learning with PGPE on the Acrobot Environment
#
# The Acrobot Environment is a 1 DoF system.
# The goal is to swing up the pole and balance it.
# The motor is underpowered so that the pole can not go directly to the upright position.
# It has to swing several times to gain enough momentum.
#
# Control/Actions:
# The agent can control 1 joint.
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
# Author: Frank Sehnke, sehnke@in.tum.de
#########################################################################
__author__ = "Frank Sehnke"
__version__ = '$Id$'
from pybrain.tools.example_tools import ExTools
from pybrain.rl.environments.ode import AcrobotEnvironment
from pybrain.rl.environments.ode.tasks import GradualRewardTask
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import PGPE
from pybrain.rl.experiments import EpisodicExperiment
from time import sleep
batch=1 #number of samples per learning step
prnts=1 #number of learning steps after results are printed
epis=4000/batch/prnts #number of rollouts
numbExp=40 #number of experiments
et = ExTools(batch, prnts) #tool for printing and plotting
env = None
for runs in range(numbExp):
# create environment
#Options: Bool(OpenGL), Bool(Realtime simu. while client is connected), ServerIP(default:localhost), Port(default:21560)
if env != None: env.closeSocket()
env = AcrobotEnvironment()
# create task
task = GradualRewardTask(env)
# create controller network
net = buildNetwork(len(task.getObservation()), env.actLen, outclass=TanhLayer)
# create agent with controller and learner (and its options)
agent = OptimizationAgent(net, PGPE(storeAllEvaluations = True,
learningRate = 0.05,
sigmaLearningRate = 0.025,
momentum = 0.0,
epsilon = 6.0,
rprop = False,))
et.agent = agent
#agent.learner.bestEvaluation = 1500
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
et.addExps()
et.showExps()
#To view what the simulation is doing at the moment, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL must be installed, see PyBrain documentation)
| affordablewindurbines/jarvisproject | pybrain/examples/rl/environments/ode/acrobot_pgpe.py | Python | gpl-3.0 | 2,788 |
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Read module."""
from collections import Iterable
from six import string_types
from .base import CRUDElement
from .join import Join
__all__ = ['Read', 'Cursor']
class Read(CRUDElement):
"""In charge of parameterize a reading request.
Execution is done in calling it or in using the getslice method.
Result is a Cursor."""
__slots__ = [
'_select', '_offset', '_limit', '_orderby', '_groupby', '_join'
] + CRUDElement.__slots__
def __init__(
self,
select=None, offset=None, limit=None, orderby=None, groupby=None,
join=None, *args, **kwargs
):
"""
:param list select: data to select.
:param int offset: data to avoid.
:param int limit: max number of data to retrieve.
:param str orderby: data sorting.
:param list groupby: data field group.
:param join: join type (INNER, LEFT, etc.).
:type join: str or Join
"""
super(Read, self).__init__(*args, **kwargs)
# initialize protected properties
self._select = ()
self._offset = None
self._limit = None
self._orderby = ()
self._groupby = None
self._join = None
# set parameters
if select is not None:
self.select(*select)
if offset is not None:
self.offset(offset)
if limit is not None:
self.limit(limit)
if orderby is not None:
self.orderby(*orderby)
if groupby is not None:
self.groupby(groupby)
if join is not None:
self.join(join)
def offset(self, *value):
"""Get or set offset if value is not None.
:param int value: value to set. Default is None.
:return: depending on value. If None, return this offset, otherwise
this.
:rtype: int or Read
"""
if value:
value = value[0]
if not isinstance(value, int):
raise TypeError(
'Wrong value {0}. {1} expected'.format(value, int)
)
self._offset = value
result = self
else:
result = self._offset
return result
def limit(self, *value):
"""Get or set limit if value is not None.
:param int value: value to set. Default is None.
:return: depending on value. If None, return this limit, otherwise
this.
:rtype: int or Read
"""
if value:
value = value[0]
if not isinstance(value, int):
raise TypeError(
'Wrong value {0}. {1} expected'.format(value, int)
)
result = self
self._limit = value
else:
result = self._limit
return result
def orderby(self, *values):
"""Get or set orderby if value is not None.
:param tuple value: value to set. Default is None.
:return: depending on value. If None, return this orderby, otherwise
this.
:rtype: tuple or Read
"""
if values:
self._orderby = values
result = self
else:
result = self._orderby
return result
def groupby(self, *value):
"""Get or set groupby if value exists.
:param str value: value to set. Default is None.
:return: depending on value. If None, return this groupby, otherwise
this.
:rtype: str or Read
"""
if value:
value = value[0]
if not isinstance(value, string_types):
raise TypeError(
'Wrong value {0}. {1} expected'.format(value, str)
)
result = self
self._groupby = value
else:
result = self._groupby
return result
def select(self, *values):
"""Get or set select if value is not None.
:param tuple value: value to set. Default is None.
:return: depending on value. If None, return this select, otherwise
this.
:rtype: tuple or Read
"""
if values:
self._select = values
result = self
else:
result = self._select
return result
def join(self, *value):
"""Get or set join if value is not None.
:param value: value to set. Default is None.
:type value: str or Join
:return: depending on value. If None, return this join, otherwise
this.
:rtype: str or Join or Read
"""
if value:
value = value[0]
if not isinstance(value, string_types + (Join,)):
raise TypeError(
'Wrong value {0}. {1} expected'.format(
value, string_types + (Join,)
)
)
self._join = value.name if isinstance(value, Join) else value
result = self
else:
result = self._join
return result
def __getslice__(self, start, stop):
"""Set offset and limit and execute the selection.
:param int start: offset property.
:param int stop: limit property.
:return: selection execution result.
:rtype: Cursor"""
if start is not None:
self._offset = start
if stop is not None:
self._limit = stop
return self()
def __getitem__(self, key):
if not isinstance(key, slice):
key = slice(key, key + 1)
return self.__getslice__(key.start, key.stop)
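# Illustrative note, not part of the original source: for a hypothetical Read instance `read`,
# read[10:20] sets _offset=10 and _limit=20 and then executes the request, returning a Cursor;
# read[5] is treated as the slice 5:6.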
def __repr__(self):
if self._select:
items = [repr(item) for item in self._select]
select = ', '.join(items)
else:
select = 'ALL'
result = 'READ {0} '.format(select)
if self._limit or self._offset or self._groupby or self._orderby:
if self._limit is not None:
result += 'LIMIT {0} '.format(repr(self._limit))
if self._offset is not None:
result += 'OFFSET {0} '.format(repr(self._offset))
if self._groupby:
result += 'GROUP BY {0} '.format(self._groupby)
if self._orderby:
items = [repr(item) for item in self._orderby]
result += 'ORDER BY {0} '.format(', '.join(items))
if self.query:
result += 'WHERE {0} '.format(repr(self.query))
if self.dparams:
result += 'WITH {0} '.format(repr(self.dparams))
if self.alias:
result += 'AS {0}'.format(self.alias)
if result[-1] == ' ':
result = result[:-1]
return result
class Cursor(Iterable):
"""Read request result."""
def __init__(self, cursor, *args, **kwargs):
super(Cursor, self).__init__(*args, **kwargs)
self._cursor = cursor
def __len__(self):
return len(self._cursor)
def __iter__(self):
return iter(self._cursor)
def __getitem__(self, key):
return self._cursor[key]
def __getslice__(self, i, j):
return self._cursor[i:j]
| b3j0f/requester | b3j0f/requester/request/crud/read.py | Python | mit | 8,583 |
import logging
from datetime import datetime
from email.message import EmailMessage
from smtplib import SMTP_SSL, SMTPException
__author__ = 'Miel Donkers <miel.donkers@gmail.com>'
log = logging.getLogger(__name__)
class Mailer:
def __init__(self, mail_backoff_timeout_in_sec, smtp_address, smtp_user, smtp_password, mail_from_address, mail_to_address):
self.backoff_timeout_in_sec = mail_backoff_timeout_in_sec
self.smtp_address = smtp_address
self.smtp_user = smtp_user
self.smtp_password = smtp_password
self.mail_from_address = mail_from_address
self.mail_to_address = mail_to_address
self.last_mail_timestamp = datetime.min
def get_last_mail_timestamp(self):
return self.last_mail_timestamp
def send_mail(self, message, ignore_last_mail_timestamp=False):
if not ignore_last_mail_timestamp and (
datetime.utcnow() - self.last_mail_timestamp).total_seconds() < self.backoff_timeout_in_sec:
log.warning('Not mailing, a mail was already sent recently')
return
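# Illustrative note, not part of the original source: with a hypothetical
# backoff_timeout_in_sec of 3600, a second alarm raised within an hour of the previous mail
# is suppressed by the early return above unless ignore_last_mail_timestamp=True is passed.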
log.info('Trying to send mail with message: {}'.format(str(message)))
try:
with SMTP_SSL(self.smtp_address, timeout=30) as smtp: # 30 sec timeout should be sufficient
# smtp.set_debuglevel(2)
smtp.login(self.smtp_user, self.smtp_password)
msg = EmailMessage()
msg.set_content('Alarm notification!\nSome unexpected event:\n\n{}'.format(str(message)))
msg['Subject'] = 'ALARM NOTIFICATION!'
msg['From'] = self.mail_from_address
msg['To'] = self.mail_to_address
smtp.send_message(msg)
if not ignore_last_mail_timestamp:
self.last_mail_timestamp = datetime.utcnow()
except SMTPException:
log.exception('Failed to send email!')
| mdonkers/AlarmListener | alarmlistener/mailer.py | Python | mit | 1,928 |
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Tsartsaris Sotiris"
__copyright__ = "Copyright 2014, The TSP Project"
__credits__ = ["Tsartsaris Sotiris"]
__license__ = "APACHE 2.0"
__version__ = "1.0.1"
__maintainer__ = "Tsartsaris Sotiris"
__email__ = "info@tsartsaris.gr"
__status__ = "Development"
"""
Provided with the initial population, we first apply a fitness function,
defined as the best distance we have after each iteration divided by the
tour distance. Then we apply a roulette-wheel selection on the population,
making a random pick weighted by the accumulated fitness of each tour
"""
from operator import itemgetter
import random
import collections
import copy
from tsp_distance import *
class TSPGeneticAlgo(object):
def __init__(self, initial_population, city_tour_init, total_best):
self.children_dirty = []
self.groups_of_two = []
self.selected_for_breeding = [] # here we store the population selected from the tournament selection
self.population_for_crossover = [] # based on the crossover probability, a random sample of that size is appended here
self.population_for_mutation = [] # the remainder from the above goes here for the mutation operation
self.all_fitness = []
self.offsprings = [] # this is where we are going to store all offsprings for later breeding
self.initial_population = initial_population
self.tour_init = city_tour_init
self.total_best = total_best[0]
self.calculate_fitness(self.initial_population)
# self.tournament_selection(self.all_fitness)
self.best_selection()
for each in self.selected_for_breeding:
self.offsprings.append(each[1])
# print self.selected_for_breeding
# self.divide_breeding_mut_cross(self.selected_for_breeding,
# 0) # produces population for crossover and mutation
# self.children_dirty = self.one_point_crossover(self.population_for_crossover)
# self.remove_duplicate_cities(self.children_dirty)
# #self.mutate_elitism()
# for each in self.population_for_mutation:
# self.offsprings.append(each[1])
def fitness_function(self, city_cost):
"""
We apply the fitness function to each distance by
dividing the lowest (best) distance by each one of them
"""
return round(float(float(self.total_best) / float(city_cost)), 7)
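# Illustrative note, not part of the original source: with a hypothetical best tour length of
# 100.0, a tour of length 125.0 gets fitness round(100.0 / 125.0, 7) == 0.8, so shorter tours
# score closer to 1.0 and the best tour scores exactly 1.0.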
def calculate_fitness(self, in_list):
"""
Given a list of distances we apply the fitness function to each
one of them
"""
for city_distance in in_list:
self.all_fitness.append([self.fitness_function(city_distance[0]), city_distance[1]])
def sorted_all_fintess(self):
self.sorted_fitness = sorted(self.all_fitness, key=itemgetter(0))
def tournament_selection(self, in_list):
"""
We repeatedly sample random pairs from the population and keep the
fitter tour of each pair for breeding in the next step.
"""
if len(in_list)%2 != 0:
in_list.pop()
while in_list:
local = random.sample(in_list, 2)
for i in local:
in_list.remove(i)
best = max(i[0] for i in local)
for dub in local:
if dub[0] == best:
self.selected_for_breeding.append(dub)
def random_pick_doubles(self, in_list):
"""
We iterate the selected population and we create groups of 2
to make the breeding in the next step.
"""
if (len(in_list) % 2) != 0:
in_list.pop()
while in_list:
local = random.sample(in_list, 2)
for i in local:
in_list.remove(i)
self.groups_of_two.append(local)
def divide_breeding_mut_cross(self, in_list, percentage_crossover):
"""
Based on the percentage crossover we separate the breeding list
to a list with chromosomes for crossover and a list with chromosomes
for mutation. If the percentage is 0.8 and the breeding population
is 100 the 80 chromosomes will be selected for crossover and the rest
20 for mutation.
"""
total = len(in_list)
amount_for_crossover = int(total * percentage_crossover)
self.population_for_crossover = random.sample(in_list, amount_for_crossover)
self.population_for_mutation = [x for x in in_list if x not in self.population_for_crossover]
def one_point_crossover(self, in_list):
"""
Given the list of chromosomes we first create random pairs of doubles
and then we apply a simple one-point crossover by choosing a random
point at which the operator is going to take place
"""
local_children = []
self.random_pick_doubles(in_list)
local_doubles = self.groups_of_two
while local_doubles:
double = local_doubles.pop()
ind1 = double[0][1]
ind2 = double[1][1]
size = min(len(ind1), len(ind2))
cxpoint = random.randint(1, size - 1)
child1 = ind1[cxpoint:] + ind2[:cxpoint]
child2 = ind2[cxpoint:] + ind1[:cxpoint]
local_children.append(child1)
local_children.append(child2)
return local_children
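# Illustrative note, not part of the original source: with hypothetical parents [1, 2, 3, 4] and
# [3, 4, 1, 2] and cxpoint == 2, child1 becomes [3, 4] + [3, 4] == [3, 4, 3, 4]; duplicated
# cities like these are what remove_duplicate_cities() later repairs.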
def pmx_crossover(self, in_list):
"""
Given the list of chromosomes we first create random pairs of doubles
and then we apply a partially mapped crossover (PMX) over a randomly
chosen segment of each pair
"""
local_children = []
self.random_pick_doubles(in_list)
local_doubles = self.groups_of_two
while local_doubles:
double = local_doubles.pop()
mom = double[0][1]
dad = double[1][1]
size = len(mom)
points = random.sample(range(size), 2)
x, y = min(points), max(points)
bro = copy.copy(dad)
bro[x:y + 1] = mom[x:y + 1]
sis = copy.copy(mom)
sis[x:y + 1] = dad[x:y + 1]
for parent, child in zip([dad, mom], [bro, sis]):
for i in range(x, y + 1):
if parent[i] not in child[x:y + 1]:
spot = i
while x <= spot <= y:
spot = parent.index(child[spot])
child[spot] = parent[i]
local_children.append(bro)
local_children.append(sis)
return local_children
def best_selection(self):
self.selected_for_breeding = self.all_fitness # [:len(self.all_fitness) / 2]
def remove_duplicate_cities(self, in_list):
"""
The offspring from the crossover may contain duplicate cities, which must
be removed by replacing them with cities that are not yet in the offspring
"""
for dirty in in_list:
# coin = random.randint(1, 10)
# if coin == 1:
self.random_cities = []
self.random_cities[:] = []
self.differs = [x for x in self.tour_init if x not in dirty]
uniq = [x for x, y in collections.Counter(dirty).items() if y > 1]
if self.differs and uniq:
coin = random.randint(0, 3)
if coin == 0:
self.random_remaining_cities = self.differs[:]
for unique in uniq:
index = dirty.index(unique)
dirty.pop(index)
city = self.pick_random_city()
best_nn_tour = self.create_nearest_tour(city)
dirty = dirty + best_nn_tour
elif coin == 1:
self.random_remaining_cities = self.differs[:]
for unique in uniq:
index = dirty.index(unique)
dirty.pop(index)
city = self.pick_random_city()
best_nn_tour = self.create_nearest_tour(city)
dirty = best_nn_tour + dirty
elif coin == 2:
self.random_remaining_cities = self.differs[:]
for unique in uniq:
index = dirty.index(unique)
dirty.pop(index)
city = self.pick_random_city()
best_nn_tour = self.create_nearest_tour(city)
randominsert = random.randint(1, len(dirty) - 1)
dirty = dirty[:randominsert] + best_nn_tour + dirty[randominsert:]
else:
for unique in uniq:
index = dirty.index(unique)
dirty.pop(index)
dirty.insert(index, self.differs[-1])
self.differs.pop()
self.offsprings.append(dirty) # at this point we have all the children from the crossover operation
# cleaned from duplicates in the self.offsprings list
# else:
# differs = [x for x in self.tour_init if x not in dirty]
# uniq = [x for x, y in collections.Counter(dirty).items() if y > 1]
#
# self.offsprings.append(dirty) # at this point we have all the children from the crossover operation
# cleaned from duplicates in the self.offsprings list
def find_nn(self, city, list):
"""
Given a city we find the next nearest city
"""
start_city = self.get_coordinates_from_city(city)
return min((euclidean_distance(start_city, self.get_coordinates_from_city(rest)), rest) for rest in
list)
def get_coordinates_from_city(self, city):
"""
Given a city return the coordinates (x,y)
"""
return self.city_coords.get(city)
def pick_random_city(self):
"""
Randomly pick a city while preserving uniqueness: each picked city
is added to the random city list and removed from the remaining
cities, so every new pick comes from the shrinking list of
remaining cities
"""
if self.random_remaining_cities:
self.random_city = random.choice(self.random_remaining_cities)
self.random_remaining_cities.remove(self.random_city)
self.random_cities.append(self.random_city)
return self.random_city
def create_nearest_tour(self, city):
prov_list = self.differs[:]
nearest_tour = [city]
if city in prov_list: prov_list.remove(city)
while prov_list:
current_city = nearest_tour[-1]
next_city = self.find_nn(current_city, prov_list)
nearest_tour.append(next_city[1])
prov_list.remove(next_city[1])
return nearest_tour
@staticmethod
def insertion_mutation(in_list):
tour_range = len(in_list)
randominsert = random.randint(0, tour_range - 1)
randomip = random.randint(0, tour_range - 1)
city_to_insert = in_list.pop(randomip)
in_list.insert(randominsert, city_to_insert)
return in_list
@staticmethod
def reciprocal_exchange_mutation(in_list):
a = random.randint(0, len(in_list) - 1)
b = random.randint(0, len(in_list) - 1)
in_list[b], in_list[a] = in_list[a], in_list[b]
return in_list
@staticmethod
def two_opt_mutation(in_list):
a = random.randint(0, len(in_list) - 2)
b = a + 1
in_list[b], in_list[a] = in_list[a], in_list[b]
return in_list
@staticmethod
def inversion_mutation(in_list):
a = random.randint(0, len(in_list) - 1)
b = random.randint(0, len(in_list) - 1)
if a > b:
a, b = b, a
first, second, third = in_list[:a], in_list[a:b], in_list[b:]
in_list = first + second[::-1] + third
return in_list
@staticmethod
def inverse(in_list):
in_list.reverse()
return in_list
def mutate_elitism(self):
for tour in self.population_for_mutation:
coin = random.randint(1, 3)
if coin == 1:
mutated = self.insertion_mutation(tour[1])
self.offsprings.append(mutated)
elif coin == 2:
mutated = self.reciprocal_exchange_mutation(tour[1])
self.offsprings.append(mutated)
else:
mutated = self.inversion_mutation(tour[1])
self.offsprings.append(mutated)
class circleGA(TSPGeneticAlgo):
def __init__(self, temp, local_temp, city_tour_init, total_best, city_coords, pop_size, p):
self.prop = p
self.rpopsize = pop_size
if self.prop == 1:
self.mutsize = 1
else:
self.mutsize = self.rpopsize - self.rpopsize * self.prop
self.children_dirty = []
self.groups_of_two = []
self.population_for_crossover = []
self.population_for_mutation = []
self.children_dirty[:] = []
self.groups_of_two[:] = []
self.population_for_crossover[:] = []
self.population_for_mutation[:] = []
self.offsprings = []
self.offsprings[:] = []
self.temp = temp
self.local_temp = local_temp
self.tour_init = city_tour_init
self.total_best = total_best[0]
self.city_coords = city_coords
self.pre_temp = []
self.entire_population = []
self.all_fitness = []
self.all_fitness[:] = []
self.initial_population = []
self.initial_population[:] = []
self.selected_for_breeding = [] # here we store the population selected from the tournament selection
self.selected_for_breeding[:] = []
self.add_init_offsprings()
self.calculate_fitness(self.entire_population)
self.all_fitness_temp = []
self.all_fitness_temp[:] = self.all_fitness
# num = random.randint(1,2)
# if num == 1:
#self.tournament_selection(self.all_fitness)
# else:
self.best_selection()
self.complete_initial_exchanged_population()
self.normalize_initial_population()
self.initial_population[:] = self.temp
self.selected_for_breeding[:] = []
self.calculate_fitness(self.initial_population)
# num = random.randint(1,2)
# if num == 1:
#self.tournament_selection(self.all_fitness)
# else:
self.best_selection()
self.divide_breeding_mut_cross(self.selected_for_breeding,
self.prop) # produces population for crossover and mutation
crosscoin = random.randint(0, 4)
if crosscoin == 0:
self.children_dirty = self.pmx_crossover(self.population_for_crossover)
else:
self.children_dirty = self.one_point_crossover(self.population_for_crossover)
self.remove_duplicate_cities(self.children_dirty)
self.complete_population_for_mutation()
self.normalise_lists(self.population_for_mutation)
self.mutate_elitism()
self.fin()
def add_init_offsprings(self):
self.entire_population[:] = []
self.entire_population = self.temp + self.local_temp
self.entire_population = sorted(self.entire_population, key=lambda x: x[0])
self.entire_population = self.entire_population[:self.rpopsize]
def complete_initial_exchanged_population(self):
while len(self.selected_for_breeding) < self.rpopsize:
tour_to_add = random.choice(self.all_fitness_temp)
if tour_to_add not in self.selected_for_breeding:
self.selected_for_breeding.append(tour_to_add)
def complete_population_for_mutation(self):
if len(self.population_for_mutation) > self.mutsize:
while len(self.population_for_mutation) != self.mutsize:
todel = random.choice(self.population_for_mutation)
self.population_for_mutation.remove(todel)
else:
while len(self.population_for_mutation) != self.mutsize:
toadd = random.choice(self.all_fitness_temp)
coin = random.randint(0, 3)
if coin == 0:
mutated = self.inversion_mutation(toadd)
self.population_for_mutation.append(mutated)
elif coin == 2:
mutated = self.insertion_mutation(toadd)
self.population_for_mutation.append(mutated)
elif coin == 1:
mutated = self.reciprocal_exchange_mutation(toadd)
self.population_for_mutation.append(mutated)
else:
mutated = self.two_opt_mutation(toadd)
self.population_for_mutation.append(mutated)
def normalize_initial_population(self):
for each in self.selected_for_breeding:
self.pre_temp.append(each[1])
pre_temp_distances_list = []
for offspring in self.pre_temp:
offspring_distance = TSPDistance(offspring, self.city_coords)
pre_temp_distances_list.append((offspring_distance.distance_cost, offspring_distance.tourlist))
self.temp = sorted(pre_temp_distances_list, key=lambda x: x[0])
@staticmethod
def normalise_lists(in_list):
for eachone in in_list:
if type(eachone[1]) == float:
eachone.reverse()
def fin(self):
self.initial_population[:] = []
self.initial_population = sorted(self.temp, key=lambda x: x[0])
return self.initial_population
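# Editor's sketch (not part of the original module): a minimal, hedged example of
# the static mutation operators defined above, applied to a toy tour. The tour
# values are illustrative assumptions only.
if __name__ == "__main__":
    demo_tour = list(range(6))
    print(circleGA.reciprocal_exchange_mutation(demo_tour[:]))
    print(circleGA.two_opt_mutation(demo_tour[:]))
    print(circleGA.inversion_mutation(demo_tour[:]))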
|
tsartsaris/TSP
|
tsp_ga.py
|
Python
|
apache-2.0
| 17,995
|
#!/usr/bin/env python
# encoding: utf8
from setuptools import setup
setup(
name="SmallScrewdriver",
version="1.0.2",
packages=['SmallScrewdriver', 'ScreamingMercury'],
scripts=['ScreamingMercury.py', 'SundaysIron.py'],
install_requires=['PySide>=1.2.1',
'SillyCrossbow>=1.0.8'],
package_data={
'': ['*.txt', '*.rst', 'ScreamingMercury/*.png']
},
author="Shnaider Pavel",
author_email="shnaiderpasha@gmail.com",
description="""
SmallScrewdriver is a Python texture packer library, with frontends on PySide GUI, Flask/React.js, and the console
""",
license="LGPL",
keywords="texture",
url="https://github.com/Ingener74/Small-Screwdriver",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"License :: Freeware",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
"Natural Language :: English",
"Natural Language :: Russian",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Multimedia",
"Topic :: Multimedia :: Graphics",
"Topic :: Multimedia :: Graphics :: Editors",
]
)
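# Editor's note (illustrative, not part of the original file): a typical way to
# install this package from a source checkout would be `pip install .`, or
# `python setup.py sdist` to build a distributable archive.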
|
Ingener74/Small-Screwdriver
|
setup.py
|
Python
|
lgpl-3.0
| 1,340
|
'''
Created on Jun 14, 2011
@author: lebleu1
'''
import unittest
from sccp.messagefactory import MessageFactory
from sccp.sccpregisterack import SCCPRegisterAck
from sccp.sccpcapabilitiesreq import SCCPCapabilitiesReq
from sccp.sccpkeepaliveack import SCCPKeepAliveAck
from sccp.sccpdefinetimedate import SCCPDefineTimeDate
from sccp.sccplinestat import SCCPLineStat
class TestMessageFactory(unittest.TestCase):
def setUp(self):
self.messageFactory = MessageFactory()
def testCreateRegisterAck(self):
receivedBuffer = "\x00\x00\x00\x00\x81\x00\x00\x00\x00\x0b\x00\x00"
msg = self.messageFactory.create(receivedBuffer)
self.assertTrue(isinstance(msg, SCCPRegisterAck))
def testCreateCapabilitiesReq(self):
receivedBuffer = "\x00\x00\x00\x00\x9b\x00\x00\x00\x00\x0b\x00\x00"
msg = self.messageFactory.create(receivedBuffer)
self.assertTrue(isinstance(msg, SCCPCapabilitiesReq))
def testCreateKeepAliveAck(self):
receivedBuffer = "\x00\x00\x00\x00\x00\x01\x00\x00\x00\x0b\x00\x00"
msg = self.messageFactory.create(receivedBuffer)
self.assertTrue(isinstance(msg, SCCPKeepAliveAck))
def testCreateDefineTimeDate(self):
receivedBuffer = "\x00\x00\x00\x00\x94\x00\x00\x00\x00\x0b\x00\x00"
msg = self.messageFactory.create(receivedBuffer)
self.assertTrue(isinstance(msg, SCCPDefineTimeDate))
def testCreateLineStat(self):
receivedBuffer = "\x00\x00\x00\x00\x92\x00\x00\x00\x00\x0b\x00\x00"
msg = self.messageFactory.create(receivedBuffer)
self.assertTrue(isinstance(msg, SCCPLineStat))
def testCreateUnknownType(self):
receivedBuffer = "\x00\x00\x00\x00\xFF\xFF\x00\x00\x00\x0b\x00\x00"
msg = self.messageFactory.create(receivedBuffer)
self.assertEquals(0xFFFF,msg.sccpmessageType)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
mwicat/sccp
|
sccp/tests/test_messsagefactory.py
|
Python
|
gpl-3.0
| 2,059
|
#!/usr/bin/env python
# coding=utf-8
"""
Created on April 15 2017
@author: yytang
"""
from scrapy import Selector
from libs.misc import get_spider_name_from_domain
from libs.polish import *
from novelsCrawler.spiders.novelSpider import NovelSpider
class YushuwuNetMobileSpider(NovelSpider):
"""
classdocs
example: http://m.yushuwu.net/novel/list/35797/1.html
"""
allowed_domains = ['m.yushuwu.net']
name = get_spider_name_from_domain(allowed_domains[0])
custom_settings = {
'DOWNLOAD_DELAY': 0.2,
}
def parse_title(self, response):
sel = Selector(response)
title = sel.xpath('//title/text()').extract()[0]
title = polish_title(title, self.name)
return title
def parse_episodes(self, response):
sel = Selector(response)
episodes = []
subtitle_selectors = sel.xpath('//ul/li/a')
subtitle_selectors = subtitle_selectors[1:-1]
def cmp(item):
text = item.xpath('text()').extract()[0]
p = '[^\d]+([\d]+)'
return int(re.search(p, text).group(1))
subtitle_selectors.sort(key=cmp)
for page_id, subtitle_selector in enumerate(subtitle_selectors):
subtitle_url = subtitle_selector.xpath('@href').extract()[0]
m = re.search("javascript:[t|g]oChapter\(([\d]+),([\d]+)\);", subtitle_url)
if m is not None:
subtitle_url = '/novel/{0}/{1}.html'.format(m.group(1), m.group(2))
subtitle_url = response.urljoin(subtitle_url.strip())
subtitle_name = subtitle_selector.xpath('text()').extract()[0]
subtitle_name = polish_subtitle(subtitle_name)
episodes.append((page_id, subtitle_name, subtitle_url))
return episodes
def parse_content(self, response):
sel = Selector(response)
content = sel.xpath('//div[@id="nr1"]/text()').extract()
content += sel.xpath('//div[@id="nr1"]/p/text()').extract()
content = polish_content(content)
return content
def get_next_page_url(self, response):
# if 'javascript' not in next_page_url:
sel = Selector(response)
next_page_url_list = sel.xpath('//div[@id="page"]/a[contains(text(), "下一页")]/@href').extract()
if len(next_page_url_list) != 0:
next_page_url = next_page_url_list[0]
return next_page_url
else:
return None
#
# # -*- coding: utf-8 -*-
#
# import scrapy
# from scrapy import Selector
#
# from libs.misc import get_spider_name_from_domain
# from libs.polish import *
# from novelsCrawler.items import NovelsCrawlerItem
#
#
# class YushuwuNetMobileSpider(scrapy.Spider):
# """
# classdocs
#
# example: http://m.yushuwu.net/novel/list/35797/1.html
# """
#
# dom = 'm.yushuwu.net'
# name = get_spider_name_from_domain(dom)
# allowed_domains = [dom]
# custom_settings = {
# 'DOWNLOAD_DELAY': 0.2,
# }
#
# # tmp_root_dir = os.path.expanduser(settings['TMP_DIR'])
#
# def __init__(self, *args, **kwargs):
# super(YushuwuNetMobileSpider, self).__init__(*args, **kwargs)
# self.start_urls = kwargs['start_urls']
# self.tmp_novels_dir = kwargs['tmp_novels_dir']
# print(self.start_urls)
#
# # def start_requests(self):
# # for url in self.start_urls:
# # yield self.make_requests_from_url(url)
#
# def parse(self, response):
# start_page_key = 'startPage'
# title_key = 'title'
# index_key = 'index'
# if start_page_key in response.meta:
# start_page = response.meta[start_page_key]
# else:
# start_page = 1
# if index_key in response.meta:
# page_index = response.meta[index_key]
# else:
# page_index = []
#
# sel = Selector(response)
# if title_key in response.meta:
# title = response.meta[title_key]
# else:
# title = sel.xpath('//title/text()').extract()[0]
# title = polish_title(title, self.name)
# print(title)
#
# tmp_spider_root_dir = os.path.join(self.tmp_novels_dir, title)
# if not os.path.isdir(tmp_spider_root_dir):
# os.makedirs(tmp_spider_root_dir)
#
# subtitle_selectors = sel.xpath('//ul/li/a')
# subtitle_selectors = subtitle_selectors[1:-1]
#
# def cmp(item):
# text = item.xpath('text()').extract()[0]
# p = '[^\d]+([\d]+)'
# return int(re.search(p, text).group(1))
#
# subtitle_selectors.sort(key=cmp)
# all_pages = [i + start_page for i in range(0, len(subtitle_selectors))]
# page_index += all_pages
# download_pages = polish_pages(tmp_spider_root_dir, all_pages)
#
# # Traverse the subtitle_selectors only crawler the pages that haven't been downloaded yet
# for i, subtitle_selector in enumerate(subtitle_selectors):
# page_id = i + start_page
# if page_id not in set(download_pages):
# continue
# else:
# subtitle_url = subtitle_selector.xpath('@href').extract()[0]
# m = re.search("javascript:[t|g]oChapter\(([\d]+),([\d]+)\);", subtitle_url)
# if m is not None:
# subtitle_url = '/novel/{0}/{1}.html'.format(m.group(1), m.group(2))
# subtitle_url = response.urljoin(subtitle_url.strip())
# subtitle_name = subtitle_selector.xpath('text()').extract()[0]
# subtitle_name = polish_subtitle(subtitle_name)
#
# item = NovelsCrawlerItem()
# item['title'] = title
# item['id'] = page_id
# item['subtitle'] = subtitle_name
# item['root_dir'] = tmp_spider_root_dir
# request = scrapy.Request(subtitle_url, callback=self.parse_page)
# request.meta['item'] = item
# yield request
#
# """ The following is useful only when multiple pages are downloaded """
# next_page_url = sel.xpath('//div[@id="page"]/a[contains(text(), "下一页")]/@href').extract()[0]
# if 'javascript' not in next_page_url:
# request = scrapy.Request(response.urljoin(next_page_url.strip()), callback=self.parse)
# request.meta[start_page_key] = len(subtitle_selectors) + start_page
# request.meta[title_key] = title
# request.meta[index_key] = page_index
# yield request
# else:
# save_index(title, response.url, tmp_spider_root_dir, page_index)
#
# def parse_page(self, response):
# item = response.meta['item']
# sel = Selector(response)
# content = sel.xpath('//div[@id="nr1"]/text()').extract()
# content += sel.xpath('//div[@id="nr1"]/p/text()').extract()
# content = polish_content(content)
# item['content'] = content
# return item
|
yytang2012/novels-crawler
|
novelsCrawler/spiders/m-yushuwu-net.py
|
Python
|
mit
| 7,055
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-19 20:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('suite', '0002_auto_20170219_0729'),
]
operations = [
migrations.AlterField(
model_name='user',
name='is_active',
field=models.BooleanField(default=True),
),
]
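# Editor's note (illustrative): this migration would normally be applied with
#   python manage.py migrate suite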
|
fsxfreak/club-suite
|
clubsuite/suite/migrations/0003_auto_20170219_2009.py
|
Python
|
mit
| 452
|
#!/usr/bin/python
import logging as log
import tornado.ioloop
import tornado.web
from WebService import WebService
log.basicConfig(filename='mms2.log',level=log.DEBUG)
handler_urls = WebService.loadHandlers('.')
application = tornado.web.Application(handler_urls)
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
|
OpenFilter/MediaManagerService
|
MMS.py
|
Python
|
gpl-2.0
| 370
|
"""
Base class for ensemble-based estimators.
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from ..base import clone
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..externals.joblib import cpu_count
class BaseEnsemble(BaseEstimator, MetaEstimatorMixin):
"""Base class for all ensemble classes.
Warning: This class should not be used directly. Use derived classes
instead.
Parameters
----------
base_estimator : object, optional (default=None)
The base estimator from which the ensemble is built.
n_estimators : integer
The number of estimators in the ensemble.
estimator_params : list of strings
The list of attributes to use as parameters when instantiating a
new base estimator. If none are given, default parameters are used.
Attributes
----------
`base_estimator_`: list of estimators
The base estimator from which the ensemble is grown.
`estimators_`: list of estimators
The collection of fitted base estimators.
"""
def __init__(self, base_estimator, n_estimators=10,
estimator_params=tuple()):
# Set parameters
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.estimator_params = estimator_params
# Don't instantiate estimators now! Parameters of base_estimator might
# still change. Eg., when grid-searching with the nested object syntax.
# This needs to be filled by the derived classes.
self.estimators_ = []
def _validate_estimator(self, default=None):
"""Check the estimator and set the `base_estimator_` attribute."""
if self.base_estimator is not None:
self.base_estimator_ = self.base_estimator
else:
self.base_estimator_ = default
if self.base_estimator_ is None:
raise ValueError("base_estimator cannot be None")
def _make_estimator(self, append=True):
"""Make and configure a copy of the `base_estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.base_estimator_)
estimator.set_params(**dict((p, getattr(self, p))
for p in self.estimator_params))
if append:
self.estimators_.append(estimator)
return estimator
def __len__(self):
"""Returns the number of estimators in the ensemble."""
return len(self.estimators_)
def __getitem__(self, index):
"""Returns the index'th estimator in the ensemble."""
return self.estimators_[index]
def __iter__(self):
"""Returns iterator over estimators in the ensemble."""
return iter(self.estimators_)
def _partition_estimators(ensemble):
"""Private function used to partition estimators between jobs."""
# Compute the number of jobs
if ensemble.n_jobs == -1:
n_jobs = min(cpu_count(), ensemble.n_estimators)
else:
n_jobs = min(ensemble.n_jobs, ensemble.n_estimators)
# Partition estimators between jobs
n_estimators = (ensemble.n_estimators // n_jobs) * np.ones(n_jobs,
dtype=np.int)
n_estimators[:ensemble.n_estimators % n_jobs] += 1
starts = np.cumsum(n_estimators)
return n_jobs, n_estimators.tolist(), [0] + starts.tolist()
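# Editor's sketch (illustrative only, not part of scikit-learn): how a derived
# ensemble is expected to use _validate_estimator and _make_estimator. The class
# name and the fit logic below are assumptions for demonstration.
class _SketchEnsemble(BaseEnsemble):
    """Minimal example subclass that fits n_estimators clones of the base estimator."""

    def fit(self, X, y):
        # Resolve `base_estimator_` (raises ValueError if neither an estimator
        # nor a default was supplied).
        self._validate_estimator()
        self.estimators_ = []
        for _ in range(self.n_estimators):
            # Clone the base estimator, apply estimator_params and store it.
            estimator = self._make_estimator(append=True)
            estimator.fit(X, y)
        return self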
|
Tong-Chen/scikit-learn
|
sklearn/ensemble/base.py
|
Python
|
bsd-3-clause
| 3,513
|
# -*- encoding: utf-8 -*-
# Module iasobel
def iasobel(f):
import numpy as np
from ia636 import iaimginfo, iapconv
wx = np.array([[1.,2.,1.],
[0.,0.,0.],
[-1.,-2.,-1.]])
wy = np.array([[1.,0.,-1.],
[2.,0.,-2.],
[1.,0.,-1.]])
gx = iapconv(f, wx)
gy = iapconv(f, wy)
mag = np.abs(gx + gy*1j)
theta = np.arctan2(gy,gx)
return mag,theta
|
robertoalotufo/ia636
|
ia636/iasobel.py
|
Python
|
bsd-3-clause
| 448
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetParticipant
# Retrieves details for an individual participant of a conference.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetParticipant(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetParticipant Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Twilio/Conferences/GetParticipant')
def new_input_set(self):
return GetParticipantInputSet()
def _make_result_set(self, result, path):
return GetParticipantResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetParticipantChoreographyExecution(session, exec_id, path)
class GetParticipantInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetParticipant
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountSID(self, value):
"""
Set the value of the AccountSID input for this Choreo. ((required, string) The AccountSID provided when you signed up for a Twilio account.)
"""
InputSet._set_input(self, 'AccountSID', value)
def set_AuthToken(self, value):
"""
Set the value of the AuthToken input for this Choreo. ((required, string) The authorization token provided when you signed up for a Twilio account.)
"""
InputSet._set_input(self, 'AuthToken', value)
def set_CallSID(self, value):
"""
Set the value of the CallSID input for this Choreo. ((required, string) The call id associated with the participant to retrieve.)
"""
InputSet._set_input(self, 'CallSID', value)
def set_ConferencesSID(self, value):
"""
Set the value of the ConferencesSID input for this Choreo. ((required, string) The id of the conference that the participant is in.)
"""
InputSet._set_input(self, 'ConferencesSID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
InputSet._set_input(self, 'ResponseFormat', value)
def set_SubAccountSID(self, value):
"""
Set the value of the SubAccountSID input for this Choreo. ((optional, string) The SID of the subaccount associated with the conference. If not specified, the main AccountSID used to authenticate is used in the request.)
"""
InputSet._set_input(self, 'SubAccountSID', value)
class GetParticipantResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetParticipant Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Twilio.)
"""
return self._output.get('Response', None)
class GetParticipantChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetParticipantResultSet(response, path)
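# Editor's sketch (illustrative, not from this file): the usual Temboo calling
# pattern for this Choreo. The TembooSession constructor and the
# execute_with_results() call follow the standard Temboo Python SDK and are
# assumptions here; all credential values are placeholders.
if __name__ == "__main__":
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = GetParticipant(session)
    inputs = choreo.new_input_set()
    inputs.set_AccountSID('ACxxxxxxxx')
    inputs.set_AuthToken('your_auth_token')
    inputs.set_CallSID('CAxxxxxxxx')
    inputs.set_ConferencesSID('CFxxxxxxxx')
    results = choreo.execute_with_results(inputs)
    print(results.get_Response())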
|
egetzel/wecrow
|
truehand2014/temboo/Library/Twilio/Conferences/GetParticipant.py
|
Python
|
apache-2.0
| 3,809
|
import os
import re
import nltk
from nltk.tag import tnt
from modules import cleaner
class Location:
def __init__(self):
train_data = []
with open(os.path.join(os.path.dirname(__file__), 'tagged_locations.txt'), 'r') as f:
for line in f:
train_data.append([nltk.tag.str2tuple(t) for t in line.split()])
self.tnt_pos_tagger = tnt.TnT()
self.tnt_pos_tagger.train(train_data)
grammar = r"""
LOC: {(<PRFX><PRFX>*<B-LOC><I-LOC>*)|(<B-LOC><I-LOC>*)}
"""
self.cp = nltk.RegexpParser(grammar)
def get_locations(self, tweet):
tweet = cleaner.clean(tweet)
tagged_chunked_tweet = self.cp.parse(self.tnt_pos_tagger.tag(nltk.word_tokenize(tweet)))
locations = []
for subtree in tagged_chunked_tweet.subtrees():
if subtree.label() == 'LOC':
location = []
for leave in subtree.leaves():
location.append(leave[0])
locations.append(' '.join(location))
return locations
def is_first_loc_similar(self, text1, text2):
try:
loc1 = self.get_locations(text1)[0]
except IndexError as e:
loc1 = ''
try:
loc2 = self.get_locations(text2)[0]
except IndexError as e:
loc2 = ''
return loc1 == loc2
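# Editor's sketch (not part of the original module): illustrative use of the
# Location tagger. The tweet text is an assumption, and the output depends on
# the bundled tagged_locations.txt training data.
if __name__ == "__main__":
    tagger = Location()
    print(tagger.get_locations('macet parah di jalan sudirman jakarta'))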
|
dwiajik/twit-macet-mining-v2
|
modules/location.py
|
Python
|
mit
| 1,393
|
# -*- encoding: utf-8 -*-
"""
django-thumbs by Antonio Melé
http://django.es
"""
from django.db.models import ImageField
from django.db.models.fields.files import ImageFieldFile
from PIL import Image
from django.core.files.base import ContentFile
import cStringIO
def generate_thumb(img, thumb_size, format):
"""
Generates a thumbnail image and returns a ContentFile object with the thumbnail
Parameters:
===========
img File object
thumb_size desired thumbnail size, ie: (200,120)
format format of the original image ('jpeg','gif','png',...)
(this format will be used for the generated thumbnail, too)
"""
img.seek(0) # see http://code.djangoproject.com/ticket/8222 for details
image = Image.open(img)
# Convert to RGB if necessary
if image.mode not in ('L', 'RGB', 'RGBA'):
image = image.convert('RGB')
# get size
thumb_w, thumb_h = thumb_size
# If you want to generate a square thumbnail
if thumb_w == thumb_h:
# quad
xsize, ysize = image.size
# get minimum size
minsize = min(xsize,ysize)
# largest square possible in the image
xnewsize = (xsize-minsize)/2
ynewsize = (ysize-minsize)/2
# crop it
image2 = image.crop((xnewsize, ynewsize, xsize-xnewsize, ysize-ynewsize))
# load is necessary after crop
image2.load()
# thumbnail of the cropped image (with ANTIALIAS to make it look better)
image2.thumbnail(thumb_size, Image.ANTIALIAS)
else:
# not quad
image2 = image
image2.thumbnail(thumb_size, Image.ANTIALIAS)
io = cStringIO.StringIO()
# PNG and GIF are the same, JPG is JPEG
if format.upper()=='JPG':
format = 'JPEG'
image2.save(io, format)
return ContentFile(io.getvalue())
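# Editor's note -- illustrative usage of generate_thumb (the filename and size
# below are assumptions):
#   with open('photo.jpg', 'rb') as original:
#       thumb = generate_thumb(original, (125, 125), 'jpeg')  # returns a ContentFile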
class ImageWithThumbsFieldFile(ImageFieldFile):
"""
See ImageWithThumbsField for usage example
"""
def __init__(self, *args, **kwargs):
super(ImageWithThumbsFieldFile, self).__init__(*args, **kwargs)
if self.field.sizes:
def get_size(self, size):
if not self:
return ''
else:
# unpack the requested size here instead of relying on loop variables from the enclosing scope
(w, h) = size
split = self.url.rsplit('.',1)
thumb_url = '%s.%sx%s.%s' % (split[0],w,h,split[1])
return thumb_url
for size in self.field.sizes:
(w,h) = size
setattr(self, 'url_%sx%s' % (w,h), get_size(self, size))
def save(self, name, content, save=True):
super(ImageWithThumbsFieldFile, self).save(name, content, save)
if self.field.sizes:
for size in self.field.sizes:
(w,h) = size
split = self.name.rsplit('.',1)
thumb_name = '%s.%sx%s.%s' % (split[0],w,h,split[1])
# you can use another thumbnailing function if you like
thumb_content = generate_thumb(content, size, split[1])
thumb_name_ = self.storage.save(thumb_name, thumb_content)
if not thumb_name == thumb_name_:
raise ValueError('There is already a file named %s' % thumb_name)
def delete(self, save=True):
name=self.name
super(ImageWithThumbsFieldFile, self).delete(save)
if self.field.sizes:
for size in self.field.sizes:
(w,h) = size
split = name.rsplit('.',1)
thumb_name = '%s.%sx%s.%s' % (split[0],w,h,split[1])
try:
self.storage.delete(thumb_name)
except:
pass
class ImageWithThumbsField(ImageField):
attr_class = ImageWithThumbsFieldFile
"""
Usage example:
==============
photo = ImageWithThumbsField(upload_to='images', sizes=((125,125),(300,200),)
To retrieve image URL, exactly the same way as with ImageField:
my_object.photo.url
To retrieve thumbnails URL's just add the size to it:
my_object.photo.url_125x125
my_object.photo.url_300x200
Note: The 'sizes' attribute is not required. If you don't provide it,
ImageWithThumbsField will act as a normal ImageField
How it works:
=============
For each size in the 'sizes' attribute of the field it generates a
thumbnail with that size and stores it following this format:
available_filename.[width]x[height].extension
Where 'available_filename' is the available filename returned by the storage
backend for saving the original file.
Following the usage example above: For storing a file called "photo.jpg" it saves:
photo.jpg (original file)
photo.125x125.jpg (first thumbnail)
photo.300x200.jpg (second thumbnail)
With the default storage backend if photo.jpg already exists it will use these filenames:
photo_.jpg
photo_.125x125.jpg
photo_.300x200.jpg
Note: django-thumbs assumes that if filename "any_filename.jpg" is available
filenames with this format "any_filename.[width]x[height].jpg" will be available, too.
To do:
======
Add method to regenerate thumbnails
"""
def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, sizes=None, **kwargs):
self.verbose_name=verbose_name
self.name=name
self.width_field=width_field
self.height_field=height_field
self.sizes = sizes
super(ImageField, self).__init__(**kwargs)
from south.modelsinspector import add_introspection_rules
#add_introspection_rules = ([], ["^tagging_autocomplete\.models\.TagAutocompleteField"])
# era la mia add_introspection_rules = ([], ["\.thumbs\.ImageWithThumbsField"])
add_introspection_rules([], ["^thumbs_logo\.ImageWithThumbsField"])
|
CARocha/asocam
|
thumbs_logo.py
|
Python
|
gpl-2.0
| 6,072
|
# authenticates with twitter, searches for microsoft, evaluates overall
# sentiment for microsoft
import numpy as np
import twitter
from textblob import TextBlob
f = open('me.auth')
keys = f.readlines()
# Read in keys
keys = [x.strip('\n') for x in keys]
# Connect
api = twitter.Api(consumer_key = keys[0],
consumer_secret = keys[1],
access_token_key = keys[2],
access_token_secret = keys[3])
print 'logged in as ', api.VerifyCredentials().name
search = api.GetSearch(term='microsoft', )
# Make text blobs out of status content
blobs = [ TextBlob(status.text) for status in search ]
sentiments = [ blob.sentiment.polarity for blob in blobs ]
filtered_sentiments = filter(lambda a: a!=0.0, sentiments)
overall_sentiment = sum(filtered_sentiments)/len(filtered_sentiments)
print 'Overall sentiment for microsoft: {0}'.format(overall_sentiment)
|
dankolbman/MarketCents
|
twitter_feed.py
|
Python
|
mit
| 920
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
class Vtk(CMakePackage):
"""The Visualization Toolkit (VTK) is an open-source, freely
available software system for 3D computer graphics, image
processing and visualization. """
homepage = "http://www.vtk.org"
url = "https://www.vtk.org/files/release/9.0/VTK-9.0.0.tar.gz"
list_url = "http://www.vtk.org/download/"
maintainers = ['chuckatkins', 'danlipsa']
version('9.0.0', sha256='15def4e6f84d72f82386617fe595ec124dda3cbd13ea19a0dcd91583197d8715')
version('8.2.0', sha256='34c3dc775261be5e45a8049155f7228b6bd668106c72a3c435d95730d17d57bb')
version('8.1.2', sha256='0995fb36857dd76ccfb8bb07350c214d9f9099e80b1e66b4a8909311f24ff0db')
version('8.1.1', sha256='71a09b4340f0a9c58559fe946dc745ab68a866cf20636a41d97b6046cb736324')
version('8.1.0', sha256='6e269f07b64fb13774f5925161fb4e1f379f4e6a0131c8408c555f6b58ef3cb7')
version('8.0.1', sha256='49107352923dea6de05a7b4c3906aaf98ef39c91ad81c383136e768dcf304069')
version('7.1.0', sha256='5f3ea001204d4f714be972a810a62c0f2277fbb9d8d2f8df39562988ca37497a')
version('7.0.0', sha256='78a990a15ead79cdc752e86b83cfab7dbf5b7ef51ba409db02570dbdd9ec32c3')
version('6.3.0', sha256='92a493354c5fa66bea73b5fc014154af5d9f3f6cee8d20a826f4cd5d4b0e8a5e')
version('6.1.0', sha256='bd7df10a479606d529a8b71f466c44a2bdd11fd534c62ce0aa44fad91883fa34')
# VTK7 defaults to OpenGL2 rendering backend
variant('opengl2', default=True, description='Enable OpenGL2 backend')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('python', default=False, description='Enable Python support')
variant('qt', default=False, description='Build with support for Qt')
variant('xdmf', default=False, description='Build XDMF file support')
variant('ffmpeg', default=False, description='Build with FFMPEG support')
variant('mpi', default=True, description='Enable MPI support')
patch('gcc.patch', when='@6.1.0')
# At the moment, we cannot build with both osmesa and qt, but as of
# VTK 8.1, that should change
conflicts('+osmesa', when='+qt')
extends('python', when='+python')
# Acceptable python versions depend on vtk version
# We need vtk at least 8.0.1 for python@3,
# and at least 9.0 for python@3.8
depends_on('python@2.7:2.9', when='@:8.0 +python', type=('build', 'run'))
depends_on('python@2.7:3.7.9', when='@8.0.1:8.9 +python',
type=('build', 'run'))
depends_on('python@2.7:', when='@9.0: +python', type=('build', 'run'))
# We need mpi4py if building python wrappers and using MPI
depends_on('py-mpi4py', when='+python+mpi', type='run')
# python3.7 compatibility patch backported from upstream
# https://gitlab.kitware.com/vtk/vtk/commit/706f1b397df09a27ab8981ab9464547028d0c322
patch('python3.7-const-char.patch', when='@7.0.0:8.1.1 ^python@3.7:')
# The use of the OpenGL2 backend requires at least OpenGL Core Profile
# version 3.2 or higher.
depends_on('gl@3.2:', when='+opengl2')
depends_on('gl@1.2:', when='~opengl2')
if sys.platform != 'darwin':
depends_on('glx', when='~osmesa')
depends_on('libxt', when='~osmesa')
# Note: it is recommended to use mesa+llvm, if possible.
# mesa default is software rendering, llvm makes it faster
depends_on('mesa+osmesa', when='+osmesa')
# VTK will need Qt5OpenGL, and qt needs '-opengl' for that
depends_on('qt+opengl', when='+qt')
depends_on('boost', when='+xdmf')
depends_on('boost+mpi', when='+xdmf +mpi')
depends_on('ffmpeg', when='+ffmpeg')
depends_on('mpi', when='+mpi')
depends_on('expat')
depends_on('freetype')
depends_on('glew')
# set hl variant explicitly, similar to issue #7145
depends_on('hdf5+hl')
depends_on('jpeg')
depends_on('jsoncpp')
depends_on('libxml2')
depends_on('lz4')
depends_on('netcdf-c~mpi', when='~mpi')
depends_on('netcdf-c+mpi', when='+mpi')
depends_on('netcdf-cxx')
depends_on('libpng')
depends_on('libtiff')
depends_on('zlib')
depends_on('eigen', when='@8.2.0:')
depends_on('double-conversion', when='@8.2.0:')
depends_on('sqlite', when='@8.2.0:')
# For finding Fujitsu-MPI wrapper commands
patch('find_fujitsu_mpi.patch', when='@:8.2.0%fj')
def url_for_version(self, version):
url = "http://www.vtk.org/files/release/{0}/VTK-{1}.tar.gz"
return url.format(version.up_to(2), version)
def setup_build_environment(self, env):
# VTK has some trouble finding freetype unless it is set in
# the environment
env.set('FREETYPE_DIR', self.spec['freetype'].prefix)
def cmake_args(self):
spec = self.spec
opengl_ver = 'OpenGL{0}'.format('2' if '+opengl2' in spec else '')
cmake_args = [
'-DBUILD_SHARED_LIBS=ON',
'-DVTK_RENDERING_BACKEND:STRING={0}'.format(opengl_ver),
# In general, we disable use of VTK "ThirdParty" libs, preferring
# spack-built versions whenever possible
'-DVTK_USE_SYSTEM_LIBRARIES:BOOL=ON',
# However, in a few cases we can't do without them yet
'-DVTK_USE_SYSTEM_GL2PS:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBHARU=OFF',
'-DNETCDF_DIR={0}'.format(spec['netcdf-c'].prefix),
'-DNETCDF_C_ROOT={0}'.format(spec['netcdf-c'].prefix),
'-DNETCDF_CXX_ROOT={0}'.format(spec['netcdf-cxx'].prefix),
# Allow downstream codes (e.g. VisIt) to override VTK's classes
'-DVTK_ALL_NEW_OBJECT_FACTORY:BOOL=ON',
# Disable wrappers for other languages.
'-DVTK_WRAP_JAVA=OFF',
'-DVTK_WRAP_TCL=OFF',
]
# Some variable names have changed
if spec.satisfies('@8.2.0:'):
cmake_args.extend([
'-DVTK_USE_SYSTEM_OGG:BOOL=OFF',
'-DVTK_USE_SYSTEM_THEORA:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBPROJ:BOOL=OFF',
'-DVTK_USE_SYSTEM_PUGIXML:BOOL=OFF',
])
else:
cmake_args.extend([
'-DVTK_USE_SYSTEM_OGGTHEORA:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBPROJ4:BOOL=OFF',
])
if '+mpi' in spec:
if spec.satisfies('@:8.2.0'):
cmake_args.extend([
'-DVTK_Group_MPI:BOOL=ON',
'-DVTK_USE_SYSTEM_DIY2:BOOL=OFF'
])
else:
cmake_args.extend([
'-DVTK_USE_MPI=ON'
])
if '+ffmpeg' in spec:
cmake_args.extend(['-DModule_vtkIOFFMPEG:BOOL=ON'])
# Enable/Disable wrappers for Python.
if '+python' in spec:
cmake_args.extend([
'-DVTK_WRAP_PYTHON=ON',
'-DPYTHON_EXECUTABLE={0}'.format(spec['python'].command.path),
])
if '+mpi' in spec:
cmake_args.append('-DVTK_USE_SYSTEM_MPI4PY:BOOL=ON')
if spec.satisfies('@9.0.0: ^python@3:'):
cmake_args.append('-DVTK_PYTHON_VERSION=3')
else:
cmake_args.append('-DVTK_WRAP_PYTHON=OFF')
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DCMAKE_MACOSX_RPATH=ON'
])
if '+qt' in spec:
qt_ver = spec['qt'].version.up_to(1)
qt_bin = spec['qt'].prefix.bin
qmake_exe = os.path.join(qt_bin, 'qmake')
cmake_args.extend([
# Enable Qt support here.
'-DVTK_QT_VERSION:STRING={0}'.format(qt_ver),
'-DQT_QMAKE_EXECUTABLE:PATH={0}'.format(qmake_exe),
'-DVTK_Group_Qt:BOOL=ON',
])
# NOTE: The following definitions are required in order to allow
# VTK to build with qt~webkit versions (see the documentation for
# more info: http://www.vtk.org/Wiki/VTK/Tutorials/QtSetup).
if '~webkit' in spec['qt']:
cmake_args.extend([
'-DVTK_Group_Qt:BOOL=OFF',
'-DModule_vtkGUISupportQt:BOOL=ON',
'-DModule_vtkGUISupportQtOpenGL:BOOL=ON',
])
if '+xdmf' in spec:
if spec.satisfies('^cmake@3.12:'):
# This policy exists only for CMake >= 3.12
cmake_args.extend(["-DCMAKE_POLICY_DEFAULT_CMP0074=NEW"])
cmake_args.extend([
# Enable XDMF Support here
"-DModule_vtkIOXdmf2:BOOL=ON",
"-DModule_vtkIOXdmf3:BOOL=ON",
"-DBOOST_ROOT={0}".format(spec['boost'].prefix),
"-DBOOST_LIBRARY_DIR={0}".format(spec['boost'].prefix.lib),
"-DBOOST_INCLUDE_DIR={0}".format(spec['boost'].prefix.include),
"-DBOOST_NO_SYSTEM_PATHS:BOOL=ON",
# This is needed because VTK has multiple FindBoost
# and they stick to system boost if there's a system boost
# installed with CMake
"-DBoost_NO_BOOST_CMAKE:BOOL=ON",
"-DHDF5_ROOT={0}".format(spec['hdf5'].prefix),
# The xdmf project does not export any CMake file...
"-DVTK_USE_SYSTEM_XDMF3:BOOL=OFF",
"-DVTK_USE_SYSTEM_XDMF2:BOOL=OFF"
])
if '+mpi' in spec:
cmake_args.extend(["-DModule_vtkIOParallelXdmf3:BOOL=ON"])
cmake_args.append('-DVTK_RENDERING_BACKEND:STRING=' + opengl_ver)
if spec.satisfies('@:8.1.0'):
cmake_args.append('-DVTK_USE_SYSTEM_GLEW:BOOL=ON')
if '+osmesa' in spec:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DVTK_USE_COCOA:BOOL=OFF',
'-DVTK_OPENGL_HAS_OSMESA:BOOL=ON'])
else:
cmake_args.append('-DVTK_OPENGL_HAS_OSMESA:BOOL=OFF')
if spec.satisfies('@:7.9.9'):
# This option is gone in VTK 8.1.2
cmake_args.append('-DOpenGL_GL_PREFERENCE:STRING=LEGACY')
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DVTK_USE_COCOA:BOOL=ON'])
elif 'linux' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=ON',
'-DVTK_USE_COCOA:BOOL=OFF'])
if spec.satisfies('@:6.1.0'):
cmake_args.extend([
'-DCMAKE_C_FLAGS=-DGLX_GLXEXT_LEGACY',
'-DCMAKE_CXX_FLAGS=-DGLX_GLXEXT_LEGACY'
])
# VTK 6.1.0 (and possibly earlier) does not use
# NETCDF_CXX_ROOT to detect NetCDF C++ bindings, so
# NETCDF_CXX_INCLUDE_DIR and NETCDF_CXX_LIBRARY must be
# used instead to detect these bindings
netcdf_cxx_lib = spec['netcdf-cxx'].libs.joined()
cmake_args.extend([
'-DNETCDF_CXX_INCLUDE_DIR={0}'.format(
spec['netcdf-cxx'].prefix.include),
'-DNETCDF_CXX_LIBRARY={0}'.format(netcdf_cxx_lib),
])
# Garbage collection is unsupported in Xcode starting with
# version 5.1; if the Apple clang version of the compiler
# is 5.1.0 or later, unset the required Objective-C flags
# to remove the garbage collection flags. Versions of VTK
# after 6.1.0 set VTK_REQUIRED_OBJCXX_FLAGS to the empty
# string. This fix was recommended on the VTK mailing list
# in March 2014 (see
# https://public.kitware.com/pipermail/vtkusers/2014-March/083368.html)
if self.spec.satisfies('%apple-clang@5.1.0:'):
cmake_args.extend(['-DVTK_REQUIRED_OBJCXX_FLAGS='])
# A bug in tao pegtl causes build failures with intel compilers
if '%intel' in spec and spec.version >= Version('8.2'):
cmake_args.append(
'-DVTK_MODULE_ENABLE_VTK_IOMotionFX:BOOL=OFF')
return cmake_args
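# Editor's note (illustrative, not part of the package): typical Spack specs for
# this recipe might look like
#   spack install vtk+python+mpi
#   spack install vtk+osmesa~qt
# The exact variants to enable depend on the downstream consumer.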
|
rspavel/spack
|
var/spack/repos/builtin/packages/vtk/package.py
|
Python
|
lgpl-2.1
| 12,385
|
class Base(object):
def __init__(self, client, host):
super(Base, self).__init__()
self._client = client
self._host = host
|
xuru/pyvisdk
|
pyvisdk/esxcli/base/__init__.py
|
Python
|
mit
| 151
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './flowchart/FlowchartTemplate.ui'
#
# Created: Sun Feb 24 19:47:30 2013
# by: pyside-uic 0.2.13 running on PySide 1.1.1
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(529, 329)
self.selInfoWidget = QtGui.QWidget(Form)
self.selInfoWidget.setGeometry(QtCore.QRect(260, 10, 264, 222))
self.selInfoWidget.setObjectName("selInfoWidget")
self.gridLayout = QtGui.QGridLayout(self.selInfoWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.selDescLabel = QtGui.QLabel(self.selInfoWidget)
self.selDescLabel.setText("")
self.selDescLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.selDescLabel.setWordWrap(True)
self.selDescLabel.setObjectName("selDescLabel")
self.gridLayout.addWidget(self.selDescLabel, 0, 0, 1, 1)
self.selNameLabel = QtGui.QLabel(self.selInfoWidget)
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.selNameLabel.setFont(font)
self.selNameLabel.setText("")
self.selNameLabel.setObjectName("selNameLabel")
self.gridLayout.addWidget(self.selNameLabel, 0, 1, 1, 1)
self.selectedTree = DataTreeWidget(self.selInfoWidget)
self.selectedTree.setObjectName("selectedTree")
self.selectedTree.headerItem().setText(0, "1")
self.gridLayout.addWidget(self.selectedTree, 1, 0, 1, 2)
self.hoverText = QtGui.QTextEdit(Form)
self.hoverText.setGeometry(QtCore.QRect(0, 240, 521, 81))
self.hoverText.setObjectName("hoverText")
self.view = FlowchartGraphicsView(Form)
self.view.setGeometry(QtCore.QRect(0, 0, 256, 192))
self.view.setObjectName("view")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
from pyqtgraph.widgets.DataTreeWidget import DataTreeWidget
from pyqtgraph.flowchart.FlowchartGraphicsView import FlowchartGraphicsView
|
ibressler/pyqtgraph
|
pyqtgraph/flowchart/FlowchartTemplate_pyside.py
|
Python
|
mit
| 2,393
|
#!/usr/bin/python
#
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This script accepts a json dump via stdin that was generated by calling the
# rabbitmq /api/definitions endpoint. It then removes the queues, bindings and
# exchanges that should not be restored if the dump was loaded and prints
# the json back out to stdout.
#
import json
import sys
defs = json.loads(sys.stdin.read())
newqueues = []
allqueues = set()
# delete all auto-delete queues
for queue in defs['queues']:
if not queue['auto_delete']:
newqueues.append(queue)
allqueues.add(queue['name'])
newbindings = []
allsources = set()
# delete all bindings pointing to auto-delete queues
for binding in defs['bindings']:
if binding['destination'] in allqueues:
newbindings.append(binding)
allsources.add(binding['source'])
newexchanges = []
# delete all exchanges which were left without bindings
for exchange in defs['exchanges']:
if exchange['name'] in allsources:
newexchanges.append(exchange)
defs['queues'] = newqueues
defs['bindings'] = newbindings
defs['exchanges'] = newexchanges
print(json.dumps(defs))
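# Editor's note -- illustrative invocation (URL and credentials are assumptions):
#   curl -u guest:guest http://localhost:15672/api/definitions \
#       | python rabbitmq-dump-clean.py > definitions-clean.json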
|
xarses/fuel-library
|
files/fuel-ha-utils/tools/rabbitmq-dump-clean.py
|
Python
|
apache-2.0
| 1,705
|
#!/usr/bin/env python3
import cgi
import html
form = cgi.FieldStorage()
print('Content-type: text/html\n')
print('<title>Reply Page</title>')
if not 'user' in form:
print('<h1>Who are you?</h1>')
else:
print('<h1>Hello <i>%s</i>!</h1>' % html.escape(form['user'].value))
|
lichengshuang/createvhost
|
python/others/Preview/cgi-bin/cgi101.py
|
Python
|
apache-2.0
| 266
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""fix description field in connection to be text
Revision ID: 64a7d6477aae
Revises: f5b5ec089444
Create Date: 2020-11-25 08:56:11.866607
"""
import sqlalchemy as sa # noqa
from alembic import op # noqa
# revision identifiers, used by Alembic.
revision = '64a7d6477aae'
down_revision = '61ec73d9401f'
branch_labels = None
depends_on = None
def upgrade():
"""Apply fix description field in connection to be text"""
conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.String(length=5000),
type_=sa.Text(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text())
def downgrade():
"""Unapply fix description field in connection to be text"""
conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.Text(5000),
type_=sa.String(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column(
'connection',
'description',
existing_type=sa.Text(),
type_=sa.String(length=5000),
existing_nullable=True,
)
|
nathanielvarona/airflow
|
airflow/migrations/versions/64a7d6477aae_fix_description_field_in_connection_to_.py
|
Python
|
apache-2.0
| 2,586
|
"""Treadmill identity trace CLI."""
import logging
import sys
import os
import glob
import tempfile
import click
from treadmill import cli
from treadmill import context
from treadmill.websocket import client as ws_client
_LOGGER = logging.getLogger(__name__)
def init():
"""Return top level command handler."""
ctx = {}
@click.group(name='host-ring')
@click.option('--cell', required=True,
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False)
@click.option('--api', required=False, help='REST API url to use.',
metavar='URL',
envvar='TREADMILL_RESTAPI')
@click.option('--wsapi', required=False, help='WebSocket API url to use.',
metavar='URL',
envvar='TREADMILL_WSAPI')
@click.option('--aliases-dir', required=True,
help='Host aliases dir.',
default='/etc/host-aliases')
def host_ring(api, wsapi, aliases_dir):
"""Manage /etc/hosts file inside the container."""
ctx['api'] = api
ctx['wsapi'] = wsapi
ctx['aliases_dir'] = aliases_dir
@host_ring.command(name='identity-group')
@click.option('--pattern', required=False,
default='{identity_group}.{identity}')
@click.argument('identity-group')
def identity_group_cmd(pattern, identity_group):
"""Manage /etc/hosts file inside the container.
"""
alias_dir = ctx['aliases_dir']
cell = context.GLOBAL.cell
def on_message(result):
"""Callback to process trace essage."""
host = result.get('host')
app = result.get('app')
identity_group = result['identity-group']
identity = result['identity']
_LOGGER.info('group: %s, identity: %s, host: %s, app: %s',
identity_group, identity, host, app)
alias_name = pattern.format(identity_group=identity_group,
identity=identity,
cell=cell)
link_name = os.path.join(alias_dir, alias_name)
if host:
temp_name = tempfile.mktemp(dir=alias_dir, prefix='^')
_LOGGER.info('Creating tempname: %s - %s', temp_name, host)
os.symlink(host, temp_name)
_LOGGER.info('Renaming: %s', link_name)
os.rename(temp_name, link_name)
else:
os.unlink(link_name)
return True
def on_error(result):
"""Callback to process errors."""
click.echo('Error: %s' % result['_error'], err=True)
glob_pattern = os.path.join(
alias_dir,
pattern.format(identity_group=identity_group,
identity='*',
cell=cell)
)
for path in glob.glob(glob_pattern):
os.unlink(path)
try:
return ws_client.ws_loop(
ctx['wsapi'],
{'topic': '/identity-groups',
'identity-group': identity_group},
False,
on_message,
on_error
)
except ws_client.ConnectionError:
click.echo('Could not connect to any Websocket APIs', err=True)
sys.exit(-1)
del identity_group_cmd
return host_ring
|
keithhendry/treadmill
|
treadmill/sproc/host_ring.py
|
Python
|
apache-2.0
| 3,490
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
from hashlib import md5
from fractions import Fraction
mpl.rcParams["text.usetex"] = True
mpl.rcParams["text.latex.preamble"] = "\usepackage{bm}"
def read_hash(path):
if not os.path.isfile(path):
return ""
with open(path, "rb") as f:
md = f.read()
return md
def write_hash(path, h):
f = open(path, "wb")
f.write(h)
f.close()
for filename in sorted(os.listdir("output")):
if filename.startswith("h_mz_"):
h = []
mz = []
md = md5()
with open("output/" + filename, "r") as f:
chi = filter(lambda s: s.find("chi=") != -1, filename[:-4].split("_"))[0].split("=")[-1]
svalerr = float(filter(lambda s: s.find("svalerr=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
q = int(filter(lambda s: s.find("q=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
for line in f:
fields = line.split(" ")
h.append(-float(fields[0]))
mz.append(map(float, fields[1:1+q]))
md.update(line)
markers = ["x", "+", "o", "D", "v", "^", "<", ">"]
colours = ["b", "g", "r", "c", "m", "y", "k", "b"]
if md.hexdigest() != read_hash("plots/" + filename.split(".dat")[0] + ".md5"):
print filename
for j in xrange(q):
s = "| {:d} \\rangle \\langle {:d} |".format(j,j)
plt.plot(h, map(lambda x: x[j], mz), markers[j], mfc="none", mec=colours[j], label="$\\langle " + s + " \\rangle_{0,\\infty}$")
#plt.plot(h, mx, marker="x", label="$\\langle \\sigma_x \\rangle_{0,\\infty}$")
plt.title("1D-QPotts ground state via 2D-CTMRG ($q = " + str(q) + "$, $\\chi = " + chi + "$, $\\Delta \\xi = 10^{" + "{:.0f}".format(np.log10(svalerr)) + "}$)")
plt.legend(loc="best")
plt.xlabel("$-h$")
#plt.ylim(0,1)
plt.grid(True)
plt.savefig("plots/" + filename.split(".dat")[0] + ".png", dpi=300)
plt.close()
write_hash("plots/" + filename.split(".dat")[0] + ".md5", md.hexdigest())
primFieldsFM = [
None, None,
[Fraction(1,16)],
[Fraction(1,8),Fraction(13,8)]
]
primFieldsPM = [
None, None,
[Fraction(0), Fraction(1,2)],
[Fraction(0),Fraction(2,3)]
]
def scale_svals_for_fields(f, xi):
xi = np.sort(-np.log(xi))
f = sorted(f)[:2]
f0 = f[0]
if len(f) == 1:
f1 = f0 + 1
else:
f1 = min([f[1],f0+1])
xi = xi * float(f1-f0) / (xi[1]-xi[0])
xi = xi - xi[0] + f0
return xi
def scale_svals_fm(q, xi):
return scale_svals_for_fields(primFieldsFM[q], xi)
def scale_svals_pm(q, xi):
return scale_svals_for_fields(primFieldsPM[q], xi)
def get_yticks_for_fields(fields,ymin,ymax):
t = []
for f in fields:
for j in range(int(np.ceil(ymin-f)), int(np.floor(ymax-f))+1):
if not float(f+j) in t:
t.append(float(f+j))
return t
def get_yticklabels_for_fields(fields,ymin,ymax):
t = []
for f in fields:
for j in range(int(np.ceil(ymin-f)), int(np.floor(ymax-f))+1):
s = "0" if f == 0 else "\\frac{" + str(f.numerator) + "}{" + str(f.denominator) + "}"
if j > 0:
s += "+" + str(j)
s = "$" + s + "$"
if not s in t:
t.append(s)
return t
def get_fm_yticks(q,ymin,ymax):
return get_yticks_for_fields(primFieldsFM[q],ymin,ymax)
def get_pm_yticks(q,ymin,ymax):
return get_yticks_for_fields(primFieldsPM[q],ymin,ymax)
def get_fm_yticklabels(q,ymin,ymax):
return get_yticklabels_for_fields(primFieldsFM[q],ymin,ymax)
def get_pm_yticklabels(q,ymin,ymax):
return get_yticklabels_for_fields(primFieldsPM[q],ymin,ymax)
for filename in sorted(os.listdir("output")):
if filename.startswith("ctmrgsvals_detail_"):
h_xi = dict()
md = md5()
with open("output/" + filename, "r") as f:
chi = filter(lambda s: s.find("chi=") != -1, filename[:-4].split("_"))[0].split("=")[-1]
q = int(filter(lambda s: s.find("q=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
svalerr = float(filter(lambda s: s.find("svalerr=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
for line in f:
fields = line.split(" ")
h = -float(fields[0])
xi = float(fields[1])
if not h_xi.has_key(h):
h_xi[h] = []
if xi > 0:
h_xi[h].append(xi)
md.update(line)
#md.update("foo")
if md.hexdigest() != read_hash("plots/" + filename.split(".dat")[0] + ".md5"):
print filename
for h in h_xi:
xi = h_xi[h]
if h < 1 and len(xi) > 1:
xi = scale_svals_fm(q, xi)
plt.plot([h]*len(xi), xi, "b+")
plt.ylabel("$-a \\log(\\xi) + b$")
ymin,ymax = plt.axes().get_ylim()
plt.axes().set_yticks(get_fm_yticks(q, ymin, ymax))
plt.axes().set_yticklabels(get_fm_yticklabels(q, ymin, ymax))
plt.grid(True)
plt.title("1D-QPotts ground state via 2D-CTMRG ($q = " + str(q) + "$, $\\chi = " + chi + "$, $\\Delta \\xi = 10^{" + "{:.0f}".format(np.log10(svalerr)) + "}$)")
plt.xlabel("$-h$")
s = filename.split(".dat")[0]
s = s.split("_detail_")
s = s[0] + "_detail_fm_" + s[1]
plt.savefig("plots/" + s + ".png", dpi=300)
plt.close()
for h in h_xi:
xi = h_xi[h]
if h > 1 and len(xi) > 1:
xi = scale_svals_pm(q, xi)
plt.plot([h]*len(xi), xi, "b+")
plt.ylabel("$-a \\log(\\xi) + b$")
ymin,ymax = plt.axes().get_ylim()
plt.axes().set_yticks(get_pm_yticks(q, ymin, ymax))
plt.axes().set_yticklabels(get_pm_yticklabels(q, ymin, ymax))
plt.grid(True)
plt.title("1D-QPotts ground state via 2D-CTMRG ($q = " + str(q) + "$, $\\chi = " + chi + "$, $\\Delta \\xi = 10^{" + "{:.0f}".format(np.log10(svalerr)) + "}$)")
plt.xlabel("$-h$")
s = filename.split(".dat")[0]
s = s.split("_detail_")
s = s[0] + "_detail_pm_" + s[1]
plt.savefig("plots/" + s + ".png", dpi=300)
plt.close()
write_hash("plots/" + filename.split(".dat")[0] + ".md5", md.hexdigest())
"""
hDegeneracyLabel1 = 0.5
hDegeneracyLabel2 = 1.5
primaryField1PM = [ Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0) ]
primaryField2PM = [ Fraction(1), Fraction(1), Fraction(1,2), Fraction(2,3), Fraction(1), Fraction(1), Fraction(1), Fraction(1), Fraction(1) ]
primaryField1FM = [ Fraction(0), Fraction(0), Fraction(1,16), Fraction(1,8), Fraction(0), Fraction(0), Fraction(0), Fraction(0), Fraction(0) ]
primaryField2FM = [ Fraction(1), Fraction(1), Fraction(17,16), Fraction(13,8), Fraction(1), Fraction(1), Fraction(1), Fraction(1), Fraction(1) ]
yTicks1 = []
yTickLabels1 = []
yTicks2 = []
yTickLabels2 = []
for q in xrange(9):
f = [primaryField1FM[q], primaryField2FM[q]]
t = list()
l = list()
for k in xrange(len(f)):
for j in xrange(10):
x = f[k] + j
if not float(x) in t:
t.append(float(x))
s = "$0" if f[k].numerator == 0 else "$\\frac{" + str(f[k].numerator) + "}{" + str(f[k].denominator) + "}"
if j > 0:
s += "+" + str(j)
s += "$"
l.append(s)
yTicks1.append(t)
yTickLabels1.append(l)
f = [primaryField1PM[q], primaryField2PM[q]]
t = list()
l = list()
for k in xrange(len(f)):
for j in xrange(10):
x = f[k] + j
if not float(x) in t:
t.append(float(x))
s = "$0" if f[k].numerator == 0 else "$\\frac{" + str(f[k].numerator) + "}{" + str(f[k].denominator) + "}"
if j > 0:
s += "+" + str(j)
s += "$"
l.append(s)
yTicks2.append(t)
yTickLabels2.append(l)
for filename in sorted(os.listdir("output")):
if filename.startswith("ctmrgsvals_"):
h_xi = dict()
md = md5()
#svalcntPM = dict()
#svalcntFM = dict()
with open("output/" + filename, "r") as f:
chi = filter(lambda s: s.find("chi=") != -1, filename[:-4].split("_"))[0].split("=")[-1]
q = int(filter(lambda s: s.find("q=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
svalerr = float(filter(lambda s: s.find("svalerr=") != -1, filename[:-4].split("_"))[0].split("=")[-1])
for line in f:
fields = line.split(" ")
h = -float(fields[0])
xi = float(fields[1])
if not h_xi.has_key(h):
h_xi[h] = []
if xi > 0:
h_xi[h].append(xi)
md.update(line)
md.update(str(np.random.rand(10)))
if md.hexdigest() != read_hash("plots/" + filename.split(".dat")[0] + ".md5"):
print filename
plt.ylabel("$-a \\log(\\xi) + b$")
for h in h_xi:
xi = h_xi[h]
xi = np.sort(-np.log(xi))
if len(xi) > 1:
if h > 1:
if primaryField2PM[q] < primaryField1PM[q] + 1:
xi = xi * (primaryField2PM[q]-primaryField1PM[q]) / (xi[1]-xi[0])
else:
xi = xi / (xi[1]-xi[0])
xi = xi - xi[0] + primaryField1PM[q]
else:
if primaryField2FM[q] < primaryField1FM[q] + 1:
xi = xi * (primaryField2FM[q]-primaryField1FM[q]) / (xi[1]-xi[0])
else:
xi = xi / (xi[1]-xi[0])
xi = xi - xi[0] + primaryField1FM[q]
plt.plot([h]*len(xi), xi, "b+")
if h == hDegeneracyLabel1 or h == hDegeneracyLabel2:
svalcnt = dict()
for x in xi:
minDist = 100
minDistSval = None
for y in svalcnt:
if np.abs(x-y) < minDist:
minDist = np.abs(x-y)
minDistSval = y
if minDistSval is None or minDist > 1e-1:
svalcnt[x] = 1
else:
svalcnt[minDistSval] += 1
for x in svalcnt:
plt.annotate(s="$\\scriptstyle " + str(svalcnt[x]) + "$", xy=(h+0.015, x-0.05))
#plt.axes().set_yticks(yTicks1[q])
#plt.axes().set_yticklabels(yTickLabels1[q])
#plt.grid(True)
plt.axes().set_yticks(yTicks1[q] + yTicks2[q])
plt.axes().set_yticklabels(yTickLabels1[q] + [""]*len(yTicks2[q]))
plt.ylim(-0.5,5.5)
#ylim = plt.axes().get_ylim()
plt.grid(True)
#ax2 = plt.twinx()
#ax2.set_ylim(ylim)
#ax2.set_ylim(-0.5,8)
#ax2.set_yticks(yTicks2[q])
#ax2.set_yticklabels(yTickLabels2[q])
#ax2.grid(True)
plt.title("1D-QPotts ground state via 2D-CTMRG ($q = " + str(q) + "$, $\\chi = " + chi + "$, $\\Delta \\xi = 10^{" + "{:.0f}".format(np.log10(svalerr)) + "}$)")
plt.xlabel("$-h$")
plt.savefig("plots/" + filename.split(".dat")[0] + ".png", dpi=300)
plt.close()
write_hash("plots/" + filename.split(".dat")[0] + ".md5", md.hexdigest())
if q == 4:
exit()
"""
|
kingfisher1337/tns
|
qpotts_groundstate_1d/plot.py
|
Python
|
gpl-3.0
| 12,349
|
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# Standard library imports
import sys
print(f"GOT {sys.argv}")
|
ericmjl/bokeh
|
release/__main__.py
|
Python
|
bsd-3-clause
| 398
|
from __future__ import absolute_import
from django.db.models import Q
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry import roles
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationPermission
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.api.serializers.models.team import TeamWithProjectsSerializer
from sentry.auth.superuser import is_active_superuser
from sentry.models import (
AuditLogEntryEvent,
OrganizationAccessRequest,
OrganizationMember,
OrganizationMemberTeam,
Team,
)
ERR_INSUFFICIENT_ROLE = "You do not have permission to edit that user's membership."
class OrganizationMemberTeamSerializer(serializers.Serializer):
isActive = serializers.BooleanField()
class RelaxedOrganizationPermission(OrganizationPermission):
_allowed_scopes = [
"org:read",
"org:write",
"org:admin",
"member:read",
"member:write",
"member:admin",
]
scope_map = {
"GET": _allowed_scopes,
"POST": _allowed_scopes,
"PUT": _allowed_scopes,
"DELETE": _allowed_scopes,
}
class OrganizationMemberTeamDetailsEndpoint(OrganizationEndpoint):
permission_classes = [RelaxedOrganizationPermission]
def _can_create_team_member(self, request, organization, team_slug):
"""
User can join or add a member to a team:
* If they are an active superuser
* If they are a team admin or have global write access
* If the open membership organization setting is enabled
"""
return (
is_active_superuser(request)
or self._can_admin_team(request, organization, team_slug)
or organization.flags.allow_joinleave
)
def _can_delete(self, request, member, organization, team_slug):
"""
User can remove a member from a team:
* If they are an active superuser
* If they are removing their own membership
* If they are a team admin or have global write access
"""
if is_active_superuser(request):
return True
if not request.user.is_authenticated():
return False
if request.user.id == member.user_id:
return True
if self._can_admin_team(request, organization, team_slug):
return True
return False
def _can_admin_team(self, request, organization, team_slug):
global_roles = [r.id for r in roles.with_scope("org:write") if r.is_global]
team_roles = [r.id for r in roles.with_scope("team:write")]
# must be a team admin or have global write access
return OrganizationMember.objects.filter(
Q(role__in=global_roles)
| Q(organizationmemberteam__team__slug=team_slug, role__in=team_roles),
organization=organization,
user__id=request.user.id,
user__is_active=True,
).exists()
def _get_member(self, request, organization, member_id):
if member_id == "me":
queryset = OrganizationMember.objects.filter(
organization=organization, user__id=request.user.id, user__is_active=True
)
else:
queryset = OrganizationMember.objects.filter(
Q(user__is_active=True) | Q(user__isnull=True),
organization=organization,
id=member_id,
)
return queryset.select_related("user").get()
def _create_access_request(self, request, team, member):
omt, created = OrganizationAccessRequest.objects.get_or_create(team=team, member=member)
if not created:
return
requester = request.user if request.user != member.user else None
if requester:
omt.update(requester=requester)
omt.send_request_email()
def post(self, request, organization, member_id, team_slug):
"""
Join, request access to, or add a member to a team.
If the user needs permission to join the team, an access request will
be generated and the returned status code will be 202.
If the user is already a member of the team, this will simply return
a 204.
"""
try:
member = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not request.user.is_authenticated():
return Response(status=status.HTTP_401_UNAUTHORIZED)
try:
team = Team.objects.get(organization=organization, slug=team_slug)
except Team.DoesNotExist:
raise ResourceDoesNotExist
try:
omt = OrganizationMemberTeam.objects.get(team=team, organizationmember=member)
except OrganizationMemberTeam.DoesNotExist:
if self._can_create_team_member(request, organization, team_slug):
omt = OrganizationMemberTeam.objects.create(team=team, organizationmember=member)
else:
self._create_access_request(request, team, member)
return Response(status=202)
else:
return Response(status=204)
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=member.user,
event=AuditLogEntryEvent.MEMBER_JOIN_TEAM,
data=omt.get_audit_log_data(),
)
return Response(serialize(team, request.user, TeamWithProjectsSerializer()), status=201)
def delete(self, request, organization, member_id, team_slug):
"""
Leave or remove a member from a team
"""
try:
member = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not self._can_delete(request, member, organization, team_slug):
return Response({"detail": ERR_INSUFFICIENT_ROLE}, status=400)
try:
team = Team.objects.get(organization=organization, slug=team_slug)
except Team.DoesNotExist:
raise ResourceDoesNotExist
try:
omt = OrganizationMemberTeam.objects.get(team=team, organizationmember=member)
except OrganizationMemberTeam.DoesNotExist:
pass
else:
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=member.user,
event=AuditLogEntryEvent.MEMBER_LEAVE_TEAM,
data=omt.get_audit_log_data(),
)
omt.delete()
return Response(serialize(team, request.user, TeamWithProjectsSerializer()), status=200)
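# [Editorial sketch, not part of the original endpoint] The POST handler above
# resolves to one of three status codes; this helper merely restates that
# contract for reference (the argument names here are illustrative only).
def _expected_post_status(already_member, can_create_team_member):
    if already_member:
        return 204  # membership already exists, nothing to do
    if can_create_team_member:
        return 201  # member is added to the team
    return 202      # an access request is generated instead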
|
beeftornado/sentry
|
src/sentry/api/endpoints/organization_member_team_details.py
|
Python
|
bsd-3-clause
| 6,954
|
from scoring_engine.celery_app import celery_app
from celery.exceptions import SoftTimeLimitExceeded
import subprocess
from scoring_engine.logger import logger
@celery_app.task(name='execute_command', acks_late=True, reject_on_worker_lost=True, soft_time_limit=30)
def execute_command(job):
output = ""
# Disable duplicate celery log messages
if logger.propagate:
logger.propagate = False
logger.info("Running cmd for " + str(job))
try:
cmd_result = subprocess.run(
job['command'],
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
output = cmd_result.stdout.decode("utf-8")
job['errored_out'] = False
except SoftTimeLimitExceeded:
job['errored_out'] = True
job['output'] = output
return job
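# [Editorial sketch, not part of the original module] A hedged illustration of
# the job contract above: the task receives a dict with a shell 'command' and
# hands it back with 'output' and 'errored_out' filled in. Calling a Celery
# task object directly executes it in the current process, which can be
# convenient for local experiments (assumes scoring_engine is importable).
if __name__ == '__main__':
    result = execute_command({'command': 'echo hello'})
    print(result['errored_out'], result['output'])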
|
pwnbus/scoring_engine
|
scoring_engine/engine/execute_command.py
|
Python
|
mit
| 836
|
# -*- coding: utf-8 -*-
{
'name': 'test-field-converter',
'version': '0.1',
'category': 'Tests',
'description': """Tests of field conversions""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': ['ir.model.access.csv'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
diogocs1/comps
|
web/openerp/addons/test_converter/__openerp__.py
|
Python
|
apache-2.0
| 445
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_pie04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'pie'})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$3',
'values': '=Sheet1!$B$1:$B$3',
})
chart.set_legend({'position': 'overlay_right'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
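# [Editorial sketch, not part of the original test file] The same pie chart
# built as a plain script rather than through the comparison harness, reusing
# the Workbook class imported at the top of this module; the output filename
# is illustrative only.
def _example_pie_chart(filename='pie_demo.xlsx'):
    workbook = Workbook(filename)
    worksheet = workbook.add_worksheet()
    chart = workbook.add_chart({'type': 'pie'})
    # Category labels in column A, values in column B.
    worksheet.write_column('A1', [2, 4, 6])
    worksheet.write_column('B1', [60, 30, 10])
    chart.add_series({
        'categories': '=Sheet1!$A$1:$A$3',
        'values': '=Sheet1!$B$1:$B$3',
    })
    chart.set_legend({'position': 'overlay_right'})
    worksheet.insert_chart('E9', chart)
    workbook.close()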
|
jvrsantacruz/XlsxWriter
|
xlsxwriter/test/comparison/test_chart_pie04.py
|
Python
|
bsd-2-clause
| 1,427
|
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.
from MaKaC.plugins.RoomBooking.default.dalManager import DALManager
class DALManagerCERN( DALManager ):
""" CERN specific implementation. Inherits from the default plugin"""
pass
|
Ictp/indico
|
indico/MaKaC/plugins/RoomBooking/CERN/dalManagerCERN.py
|
Python
|
gpl-3.0
| 960
|
# this is like your scripts with argv
def print_two(*args):
arg1, arg2 = args
print "arg1: %r, arg2: %r" % (arg1, arg2)
# ok, that *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
print "arg1: %r, arg2: %r" % (arg1, arg2)
# this just takes one argument
def print_one(arg1):
print "arg1: %r" % arg1
# this one takes no arguments
def print_none():
print "I got nothing."
print_two("Zed", "Shaw")
print_two_again("Zed", "Shaw")
print_one("First!")
print_none()
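# [Editorial sketch, not part of the original exercise] The same call sites can
# also be driven by unpacking an existing sequence, which is what makes
# print_two "like your scripts with argv": the tuple is spread back into
# separate positional arguments.
names = ("Zed", "Shaw")
print_two(*names)
print_two_again(*names)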
|
jgavinray/LPTHW
|
ex18.py
|
Python
|
mit
| 515
|
##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class TestStruct( unittest.TestCase ) :
def test( self ) :
s = IECore.Struct()
s.a = 10
self.assertEqual( s.a, 10 )
s = IECore.Struct( b = 20 )
self.assertEqual( s.b, 20 )
if __name__ == "__main__":
unittest.main()
|
appleseedhq/cortex
|
test/IECore/Struct.py
|
Python
|
bsd-3-clause
| 2,048
|
# Copyright (C) 2011-2012 Canonical Services Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
An exponentially-weighted moving average.
See:
- U{UNIX Load Average Part 1: How It Works
<http://www.teamquest.com/pdfs/whitepaper/ldavg1.pdf>}
- U{UNIX Load Average Part 2: Not Your Average Average
<http://www.teamquest.com/pdfs/whitepaper/ldavg2.pdf>}
"""
import math
class Ewma(object):
M1_ALPHA = 1 - math.exp(-5 / 60.0)
M5_ALPHA = 1 - math.exp(-5 / 60.0 / 5)
M15_ALPHA = 1 - math.exp(-5 / 60.0 / 15)
@classmethod
def one_minute_ewma(cls):
"""
Creates a new C{Ewma} which is equivalent to the UNIX one minute
load average and which expects to be ticked every 5 seconds.
"""
return Ewma(Ewma.M1_ALPHA, 5)
@classmethod
def five_minute_ewma(cls):
"""
Creates a new C{Ewma} which is equivalent to the UNIX five minute
load average and which expects to be ticked every 5 seconds.
"""
return Ewma(Ewma.M5_ALPHA, 5)
@classmethod
def fifteen_minute_ewma(cls):
"""
Creates a new C{Ewma} which is equivalent to the UNIX fifteen
minute load average and which expects to be ticked every 5 seconds.
"""
return Ewma(Ewma.M15_ALPHA, 5)
def __init__(self, alpha, interval):
"""Create a new C{Ewma} with a specific smoothing constant.
@param alpha: The smoothing constant.
@param interval: The expected tick interval in seconds.
"""
self.interval = interval
self.alpha = float(alpha)
self.initialized = False
self.rate = 0.0
self.uncounted = 0
def update(self, n):
"""Update the moving average with a new value."""
self.uncounted += n
def tick(self):
"""Mark the passage of time and decay the current rate accordingly."""
count = self.uncounted
self.uncounted = 0
instant_rate = float(count) / self.interval
if self.initialized:
self.rate += (self.alpha * (instant_rate - self.rate))
else:
self.rate = instant_rate
self.initialized = True
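# [Editorial sketch, not part of the original module] Minimal usage of the Ewma
# above: feed counts with update() and call tick() once per interval (every
# 5 seconds for the load-average presets); rate then holds the smoothed
# events-per-second estimate.
if __name__ == "__main__":
    ewma = Ewma.one_minute_ewma()
    for count in (10, 20, 30):
        ewma.update(count)
        ewma.tick()       # in production this happens once every 5 seconds
    print(ewma.rate)      # smoothed rate in events per second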
|
wikimedia/operations-debs-txstatsd
|
txstatsd/stats/ewma.py
|
Python
|
mit
| 3,210
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.lookup.lookup."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import lookup
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import counter
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training.checkpointable import util as checkpointable
class HashTableOpTest(test.TestCase):
def testHashTable(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
exported_keys_tensor, exported_values_tensor = table.export()
self.assertItemsEqual([b"brain", b"salad", b"surgery"],
exported_keys_tensor.eval())
self.assertItemsEqual([0, 1, 2], exported_values_tensor.eval())
def testHashTableFindHighRank(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testHashTableInitWithPythonArrays(self):
with self.cached_session():
default_val = -1
keys = ["brain", "salad", "surgery"]
values = [0, 1, 2]
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
keys, values, value_dtype=dtypes.int64),
default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableInitWithNumPyArrays(self):
with self.cached_session():
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testMultipleHashTables(self):
with self.cached_session() as sess:
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table2 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table3 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
lookup_ops.tables_initializer().run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testHashTableWithTensorDefault(self):
with self.cached_session():
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableWithSparseTensorInput(self):
with self.cached_session() as sess:
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_tensor = sparse_tensor.SparseTensor(
constant_op.constant(sp_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "tank"]),
constant_op.constant(sp_shape, dtypes.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = sess.run(output)
self.assertAllEqual([0, 1, -1], out_values)
self.assertAllEqual(sp_indices, out_indices)
self.assertAllEqual(sp_shape, out_shape)
def testSignatureMismatch(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
# Ref types do not produce a lookup signature mismatch.
input_string_ref = variables.Variable("brain")
variables.global_variables_initializer().run()
self.assertEqual(0, table.lookup(input_string_ref).eval())
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(TypeError):
table.lookup(input_string)
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), "UNK")
def testDTypes(self):
with self.cached_session():
default_val = -1
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(["a"], [1], [dtypes.string],
dtypes.int64), default_val)
def testNotInitialized(self):
with self.cached_session():
default_val = -1
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
["a"], [1], value_dtype=dtypes.int64),
default_val)
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
with self.assertRaisesOpError("Table not initialized"):
output.eval()
def testInitializeTwice(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
with self.assertRaisesOpError("Table already initialized"):
table.initializer.run()
def testInitializationWithInvalidDimensions(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
def testMultipleSessions(self):
# Start a server
server = server_lib.Server(
{
"local0": ["localhost:0"]
}, protocol="grpc", start=True)
# Create two sessions sharing the same state
session1 = session.Session(server.target)
session2 = session.Session(server.target)
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values),
default_val,
name="t1")
# Init the table in the first session.
with session1:
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
# Init the table in the second session and verify that we do not get a
# "Table already initialized" error.
with session2:
table.initializer.run()
self.assertAllEqual(3, table.size().eval())
def testHashTableInt32String(self):
with self.cached_session():
default_val = "n/a"
keys = constant_op.constant([0, 1, 2], dtypes.int32)
values = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.initializer.run()
input_tensor = constant_op.constant([0, 1, -1])
output = table.lookup(input_tensor)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
class MutableHashTableOpTest(test.TestCase):
def testMutableHashTable(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery", "tarkus"])
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
remove_string = constant_op.constant(["tarkus", "tank"])
table.remove(remove_string).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
exported_keys, exported_values = table.export()
self.assertAllEqual([None], exported_keys.get_shape().as_list())
self.assertAllEqual([None], exported_values.get_shape().as_list())
# exported data is in the order of the internal map, i.e. undefined
sorted_keys = np.sort(exported_keys.eval())
sorted_values = np.sort(exported_values.eval())
self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
self.assertAllEqual([0, 1, 2], sorted_values)
def testSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
default_val = -1
keys = constant_op.constant(["b", "c", "d"], dtypes.string)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
save = saver.Saver()
variables.global_variables_initializer().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
default_val = -1
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
table.insert(
constant_op.constant(["a", "c"], dtypes.string),
constant_op.constant([12, 24], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["a", "b", "c", "d", "e"],
dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], output.eval())
@test_util.run_in_graph_and_eager_modes
def testObjectSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_prefix = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
default_val = -1
keys = constant_op.constant(["b", "c", "d"], dtypes.string)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
checkpoint = checkpointable.Checkpoint(table=table, v0=v0, v1=v1)
self.evaluate([v0.initializer, v1.initializer])
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertAllEqual(0, self.evaluate(table.size()))
self.evaluate(table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(table.size()))
save_path = checkpoint.save(save_prefix)
del table, checkpoint, v0, v1
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
default_val = -1
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
self.evaluate(table.insert(
constant_op.constant(["a", "c"], dtypes.string),
constant_op.constant([12, 24], dtypes.int64)))
self.assertAllEqual(2, self.evaluate(table.size()))
checkpoint = checkpointable.Checkpoint(table=table, v0=v0, v1=v1)
# Restore the saved values in the parameter nodes.
checkpoint.restore(save_path).run_restore_ops()
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, self.evaluate(v0))
self.assertEqual(20.0, self.evaluate(v1))
self.assertAllEqual(3, self.evaluate(table.size()))
input_string = constant_op.constant(["a", "b", "c", "d", "e"],
dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))
def testSharing(self):
# Start a server to store the table state
server = server_lib.Server(
{
"local0": ["localhost:0"]
}, protocol="grpc", start=True)
# Create two sessions sharing the same state
session1 = session.Session(server.target)
session2 = session.Session(server.target)
table = lookup.MutableHashTable(
dtypes.int64, dtypes.string, "-", name="t1")
# Populate the table in the first session
with session1:
self.assertAllEqual(0, table.size().eval())
keys = constant_op.constant([11, 12], dtypes.int64)
values = constant_op.constant(["a", "b"])
table.insert(keys, values).run()
self.assertAllEqual(2, table.size().eval())
output = table.lookup(constant_op.constant([11, 12, 13], dtypes.int64))
self.assertAllEqual([b"a", b"b", b"-"], output.eval())
# Verify that we can access the shared data from the second session
with session2:
self.assertAllEqual(2, table.size().eval())
output = table.lookup(constant_op.constant([10, 11, 12], dtypes.int64))
self.assertAllEqual([b"-", b"a", b"b"], output.eval())
def testMutableHashTableOfTensors(self):
with self.cached_session():
default_val = constant_op.constant([-1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery", "tarkus"])
values = constant_op.constant([[0, 1], [2, 3], [4, 5], [6, 7]],
dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
remove_string = constant_op.constant(["tarkus", "tank"])
table.remove(remove_string).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3, 2], output.get_shape())
result = output.eval()
self.assertAllEqual([[0, 1], [2, 3], [-1, -1]], result)
exported_keys, exported_values = table.export()
self.assertAllEqual([None], exported_keys.get_shape().as_list(),
msg="Saw shape %s" % exported_keys.shape)
self.assertAllEqual([None, 2], exported_values.get_shape().as_list(),
msg="Saw shape %s" % exported_values.shape)
# exported data is in the order of the internal map, i.e. undefined
sorted_keys = np.sort(exported_keys.eval())
sorted_values = np.sort(exported_values.eval())
self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
self.assertAllEqual([[4, 5], [2, 3], [0, 1]], sorted_values)
def testMutableHashTableExportInsert(self):
with self.cached_session():
default_val = constant_op.constant([-1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
table1 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table1.size().eval())
table1.insert(keys, values).run()
self.assertAllEqual(3, table1.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
expected_output = [[0, 1], [2, 3], [-1, -1]]
output1 = table1.lookup(input_string)
self.assertAllEqual(expected_output, output1.eval())
exported_keys, exported_values = table1.export()
self.assertAllEqual(3, exported_keys.eval().size)
self.assertAllEqual(6, exported_values.eval().size)
# Populate a second table from the exported data
table2 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table2.size().eval())
table2.insert(exported_keys, exported_values).run()
self.assertAllEqual(3, table2.size().eval())
# Verify lookup result is still the same
output2 = table2.lookup(input_string)
self.assertAllEqual(expected_output, output2.eval())
def testMutableHashTableOfTensorsInvalidShape(self):
with self.cached_session():
default_val = constant_op.constant([-1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
# Shape [6] instead of [3, 2]
values = constant_op.constant([0, 1, 2, 3, 4, 5], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Shape [2,3] instead of [3, 2]
values = constant_op.constant([[0, 1, 2], [3, 4, 5]], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Shape [2, 2] instead of [3, 2]
values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Shape [3, 1] instead of [3, 2]
values = constant_op.constant([[0], [2], [4]], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Valid Insert
values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
def testMutableHashTableInvalidDefaultValue(self):
with self.cached_session():
default_val = constant_op.constant([[-1, -1]], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
with self.assertRaisesOpError("Default value must be a vector"):
self.assertAllEqual(0, table.size().eval())
def testMutableHashTableDuplicateInsert(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery", "brain"])
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([3, 1, -1], result)
def testMutableHashTableFindHighRank(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2], output.get_shape())
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testMutableHashTableInsertHighRank(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant([["brain", "salad"], ["surgery", "tank"]])
values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank", "tarkus"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, 3, -1], result)
def testMutableHashTableRemoveHighRank(self):
with self.test_session():
default_val = -1
keys = constant_op.constant([["brain", "salad"], ["surgery", "tank"]])
values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
remove_string = constant_op.constant(["salad", "tarkus"])
table.remove(remove_string).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank", "tarkus"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, -1, 3, -1], result)
def testMutableHashTableOfTensorsFindHighRank(self):
with self.cached_session():
default_val = constant_op.constant([-1, -1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]],
dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2, 3], output.get_shape())
result = output.eval()
self.assertAllEqual(
[[[0, 1, 2], [2, 3, 4]], [[-1, -1, -1], [-1, -1, -1]]], result)
def testMutableHashTableOfTensorsRemoveHighRank(self):
with self.test_session():
default_val = constant_op.constant([-1, -1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]],
dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
remove_string = constant_op.constant([["brain", "tank"]])
table.remove(remove_string).run()
self.assertAllEqual(2, table.size().eval())
input_string = constant_op.constant([["brain", "salad"],
["surgery", "tank"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2, 3], output.get_shape())
result = output.eval()
self.assertAllEqual(
[[[-1, -1, -1], [2, 3, 4]], [[4, 5, 6], [-1, -1, -1]]], result)
def testMultipleMutableHashTables(self):
with self.cached_session() as sess:
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table2 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table3 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table1.insert(keys, values).run()
table2.insert(keys, values).run()
table3.insert(keys, values).run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testMutableHashTableWithTensorDefault(self):
with self.cached_session():
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testSignatureMismatch(self):
with self.cached_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
# insert with keys of the wrong type
with self.assertRaises(ValueError):
table.insert(constant_op.constant([4, 5, 6]), values).run()
# insert with values of the wrong type
with self.assertRaises(ValueError):
table.insert(keys, constant_op.constant(["a", "b", "c"])).run()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string_ref = variables.Variable("brain")
input_int64_ref = variables.Variable(-1, dtype=dtypes.int64)
variables.global_variables_initializer().run()
# Ref types do not produce an insert signature mismatch.
table.insert(input_string_ref, input_int64_ref).run()
self.assertAllEqual(3, table.size().eval())
# Ref types do not produce a lookup signature mismatch.
self.assertEqual(-1, table.lookup(input_string_ref).eval())
# lookup with keys of the wrong type
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(ValueError):
table.lookup(input_string).eval()
# default value of the wrong type
with self.assertRaises(TypeError):
lookup.MutableHashTable(dtypes.string, dtypes.int64, "UNK")
def testMutableHashTableStringFloat(self):
with self.cached_session():
default_val = -1.5
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1.1, 2.2], dtypes.float32)
table = lookup.MutableHashTable(dtypes.string, dtypes.float32,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllClose([0, 1.1, default_val], result)
def testMutableHashTableIntFloat(self):
with self.cached_session():
default_val = -1.0
keys = constant_op.constant([3, 7, 0], dtypes.int64)
values = constant_op.constant([7.5, -1.2, 9.9], dtypes.float32)
table = lookup.MutableHashTable(dtypes.int64, dtypes.float32,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([7, 0, 11], dtypes.int64)
output = table.lookup(input_string)
result = output.eval()
self.assertAllClose([-1.2, 9.9, default_val], result)
def testMutableHashTableInt64String(self):
with self.cached_session():
default_val = "n/a"
keys = constant_op.constant([0, 1, 2], dtypes.int64)
values = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.MutableHashTable(dtypes.int64, dtypes.string,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([0, 1, 3], dtypes.int64)
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual((b"brain", b"salad", b"n/a"), result)
class MutableDenseHashTableOpTest(test.TestCase):
def testBasic(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
remove_string = constant_op.constant([12, 15], dtypes.int64)
table.remove(remove_string).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, -1, -1], result)
def testBasicBool(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([True, True, True, True], dtypes.bool)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.bool,
default_value=False,
empty_key=0,
deleted_key=-1)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
remove_string = constant_op.constant([11, 15], dtypes.int64)
table.remove(remove_string).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([False, True, False], result)
def testSameEmptyAndDeletedKey(self):
with self.cached_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"deleted_key"):
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=42,
deleted_key=42)
self.assertAllEqual(0, table.size().eval())
def testLookupUnknownShape(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
placeholder_keys = array_ops.placeholder(dtypes.int64)
output = table.lookup(placeholder_keys)
self.assertAllEqual(None, output.get_shape())
result = output.eval({placeholder_keys: [11, 12, 15]})
self.assertAllEqual([0, 1, -1], result)
def testMapStringToFloat(self):
with self.cached_session():
keys = constant_op.constant(["a", "b", "c", "d"], dtypes.string)
values = constant_op.constant([0.0, 1.1, 2.2, 3.3], dtypes.float32)
default_value = constant_op.constant(-1.5, dtypes.float32)
table = lookup.MutableDenseHashTable(
dtypes.string,
dtypes.float32,
default_value=default_value,
empty_key="",
deleted_key="$")
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
remove_string = constant_op.constant(["b", "e"])
table.remove(remove_string).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["a", "b", "d", "e"], dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([4], output.get_shape())
result = output.eval()
self.assertAllClose([0, -1.5, 3.3, -1.5], result)
def testMapInt64ToFloat(self):
for float_dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([0.0, 1.1, 2.2, 3.3], float_dtype)
default_value = constant_op.constant(-1.5, float_dtype)
table = lookup.MutableDenseHashTable(
dtypes.int64,
float_dtype,
default_value=default_value,
empty_key=0,
deleted_key=-1)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
remove_string = constant_op.constant([12, 15], dtypes.int64)
table.remove(remove_string).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([11, 12, 14, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([4], output.get_shape())
result = output.eval()
self.assertAllClose([0, -1.5, 3.3, -1.5], result)
def testVectorValues(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]],
dtypes.int64)
default_value = constant_op.constant([-1, -2, -3, -4], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=0,
deleted_key=-1,
initial_num_buckets=4)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(4, len(table.export()[0].eval()))
table.insert(
constant_op.constant([14], dtypes.int64),
constant_op.constant([[2, 3, 4, 5]], dtypes.int64)).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(8, len(table.export()[0].eval()))
remove_string = constant_op.constant([12, 16], dtypes.int64)
table.remove(remove_string).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(8, len(table.export()[0].eval()))
input_string = constant_op.constant([11, 12, 14, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([4, 4],
output.shape,
msg="Saw shape: %s" % output.shape)
result = output.eval()
self.assertAllEqual(
[[0, 1, 2, 3], [-1, -2, -3, -4], [2, 3, 4, 5], [-1, -2, -3, -4]],
result)
def testVectorKeys(self):
with self.cached_session():
keys = constant_op.constant([[0, 1], [1, 2], [1, 3]], dtypes.int64)
values = constant_op.constant([10, 11, 12], dtypes.int64)
empty_key = constant_op.constant([0, 3], dtypes.int64)
deleted_key = constant_op.constant([-1, -1], dtypes.int64)
default_value = constant_op.constant(-1, dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
initial_num_buckets=8)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
table.insert(
constant_op.constant([[0, 0]], dtypes.int64),
constant_op.constant([13], dtypes.int64)).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(8, len(table.export()[0].eval()))
remove_string = constant_op.constant([[1, 2], [7, 8]], dtypes.int64)
table.remove(remove_string).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(8, len(table.export()[0].eval()))
input_string = constant_op.constant([[0, 1], [1, 2], [1, 3], [0, 2]],
dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([4], output.get_shape())
result = output.eval()
self.assertAllEqual([10, -1, 12, -1], result)
def testResize(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1,
initial_num_buckets=4)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(4, len(table.export()[0].eval()))
keys2 = constant_op.constant([12, 99], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(4, len(table.export()[0].eval()))
keys3 = constant_op.constant([13, 14, 15, 16, 17], dtypes.int64)
values3 = constant_op.constant([3, 4, 5, 6, 7], dtypes.int64)
table.insert(keys3, values3).run()
self.assertAllEqual(6, table.size().eval())
self.assertAllEqual(16, len(table.export()[0].eval()))
keys4 = constant_op.constant([10, 11, 12, 13, 14, 15, 16, 17, 18],
dtypes.int64)
output = table.lookup(keys4)
self.assertAllEqual([-1, 0, -1, 3, 4, 5, 6, 7, -1], output.eval())
def testExport(self):
with self.cached_session():
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([1, 2, 3, 4], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=100,
deleted_key=200,
initial_num_buckets=8)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
keys2 = constant_op.constant([12, 15], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
exported_keys, exported_values = table.export()
self.assertAllEqual([None], exported_keys.get_shape().as_list())
self.assertAllEqual([None], exported_values.get_shape().as_list())
np_keys = exported_keys.eval()
np_values = exported_values.eval()
self.assertAllEqual(8, len(np_keys))
self.assertAllEqual(8, len(np_values))
# pair up keys and values, drop extra added dimension
pairs = np.dstack((np_keys.flatten(), np_values.flatten()))[0]
# sort by key
pairs = pairs[pairs[:, 0].argsort()]
self.assertAllEqual([[11, 1], [13, 3], [14, 4], [100, 0], [100, 0],
[100, 0], [100, 0], [200, 2]], pairs)
def testSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
default_value = -1
empty_key = 0
deleted_key = -1
keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([12, 15], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([11, 14], dtypes.int64),
constant_op.constant([12, 24], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, -1, 2, 3], output.eval())
@test_util.run_in_graph_and_eager_modes
def testObjectSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_prefix = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
default_value = -1
empty_key = 0
deleted_key = -1
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
save_table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save_checkpoint = checkpointable.Checkpoint(table=save_table)
self.assertAllEqual(0, self.evaluate(save_table.size()))
self.evaluate(save_table.insert(keys, values))
self.assertAllEqual(3, self.evaluate(save_table.size()))
self.assertAllEqual(32, len(self.evaluate(save_table.export()[0])))
save_path = save_checkpoint.save(save_prefix)
del save_table, save_checkpoint
load_table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
self.evaluate(load_table.insert(
constant_op.constant([11, 14], dtypes.int64),
constant_op.constant([12, 24], dtypes.int64)))
self.assertAllEqual(2, self.evaluate(load_table.size()))
self.assertAllEqual(64, len(self.evaluate(load_table.export()[0])))
restore_checkpoint = checkpointable.Checkpoint(table=load_table)
# Restore the saved values in the parameter nodes.
restore_checkpoint.restore(save_path).run_restore_ops()
self.assertAllEqual(3, self.evaluate(load_table.size()))
self.assertAllEqual(32, len(self.evaluate(load_table.export()[0])))
input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
output = load_table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))
def testVectorSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "vector_save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
deleted_key = constant_op.constant([-2, -3], dtypes.int64)
default_value = constant_op.constant([-1, -2], dtypes.int64)
keys = constant_op.constant([[11, 12], [11, 14], [12, 13], [13, 14]],
dtypes.int64)
values = constant_op.constant([[0, 1], [2, 3], [2, 4], [4, 5]],
dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([[12, 13], [16, 17]], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
deleted_key = constant_op.constant([-2, -3], dtypes.int64)
default_value = constant_op.constant([-1, -2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
constant_op.constant([[21, 22], [23, 24]], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant(
[[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([[0, 1], [2, 3], [-1, -2], [4, 5], [-1, -2]],
output.eval())
def testVectorScalarSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "vector_scalar_save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
deleted_key = constant_op.constant([-1, -1], dtypes.int64)
default_value = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant([[11, 12], [11, 14], [12, 13], [13, 14]],
dtypes.int64)
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t2",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([[12, 13], [15, 16]], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
deleted_key = constant_op.constant([-1, -1], dtypes.int64)
default_value = constant_op.constant(-1, dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
name="t2",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
constant_op.constant([3, 4], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant(
[[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([0, 1, -1, 3, -1], output.eval())
def testReprobe(self):
with self.cached_session():
# Insert 6 keys into a table with 8 buckets.
# The values are chosen to make sure collisions occur when using GCC STL
keys = constant_op.constant([11, 12, 13, 19, 20, 21], dtypes.int64)
values = constant_op.constant([51, 52, 53, 54, 55, 56], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1,
initial_num_buckets=8)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(6, table.size().eval())
input_string = constant_op.constant([10, 11, 12, 13, 14, 19, 20, 21, 22],
dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([9], output.get_shape())
result = output.eval()
self.assertAllEqual([-1, 51, 52, 53, -1, 54, 55, 56, -1], result)
def testCustomEmptyKey(self):
with self.cached_session():
keys = constant_op.constant([11, 0, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
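      # empty_key is set to 12 here, so 0 is an ordinary key and can be
      # inserted and looked up like any other value.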
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=12,
deleted_key=-1)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([11, 0, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testErrors(self):
with self.cached_session():
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
deleted_key=-1)
# Inserting the empty key returns an error
keys1 = constant_op.constant([11, 0], dtypes.int64)
values1 = constant_op.constant([0, 1], dtypes.int64)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"empty_key"):
table.insert(keys1, values1).run()
# Looking up the empty key returns an error
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"empty_key"):
table.lookup(keys1).eval()
# Inserting the deleted key returns an error
keys2 = constant_op.constant([11, -1], dtypes.int64)
values2 = constant_op.constant([0, 1], dtypes.int64)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"deleted_key"):
table.insert(keys2, values2).run()
      # Looking up the deleted key returns an error
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"deleted_key"):
table.lookup(keys2).eval()
# Arbitrary tensors of keys are not supported
keys = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
values = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Expected key shape"):
table.lookup(keys).eval()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Expected key shape"):
table.insert(keys, values).run()
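      # initial_num_buckets=12 is not a power of two, which the dense hash
      # table presumably rejects once the size op below forces it to run.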
table2 = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=17,
deleted_key=-1,
initial_num_buckets=12)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Number of buckets must be"):
self.assertAllEqual(0, table2.size().eval())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"Empty and deleted keys must have same shape"):
table3 = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=42,
deleted_key=[1, 2])
self.assertAllEqual(0, table3.size().eval())
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Empty and deleted keys cannot be equal"):
table4 = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=42,
deleted_key=42)
self.assertAllEqual(0, table4.size().eval())
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Empty and deleted keys cannot be equal"):
table5 = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=[1, 2, 3],
deleted_key=[1, 2, 3])
self.assertAllEqual(0, table5.size().eval())
class IndexTableFromFile(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def test_string_index_table_from_file(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_string_index_table_from_file_tensor_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_file = constant_op.constant(vocabulary_file)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_string_index_table_from_file_placeholder_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.cached_session():
vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
feed_dict = {vocabulary_placeholder.name: vocabulary_file}
lookup_ops.tables_initializer().run(feed_dict=feed_dict)
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(0,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_int32_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab2.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab3.txt", values=("42", "1", "-1000"))
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_file_with_default_value(self):
default_value = -42
vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_file_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1000)
ids = table.lookup(
constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
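      # Out-of-vocabulary keys are assigned ids of the form
      # vocab_size + fingerprint(key) % num_oov_buckets.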
self.assertAllEqual(
(
1, # From vocabulary file.
2, # From vocabulary file.
              867,  # 3 + fingerprint("tarkus") mod 1000.
              860),  # 3 + fingerprint("toccata") mod 1000.
ids.eval())
def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file="")
def test_index_table_from_file_fails_with_empty_vocabulary(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=None)
def test_index_table_from_file_with_vocab_size_too_small(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=2)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, -1, -1), ids.eval())
self.assertEqual(2, table.size().eval())
def test_index_table_from_file_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", table.initializer.run)
def test_index_table_from_file_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab8.txt")
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
with self.cached_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), ids.eval())
self.assertEqual(3, table.size().eval())
def test_index_table_from_file_with_invalid_hashers(self):
vocabulary_file = self._createVocabFile("invalid_hasher.txt")
with self.cached_session():
with self.assertRaises(TypeError):
lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
class KeyValueTensorInitializerTest(test.TestCase):
def test_string(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.initializer.run()
def test_int64(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.initializer.run()
def test_int32(self):
with ops.Graph().as_default(), self.cached_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int32, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
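      # No HashTable kernel is registered for int32 keys, so initialization
      # is expected to fail.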
with self.assertRaisesRegexp(
errors_impl.OpError, "No OpKernel was registered"):
table.initializer.run()
class IndexTableFromTensor(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_index_table_from_tensor_with_tensor_init(self):
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(table.lookup(
constant_op.constant(("salad", "surgery", "tarkus"))))
else:
# Reinitializing a table in eager should work.
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
self.evaluate(lookup_ops.tables_initializer())
ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int32_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_tensor_with_tensor_init(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_tensor_with_default_value(self):
default_value = -42
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"], default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_tensor_missing_mapping(self):
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "mapping must be specified"):
lookup.index_table_from_tensor(mapping=None, num_oov_buckets=1)
def test_index_table_from_tensor_empty_mapping(self):
with self.cached_session():
table = lookup.index_table_from_tensor(
mapping=np.array([], dtype=np.str_), num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "brain"]))
self.assertRaises(errors_impl.OpError, ids.eval)
with self.assertRaisesRegexp(
errors_impl.OpError, "keys and values cannot be empty"):
lookup_ops.tables_initializer().run()
def test_index_table_from_tensor_with_invalid_hashers(self):
with self.cached_session():
with self.assertRaises(TypeError):
lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
class StringToIndexTest(test.TestCase):
def test_string_to_index(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), indices.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
feats = constant_op.constant(["hello", "hola"])
_ = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError,
lookup_ops.tables_initializer().run)
def test_string_to_index_with_default_value(self):
default_value = -42
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(
feats, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), indices.eval())
class IndexToStringTableFromFileTest(test.TestCase):
def _createVocabFile(self, basename):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
return vocabulary_file
def test_index_to_string_table(self):
vocabulary_file = self._createVocabFile("i2f_vocab1.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file)
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_index_to_string_table_with_default_value(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_small(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=2,
default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", default_value, default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
init = lookup_ops.tables_initializer()
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", init.run)
def test_index_to_string_table_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.cached_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", b"UNK"), features.eval())
class IndexToStringTableFromTensorTest(test.TestCase):
def test_index_to_string_table_from_tensor(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 4], dtypes.int64)
features = table.lookup(indices)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), features.eval())
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings, default_value=default_value)
indices = constant_op.constant([1, 2, 4], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
class IndexToStringTest(test.TestCase):
def test_index_to_string(self):
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
feats.eval())
def test_duplicate_entries(self):
with self.cached_session():
mapping_strings = constant_op.constant(["hello", "hello"])
indices = constant_op.constant([0, 1, 4], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())
self.assertRaises(errors_impl.OpError,
lookup_ops.tables_initializer().run)
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.cached_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([1, 2, 4], dtypes.int64)
feats = lookup.index_to_string(
indices, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
class InitializeTableFromFileOpTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
@test_util.run_in_graph_and_eager_modes
def testInitializeStringTable(self):
vocabulary_file = self._createVocabFile("one_column_1.txt")
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
self.evaluate(table.initializer)
output = table.lookup(constant_op.constant(["brain", "salad", "tank"]))
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testInitializeInt64Table(self):
vocabulary_file = self._createVocabFile(
"one_column_int64.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
table.initializer.run()
output = table.lookup(
constant_op.constant((42, 1, 11), dtype=dtypes.int64))
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInitializeIndexTable(self):
vocabulary_file = self._createVocabFile("one_column_2.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.LINE_NUMBER
value_index = lookup.TextFileIndex.WHOLE_LINE
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string, value_index),
default_value)
table.initializer.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
output = table.lookup(input_values)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)
def testMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 1
value_index = 2
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([1, 5, 6], result)
def testInvalidDataTypeInMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.cached_session():
default_value = -1
key_index = 2
value_index = 1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("is not a valid"):
table.initializer.run()
def testInvalidDataType(self):
vocabulary_file = self._createVocabFile("one_column_3.txt")
with self.cached_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.WHOLE_LINE
value_index = lookup.TextFileIndex.LINE_NUMBER
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string,
value_index), default_value)
def testInvalidIndex(self):
vocabulary_file = self._createVocabFile("one_column_4.txt")
with self.cached_session():
default_value = -1
key_index = 1 # second column of the line
value_index = lookup.TextFileIndex.LINE_NUMBER
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("Invalid number of columns"):
table.initializer.run()
def testInitializeSameTableWithMultipleNodes(self):
vocabulary_file = self._createVocabFile("one_column_5.txt")
with self.cached_session() as sess:
      shared_name = "shared-one-column"
default_value = -1
table1 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table2 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table3 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testInitializeTableWithNoFilename(self):
with self.cached_session():
default_value = -1
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
"", dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testInitializeWithVocabSize(self):
with self.cached_session():
default_value = -1
vocab_size = 3
vocabulary_file1 = self._createVocabFile("one_column6.txt")
table1 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file1,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Initialize from file.
table1.initializer.run()
self.assertEquals(vocab_size, table1.size().eval())
vocabulary_file2 = self._createVocabFile("one_column7.txt")
vocab_size = 5
table2 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file2,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
with self.assertRaisesOpError("Invalid vocab_size"):
table2.initializer.run()
vocab_size = 1
vocabulary_file3 = self._createVocabFile("one_column3.txt")
table3 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file3,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Smaller vocab size reads only vocab_size records.
table3.initializer.run()
self.assertEquals(vocab_size, table3.size().eval())
def testFeedVocabularyName(self):
vocabulary_file = self._createVocabFile("feed_vocabulary.txt")
with self.cached_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer("old_file.txt", dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
      # Initializing with a non-existent file (old_file.txt) should fail.
# TODO(yleon): Update message, which might change per FileSystem.
with self.assertRaisesOpError("old_file.txt"):
table.initializer.run()
# Initialize the model feeding the vocabulary file.
filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
table.initializer.run(feed_dict={filenames[0]: vocabulary_file})
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInvalidFilenames(self):
vocabulary_file = self._createVocabFile("filename_shape.txt")
with self.cached_session():
default_value = -1
# Invalid data type
other_type = constant_op.constant(1)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
other_type, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
# Non-scalar filename
filenames = constant_op.constant([vocabulary_file, vocabulary_file])
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
filenames, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testIdToStringTable(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = "UNK"
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileStringTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.initializer.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
out = table.lookup(input_values)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testStringToIdTable(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, -1], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testInt64ToIdTable(self):
vocab_file = self._createVocabFile(
"feat_to_id_3.txt", values=("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value)
table.initializer.run()
out = table.lookup(
constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))
self.assertAllEqual((0, 1, 2, -1), out.eval())
self.assertEquals(vocab_size, table.size().eval())
class IdTableWithHashBucketsTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def testStringIdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value),
oov_buckets)
table.initializer.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt32IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets,
key_dtype=dtypes.int32)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt64IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets)
table.initializer.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testStringIdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Create a table that only uses hash buckets; for each input value it
      # returns an id calculated as fingerprint(input) mod oov_buckets.
table = lookup.IdTableWithHashBuckets(None, oov_buckets)
table.initializer.run()
values = constant_op.constant(("brain", "salad", "surgery"))
out = table.lookup(values)
self.assertAllEqual(
[
3, # fingerprint("brain") mod 5.
1, # fingerprint("salad") mod 5.
4 # fingerprint("surgery") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testInt32IdTableWithOnlyHashBucket(self):
with self.cached_session():
oov_buckets = 5
      # Create a table that only uses hash buckets; for each input value it
      # returns an id calculated as fingerprint(input) mod oov_buckets.
table = lookup.IdTableWithHashBuckets(
None, oov_buckets, key_dtype=dtypes.int32)
table.initializer.run()
input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)
out = table.lookup(input_string)
self.assertAllEqual(
[
1, # fingerprint("42") mod 5.
4, # fingerprint("1") mod 5.
2 # fingerprint("-1000") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testFloat64IdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.float64)
def testBoolIdTableWithOnlyHashBucket(self):
with self.cached_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.bool)
def testIdTableWithHashBucketsWithMultipleInitializers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session() as sess:
default_value = -1
vocab_size = 3
oov_buckets = 3
vocab_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table1 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.FastHashSpec,
name="table1")
table2 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec((1, 2)),
name="table2")
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(
["fruit", "brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string)
out2 = table2.lookup(input_string)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([5, 0, 1, 2, 5], out1)
self.assertAllEqual([5, 0, 1, 2, 3], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
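      # The two hasher specs should have produced distinct hash-bucket ops
      # in the graph.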
test_util.assert_ops_in_graph({
"table1_Lookup/hash_bucket": "StringToHashBucketFast",
"table2_Lookup/hash_bucket": "StringToHashBucketStrong",
}, sess.graph)
def testIdTableWithHashBucketsInitializationAcrossSessions(self):
vocab_file = self._createVocabFile("feat_to_id_5.txt")
shared_name = "across-sessions"
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
table1.initializer.run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string_1)
self.assertAllEqual([0, 1, 2, 3], out1.eval())
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
# Underlying lookup table already initialized in previous session.
# No need to call table2.initializer.run()
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out2 = table2.lookup(input_string_2)
self.assertAllEqual([3, 1, 3], out2.eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):
vocab_file = self._createVocabFile("feat_to_id_6.txt")
with self.cached_session() as sess:
default_value1 = -1
vocab_size = 3
oov_buckets = 0
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value1),
oov_buckets)
default_value2 = -2
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value2),
oov_buckets)
lookup_ops.tables_initializer().run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out1 = table1.lookup(input_string_1)
out2 = table2.lookup(input_string_2)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([0, 1, 2, -1], out1)
self.assertAllEqual([-2, 1, -2], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testSparseTensor(self):
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
dtypes.string),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=3),
-1),
1)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt32SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int32)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt64SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.cached_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int64)
table.initializer.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testIdTableWithHashBucketsWithInvalidHashers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.cached_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
lookup_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
with self.assertRaises(TypeError):
lookup.IdTableWithHashBuckets(
lookup_table, oov_buckets, hasher_spec=1)
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
with self.assertRaises(ValueError):
table.lookup(input_string)
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([]))
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([1, 2, 3]))
with self.assertRaises(TypeError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([None, 2]))
class MutableHashTableBenchmark(test.Benchmark):
def _create_table(self):
return lookup.MutableHashTable(dtypes.int64, dtypes.float32, 0.0)
def benchmark_single_repeated_scalar_insert_scalar(self):
table = self._create_table()
value = variables.Variable(1.0)
insert = table.insert(0, value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)
assert sess.run(size) == 1
def benchmark_many_repeated_scalar_insert_scalar(self):
table = self._create_table()
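    # counter.Counter() yields an ever-increasing int64, so every benchmark
    # iteration inserts a fresh key.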
c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()
value = variables.Variable(1.0)
insert = table.insert(c, value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)
assert sess.run(size) >= 10000
def benchmark_single_repeated_batch_32_insert_scalar(self):
table = self._create_table()
value = variables.Variable([1.0] * 32)
insert = table.insert(list(range(32)), value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)
assert sess.run(size) == 32
def benchmark_many_repeated_batch_32_insert_scalar(self):
table = self._create_table()
c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()
value = variables.Variable([1.0] * 32)
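    # Each iteration inserts a disjoint batch of 32 keys (offset by
    # 32 * counter), so the table keeps growing.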
insert = table.insert(32 * c + list(range(32)), value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)
assert sess.run(size) >= 1000*32
class MutableDenseHashTableBenchmark(MutableHashTableBenchmark):
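  # Reuses the benchmarks above, but against a MutableDenseHashTable.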
def _create_table(self):
return lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.float32,
default_value=0.0,
empty_key=-1,
deleted_key=-2)
if __name__ == "__main__":
test.main()
|
hfp/tensorflow-xsmm
|
tensorflow/contrib/lookup/lookup_ops_test.py
|
Python
|
apache-2.0
| 106,863
|
# -*- coding: utf-8 -*-
"""djangoflash.models test cases.
"""
from unittest import TestCase
from djangoflash.models import FlashScope, _SESSION_KEY, _USED_KEY
class FlashScopeTestCase(TestCase):
"""Tests the FlashScope object.
"""
def setUp(self):
"""Create a FlashScope object to be used by the test methods.
"""
self.flash = FlashScope()
self.flash['info'] = 'Info'
def test_restore(self):
"""FlashScope: Should restore the flash using a dict.
"""
data = {_SESSION_KEY: {'info' : 'Info',
'error': 'Error'},
_USED_KEY : {'error': None}}
self.flash = FlashScope(data)
self.assertEqual(2, len(self.flash))
self.assertEqual('Info', self.flash['info'])
self.assertEqual('Error', self.flash['error'])
self.flash.update()
self.assertEqual('Info', self.flash['info'])
self.assertFalse('error' in self.flash)
def test_restore_immutability(self):
"""FlashScope: Should restore the flash using a shallow copy of a dict.
"""
data = {_SESSION_KEY: {'info' : 'Info',
'error': 'Error'},
_USED_KEY : {'error': None}}
self.flash = FlashScope(data)
self.assertEqual('Info', self.flash['info'])
del data[_SESSION_KEY]['info']
self.assertTrue('info' in self.flash)
def test_restore_with_invalid_type(self):
"""FlashScope: Should not restore the flash using an invalid object.
"""
self.assertRaises(TypeError, lambda: FlashScope('invalid_data'))
def test_restore_with_invalid_keys(self):
"""FlashScope: Should not restore the flash using a dict with invalid keys.
"""
data = {_SESSION_KEY: None}
self.assertRaises(ValueError, lambda: FlashScope(data))
data = {_USED_KEY: None}
self.assertRaises(ValueError, lambda: FlashScope(data))
def test_restore_with_invalid_values(self):
"""FlashScope: Should not restore the flash using a dict with invalid values.
"""
data = {_SESSION_KEY: {}, _USED_KEY: None}
self.assertRaises(ValueError, lambda: FlashScope(data))
data = {_SESSION_KEY: None, _USED_KEY: {}}
self.assertRaises(ValueError, lambda: FlashScope(data))
def test_contains(self):
"""FlashScope: "key in flash" syntax should be supported.
"""
self.assertFalse('error' in self.flash)
self.assertEqual('Info', self.flash['info'])
def test_get_invalid_item(self):
"""FlashScope: Should raise KeyError if trying to get an invalid value.
"""
        self.assertRaises(KeyError, lambda: self.flash['error'])
def test_set_item(self):
"""FlashScope: flash[key] = value" syntax should be supported.
"""
self.flash['error'] = 'Error'
        self.assertEqual('Error', self.flash['error'])
def test_del_item(self):
"""FlashScope: "del flash[key]" syntax should be supported.
"""
self.assertEqual('Info', self.flash['info'])
del self.flash['info']
self.assertFalse('info' in self.flash)
def test_clear(self):
"""FlashScope: flash.clear() should remove all items from the flash scope.
"""
self.flash['error'] = 'Error'
self.assertEqual(2, len(self.flash))
self.flash.clear()
self.assertEqual(0, len(self.flash))
def test_len(self):
"""FlashScope: "len(flash)" syntax should be supported.
"""
self.assertEqual(1, len(self.flash))
def test_keys(self):
"""FlashScope: Should return the list of keys stored in the flash scope.
"""
self.assertEqual(['info'], self.flash.keys())
def test_values(self):
"""FlashScope: Should return the list of values stored in the flash scope.
"""
self.assertEqual(['Info'], self.flash.values())
def test_items(self):
"""FlashScope: Should return the list of items stored in the flash scope.
"""
self.assertEqual([('info', 'Info')], self.flash.items())
def test_iterkeys(self):
"""FlashScope: Should return an iterator to the keys stored in the flash scope.
"""
iterator = self.flash.iterkeys()
self.assertEqual('info', iterator.next())
self.assertRaises(StopIteration, iterator.next)
def test_itervalues(self):
"""FlashScope: Should return an iterator to the values stored in the flash scope.
"""
iterator = self.flash.itervalues()
self.assertEqual('Info', iterator.next())
self.assertRaises(StopIteration, iterator.next)
def test_iteritems(self):
"""FlashScope: Should return an iterator to the items stored in the flash scope.
"""
iterator = self.flash.iteritems()
self.assertEqual(('info', 'Info'), iterator.next())
self.assertRaises(StopIteration, iterator.next)
def test_add_with_existing_non_list_value(self):
"""FlashScope: Should append a value to a key even if the current value is not a list.
"""
self.flash.add('info', 'Error')
self.assertEqual(['Info', 'Error'], self.flash['info'])
def test_add_with_existing_list_value(self):
"""FlashScope: Should append a value if the current value is a list.
"""
self.flash['error'] = ['Error 1']
self.flash.add('error', 'Error 2')
self.assertEqual(['Error 1', 'Error 2'], self.flash['error'])
def test_add_with_non_existing_value(self):
"""FlashScope: Should add a value even if the given key doesn't exists.
"""
self.flash.add('error', 'Error')
self.assertEqual(['Error'], self.flash['error'])
def test_add_across_requests(self):
"""FlashScope: Should keep a key when a value is appended to it.
"""
self.flash['error'] = 'Error 1'
self.flash.update()
self.flash.add('error', 'Error 2')
self.assertEqual(['Error 1', 'Error 2'], self.flash['error'])
self.flash.update()
self.assertEqual(['Error 1', 'Error 2'], self.flash['error'])
self.flash.update()
self.assertFalse('error' in self.flash)
def test_get(self):
"""FlashScope: Should return a default value if the given key doesn' exists.
"""
self.assertEqual('Oops', self.flash.get('error', 'Oops'))
self.assertEqual('Info', self.flash.get('info', 'Something'))
self.assertEqual(None, self.flash.get('error'))
def test_pop(self):
"""FlashScope: Should pop a value from the flash scope.
"""
self.assertEqual(None, self.flash.pop('error'))
self.assertEqual('Info', self.flash.pop('info'))
self.assertFalse('info' in self.flash)
def test_pop_used_value(self):
"""FlashScope: Should pop a used value from the flash scope.
"""
self.flash.update()
self.assertEqual('Info', self.flash.pop('info'))
self.assertFalse('info' in self.flash)
def test_put(self):
"""FlashScope: Should put several keys into the flash scope at the same time.
"""
self.flash.put(warn='Warning', error='Error')
self.assertEqual('Warning', self.flash['warn'])
self.assertEqual('Error', self.flash['error'])
def test_discard(self):
"""FlashScope: Should mark a value for removal.
"""
self.flash.discard()
self.flash.update()
self.assertFalse('info' in self.flash)
def test_keep(self):
"""FlashScope: Should avoid the removal of specific values.
"""
self.flash.update()
self.flash.keep('info')
self.flash.update()
self.assertEqual('Info', self.flash['info'])
self.flash.update()
self.assertFalse('info' in self.flash)
def test_keep_all(self):
"""FlashScope: Should avoid the removal of all values.
"""
self.flash.update()
self.flash.keep()
self.flash.update()
self.assertEqual('Info', self.flash['info'])
self.flash.update()
self.assertFalse('info' in self.flash)
def test_replace_used_value(self):
"""FlashScope: Should keep a key when its value is replaced.
"""
self.flash.update()
self.assertEqual('Info', self.flash['info'])
self.flash['info'] = 'Error'
self.assertEqual('Error', self.flash['info'])
self.flash.update()
self.assertEqual('Error', self.flash['info'])
self.flash.update()
self.assertFalse('info' in self.flash)
def test_empty_to_dict(self):
"""FlashScope: Should export the flash data to a dict even if it's empty.
"""
self.flash = FlashScope()
expected_data = {_SESSION_KEY: {}, _USED_KEY:{}}
data = self.flash.to_dict()
self.assertEqual(expected_data, data)
def test_to_dict(self):
"""FlashScope: Should export the flash data to a dict.
"""
self.flash.update()
self.flash['error'] = 'Error'
expected_data = {_SESSION_KEY: {'info' : 'Info',
'error': 'Error'},
_USED_KEY : {'info' : None}}
data = self.flash.to_dict()
self.assertEqual(expected_data, data)
def test_to_dict_immutability(self):
"""FlashScope: Should export a copy of the flash data as a dict.
"""
data = self.flash.to_dict()
del self.flash['info']
self.assertEqual('Info', data[_SESSION_KEY]['info'])
class ImmediateFlashScope(TestCase):
"""Tests the ``Flashscope.now``.
"""
def setUp(self):
"""Create a FlashScope object to be used by the test methods.
"""
self.flash = FlashScope()
self.flash.now['info'] = 'Info'
def test_now(self):
"""FlashScope.now: "flash.now[key] = value" syntax should be supported.
"""
self.assertEqual('Info', self.flash['info'])
self.flash.update()
self.assertFalse('info' in self.flash)
def test_alternative_now(self):
"""FlashScope.now: Immediate values (flash.now) should be supported.
"""
self.flash.now.put(error='Error')
self.assertEqual('Error', self.flash['error'])
self.flash.update()
self.assertFalse('error' in self.flash)
def test_contains(self):
"""FlashScope.now: "key in flash.now" syntax should be supported.
"""
self.assertFalse('error' in self.flash.now)
self.flash.now['error'] = 'Error'
self.assertTrue('error' in self.flash.now)
def test_get_invalid_item(self):
"""FlashScope.now: Should raise KeyError if trying to get an invalid item.
"""
        self.assertRaises(KeyError, lambda: self.flash.now['error'])
def test_add_with_non_existing_value(self):
"""FlashScope.now: Should append an immediate value even if the given key doesn't exists.
"""
self.flash.now.add('error', 'Error 1')
self.flash.now.add('error', 'Error 2', 'Error 3')
self.assertEqual(['Error 1', 'Error 2', 'Error 3'], self.flash['error'])
def test_add_with_existing_non_list_value(self):
"""FlashScope.now: Should append immediate values to a key even if the current value is not a list.
"""
self.flash.now.add('info', 'Error 1')
self.flash.now.add('info', 'Error 2', 'Error 3')
self.assertEqual(['Info', 'Error 1', 'Error 2', 'Error 3'], self.flash['info'])
def test_add_with_existing_list_value(self):
"""FlashScope.now: Should append an immediate value if the current value is a list.
"""
self.flash.now['error'] = ['Error 1']
self.flash.now.add('error', 'Error 2')
self.flash.now.add('error', 'Error 3', 'Error 4')
self.assertEqual(['Error 1', 'Error 2', 'Error 3', 'Error 4'], self.flash['error'])
class MixedFlashScope(TestCase):
"""Tests mixing regular and immediate values.
"""
def setUp(self):
"""Create a FlashScope object to be used by the test methods.
"""
self.flash = FlashScope()
def test_replace_with_immediate_value(self):
"""FlashScope: Should replace a regular value by an immediate value.
"""
self.flash['info'] = 'Info'
self.flash.update()
self.assertEqual('Info', self.flash['info'])
self.flash.now['info'] = 'Error'
self.assertEqual('Error', self.flash['info'])
self.flash.update()
self.assertFalse('info' in self.flash)
def test_replace_immediate_with_regular_value(self):
"""FlashScope: Should replace an immediate value with a regular value.
"""
self.flash.now['info'] = 'Info'
self.assertEqual('Info', self.flash['info'])
self.flash['info'] = 'Error'
self.flash.update()
self.assertEqual('Error', self.flash['info'])
self.flash.update()
self.assertFalse('info' in self.flash)
def test_add_immediate_with_existing_regular_value(self):
"""FlashScope.now: Should add an immediate value to a regular key, expiring on the current request.
"""
self.flash['error'] = 'Error 1'
self.flash.now.add('error', 'Error 2')
self.assertEqual(['Error 1', 'Error 2'], self.flash['error'])
self.flash.update()
self.assertFalse('error' in self.flash)
def test_add_immediate_with_existing_regular_list(self):
"""FlashScope.now: Should add an immediate value to a regular list, expiring on the current request.
"""
self.flash['error'] = ['Error 1']
self.flash.now.add('error', 'Error 2')
self.assertEqual(['Error 1', 'Error 2'], self.flash['error'])
self.flash.update()
self.assertFalse('error' in self.flash)
def test_add_regular_with_existing_immediate_value(self):
"""FlashScope: Should add a regular value to an immediate key, expiring on the next request.
"""
self.flash.now['error'] = 'Error 1'
self.flash.add('error', 'Error 2')
self.assertEqual(['Error 1', 'Error 2'], self.flash['error'])
self.flash.update()
self.assertEqual(['Error 1', 'Error 2'], self.flash['error'])
self.flash.update()
self.assertFalse('error' in self.flash)
def test_add_regular_with_existing_immediate_list(self):
"""FlashScope: Should add a regular value to an immediate list, expiring on the next request.
"""
self.flash.now['error'] = ['Error 1']
self.flash.add('error', 'Error 2')
self.assertEqual(['Error 1', 'Error 2'], self.flash['error'])
self.flash.update()
self.assertEqual(['Error 1', 'Error 2'], self.flash['error'])
self.flash.update()
self.assertFalse('error' in self.flash)
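# A short usage sketch of the behaviour exercised above (illustrative only; it
# assumes the FlashScope API exactly as used in these tests):
#
#   flash = FlashScope()
#   flash['notice'] = 'Saved!'     # regular value: still present after one update()
#   flash.now['error'] = 'Oops'    # immediate value: discarded by the next update()
#   flash.update()                 # marks the end of the current request
#   'notice' in flash              # -> True
#   'error' in flash               # -> False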
| danielfm/django-flash | src/djangoflash/tests/models.py | Python | bsd-3-clause | 15,008 |
from django.http import HttpResponse
from django.urls import reverse
from django.utils.decorators import decorator_from_middleware
from django.test import (
TestCase,
RequestFactory,
modify_settings,
override_settings,
)
from headers.middleware import (
MultipleProxyMiddleware,
ViaHeaderMiddleware,
)
def goodview(request, *args, **kwargs):
return HttpResponse(
"Hello world",
content_type="text/html"
)
class HeadersMiddleWareTest(TestCase):
def setUp(self):
self.rf = RequestFactory()
def test_with_extra_meta(self):
req = self.rf.get('/')
for f in getattr(
MultipleProxyMiddleware,
'FORWARDED_FOR_FIELDS'
): req.META[f] = 'Value1'
del(req.META['HTTP_X_FORWARDED_SERVER'])
req.META['HTTP_X_FORWARDED_FOR'] += ',Foo'
req.META['SERVER_SOFTWARE'] = 'foo/1.1'
viawrap = decorator_from_middleware(ViaHeaderMiddleware)
mulwrap = decorator_from_middleware(MultipleProxyMiddleware)
viawrap(mulwrap(goodview))(req)
@override_settings(USE_ETAGS=False)
def test_without_etags(self):
res = self.client.get(
reverse('contact'),
CONTENT_LENGTH=123
)
self.assertEqual(200, res.status_code)
self.assertFalse(res.has_header('ETag'))
| alphageek-xyz/site | tests/test_middleware.py | Python | bsd-3-clause | 1,350 |
# module pyparsing.py
#
# Copyright (c) 2003-2013 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString( hello ))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.0.2"
__versionTime__ = "13 April 2014 12:10"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr',
]
PY_3 = sys.version.startswith('3')
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
alphas = string.ascii_lowercase + string.ascii_uppercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "loc msg pstr parserElement lineno col line " \
"markInputline __str__ __repr__".split()
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like C{L{ParseFatalException}}, but thrown internally when an
C{L{ErrorStop<And._ErrorStop>}} ('-' operator) indicates that parsing is to stop immediately because
an unbacktrackable syntax error has been found"""
def __init__(self, pe):
super(ParseSyntaxException, self).__init__(
pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by C{validate()} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>})
"""
def __new__(cls, toklist, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not toklist in (None,'',[]):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,int):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return len( self.__toklist ) > 0
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def iterkeys( self ):
"""Returns all named result keys."""
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def itervalues( self ):
"""Returns all named result values."""
return (self[k] for k in self.iterkeys())
def iteritems( self ):
return ((k, self[k]) for k in self.iterkeys())
if PY_3:
keys = iterkeys
values = itervalues
items = iteritems
else:
def keys( self ):
"""Returns all named result keys."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values."""
return list(self.itervalues())
def items( self ):
"""Returns all named result keys and values as a list of tuples."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""Removes and returns item at specified index (default=last).
Supports both list and dict semantics for pop(). If passed no
argument or an integer argument, it will use list semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use dict
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in dict.pop()."""
if not args:
args = [-1]
if 'default' in kwargs:
args.append(kwargs['default'])
        if (isinstance(args[0], int) or
                len(args) == 1 or
                args[0] in self):
            # list-style pop: resolve the index before reading and deleting
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            defaultvalue = args[1]
            return defaultvalue
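    # For example (Word, alphas and the expr("name") shorthand are defined later
    # in this module):
    #   r = (Word(alphas)("first") + Word(alphas)("second")).parseString("hello world")
    #   r.pop()                   # -> 'world'  (list semantics: last token)
    #   r.pop("first")            # -> 'hello'  (dict semantics: named result)
    #   r.pop("missing", "n/a")   # -> 'n/a'    (default return value)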
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified."""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""Inserts new element at location index in the list of parsed tokens."""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""Add single element to end of ParseResults list of elements."""
self.__toklist.append(item)
def extend( self, itemseq ):
"""Add sequence of elements to end of ParseResults list of elements."""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""Clear all elements and results names."""
del self.__toklist[:]
self.__tokdict.clear()
    def __getattr__( self, name ):
        try:
            return self[name]
        except KeyError:
            return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
return self.copy()
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
out = []
for i in self.__toklist:
if isinstance(i, ParseResults):
out.append(_ustr(i))
else:
out.append(repr(i))
return '[' + ', '.join(out) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""Returns the parse results as a nested list of matching tokens, all converted to strings."""
out = []
for res in self.__toklist:
if isinstance(res,ParseResults):
out.append( res.asList() )
else:
out.append( res )
return out
def asDict( self ):
"""Returns the named parse results as dictionary."""
if PY_3:
return dict( self.items() )
else:
return dict( self.iteritems() )
def copy( self ):
"""Returns a new copy of a C{ParseResults} object."""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
worklist = self.__toklist
for i,res in enumerate(worklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
            return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self,indent='',depth=0):
"""Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data."""
out = []
out.append( indent+_ustr(self.asList()) )
items = sorted(self.items())
for k,v in items:
if out:
out.append('\n')
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v.haskeys():
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(_ustr(v))
return "".join(out)
def pprint(self, *args, **kwargs):
"""Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __dir__(self):
return dir(super(ParseResults,self)) + list(self.keys())
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
if limit[0] <= maxargs and not foundArity[0]:
limit[0] += 1
continue
raise
return wrapper
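# For example, all of the following are accepted parse actions; _trim_arity adapts
# each one to the full (s, loc, toks) calling convention used internally:
#   Word(nums).setParseAction(lambda toks: int(toks[0]))           # 1-argument form
#   Word(nums).setParseAction(lambda loc, toks: int(toks[0]))      # 2-argument form
#   Word(nums).setParseAction(lambda s, loc, toks: int(toks[0]))   # 3-argument form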
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
"""
ParserElement.literalStringClass = cls
inlineLiteralsUsing = staticmethod(inlineLiteralsUsing)
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
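    # For example, results names make individual fields addressable after parsing:
    #   integer = Word(nums)
    #   date = (integer.setResultsName("year") + "/" +
    #           integer.setResultsName("month") + "/" +
    #           integer.setResultsName("day"))
    #   date.parseString("2014/04/13")["month"]   # -> '04'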
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
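    # For example, a parse action that converts matched numeric tokens to ints
    # (using only names defined in this module):
    #   integer = Word(nums).setParseAction(lambda s, loc, toks: int(toks[0]))
    #   integer.parseString("42")[0]   # -> 42 (an int, not the string "42")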
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
           Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value, Exception):
raise value
return (value[0],value[1].copy())
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException as pe:
pe.__traceback__ = None
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing covers
           both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
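    # Typical usage (memoization is opt-in and global to the module):
    #   import pyparsing
    #   pyparsing.ParserElement.enablePackrat()   # call once, right after import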
def parseString( self, instring, parseAll=False ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
           - explicitly expand the tabs in your input string before calling
C{parseString}
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
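    # For example, with and without C{parseAll}:
    #   greet = Word(alphas) + "," + Word(alphas) + "!"
    #   greet.parseString("Hello, World!")                       # -> ['Hello', ',', 'World', '!']
    #   greet.parseString("Hello, World! bye")                   # also succeeds; trailing text is ignored
    #   greet.parseString("Hello, World! bye", parseAll=True)    # raises ParseException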
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
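    # For example, scanning a string for every run of digits:
    #   for tokens, start, end in Word(nums).scanString("a 1 b 22"):
    #       print(tokens, start, end)   # -> (['1'], 2, 3) then (['22'], 6, 8)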
def transformString( self, instring ):
"""Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
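    # For example, a parse action can rewrite each match in place:
    #   wd = Word(alphas).setParseAction(lambda toks: toks[0].upper())
    #   wd.transformString("hello parse world")   # -> 'HELLO PARSE WORLD'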
def searchString( self, instring, maxMatches=_MAX_INT ):
"""Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __add__(self, other ):
"""Implementation of + operator - returns C{L{And}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns C{L{And}} with error stop"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""Implementation of * operator, allows use of C{expr * 3} in place of
           C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
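    # For example:
    #   Word(nums) * 3           # exactly three numbers, e.g. "12 5 99"
    #   Word(nums) * (2, None)   # two or more numbers
    #   Word(nums) * (None, 4)   # zero to four numbers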
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns C{L{MatchFirst}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns C{L{Or}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns C{L{Each}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns C{L{NotAny}}"""
return NotAny( self )
def __call__(self, name=None):
"""Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other.copy() )
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
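    # For example, skipping C-style comments while parsing a comma-separated list:
    #   items = delimitedList(Word(alphanums))
    #   items.ignore(cStyleComment)
    #   items.parseString("a, b /* note */ , c").asList()   # -> ['a', 'b', 'c']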
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "r")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or self.__dict__ == other.__dict__
elif isinstance(other, basestring):
try:
self.parseString(_ustr(other), parseAll=True)
return True
except ParseBaseException:
return False
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
class Token(ParserElement):
"""Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
def __init__( self ):
super(Token,self).__init__( savelist=False )
def setName(self, name):
s = super(Token,self).setName(name)
self.errmsg = "Expected " + self.name
return s
class Empty(Token):
"""An empty token, will always match."""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""Token to exactly match a specified string."""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement.literalStringClass = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}::
Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.
Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
matching, default is C{False}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
super(Keyword,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
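# For example, contrasting Literal and Keyword on the same inputs:
#   Literal("if").parseString("ifconfig")   # -> ['if']  (matches the leading characters)
#   Keyword("if").parseString("ifconfig")   # raises ParseException (not a whole word)
#   Keyword("if").parseString("if (x):")    # -> ['if']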
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{exclude} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.bodyCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if not(instring[ loc ] in self.initChars):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
"""Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
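An illustrative example - named groups in the pattern become results names::
date = Regex("(?P<year>[0-9]{4})-(?P<month>[0-9]{2})-(?P<day>[0-9]{2})")
date.parseString("2015-10-21")["year"]   # -> '2015'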
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if len(pattern) == 0:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
"""Token for matching strings that are delimited by quoting characters.
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
"""
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=None)
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
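Illustrative examples of these parameters::
QuotedString('"').parseString('"hello world"')                    # -> ['hello world']
QuotedString('{{', endQuoteChar='}}').parseString('{{nested}}')   # -> ['nested']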
"""
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if len(quoteChar) == 0:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if len(endQuoteChar) == 0:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
charset = ''.join(set(self.quoteChar[0]+self.endQuoteChar[0])).replace('^',r'\^').replace('-',r'\-')
self.escCharReplacePattern = re.escape(self.escChar)+("([%s])" % charset)
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern,r"\g<1>",ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given set.
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
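An illustrative example::
csv_field = CharsNotIn(",")   # matches a run of characters up to, but not including, the next comma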
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class."""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for tabular report scraping."""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""Matches if current position is at the beginning of a line within the parse string"""
def __init__( self ):
super(LineStart,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected start of line"
def preParse( self, instring, loc ):
preloc = super(LineStart,self).preParse(instring,loc)
if instring[preloc] == "\n":
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
if not( loc==0 or
(loc == self.preParse( instring, 0 )) or
(instring[loc-1] == "\n") ): #col(loc, instring) != 1:
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the parse string"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse string"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ Literal( exprs ) ]
elif isinstance( exprs, collections.Sequence ):
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(Literal, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
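An illustrative example::
greeting = Literal("Hello") + Word(alphas) + "!"
greeting.parseString("Hello World!")   # -> ['Hello', 'World', '!']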
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( exprs[0].whiteChars )
self.skipWhitespace = exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException(pe)
except IndexError:
raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
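An illustrative example::
number = Word(nums) ^ Combine(Word(nums) + "." + Word(nums))
number.parseString("3.1416")   # -> ['3.1416'] - the longer (real number) alternative wins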
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxMatchLoc = -1
maxException = None
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
if loc2 > maxMatchLoc:
maxMatchLoc = loc2
maxMatchExp = e
if maxMatchLoc < 0:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
return maxMatchExp._parse( instring, loc, doActions )
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
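An illustrative example - order matters, so list longer alternatives first::
number = Combine(Word(nums) + "." + Word(nums)) | Word(nums)
number.parseString("3.1416")   # -> ['3.1416']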
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
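An illustrative example::
color = Keyword("red") | Keyword("green") | Keyword("blue")
size = Word(nums)
spec = color & size   # matches both "red 5" and "5 red"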
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(e)
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults([])
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults:
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. C{FollowedBy}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. C{NotAny}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
try:
self.expr.tryParse( instring, loc )
except (ParseException,IndexError):
pass
else:
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def __init__( self, expr ):
super(ZeroOrMore,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
tokens = []
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def parseImpl( self, instring, loc, doActions=True ):
# must be at least one
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
A default return string can also be specified, if the optional expression
is not found.
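An illustrative example::
signed_int = Optional(oneOf("+ -"), default="+") + Word(nums)
signed_int.parseString("42")   # -> ['+', '42'] - the default sign is supplied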
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
If C{include} is set to true, the matched expression is also parsed (the skipped text
and matched expression are returned as a 2-element list). The C{ignore}
argument is used to define grammars (typically quoted strings and comments) that
might contain false matches.
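An illustrative example::
error_line = Suppress("ERROR:") + SkipTo(LineEnd())("message")
# the text between "ERROR:" and the end of the line is returned as the "message" result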
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if failOn is not None and isinstance(failOn, basestring):
self.failOn = Literal(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startLoc = loc
instrlen = len(instring)
expr = self.expr
failParse = False
while loc <= instrlen:
try:
if self.failOn:
try:
self.failOn.tryParse(instring, loc)
except ParseBaseException:
pass
else:
failParse = True
raise ParseException(instring, loc, "Found expression " + str(self.failOn))
failParse = False
if self.ignoreExpr is not None:
while 1:
try:
loc = self.ignoreExpr.tryParse(instring,loc)
# print("found ignoreExpr, advance to", loc)
except ParseBaseException:
break
expr._parse( instring, loc, doActions=False, callPreParse=False )
skipText = instring[startLoc:loc]
if self.includeMatch:
loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
if mat:
skipRes = ParseResults( skipText )
skipRes += mat
return loc, [ skipRes ]
else:
return loc, [ skipText ]
else:
return loc, [ skipText ]
except (ParseException,IndexError):
if failParse:
raise
else:
loc += 1
raise ParseException(instring, loc, self.errmsg, self)
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
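An illustrative example of a recursive grammar::
nested_list = Forward()
nested_list <<= Word(nums) | Group(Suppress("(") + delimitedList(nested_list) + Suppress(")"))
nested_list.parseString("(1,(2,3),4)")   # -> [['1', ['2', '3'], '4']]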
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass(other)
self.expr = other
self.mayReturnEmpty = other.mayReturnEmpty
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of C{ParseExpression}, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Upcase(TokenConverter):
"""Converter to upper case all matching tokens."""
def __init__(self, *args):
super(Upcase,self).__init__(*args)
warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
DeprecationWarning,stacklevel=2)
def postParse( self, instring, loc, tokenlist ):
return list(map( str.upper, tokenlist ))
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
Useful for tabular report scraping when the first column can be used as an item key.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression."""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""Decorator for debugging parse actions."""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
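Illustrative examples::
delimitedList(Word(alphas)).parseString("aa, bb ,cc")   # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC")   # -> ['AA:BB:CC']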
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr )
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And( [ Literal(tt) for tt in tflat ] )
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
expressions, it will *not* match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a list of string literals
- caseless - (default=False) - treat all literals as caseless
- useRegex - (default=True) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
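An illustrative example::
comparison_op = oneOf("< = > <= >= !=")
# matches the same alternatives as Literal("<=") | Literal(">=") | ... with longest-first ordering handled automatically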
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, collections.Sequence):
symbols = list(strs[:])
elif isinstance(strs, _generatorType):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or list",
SyntaxWarning, stacklevel=2)
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) )
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) )
except:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not
require the inspect module to chase up the call stack. By default, returns a
string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values."""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
del t[:]
t.insert(0, s[t._original_start:t._original_end])
del t["_original_start"]
del t["_original_end"]
matchExpr.setParseAction(extractText)
return matchExpr
def ungroup(expr):
"""Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty."""
return TokenConverter(expr).setParseAction(lambda t:t[0])
def locatedExpr(expr):
"""Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains C{<TAB>} characters, you may want to call
C{L{ParserElement.parseWithTabs}}
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\x' (\x21, which is a '!' character)
(\0x## is also supported for backwards compatibility)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
"""
def _replFunc(*args):
return [replStr]
return _replFunc
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed quoted strings.
To use, add this parse action to quoted string using::
quotedString.setParseAction( removeQuotes )
"""
return t[0][1:-1]
def upcaseTokens(s,l,t):
"""Helper parse action to convert tokens to upper case."""
return [ tt.upper() for tt in map(_ustr,t) ]
def downcaseTokens(s,l,t):
"""Helper parse action to convert tokens to lower case."""
return [ tt.lower() for tt in map(_ustr,t) ]
def keepOriginalText(s,startLoc,t):
"""DEPRECATED - use new helper method C{L{originalTextFor}}.
Helper parse action to preserve original parsed text,
overriding any nested parse actions."""
try:
endloc = getTokensEndLoc()
except ParseException:
raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
del t[:]
t += ParseResults(s[startLoc:endloc])
return t
def getTokensEndLoc():
"""Method to be called from within a parse action to determine the end
location of the parsed tokens."""
import inspect
fstack = inspect.stack()
try:
# search up the stack (through intervening argument normalizers) for correct calling routine
for f in fstack[2:]:
if f[3] == "_parseNoCache":
endloc = f[0].f_locals["loc"]
return endloc
else:
raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
finally:
del fstack
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML, given a tag name"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML, given a tag name"""
return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
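An illustrative example::
td_start, td_end = makeHTMLTags("td")
right_cell = td_start.copy().addParseAction(withAttribute(align="right"))
# right_cell now matches only <td> tags whose align attribute equals "right"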
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions.
Parameters:
- baseExpr - expression representing the most basic element for the nested
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=Suppress('('))
- rpar - expression for matching right-parentheses (default=Suppress(')'))
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward()#.setName("expr%d" % i)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr <<= ( matchExpr | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
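# --- Editor's illustrative sketch (not part of the original pyparsing source) ---
# A two-level arithmetic grammar built with infixNotation. It assumes Word, nums and
# oneOf are defined earlier in this module, as in standard pyparsing; the grammar and
# the sample input are illustrative only, and the helper is never called on import.
def _example_infixNotation():
    integer = Word(nums).setParseAction(lambda t: int(t[0]))
    arith = infixNotation(integer, [
        (oneOf("* /"), 2, opAssoc.LEFT),
        (oneOf("+ -"), 2, opAssoc.LEFT),
    ])
    # multiplication binds tighter, so the parse nests as [[9, '+', [2, '*', 3]]]
    return arith.parseString("9 + 2 * 3").asList()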
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret
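# --- Editor's illustrative sketch (not part of the original pyparsing source) ---
# With the defaults, nestedExpr splits whitespace-delimited items inside matching
# parentheses into nested Python lists; the sample string is illustrative only.
def _example_nestedExpr():
    return nestedExpr().parseString("(a (b c) d)").asList()  # -> [['a', ['b', 'c'], 'd']]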
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond the
       current level; set to False for block of left-most statements
(default=True)
A valid block must contain at least one C{blockStatement}.
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = Empty() + Empty().setParseAction(checkSubIndent)
PEER = Empty().setParseAction(checkPeerIndent)
UNDENT = Empty().setParseAction(checkUnindent)
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
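# --- Editor's illustrative sketch (not part of the original pyparsing source) ---
# commonHTMLEntity and replaceHTMLEntity are meant to be paired: attaching the parse
# action and calling transformString decodes the few entities in _htmlEntityMap.
# The sample text is illustrative only; the helper is never called on import.
def _example_replaceHTMLEntity():
    decoder = commonHTMLEntity.copy().setParseAction(replaceHTMLEntity)
    return decoder.transformString("2 &gt; 1 &amp;&amp; 1 &lt; 2")  # -> '2 > 1 && 1 < 2'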
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
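# --- Editor's illustrative sketch (not part of the original pyparsing source) ---
# commaSeparatedList keeps quoted items (including their quotes) as single tokens;
# the sample line is illustrative only and the helper is never called on import.
def _example_commaSeparatedList():
    return commaSeparatedList.parseString("a, b, 'c, d', e").asList()
    # -> ['a', 'b', "'c, d'", 'e']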
if __name__ == "__main__":
def test( teststring ):
try:
tokens = simpleSQL.parseString( teststring )
tokenlist = tokens.asList()
print (teststring + "->" + str(tokenlist))
print ("tokens = " + str(tokens))
print ("tokens.columns = " + str(tokens.columns))
print ("tokens.tables = " + str(tokens.tables))
print (tokens.asXML("SQL",True))
except ParseBaseException as err:
print (teststring + "->")
print (err.line)
print (" "*(err.column-1) + "^")
print (err)
print()
selectToken = CaselessLiteral( "select" )
fromToken = CaselessLiteral( "from" )
ident = Word( alphas, alphanums + "_$" )
columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
simpleSQL = ( selectToken + \
( '*' | columnNameList ).setResultsName( "columns" ) + \
fromToken + \
tableNameList.setResultsName( "tables" ) )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select AA,BB,CC from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select ^^^ frox Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
| Sir-Fancy/AlgArt | libalgart/libpyparsing/pyparsing.py | Python | artistic-2.0 | 153,727 |
def func_good(
a = 3,
b = 2):
pass
def func_bad(
a = 3,
b = 2
):
pass
| zedlander/flake8-commas | test/data/multiline_bad_function_def.py | Python | mit | 116 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class mrp_product_produce(osv.osv_memory):
_name = "mrp.product.produce"
_description = "Product Produce"
_columns = {
'product_qty': fields.float('Select Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'mode': fields.selection([('consume_produce', 'Consume & Produce'),
('consume', 'Consume Only')], 'Mode', required=True,
help="'Consume only' mode will only consume the products with the quantity selected.\n"
"'Consume & Produce' mode will consume as well as produce the products with the quantity selected "
"and it will finish the production order when total ordered quantities are produced."),
}
def _get_product_qty(self, cr, uid, context=None):
""" To obtain product quantity
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: Quantity
"""
if context is None:
context = {}
prod = self.pool.get('mrp.production').browse(cr, uid,
context['active_id'], context=context)
done = 0.0
for move in prod.move_created_ids2:
if move.product_id == prod.product_id:
if not move.scrapped:
done += move.product_qty
return (prod.product_qty - done) or prod.product_qty
_defaults = {
'product_qty': _get_product_qty,
'mode': lambda *x: 'consume_produce'
}
def do_produce(self, cr, uid, ids, context=None):
production_id = context.get('active_id', False)
assert production_id, "Production Id should be specified in context as a Active ID."
data = self.browse(cr, uid, ids[0], context=context)
self.pool.get('mrp.production').action_produce(cr, uid, production_id,
data.product_qty, data.mode, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jmesteve/saas3 | openerp/addons/mrp/wizard/mrp_product_produce.py | Python | agpl-3.0 | 3,258 |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import Gaffer
import GafferImage
import GafferUI
import GafferImageUI
from IECore import StringVectorData
class ChannelMaskPlugValueWidget( GafferUI.PlugValueWidget ) :
## \todo The inputImagePlug argument should take a Plug, not the name of a plug.
# It should default to None, and in this case look for an input ImagePlug called "in".
# We should then be able to remove all the specific registrations of this class with
# PlugValueWidget, and instead rely on the one at the bottom of this file, which is
# currently useless.
def __init__( self, plug, inputImagePlug, **kw ) :
self.__multiSelectionMenu = GafferUI.MultiSelectionMenu( allowMultipleSelection = True, alwaysHaveASelection=False )
GafferUI.PlugValueWidget.__init__( self, self.__multiSelectionMenu, plug, **kw )
self.__selectionChangedConnection = self.__multiSelectionMenu.selectionChangedSignal().connect( Gaffer.WeakMethod( self._updateFromSelection ) )
self.__inputPlug = None
if inputImagePlug != None :
p = plug.node()[inputImagePlug]
if p.direction() != Gaffer.Plug.Direction.In :
raise RuntimeError("Image plug is not an input. Please connect an input image plug.")
else:
self.__inputPlug = p
self.__inputChangedConnection = self.__inputPlug.node().plugInputChangedSignal().connect( Gaffer.WeakMethod( self._updateFromImagePlug ) )
self._updateFromImagePlug( self.__inputPlug )
else :
raise RuntimeError("Failed to find an input image plug. Please ensure that one has been assigned in the ChannelMaskPlugValueWidget's constructor.")
self._updateFromPlug()
def _displayText( self ) :
selected = self.__multiSelectionMenu.getSelection()
nSelected = len( selected )
nEntries = len( self.__multiSelectionMenu )
if nEntries == 0 :
return "none"
elif nSelected == nEntries :
return "all"
elif nSelected == 0 :
return "none"
text = ""
for i in range( 0, nSelected ) :
if i < 4 :
text = text + selected[i][-1]
elif i == 4 :
text = text + "..."
break
return text
def _updateFromSelection( self, widget ) :
plug = self.getPlug()
if plug is not None :
with Gaffer.UndoContext( plug.ancestor( Gaffer.ScriptNode ) ) :
selection = StringVectorData( self.__multiSelectionMenu.getSelection() )
self.__multiSelectionMenu.setText( self._displayText() )
selection = [ channel.replace( "/", "." ) for channel in selection ]
plug.setValue( StringVectorData( selection ) )
def _updateFromPlug( self ) :
# Get the value from the plug and select those channels from the menu.
plug = self.getPlug()
if plug is not None :
with self.getContext() :
plugValue = plug.getValue()
if plugValue is None :
plugValue = plug.ValueType()
else :
plugValue = [ channel.replace( ".", "/" ) for channel in plugValue ]
self.__multiSelectionMenu.setSelection( plugValue )
self.__multiSelectionMenu.setEnabled( True )
## Populates the menu with the channels found on the inputPlug.
## When populating the menu, if the current plug value is trying to mask a
## channel which doesn't exist on the input, it is disabled (but still displayed).
def _updateFromImagePlug( self, inputPlug ) :
if not inputPlug.isSame( self.__inputPlug ) and not self.__inputPlug == None :
return
input = self.__inputPlug
# Get the new channels from the input plug.
with self.getContext() :
channels = list( input['channelNames'].getValue() )
channels = [ channel.replace( ".", "/" ) for channel in channels ]
# Get the currently selected channels from the input plug.
plug = self.getPlug()
if plug is not None :
with self.getContext() :
plugValue = plug.getValue()
selected = []
for item in plugValue :
selected.append( item )
# Merge the selected channels and the input's channels.
# We do this by creating a list of unique channels which are also ordered so that
# any channels that were selected but don't belong to the input's channels are
# appended to the end.
seen = set()
seen_add = seen.add
newChannels = [ x for x in channels + selected if x not in seen and not seen_add(x)]
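        # Editor's illustrative note: for example, channels == ["R", "G", "B"] with
        # selected == ["B", "Z"] yields newChannels == ["R", "G", "B", "Z"]; the input
        # channels keep their order and the stale selection "Z" is appended at the end.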
self.__multiSelectionMenu[:] = newChannels
# Now disable the channels that don't exist on the input.
disabled = set( selected ) - set( channels )
self.__multiSelectionMenu.setSelection( selected )
if len( disabled ) > 0 :
enabled = set(self.__multiSelectionMenu.getEnabledItems()) - disabled
self.__multiSelectionMenu.setEnabledItems( enabled )
GafferUI.PlugValueWidget.registerType( GafferImage.ChannelMaskPlug, ChannelMaskPlugValueWidget )
| goddardl/gaffer | python/GafferImageUI/ChannelMaskPlugValueWidget.py | Python | bsd-3-clause | 6,424 |
"""
functionality for finding inverse foreign key relations in model classes
"""
try:
from django.contrib.contenttypes.fields import ReverseGenericRelatedObjectsDescriptor
except ImportError:
from django.contrib.contenttypes.generic import ReverseGenericRelatedObjectsDescriptor
from django.db.models.base import ModelBase
from django.db.models.fields.related import ForeignRelatedObjectsDescriptor, SingleRelatedObjectDescriptor
from easymode.i18n.meta import DefaultFieldDescriptor
INTROSPECTION_ERROR = """
%s
Easymode caught an AttributeError while trying
to inspect %s looking for %s.
Please report to easymode@librelist.com.
"""
def _get_members_of_type(obj, member_type):
"""
Finds members of a certain type in obj.
:param obj: A model instance or class.
    :param member_type: The type of the member we are trying to find.
:rtype: A :class:`list` of ``member_type`` found in ``obj``
"""
if not issubclass(type(obj), ModelBase):
obj = obj.__class__
key_hash = []
for key in dir(obj):
try:
attr = getattr(obj, key)
except AttributeError as e:
try:
attr = obj.__dict__[key]
except KeyError:
raise AttributeError(INTROSPECTION_ERROR % (e, obj, member_type))
if type(attr) is member_type:
key_hash.append((key, attr))
return key_hash
def get_foreign_key_desciptors(obj):
"""
finds all :class:`~django.db.models.fields.ForeignRelatedObjectsDescriptor` in obj.
:param obj: A model instance or class.
"""
return _get_members_of_type(obj, ForeignRelatedObjectsDescriptor)
def get_one_to_one_descriptors(obj):
"""
finds all :class:`~django.db.models.fields.SingleRelatedObjectDescriptor` in obj.
:param obj: A model instance or class.
"""
return _get_members_of_type(obj, SingleRelatedObjectDescriptor)
def get_generic_relation_descriptors(obj):
"""
Finds all the :class:`~django.contrib.contenttypes.generic.ReverseGenericRelatedObjectsDescriptor` in obj.
:param obj: A model instance or class.
"""
return _get_members_of_type(obj, ReverseGenericRelatedObjectsDescriptor)
def get_default_field_descriptors(obj):
"""
find all :class:`~easymode.i18n.meta.DefaultFieldDescriptor` in obj.
:param obj: A model instance or class.
"""
return _get_members_of_type(obj, DefaultFieldDescriptor)
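# --- Editor's illustrative sketch (not part of the original module) ---
# Given a hypothetical model exposing a reverse foreign-key accessor, the helpers above
# return (attribute_name, descriptor) pairs. The model and attribute names below are
# made up for illustration only:
#
#     from myapp.models import Article          # hypothetical model with related comments
#     get_foreign_key_desciptors(Article)
#     # -> [('comment_set', <ForeignRelatedObjectsDescriptor object ...>)]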
| raagin/django-easymode | easymode/tree/introspection.py | Python | gpl-3.0 | 2,442 |
# -*- coding: utf-8 -*-
import json
import unittest
from preprocessings.ja.cleaning import clean_html_tags, clean_html_and_js_tags, clean_code, clean_url
class TestCleaning(unittest.TestCase):
def setUp(self):
self.html_text = open('data/test.html').read()
def test_clean_html_tag(self):
clean_text = clean_html_tags(self.html_text)
self.assertTrue('<span color="red">' not in clean_text)
print(clean_text)
def test_clean_javascript_tag(self):
clean_text = clean_html_and_js_tags(self.html_text)
self.assertTrue('<span color="red">' not in clean_text)
self.assertTrue('var textbook' not in clean_text)
print(clean_text)
def test_qiita_text(self):
with open('data/qiita.json') as f:
qiita_json = json.load(f)
html_text = qiita_json['rendered_body']
clean_text = clean_code(html_text)
print(clean_text)
def test_clean_url(self):
with open('data/qiita.json') as f:
qiita_json = json.load(f)
html_text = qiita_json['rendered_body']
clean_text = clean_code(html_text)
clean_text = clean_url(clean_text)
print(clean_text)
| Hironsan/natural-language-preprocessings | tests/ja/cleaning.py | Python | mit | 1,204 |
# -*- coding: utf-8 -*-
#
# Rackspace Developer Documentation documentation build configuration file,
# created by sphinx-quickstart on Thu Mar 6 14:14:55 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# import os
from pygments.lexers.web import PhpLexer
from sphinx.highlighting import lexers
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.ifconfig',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Document Repository'
copyright = '2016, Joseph Robinson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1'
# The full version, including alpha/beta/rc tags.
# release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'samples', 'README.rst', 'common/*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'document-repository'
# this will change the 'paragraph' character to '#'
html_add_permalinks = '#'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [('index', 'documentation-repository.tex',
'Documentation Repository',
'Joseph Robinson', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'documentation-repository',
'Documentation Repository', ['Joseph Robinson'], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [('index', 'documentation-repository',
'Documentation Repository',
'Joseph Robinson', 'documentation-repository')]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Custom options for PHP output ----------------------------------------
lexers['php'] = PhpLexer(startinline=True)
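# Editor's illustrative note: with this override registered, a reST block such as
#
#   .. code-block:: php
#
#      $greeting = "hello";
#
# is highlighted as PHP without an opening <?php tag, because startinline=True tells
# the Pygments PhpLexer to start lexing inside PHP code.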
| RobinJoe/Docs | doc/conf.py | Python | gpl-3.0 | 8,757 |
import logging
from collections import Counter
from copy import copy
from pathlib import Path
from typing import Iterable, NamedTuple, Union
from fs import path as fspath
from fs.base import FS
from fs.errors import NoSysPath
from fs.walk import Walker
from rich.console import Console
from . import config, console
from .actions import ACTIONS
from .actions.action import Action
from .filters import FILTERS
from .filters.filter import Filter
from .migration import migrate_v1
from .utils import (
basic_args,
deep_merge_inplace,
ensure_dict,
ensure_list,
fs_path_from_options,
to_args,
)
logger = logging.getLogger(__name__)
highlighted_console = Console()
class Location(NamedTuple):
walker: Walker
fs: FS
fs_path: str
DEFAULT_SYSTEM_EXCLUDE_FILES = [
"thumbs.db",
"desktop.ini",
"~$*",
".DS_Store",
".localized",
]
DEFAULT_SYSTEM_EXCLUDE_DIRS = [
".git",
".svn",
]
def convert_options_to_walker_args(options: dict):
# combine system_exclude and exclude into a single list
excludes = copy(
ensure_list(options.get("system_exclude_files", DEFAULT_SYSTEM_EXCLUDE_FILES))
)
excludes.extend(ensure_list(options.get("exclude_files", [])))
exclude_dirs = copy(
ensure_list(options.get("system_exclude_dirs", DEFAULT_SYSTEM_EXCLUDE_DIRS))
)
exclude_dirs.extend(ensure_list(options.get("exclude_dirs", [])))
if not excludes:
excludes = None
if not exclude_dirs:
exclude_dirs = None
filter_ = copy(ensure_list(options.get("filter", [])))
filter_dirs = copy(ensure_list(options.get("filter_dirs", [])))
if not filter_:
filter_ = None
if not filter_dirs:
filter_dirs = None
# return all the default options
result = {
"ignore_errors": options.get("ignore_errors", False),
"on_error": options.get("on_error", None),
"search": options.get("search", "depth"),
"exclude": excludes,
"exclude_dirs": exclude_dirs,
"max_depth": options.get("max_depth", None),
"filter": filter_,
"filter_dirs": filter_dirs,
}
return result
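# --- Editor's illustrative sketch (not part of the original module) ---
# Calling the helper with a small options dict merges the user excludes with the
# system defaults defined above; the option values are illustrative only.
#
#     convert_options_to_walker_args({"exclude_files": "*.tmp", "max_depth": 1})
#     # -> {"exclude": [..., "*.tmp"], "exclude_dirs": [".git", ".svn"],
#     #     "max_depth": 1, "search": "depth", "filter": None, "filter_dirs": None, ...}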
def instantiate_location(options: Union[str, dict], default_max_depth=0) -> Location:
if isinstance(options, Location):
return options
if isinstance(options, str):
options = {"path": options}
# set default max depth from rule
if not "max_depth" in options:
options["max_depth"] = default_max_depth
if "walker" not in options:
args = convert_options_to_walker_args(options)
walker = Walker(**args)
else:
walker = options["walker"]
fs, fs_path = fs_path_from_options(
path=options.get("path", "/"),
filesystem=options.get("filesystem"),
)
return Location(walker=walker, fs=fs, fs_path=fs_path)
def instantiate_filter(filter_config):
if isinstance(filter_config, Filter):
return filter_config
spec = ensure_dict(filter_config)
name, value = next(iter(spec.items()))
parts = name.split(maxsplit=1)
invert = False
if len(parts) == 2 and parts[0] == "not":
name = parts[1]
invert = True
args, kwargs = to_args(value)
instance = FILTERS[name](*args, **kwargs)
instance.set_logic(inverted=invert)
return instance
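# Editor's illustrative note: a config entry such as {"not extension": "jpg"} resolves
# to FILTERS["extension"]("jpg") with inverted logic, assuming a filter named
# "extension" is registered in FILTERS; the names here are illustrative only.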
def instantiate_action(action_config):
if isinstance(action_config, Action):
return action_config
spec = ensure_dict(action_config)
name, value = next(iter(spec.items()))
args, kwargs = to_args(value)
return ACTIONS[name](*args, **kwargs)
def syspath_or_exception(fs, path):
try:
return Path(fs.getsyspath(path))
except NoSysPath as e:
return e
def replace_with_instances(config: dict):
warnings = []
for rule in config["rules"]:
default_depth = None if rule.get("subfolders", False) else 0
_locations = []
for options in ensure_list(rule["locations"]):
try:
instance = instantiate_location(
options=options,
default_max_depth=default_depth,
)
_locations.append(instance)
except Exception as e:
if isinstance(options, dict) and options.get("ignore_errors", False):
warnings.append(str(e))
else:
raise ValueError("Invalid location %s (%s)" % (options, e)) from e
# filters are optional
_filters = []
for x in ensure_list(rule.get("filters", [])):
try:
_filters.append(instantiate_filter(x))
except Exception as e:
raise ValueError("Invalid filter %s (%s)" % (x, e)) from e
# actions
_actions = []
for x in ensure_list(rule["actions"]):
try:
_actions.append(instantiate_action(x))
except Exception as e:
raise ValueError("Invalid action %s (%s)" % (x, e)) from e
rule["locations"] = _locations
rule["filters"] = _filters
rule["actions"] = _actions
return warnings
def filter_pipeline(filters: Iterable[Filter], args: dict, filter_mode: str) -> bool:
"""
run the filter pipeline.
Returns True on a match, False otherwise and updates `args` in the process.
"""
results = []
for filter_ in filters:
try:
# update dynamic path args
args["path"] = syspath_or_exception(args["fs"], args["fs_path"])
args["relative_path"] = fspath.frombase(
args["fs_base_path"], args["fs_path"]
)
match, updates = filter_.pipeline(args)
result = match ^ filter_.inverted
# we cannot exit early on "any".
if (filter_mode == "none" and result) or (
filter_mode == "all" and not result
):
return False
results.append(result)
deep_merge_inplace(args, updates)
except Exception as e: # pylint: disable=broad-except
logger.exception(e)
# console.print_exception()
filter_.print_error(str(e))
return False
if filter_mode == "any":
return any(results)
return True
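# Editor's illustrative note on the modes above: filter_mode="all" fails fast on the
# first non-matching filter, "none" fails fast on the first matching filter, and "any"
# must evaluate every filter before returning any(results).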
def action_pipeline(actions: Iterable[Action], args: dict, simulate: bool) -> bool:
for action in actions:
try:
# update dynamic path args
args["path"] = syspath_or_exception(args["fs"], args["fs_path"])
args["relative_path"] = fspath.frombase(
args["fs_base_path"], args["fs_path"]
)
updates = action.pipeline(args, simulate=simulate)
# jobs may return a dict with updates that should be merged into args
if updates is not None:
deep_merge_inplace(args, updates)
except Exception as e: # pylint: disable=broad-except
logger.exception(e)
action.print_error(str(e))
return False
return True
def run_rules(rules: dict, simulate: bool = True):
count = Counter(done=0, fail=0) # type: Counter
if simulate:
console.simulation_banner()
console.spinner(simulate=simulate)
for rule_nr, rule in enumerate(rules["rules"], start=1):
target = rule.get("targets", "files")
console.rule(rule.get("name", "Rule %s" % rule_nr))
filter_mode = rule.get("filter_mode", "all")
for walker, walker_fs, walker_path in rule["locations"]:
console.location(walker_fs, walker_path)
walk = walker.files if target == "files" else walker.dirs
for path in walk(fs=walker_fs, path=walker_path):
if walker_fs.islink(path):
continue
# tell the user which resource we're handling
console.path(walker_fs, path)
# assemble the available args
args = basic_args()
args.update(
fs=walker_fs,
fs_path=path,
fs_base_path=walker_path,
)
# run resource through the filter pipeline
match = filter_pipeline(
filters=rule["filters"],
args=args,
filter_mode=filter_mode,
)
# if the currently handled resource changed we adjust the prefix message
if args.get("resource_changed"):
console.path_changed_during_pipeline(
fs=walker_fs,
fs_path=path,
new_fs=args["fs"],
new_path=args["fs_path"],
reason=args.get("resource_changed"),
)
args.pop("resource_changed", None)
# run resource through the action pipeline
if match:
is_success = action_pipeline(
actions=rule["actions"],
args=args,
simulate=simulate,
)
if is_success:
count["done"] += 1
else:
count["fail"] += 1
if simulate:
console.simulation_banner()
return count
def run(rules: Union[str, dict], simulate: bool, validate=True):
# load and validate
if isinstance(rules, str):
rules = config.load_from_string(rules)
rules = config.cleanup(rules)
migrate_v1(rules)
if validate:
config.validate(rules)
# instantiate
warnings = replace_with_instances(rules)
for msg in warnings:
console.warn(msg)
# run
count = run_rules(rules=rules, simulate=simulate)
console.summary(count)
if count["fail"]:
raise RuntimeWarning("Some actions failed.")
| tfeldmann/organize | organize/core.py | Python | mit | 9,998 |
from pycp2k.inputsection import InputSection
class _spawned_hills_invdt1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Default_keyword = []
self._name = "SPAWNED_HILLS_INVDT"
self._repeated_default_keywords = {'Default_keyword': 'DEFAULT_KEYWORD'}
self._attributes = ['Default_keyword']
| SINGROUP/pycp2k | pycp2k/classes/_spawned_hills_invdt1.py | Python | lgpl-3.0 | 356 |
# -*- coding: utf-8 -*-
from django.conf.urls import url, patterns
urlpatterns = patterns(
'tcms.testplans.views',
url(r'^$', 'all'),
url(r'^new/$', 'new'),
url(r'^ajax/$', 'ajax_search'),
url(r'^treeview/$', 'tree_view'),
url(r'^clone/$', 'clone'),
url(r'^printable/$', 'printable'),
url(r'^export/$', 'export'),
url(r'^component/$', 'component'),
)
| MrSenko/Nitrate | tcms/testplans/urls/plans_urls.py | Python | gpl-2.0 | 389 |
#!/usr/bin/env python
#
# Copyright 2011 Authors of PyTask.
#
# This file is part of PyTask.
#
# PyTask is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyTask is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyTask. If not, see <http://www.gnu.org/licenses/>.
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@fossee.in>',
'"Nishanth Amuluru" <nishanth@fossee.in>',
]
# Django settings for pytask project.
import os
from pytask.local import *
ADMINS = (
('Madhusudan C.S.', 'madhusudancs@fossee.in'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Kolkata'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/pytask/media/'
# Absolute path to the directory that holds static files.
# Example: "/home/static-files/static-files.lawrence.com/"
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')
# URL that handles the static files served from STATIC_ROOT. Make sure to use
# a trailing slash if there is a path component (optional in other cases).
# Examples: "http://static-files.lawrence.com",
# "http://example.com/static-files/"
STATIC_URL = '/pytask/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/pytask/admin_media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^ww=xk&idt)=03kqg*fz8x%=dqbhh1kd2z=f%$m@r9_+9b=&x='
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.contrib.messages.context_processors.messages',
'pytask.taskapp.context_processors.configuration',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'pytask.middleware.exceptions.ExceptionMiddleware',
)
ROOT_URLCONF = 'pytask.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates'),
)
INSTALLED_APPS = (
'django_extensions',
'registration',
'tagging',
'south',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'pytask',
'pytask.profile',
'pytask.taskapp',
)
AUTH_PROFILE_MODULE = 'profile.Profile'
#django-registration
ACCOUNT_ACTIVATION_DAYS = 7
DEFAULT_FROM_EMAIL = 'FOSSEE Admin <admin@fossee.in>'
| madhusudancs/pytask | pytask/settings.py | Python | agpl-3.0 | 4,774 |
from marathon.models.app import MarathonApp
import unittest
class MarathonAppTest(unittest.TestCase):
def test_env_defaults_to_empty_dict(self):
"""
é testé
"""
app = MarathonApp()
self.assertEquals(app.env, {})
def test_add_env_empty_dict(self):
app = MarathonApp()
app.add_env("MY_ENV", "my-value")
self.assertDictEqual({"MY_ENV": "my-value"}, app.env)
def test_add_env_non_empty_dict(self):
env_data = {"OTHER_ENV": "other-value"}
app = MarathonApp(env=env_data)
app.add_env("MY_ENV", "my-value")
self.assertDictEqual({"MY_ENV": "my-value", "OTHER_ENV": "other-value"}, app.env)
| thefactory/marathon-python | tests/test_model_app.py | Python | mit | 698 |
from Screens.Screen import Screen
from Plugins.Plugin import PluginDescriptor
from Components.SystemInfo import SystemInfo
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry, config, ConfigBoolean
from Components.ActionMap import ActionMap
from Components.Button import Button
class Preferences(Screen, ConfigListScreen):
def __init__(self, session):
Screen.__init__(self, session)
self.list = [
getConfigListEntry(_("Start emulator at:"), config.sifteam.emudelay),
getConfigListEntry(_("Skin developer mode:"), config.sifteam.skindevelopermode),
getConfigListEntry(_("Video green switch enable 4:3 letterbox:"), config.sifteam.switch_4_3_letterbox),
getConfigListEntry(_("Video green switch enable 4:3 panscan:"), config.sifteam.switch_4_3_panscan),
getConfigListEntry(_("Video green switch enable 16:9:"), config.sifteam.switch_16_9),
getConfigListEntry(_("Video green switch enable 16:9 always:"), config.sifteam.switch_16_9_always),
getConfigListEntry(_("Video green switch enable 16:9 letterbox:"), config.sifteam.switch_16_9_letterbox),
getConfigListEntry(_("Video green switch enable 16:10 letterbox:"), config.sifteam.switch_16_10_letterbox),
getConfigListEntry(_("Video green switch enable 16:10 panscan:"), config.sifteam.switch_16_10_panscan)
]
ConfigListScreen.__init__(self, self.list, session = session)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"red": self.ok,
#"green": self.green,
"blue": self.keyCancel,
"cancel": self.keyCancel,
}, -2)
self["key_green"] = Button("")
self["key_red"] = Button(_("Ok"))
self["key_blue"] = Button(_("Exit"))
self["key_yellow"] = Button("")
def ok(self):
self.keySave()
| SIFTeam/enigma2 | lib/python/SIFTeam/Preferences.py | Python | gpl-2.0 | 1,786 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras discretization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers.preprocessing import discretization
from tensorflow.python.keras.layers.preprocessing import discretization_v1
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
def get_layer_class():
if context.executing_eagerly():
return discretization.Discretization
else:
return discretization_v1.Discretization
@keras_parameterized.run_all_keras_modes
class DiscretizationTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_bucketize_with_explicit_buckets_integer(self):
input_array = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])
expected_output = [[0, 1, 3, 1], [0, 3, 2, 0]]
expected_output_shape = [None, 4]
input_data = keras.Input(shape=(4,))
layer = discretization.Discretization(bins=[0., 1., 2.])
bucket_data = layer(input_data)
self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
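  # Editor's illustrative note: with bins=[0., 1., 2.] the expected outputs above
  # correspond to assigning each value the number of bin edges strictly below it,
  # e.g. -1.5 -> 0, 0.5 -> 1, 1.3 -> 2, 3.4 -> 3.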
def test_bucketize_with_explicit_buckets_int_input(self):
input_array = np.array([[-1, 1, 3, 0], [0, 3, 1, 0]], dtype=np.int64)
expected_output = [[0, 2, 3, 1], [1, 3, 2, 1]]
expected_output_shape = [None, 4]
input_data = keras.Input(shape=(4,), dtype=dtypes.int64)
layer = discretization.Discretization(bins=[-.5, 0.5, 1.5])
bucket_data = layer(input_data)
self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bucketize_with_explicit_buckets_sparse_float_input(self):
indices = [[0, 1], [0, 2], [1, 1]]
input_array = sparse_tensor.SparseTensor(
indices=indices, values=[-1.5, 1.0, 3.4], dense_shape=[2, 3])
expected_output = [0, 2, 3]
input_data = keras.Input(shape=(3,), dtype=dtypes.float32, sparse=True)
layer = discretization.Discretization(bins=[-.5, 0.5, 1.5])
bucket_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(indices, output_dataset.indices)
self.assertAllEqual(expected_output, output_dataset.values)
def test_bucketize_with_explicit_buckets_ragged_float_input(self):
input_array = ragged_factory_ops.constant([[-1.5, 1.0, 3.4, .5],
[0.0, 3.0, 1.3]])
expected_output = [[0, 1, 3, 1], [0, 3, 2]]
expected_output_shape = [None, None]
input_data = keras.Input(shape=(None,), ragged=True)
layer = discretization.Discretization(bins=[0., 1., 2.])
bucket_data = layer(input_data)
self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bucketize_with_explicit_buckets_ragged_int_input(self):
input_array = ragged_factory_ops.constant([[-1, 1, 3, 0], [0, 3, 1]],
dtype=dtypes.int64)
expected_output = [[0, 2, 3, 1], [1, 3, 2]]
expected_output_shape = [None, None]
input_data = keras.Input(shape=(None,), ragged=True, dtype=dtypes.int64)
layer = discretization.Discretization(bins=[-.5, 0.5, 1.5])
bucket_data = layer(input_data)
self.assertAllEqual(expected_output_shape, bucket_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_bucketize_with_explicit_buckets_sparse_int_input(self):
indices = [[0, 1], [0, 2], [1, 1]]
input_array = sparse_tensor.SparseTensor(
indices=indices, values=[-1, 1, 3], dense_shape=[2, 3])
expected_output = [0, 2, 3]
input_data = keras.Input(shape=(3,), dtype=dtypes.int32, sparse=True)
layer = discretization.Discretization(bins=[-.5, 0.5, 1.5])
bucket_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=bucket_data)
output_dataset = model.predict(input_array, steps=1)
self.assertAllEqual(indices, output_dataset.indices)
self.assertAllEqual(expected_output, output_dataset.values)
@parameterized.named_parameters([
{
"testcase_name": "2d_single_element",
"adapt_data": np.array([[1.], [2.], [3.], [4.], [5.]]),
"test_data": np.array([[1.], [2.], [3.]]),
"use_dataset": True,
"expected": np.array([[0], [1], [2]]),
"num_bins": 5,
"epsilon": 0.01
}, {
"testcase_name": "2d_multi_element",
"adapt_data": np.array([[1., 6.], [2., 7.], [3., 8.], [4., 9.],
[5., 10.]]),
"test_data": np.array([[1., 10.], [2., 6.], [3., 8.]]),
"use_dataset": True,
"expected": np.array([[0, 4], [0, 2], [1, 3]]),
"num_bins": 5,
"epsilon": 0.01
}, {
"testcase_name": "1d_single_element",
"adapt_data": np.array([3., 2., 1., 5., 4.]),
"test_data": np.array([1., 2., 3.]),
"use_dataset": True,
"expected": np.array([0, 1, 2]),
"num_bins": 5,
"epsilon": 0.01
}, {
"testcase_name": "300_batch_1d_single_element_1",
"adapt_data": np.arange(300),
"test_data": np.arange(300),
"use_dataset": True,
"expected":
np.concatenate([np.zeros(101), np.ones(99), 2 * np.ones(100)]),
"num_bins": 3,
"epsilon": 0.01
}, {
"testcase_name": "300_batch_1d_single_element_2",
"adapt_data": np.arange(300) ** 2,
"test_data": np.arange(300) ** 2,
"use_dataset": True,
"expected":
np.concatenate([np.zeros(101), np.ones(99), 2 * np.ones(100)]),
"num_bins": 3,
"epsilon": 0.01
}, {
"testcase_name": "300_batch_1d_single_element_large_epsilon",
"adapt_data": np.arange(300),
"test_data": np.arange(300),
"use_dataset": True,
"expected": np.concatenate([np.zeros(137), np.ones(163)]),
"num_bins": 2,
"epsilon": 0.1
}])
def test_layer_computation(self, adapt_data, test_data, use_dataset,
expected, num_bins=5, epsilon=0.01):
input_shape = tuple(list(test_data.shape)[1:])
np.random.shuffle(adapt_data)
if use_dataset:
# Keras APIs expect batched datasets
adapt_data = dataset_ops.Dataset.from_tensor_slices(adapt_data).batch(
test_data.shape[0] // 2)
test_data = dataset_ops.Dataset.from_tensor_slices(test_data).batch(
test_data.shape[0] // 2)
cls = get_layer_class()
layer = cls(epsilon=epsilon, bins=num_bins)
layer.adapt(adapt_data)
input_data = keras.Input(shape=input_shape)
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
output_data = model.predict(test_data)
self.assertAllClose(expected, output_data)
@parameterized.named_parameters(
{
"num_bins": 5,
"data": np.array([[1.], [2.], [3.], [4.], [5.]]),
"expected": {
"bins": np.array([1., 2., 3., 4., np.Inf])
},
"testcase_name": "2d_single_element_all_bins"
}, {
"num_bins": 5,
"data": np.array([[1., 6.], [2., 7.], [3., 8.], [4., 9.], [5., 10.]]),
"expected": {
"bins": np.array([2., 4., 6., 8., np.Inf])
},
"testcase_name": "2d_multi_element_all_bins",
}, {
"num_bins": 3,
"data": np.array([[0.], [1.], [2.], [3.], [4.], [5.]]),
"expected": {
"bins": np.array([1., 3., np.Inf])
},
"testcase_name": "2d_single_element_3_bins"
})
def test_combiner_computation(self, num_bins, data, expected):
epsilon = 0.01
combiner = discretization.Discretization.DiscretizingCombiner(epsilon,
num_bins)
self.validate_accumulator_extract(combiner, data, expected)
if __name__ == "__main__":
test.main()
| freedomtan/tensorflow | tensorflow/python/keras/layers/preprocessing/discretization_test.py | Python | apache-2.0 | 9,949 |
"""Machine limits for Float32 and Float64 and (long double) if available...
"""
__all__ = ['finfo', 'iinfo']
import warnings
from .machar import MachAr
from .overrides import set_module
from . import numeric
from . import numerictypes as ntypes
from .numeric import array, inf
from .umath import log10, exp2
from . import umath
def _fr0(a):
"""fix rank-0 --> rank-1"""
if a.ndim == 0:
a = a.copy()
a.shape = (1,)
return a
def _fr1(a):
"""fix rank > 0 --> rank-0"""
if a.size == 1:
a = a.copy()
a.shape = ()
return a
class MachArLike:
""" Object to simulate MachAr instance """
def __init__(self,
ftype,
*, eps, epsneg, huge, tiny, ibeta, **kwargs):
params = _MACHAR_PARAMS[ftype]
float_conv = lambda v: array([v], ftype)
float_to_float = lambda v : _fr1(float_conv(v))
float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype))
self.title = params['title']
# Parameter types same as for discovered MachAr object.
self.epsilon = self.eps = float_to_float(eps)
self.epsneg = float_to_float(epsneg)
self.xmax = self.huge = float_to_float(huge)
self.xmin = self.tiny = float_to_float(tiny)
self.ibeta = params['itype'](ibeta)
self.__dict__.update(kwargs)
self.precision = int(-log10(self.eps))
self.resolution = float_to_float(float_conv(10) ** (-self.precision))
self._str_eps = float_to_str(self.eps)
self._str_epsneg = float_to_str(self.epsneg)
self._str_xmin = float_to_str(self.xmin)
self._str_xmax = float_to_str(self.xmax)
self._str_resolution = float_to_str(self.resolution)
_convert_to_float = {
ntypes.csingle: ntypes.single,
ntypes.complex_: ntypes.float_,
ntypes.clongfloat: ntypes.longfloat
}
# Parameters for creating MachAr / MachAr-like objects
_title_fmt = 'numpy {} precision floating point number'
_MACHAR_PARAMS = {
ntypes.double: dict(
itype = ntypes.int64,
fmt = '%24.16e',
title = _title_fmt.format('double')),
ntypes.single: dict(
itype = ntypes.int32,
fmt = '%15.7e',
title = _title_fmt.format('single')),
ntypes.longdouble: dict(
itype = ntypes.longlong,
fmt = '%s',
title = _title_fmt.format('long double')),
ntypes.half: dict(
itype = ntypes.int16,
fmt = '%12.5e',
title = _title_fmt.format('half'))}
# Key to identify the floating point type. Key is result of
# ftype('-0.1').newbyteorder('<').tobytes()
# See:
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
_KNOWN_TYPES = {}
def _register_type(machar, bytepat):
_KNOWN_TYPES[bytepat] = machar
_float_ma = {}
def _register_known_types():
# Known parameters for float16
# See docstring of MachAr class for description of parameters.
f16 = ntypes.float16
float16_ma = MachArLike(f16,
machep=-10,
negep=-11,
minexp=-14,
maxexp=16,
it=10,
iexp=5,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(f16(-10)),
epsneg=exp2(f16(-11)),
huge=f16(65504),
tiny=f16(2 ** -14))
_register_type(float16_ma, b'f\xae')
_float_ma[16] = float16_ma
# Known parameters for float32
f32 = ntypes.float32
float32_ma = MachArLike(f32,
machep=-23,
negep=-24,
minexp=-126,
maxexp=128,
it=23,
iexp=8,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(f32(-23)),
epsneg=exp2(f32(-24)),
huge=f32((1 - 2 ** -24) * 2**128),
tiny=exp2(f32(-126)))
_register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
_float_ma[32] = float32_ma
# Known parameters for float64
f64 = ntypes.float64
epsneg_f64 = 2.0 ** -53.0
tiny_f64 = 2.0 ** -1022.0
float64_ma = MachArLike(f64,
machep=-52,
negep=-53,
minexp=-1022,
maxexp=1024,
it=52,
iexp=11,
ibeta=2,
irnd=5,
ngrd=0,
eps=2.0 ** -52.0,
epsneg=epsneg_f64,
huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
tiny=tiny_f64)
_register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
_float_ma[64] = float64_ma
# Known parameters for IEEE 754 128-bit binary float
ld = ntypes.longdouble
epsneg_f128 = exp2(ld(-113))
tiny_f128 = exp2(ld(-16382))
# Ignore runtime error when this is not f128
with numeric.errstate(all='ignore'):
huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
float128_ma = MachArLike(ld,
machep=-112,
negep=-113,
minexp=-16382,
maxexp=16384,
it=112,
iexp=15,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(ld(-112)),
epsneg=epsneg_f128,
huge=huge_f128,
tiny=tiny_f128)
# IEEE 754 128-bit binary float
_register_type(float128_ma,
b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
_float_ma[128] = float128_ma
# Known parameters for float80 (Intel 80-bit extended precision)
epsneg_f80 = exp2(ld(-64))
tiny_f80 = exp2(ld(-16382))
# Ignore runtime error when this is not f80
with numeric.errstate(all='ignore'):
huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
float80_ma = MachArLike(ld,
machep=-63,
negep=-64,
minexp=-16382,
maxexp=16384,
it=63,
iexp=15,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(ld(-63)),
epsneg=epsneg_f80,
huge=huge_f80,
tiny=tiny_f80)
# float80, first 10 bytes containing actual storage
_register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
_float_ma[80] = float80_ma
# Guessed / known parameters for double double; see:
# https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
    # These numbers have the same exponent range as float64, but an extended number of
# digits in the significand.
huge_dd = (umath.nextafter(ld(inf), ld(0))
if hasattr(umath, 'nextafter') # Missing on some platforms?
else float64_ma.huge)
float_dd_ma = MachArLike(ld,
machep=-105,
negep=-106,
minexp=-1022,
maxexp=1024,
it=105,
iexp=11,
ibeta=2,
irnd=5,
ngrd=0,
eps=exp2(ld(-105)),
epsneg= exp2(ld(-106)),
huge=huge_dd,
tiny=exp2(ld(-1022)))
# double double; low, high order (e.g. PPC 64)
_register_type(float_dd_ma,
b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
# double double; high, low order (e.g. PPC 64 le)
_register_type(float_dd_ma,
b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
_float_ma['dd'] = float_dd_ma
def _get_machar(ftype):
""" Get MachAr instance or MachAr-like instance
Get parameters for floating point type, by first trying signatures of
various known floating point types, then, if none match, attempting to
identify parameters by analysis.
Parameters
----------
ftype : class
Numpy floating point type class (e.g. ``np.float64``)
Returns
-------
ma_like : instance of :class:`MachAr` or :class:`MachArLike`
Object giving floating point parameters for `ftype`.
Warns
-----
UserWarning
If the binary signature of the float type is not in the dictionary of
known float types.
"""
params = _MACHAR_PARAMS.get(ftype)
if params is None:
raise ValueError(repr(ftype))
# Detect known / suspected types
key = ftype('-0.1').newbyteorder('<').tobytes()
ma_like = _KNOWN_TYPES.get(key)
# Could be 80 bit == 10 byte extended precision, where last bytes can be
# random garbage. Try comparing first 10 bytes to pattern.
if ma_like is None and ftype == ntypes.longdouble:
ma_like = _KNOWN_TYPES.get(key[:10])
if ma_like is not None:
return ma_like
# Fall back to parameter discovery
warnings.warn(
'Signature {} for {} does not match any known type: '
'falling back to type probe function'.format(key, ftype),
UserWarning, stacklevel=2)
return _discovered_machar(ftype)
def _discovered_machar(ftype):
""" Create MachAr instance with found information on float types
"""
params = _MACHAR_PARAMS[ftype]
return MachAr(lambda v: array([v], ftype),
lambda v:_fr0(v.astype(params['itype']))[0],
lambda v:array(_fr0(v)[0], ftype),
lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
params['title'])
@set_module('numpy')
class finfo:
"""
finfo(dtype)
Machine limits for floating point types.
Attributes
----------
bits : int
The number of bits occupied by the type.
eps : float
The difference between 1.0 and the next smallest representable float
larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
standard, ``eps = 2**-52``, approximately 2.22e-16.
epsneg : float
The difference between 1.0 and the next smallest representable float
less than 1.0. For example, for 64-bit binary floats in the IEEE-754
standard, ``epsneg = 2**-53``, approximately 1.11e-16.
iexp : int
The number of bits in the exponent portion of the floating point
representation.
machar : MachAr
The object which calculated these parameters and holds more
detailed information.
machep : int
The exponent that yields `eps`.
max : floating point number of the appropriate type
The largest representable number.
maxexp : int
The smallest positive power of the base (2) that causes overflow.
min : floating point number of the appropriate type
The smallest representable number, typically ``-max``.
minexp : int
The most negative power of the base (2) consistent with there
being no leading 0's in the mantissa.
negep : int
The exponent that yields `epsneg`.
nexp : int
The number of bits in the exponent including its sign and bias.
nmant : int
The number of bits in the mantissa.
precision : int
The approximate number of decimal digits to which this kind of
float is precise.
resolution : floating point number of the appropriate type
The approximate decimal resolution of this type, i.e.,
``10**-precision``.
tiny : float
The smallest positive usable number. Type of `tiny` is an
appropriate floating point type.
Parameters
----------
dtype : float, dtype, or instance
Kind of floating point data-type about which to get information.
See Also
--------
MachAr : The implementation of the tests that produce this information.
iinfo : The equivalent for integer data types.
spacing : The distance between a value and the nearest adjacent number
nextafter : The next floating point value after x1 towards x2
Notes
-----
For developers of NumPy: do not instantiate this at the module level.
The initial calculation of these parameters is expensive and negatively
impacts import times. These objects are cached, so calling ``finfo()``
repeatedly inside your functions is not a problem.
"""
_finfo_cache = {}
def __new__(cls, dtype):
try:
dtype = numeric.dtype(dtype)
except TypeError:
# In case a float instance was given
dtype = numeric.dtype(type(dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
dtypes = [dtype]
newdtype = numeric.obj2sctype(dtype)
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
if not issubclass(dtype, numeric.inexact):
raise ValueError("data type %r not inexact" % (dtype))
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
if not issubclass(dtype, numeric.floating):
newdtype = _convert_to_float[dtype]
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
obj = cls._finfo_cache.get(dtype, None)
if obj is not None:
return obj
obj = object.__new__(cls)._init(dtype)
for dt in dtypes:
cls._finfo_cache[dt] = obj
return obj
def _init(self, dtype):
self.dtype = numeric.dtype(dtype)
machar = _get_machar(dtype)
for word in ['precision', 'iexp',
'maxexp', 'minexp', 'negep',
'machep']:
setattr(self, word, getattr(machar, word))
for word in ['tiny', 'resolution', 'epsneg']:
setattr(self, word, getattr(machar, word).flat[0])
self.bits = self.dtype.itemsize * 8
self.max = machar.huge.flat[0]
self.min = -self.max
self.eps = machar.eps.flat[0]
self.nexp = machar.iexp
self.nmant = machar.it
self.machar = machar
self._str_tiny = machar._str_xmin.strip()
self._str_max = machar._str_xmax.strip()
self._str_epsneg = machar._str_epsneg.strip()
self._str_eps = machar._str_eps.strip()
self._str_resolution = machar._str_resolution.strip()
return self
def __str__(self):
fmt = (
'Machine parameters for %(dtype)s\n'
'---------------------------------------------------------------\n'
'precision = %(precision)3s resolution = %(_str_resolution)s\n'
'machep = %(machep)6s eps = %(_str_eps)s\n'
'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
'maxexp = %(maxexp)6s max = %(_str_max)s\n'
'nexp = %(nexp)6s min = -max\n'
'---------------------------------------------------------------\n'
)
return fmt % self.__dict__
def __repr__(self):
c = self.__class__.__name__
d = self.__dict__.copy()
d['klass'] = c
return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
@set_module('numpy')
class iinfo:
"""
iinfo(type)
Machine limits for integer types.
Attributes
----------
bits : int
The number of bits occupied by the type.
min : int
The smallest integer expressible by the type.
max : int
The largest integer expressible by the type.
Parameters
----------
int_type : integer type, dtype, or instance
The kind of integer data type to get information about.
See Also
--------
finfo : The equivalent for floating point data types.
Examples
--------
With types:
>>> ii16 = np.iinfo(np.int16)
>>> ii16.min
-32768
>>> ii16.max
32767
>>> ii32 = np.iinfo(np.int32)
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
With instances:
>>> ii32 = np.iinfo(np.int32(10))
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
"""
_min_vals = {}
_max_vals = {}
def __init__(self, int_type):
try:
self.dtype = numeric.dtype(int_type)
except TypeError:
self.dtype = numeric.dtype(type(int_type))
self.kind = self.dtype.kind
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
if self.kind not in 'iu':
raise ValueError("Invalid integer data type %r." % (self.kind,))
@property
def min(self):
"""Minimum value of given dtype."""
if self.kind == 'u':
return 0
else:
try:
val = iinfo._min_vals[self.key]
except KeyError:
val = int(-(1 << (self.bits-1)))
iinfo._min_vals[self.key] = val
return val
@property
def max(self):
"""Maximum value of given dtype."""
try:
val = iinfo._max_vals[self.key]
except KeyError:
if self.kind == 'u':
val = int((1 << self.bits) - 1)
else:
val = int((1 << (self.bits-1)) - 1)
iinfo._max_vals[self.key] = val
return val
def __str__(self):
"""String representation."""
fmt = (
'Machine parameters for %(dtype)s\n'
'---------------------------------------------------------------\n'
'min = %(min)s\n'
'max = %(max)s\n'
'---------------------------------------------------------------\n'
)
return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
def __repr__(self):
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
self.min, self.max, self.dtype)
|
WarrenWeckesser/numpy
|
numpy/core/getlimits.py
|
Python
|
bsd-3-clause
| 19,076
|
import pdb,sys
from threading import Thread
from django.contrib import messages
from django.shortcuts import render
#from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
from django.views.generic.base import View
# Create your views here.
from .email import send_contact_message
from .forms import ContactForm
from .models import ContactMessage
class ContactFormTagView(TemplateView):
template_name = "tag-contact-form-ajax.html"
def post(self, request):
#pdb.set_trace()
success='false'
form = ContactForm(request.POST)
form_valid = form.is_valid()
cleaned_data = form.clean()
if form_valid:
try:
form.save()
thread = Thread(target=send_contact_message, args=(form.instance,))
thread.start()
form = ContactForm()
success = 'true'
messages.success(request,_("Thank you!! We can't wait to read it!"))
except:
messages.error(request,_('Sorry! We could not send your email.'))
form_errors = form.errors.as_json()
form.anti_spam()
data_context = {'success':success,'form_errors':form_errors}
return render(request, self.template_name, data_context, content_type='application/json')
class ContactFormView(View):
template_name = 'contact-form.html'
def get(self, request):
form = ContactForm()
form.anti_spam()
data_context = {'form':form}
return render(request, self.template_name, data_context)
def post(self, request):
form = ContactForm(request.POST)
form_valid = form.is_valid()
cleaned_data = form.clean()
if form_valid:
try:
form.save()
thread = Thread(target=send_contact_message, args=(form.instance,))
thread.start()
form = ContactForm()
messages.success(self.request,_("Thank you!! We can't wait to read it!"))
except:
#pdb.set_trace()
messages.error(self.request,_('Sorry! We could not send your email.'))
form.anti_spam()
data_context = {'form':form}
return render(request, self.template_name, data_context)
|
nirvaris/nirvaris-contactform
|
contactform/views.py
|
Python
|
mit
| 2,389
|
u={"1":"1","2":"2"}
u=u.get("1")
g=u["1"]
print(g)
print(u)
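# Follow-up sketch (added for clarity, not part of the original exercise):
# dict.get returns None, or a supplied default, when the key is missing,
# whereas indexing with [] would raise KeyError.
print(u.get("3"))             # None
print(u.get("3", "missing"))  # "missing"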
|
xiaoyongaa/ALL
|
网络编程第四周/11.py
|
Python
|
apache-2.0
| 60
|
'''Working with Python 3 list data (List)
1. Understand what list data (List) is
[1, 2, 3] - grouped data enclosed in square brackets and separated by commas is a list
[1, 2, 3, "data"] is also a list
If variable = [1, 2, 3], then variable[0] = 1 and variable[1] = 2,
which shows that list indices start at 0.
The order of list elements can be rearranged, and lists can be sorted.
>>> sorted([3, 1, 2])
[1, 2, 3]
Reverse sorting is also possible:
>>> sorted([3, 1, 2], reverse=True)
[3, 2, 1]
For more on list (List) usage, see python3教材_2.pdf,
pages 4-11 and pages 27-30.
Course material: https://sites.google.com/a/mde.tw/service/2012_fall_c1
2. Understand what tuple data (Tuple) is
(1, 2, 3) - grouped data enclosed in parentheses and separated by commas
is a tuple.
(1, 2, "data") is also a tuple
If variable = (1, 2, 3), then variable[0] = 1 and variable[1] = 2,
which shows that tuple indices also start at 0.
The order of tuple elements cannot be rearranged, so a tuple cannot be
sorted in place (that is, variable.sort() is not available, but
sorted(variable) can produce a new, sorted list). A tuple can, however,
be converted to a list with the list() function, after which it can be
sorted:
'''
a = [3, 2, 4, 5, 1]
# sort the list
b = sorted(a)
# sort in reverse order
c = sorted(a, reverse=True)
print(b, c)
元組 = (1, 2, 3)
串列 = list(元組)
# A tuple must first be converted to a list with list() before it can be sorted, because the order of a tuple's elements cannot be changed
print(sorted(串列))
print(sorted(串列, reverse=True))
# 課程資料 (the course data) is a list whose elements are tuples; the list can be sorted, while each tuple keeps a fixed order
課程資料 =[(2,1,"1,2 課程"),(1,2,"2,1 課程"),(3,3,"3,3 課程")]
print(sorted(課程資料, key=lambda 課程: 課程[0], reverse=True))
print(sorted(課程資料, key=lambda 課程: 課程[1], reverse=True))
print(sorted(課程資料, key=lambda 課程: 課程[2], reverse=True))
'''
The program above uses lambda functions. A lambda function is a function
without a given name (that is, at run time it is not bound to any particular
function name), and lambda is the constructor for this kind of anonymous
function. In key=lambda 課程: 課程[0], the key used for sorting is set to an
anonymous lambda function whose input is 課程 (one course record) and whose
return value is 課程[0]; in other words, the sort key for that call is the
element at index 0 of each record in the course data. Put differently,
sorted(課程資料, key=lambda 課程: 課程[0]) sorts the records by that
first field.
'''
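# A small extra sketch (added for illustration, not part of the original
# lesson): the same key=lambda technique on records with English field names.
records = [(2, "banana"), (1, "apple"), (3, "cherry")]
# Sort by the numeric first field of each tuple.
print(sorted(records, key=lambda record: record[0]))
# Sort by the second field (the fruit name), in reverse alphabetical order.
print(sorted(records, key=lambda record: record[1], reverse=True))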
|
2014c2g5/2014cadp
|
wsgi/local_data/brython_programs/sorted1.py
|
Python
|
gpl-3.0
| 2,520
|
from django.utils.encoding import smart_str
try:
from django.utils.encoding import force_text as force_unicode
except ImportError:
from django.utils.encoding import force_unicode
try:
from django.utils.encoding import smart_text as smart_unicode
except ImportError:
try:
from django.utils.encoding import smart_unicode
except ImportError:
from django.forms.util import smart_unicode
from django.forms.forms import pretty_name
from django.db.models.fields import FieldDoesNotExist
from django.utils import formats
from mongoengine import fields
from mongodbforms.util import init_document_options
import collections
class RelationWrapper(object):
"""
    Wraps a document referenced from a ReferenceField with an interface similar to
django's ForeignKeyField.rel
"""
def __init__(self, document):
self.to = init_document_options(document)
def is_django_user_model(user):
"""
Checks if a user model is compatible with Django's
recent changes. Django requires User models to have
    an int pk, so we check here whether it does (mongoengine's doesn't)
"""
try:
if hasattr(user, 'pk'):
int(user.pk)
else:
int(user)
except (ValueError, TypeError):
return False
return True
def label_for_field(name, model, model_admin=None, return_attr=False):
attr = None
model._meta = init_document_options(model)
try:
field = model._meta.get_field_by_name(name)[0]
label = field.name.replace('_', ' ')
except FieldDoesNotExist:
if name == "__unicode__":
label = force_unicode(model._meta.verbose_name)
elif name == "__str__":
label = smart_str(model._meta.verbose_name)
else:
if isinstance(name, collections.Callable):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif isinstance(attr, collections.Callable):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, fields.BooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, fields.DateTimeField):
return formats.localize(value)
elif isinstance(field, fields.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, fields.FloatField):
return formats.number_format(value)
else:
return smart_unicode(value)
|
asm-products/movie-database-service
|
mongoadmin/util.py
|
Python
|
agpl-3.0
| 3,673
|
__author__ = 'Luke'
|
lukeroge/multithreaded_uuid_converter
|
util/__init__.py
|
Python
|
cc0-1.0
| 20
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-09-27 21:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('forum', '0013_board_locked'),
]
operations = [
# Removed to prevent crashing when migrating
# migrations.AddField(
# model_name='thread',
# name='originalPost',
# field=models.IntegerField(default=False),
# ),
# migrations.AddField(
# model_name='thread',
# name='pinned',
# field=models.BooleanField(default=False),
# ),
]
|
WarwickAnimeSoc/aniMango
|
forum/migrations/0014_auto_20180927_2204.py
|
Python
|
mit
| 670
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
from oslo_utils import timeutils
import webob.exc
from cinder.api.contrib import hosts as os_hosts
from cinder import context
from cinder import db
from cinder import test
created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
curr_time = datetime.datetime(2013, 7, 3, 0, 0, 1)
SERVICE_LIST = [
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'}]
LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time},
{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time},
{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time},
{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
'host_name': 'test.host.1', 'last-update': curr_time}]
def stub_utcnow():
return datetime.datetime(2013, 7, 3, 0, 0, 2)
def stub_service_get_all(self, req):
return SERVICE_LIST
class FakeRequest(object):
environ = {'cinder.context': context.get_admin_context()}
GET = {}
class FakeRequestWithcinderZone(object):
environ = {'cinder.context': context.get_admin_context()}
GET = {'zone': 'cinder'}
class HostTestCase(test.TestCase):
"""Test Case for hosts."""
def setUp(self):
super(HostTestCase, self).setUp()
self.controller = os_hosts.HostController()
self.req = FakeRequest()
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
self.stubs.Set(timeutils, 'utcnow', stub_utcnow)
def _test_host_update(self, host, key, val, expected_value):
body = {key: val}
result = self.controller.update(self.req, host, body=body)
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
"""Verify that the volume hosts are returned."""
hosts = os_hosts._list_hosts(self.req)
self.assertEqual(hosts, LIST_RESPONSE)
cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume')
expected = [host for host in LIST_RESPONSE
if host['service'] == 'cinder-volume']
self.assertEqual(cinder_hosts, expected)
def test_list_hosts_with_zone(self):
req = FakeRequestWithcinderZone()
hosts = os_hosts._list_hosts(req)
self.assertEqual(hosts, LIST_RESPONSE)
def test_bad_status_value(self):
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body={'status': 'bad'})
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
self.req,
'test.host.1',
body={'status': 'disablabc'})
def test_bad_update_key(self):
bad_body = {'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)
def test_bad_update_key_and_correct_udpate_key(self):
bad_body = {'status': 'disable', 'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)
def test_good_udpate_keys(self):
body = {'status': 'disable'}
self.assertRaises(NotImplementedError, self.controller.update,
self.req, 'test.host.1', body=body)
def test_bad_host(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
'bogus_host_name',
body={'disabled': 0})
def test_show_forbidden(self):
self.req.environ['cinder.context'].is_admin = False
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.show,
self.req, dest)
self.req.environ['cinder.context'].is_admin = True
def test_show_host_not_exist(self):
"""A host given as an argument does not exists."""
self.req.environ['cinder.context'].is_admin = True
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
self.req, dest)
class HostSerializerTest(test.TestCase):
def setUp(self):
super(HostSerializerTest, self).setUp()
self.deserializer = os_hosts.HostDeserializer()
def test_index_serializer(self):
serializer = os_hosts.HostIndexTemplate()
text = serializer.serialize({"hosts": LIST_RESPONSE})
tree = etree.fromstring(text)
self.assertEqual('hosts', tree.tag)
self.assertEqual(len(LIST_RESPONSE), len(tree))
for i in range(len(LIST_RESPONSE)):
self.assertEqual('host', tree[i].tag)
self.assertEqual(LIST_RESPONSE[i]['service-status'],
tree[i].get('service-status'))
self.assertEqual(LIST_RESPONSE[i]['service'],
tree[i].get('service'))
self.assertEqual(LIST_RESPONSE[i]['zone'],
tree[i].get('zone'))
self.assertEqual(LIST_RESPONSE[i]['service-state'],
tree[i].get('service-state'))
self.assertEqual(LIST_RESPONSE[i]['host_name'],
tree[i].get('host_name'))
self.assertEqual(str(LIST_RESPONSE[i]['last-update']),
tree[i].get('last-update'))
def test_update_serializer_with_status(self):
exemplar = dict(host='test.host.1', status='enabled')
serializer = os_hosts.HostUpdateTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('host', tree.tag)
for key, value in exemplar.items():
self.assertEqual(value, tree.get(key))
def test_update_deserializer(self):
exemplar = dict(status='enabled', foo='bar')
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<updates><status>enabled</status><foo>bar</foo></updates>')
result = self.deserializer.deserialize(intext)
self.assertEqual(dict(body=exemplar), result)
|
JioCloud/cinder
|
cinder/tests/unit/api/contrib/test_hosts.py
|
Python
|
apache-2.0
| 8,038
|
DATASET_DIR = './voxceleb'
AUDIO_DIR = './voxceleb/voxceleb1_wav'
NUM_PREVIOUS_FRAME = 9
#NUM_PREVIOUS_FRAME = 13
NUM_NEXT_FRAME = 23
NUM_FRAMES = NUM_PREVIOUS_FRAME + NUM_NEXT_FRAME
USE_LOGSCALE = True
USE_DELTA = False
USE_SCALE = False
SAMPLE_RATE = 16000
TRUNCATE_SOUND_FIRST_SECONDS = 0.5
FILTER_BANK = 64
|
qqueing/DeepSpeaker-pytorch
|
constants.py
|
Python
|
mit
| 316
|
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script runs the LLVM regression tests and the LLVM testsuite.
These tests are tightly coupled to the LLVM build, and require that
LLVM has been built on this host by build.sh. It also assumes that
the test suite source has been checked out using gclient (build.sh
git-sync).
The testsuite must be configured, then run, then reported.
Currently it requires clean in between runs of different arches.
The regression tests require nothing more than running 'make check'
in the build directory, but currently not all of the upstream tests
pass in our source tree, so we currently use the same
known-failures mechanism that the testsuite uses. Once we eliminate
the locally-caused failures, we should expect 'make check' to
always pass and can get rid of the regression known failures.
"""
import contextlib
import datetime
import os
import optparse
import shutil
import subprocess
import sys
import parse_llvm_test_report
@contextlib.contextmanager
def remember_cwd():
"""Provides a shell 'pushd'/'popd' pattern.
Use as:
with remember_cwd():
os.chdir(...)
...
# Original cwd restored here
"""
curdir = os.getcwd()
try:
yield
finally:
os.chdir(curdir)
def ParseCommandLine(argv):
usage = """%prog [options]
Specify the tests or test subsets in the options; common tests are
--llvm-regression and --testsuite-all.
The --opt arguments control the frontend/backend optimization flags.
The default set is {O3f,O2b}, other options are {O0f,O0b}.
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option('--arch', dest='arch',
help=('Architecture to test, e.g. x86-32, x86-64, arm; ' +
'required for most tests'))
parser.add_option('--opt', dest='opt_attributes', action='append',
default=[],
help=('Add optimization level attribute of ' +
'test configuration'))
parser.add_option('--llvm-regression', dest='run_llvm_regression',
action='store_true', default=False,
help='Run the LLVM regression tests')
parser.add_option('--libcxx-tests', dest='run_libcxx_tests',
action='store_true', default=False,
help='Run the libc++ tests')
parser.add_option('--testsuite-clean', dest='testsuite_clean',
action='store_true', default=False,
help='Clean the testsuite build directory')
parser.add_option('--testsuite-prereq', dest='testsuite_prereq',
action='store_true', default=False,
help='Build the testsuite prerequisites')
parser.add_option('--testsuite-configure', dest='testsuite_configure',
action='store_true', default=False,
help='Configure the testsuite build directory')
parser.add_option('--testsuite-run', dest='testsuite_run',
action='store_true', default=False,
help='Run the testsuite (requires <arch> argument)')
parser.add_option('--testsuite-report', dest='testsuite_report',
action='store_true', default=False,
help=('Generate the testsuite report ' +
'(requires <arch> argument)'))
parser.add_option('--testsuite-all', dest='testsuite_all',
action='store_true', default=False,
help='Run all testsuite steps (requires <arch> argument)')
# The following options are specific to parse_llvm_test_report.
parser.add_option('-x', '--exclude', action='append', dest='excludes',
default=[],
help=('[--testsuite-report option] ' +
'Add list of excluded tests (expected fails)'))
parser.add_option('-c', '--check-excludes', action='store_true',
default=False, dest='check_excludes',
help=('[--testsuite-report option] ' +
'Report tests which unexpectedly pass'))
parser.add_option('-v', '--verbose', action='store_true',
default=False, dest='verbose',
help=('[--testsuite-report option] ' +
'Print compilation/run logs of failing tests'))
parser.add_option('-p', '--build-path', dest='buildpath',
help=('[--testsuite-report option] ' +
'Path to test-suite build directory'))
parser.add_option('-a', '--attribute', dest='attributes', action='append',
default=[],
help=('[--testsuite-report option] ' +
'Add attribute of test configuration (e.g. arch)'))
parser.add_option('-t', '--testsuite', action='store_true', dest='testsuite',
default=False,
help=('[--testsuite-report option] ' +
'Signify LLVM testsuite tests'))
parser.add_option('-l', '--lit', action='store_true', dest='lit',
default=False,
help=('[--testsuite-report option] ' +
'Signify LLVM LIT regression tests'))
options, args = parser.parse_args(argv)
return options, args
def Fatal(text):
"""Prints an error message and exits."""
print >> sys.stderr, text
sys.exit(1)
def ParseConfig(options):
"""Constructs a frontend/backend dict based on --opt arguments.
Args:
options: The result of OptionParser().parse_args().
Returns:
A simple dict containing keys 'frontend_opt', 'frontend_attr',
'backend_opt', and 'backend_attr', each mapped to a valid string
value. The result is a function of the --opt command-line
arguments, with defaults in place when there are too few --opt
arguments.
"""
configs = dict(O0f={'frontend_opt': '-O0', 'frontend_attr': 'O0f'},
O3f={'frontend_opt': '-O3', 'frontend_attr': 'O3f'},
O0b={'backend_opt': '-translate-fast',
'backend_attr': 'O0b'},
O2b={'backend_opt': '-O2', 'backend_attr': 'O2b'})
result = {}
# Default is pnacl-clang -O3, pnacl-translate -O2
for attr in ['O3f', 'O2b'] + options.opt_attributes:
if attr in configs:
result.update(configs[attr])
return result
def GetConfigSuffix(config):
"""Create a string to be used as a file suffix.
Args:
config: A dict that was the result of ParseConfig().
Returns:
A string that concatenates the frontend and backend attributes.
"""
return config['frontend_attr'] + '_' + config['backend_attr']
def SetupEnvironment():
"""Create an environment.
This is based on the current system, various defaults, and various
environment variables.
Returns:
A dict with various string->string mappings.
"""
env = {}
pwd = os.getcwd()
if not pwd.endswith('/native_client'):
Fatal("ERROR: must be run in native_client/ directory!\n" +
" (Current directory is " + pwd + ")")
# Simulate what's needed from common-tools.sh.
# We need PNACL_BUILDBOT, BUILD_PLATFORM, and HOST_ARCH.
env['PNACL_BUILDBOT'] = os.environ.get('PNACL_BUILDBOT', 'false')
if sys.platform == 'linux2':
env['BUILD_PLATFORM'] = 'linux'
env['BUILD_ARCH'] = os.environ.get('BUILD_ARCH', os.uname()[4])
env['HOST_ARCH'] = os.environ.get('HOST_ARCH', env['BUILD_ARCH'])
elif sys.platform == 'cygwin':
env['BUILD_PLATFORM'] = 'win'
env['HOST_ARCH'] = os.environ.get('HOST_ARCH', 'x86_32')
elif sys.platform == 'darwin':
env['BUILD_PLATFORM'] = 'mac'
env['HOST_ARCH'] = os.environ.get('HOST_ARCH', 'x86_64')
else:
Fatal("Unknown system " + sys.platform)
if env['HOST_ARCH'] in ['i386', 'i686']:
env['HOST_ARCH'] = 'x86_32'
# Set up the rest of the environment.
env['NACL_ROOT'] = pwd
env['LLVM_TESTSUITE_SRC'] = (
'{NACL_ROOT}/pnacl/git/llvm-test-suite'.format(**env))
env['LLVM_TESTSUITE_BUILD'] = (
'{NACL_ROOT}/pnacl/build/llvm-test-suite'.format(**env))
env['TC_SRC_LLVM'] = (
'{NACL_ROOT}/pnacl/git/llvm'.format(**env))
env['TC_BUILD_LLVM'] = (
'{NACL_ROOT}/pnacl/build/llvm_{HOST_ARCH}'.format(**env))
env['TC_BUILD_LIBCXX'] = (
('{NACL_ROOT}/pnacl/build/' +
'c++-stdlib-newlib-portable-libc++/pnacl-target').format(**env))
env['PNACL_CONCURRENCY'] = os.environ.get('PNACL_CONCURRENCY', '8')
  # The toolchain used may not be the one downloaded, but one that is freshly
  # built into a different directory.
  # Overriding the default here will not affect the sel_ldr
  # and IRT used to run the tests (they are controlled by run.py).
env['PNACL_TOOLCHAIN_LABEL'] = (
os.environ.get('PNACL_TOOLCHAIN_LABEL',
'pnacl_{BUILD_PLATFORM}_x86'.format(**env)))
env['PNACL_BIN'] = (
'{NACL_ROOT}/toolchain/{PNACL_TOOLCHAIN_LABEL}/bin'.format(**env))
env['PNACL_SDK_DIR'] = (
'{NACL_ROOT}/toolchain/{PNACL_TOOLCHAIN_LABEL}/sdk/lib'
.format(**env))
env['PNACL_SCRIPTS'] = '{NACL_ROOT}/pnacl/scripts'.format(**env)
env['LIT_KNOWN_FAILURES'] = ('{pwd}/pnacl/scripts/lit_known_failures.txt'
.format(pwd=pwd))
return env
def RunLitTest(testdir, testarg, env, options):
"""Run LLVM lit tests, and check failures against known failures.
Args:
testdir: Directory with the make/ninja file to test.
testarg: argument to pass to make/ninja.
env: The result of SetupEnvironment().
options: The result of OptionParser().parse_args().
Returns:
0 always
"""
with remember_cwd():
os.chdir(testdir)
maker = 'ninja' if os.path.isfile('./build.ninja') else 'make'
cmd = [maker, testarg, '-v' if maker == 'ninja' else 'VERBOSE=1']
make_pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
lines = []
# When run by a buildbot, we need to incrementally tee the 'make'
# stdout to our stdout, rather than collect its entire stdout and
# print it at the end. Otherwise the watchdog may try to kill the
# process after too long without any output.
#
# Note: We use readline instead of 'for line in make_pipe.stdout'
# because otherwise the process on the Cygwin bot seems to hang
# when the 'make' process completes (with slightly truncated
# output). The readline avoids buffering when reading from a
# pipe in Python 2, which may be complicit in the problem.
for line in iter(make_pipe.stdout.readline, ''):
if env['PNACL_BUILDBOT'] != 'false':
# The buildbots need to be fully verbose and print all output.
print str(datetime.datetime.now()) + ' ' + line,
lines.append(line)
print (str(datetime.datetime.now()) + ' ' +
"Waiting for '%s' to complete." % cmd)
make_pipe.wait()
make_stdout = ''.join(lines)
parse_options = vars(options)
parse_options['lit'] = True
parse_options['excludes'].append(env['LIT_KNOWN_FAILURES'])
parse_options['attributes'].append(env['BUILD_PLATFORM'])
print (str(datetime.datetime.now()) + ' ' +
'Parsing LIT test report output.')
parse_llvm_test_report.Report(parse_options, filecontents=make_stdout)
return 0
def EnsureSdkExists(env):
"""Ensure that the SDK directory exists. Exits if not.
Args:
env: The result of SetupEnvironment().
"""
if not os.path.isdir(env['PNACL_SDK_DIR']):
Fatal("""
ERROR: sdk dir does not seem to exist
ERROR: have you run 'pnacl/build.sh sdk newlib' ?
""")
def TestsuitePrereq(env, options):
"""Run the LLVM test suite prerequisites.
Args:
env: The result of SetupEnvironment().
options: The result of OptionParser().parse_args().
Returns:
0 for success, non-zero integer on failure.
"""
arch = options.arch or Fatal("Error: missing --arch argument")
return subprocess.call(['./scons',
'platform=' + arch,
'irt_core',
'sel_ldr',
'-j{PNACL_CONCURRENCY}'.format(**env)])
def TestsuiteRun(env, config, options):
"""Run the LLVM test suite.
Args:
env: The result of SetupEnvironment().
config: A dict that was the result of ParseConfig(). This
determines the specific optimization levels.
options: The result of OptionParser().parse_args().
Returns:
0 for success, non-zero integer on failure.
"""
arch = options.arch or Fatal("Error: missing --arch argument")
EnsureSdkExists(env)
suffix = GetConfigSuffix(config)
opt_clang = config['frontend_opt']
opt_trans = config['backend_opt']
build_path = env['LLVM_TESTSUITE_BUILD']
if not os.path.isdir(build_path):
os.makedirs(build_path)
with remember_cwd():
os.chdir(build_path)
if not os.path.exists('Makefile'):
result = TestsuiteConfigure(env)
if result:
return result
result = subprocess.call(['make',
'-j{PNACL_CONCURRENCY}'.format(**env),
'OPTFLAGS=' + opt_clang,
'PNACL_TRANSLATE_FLAGS=' + opt_trans,
'PNACL_BIN={PNACL_BIN}'.format(**env),
'PNACL_RUN={NACL_ROOT}/run.py'.format(**env),
'COLLATE=true',
'PNACL_ARCH=' + arch,
'ENABLE_PARALLEL_REPORT=true',
'DISABLE_CBE=true',
'DISABLE_JIT=true',
'RUNTIMELIMIT=850',
'TEST=pnacl',
'report.csv'])
if result:
return result
os.rename('report.pnacl.csv', 'report.pnacl.{arch}.{suffix}.csv'
.format(arch=arch, suffix=suffix))
os.rename('report.pnacl.raw.out',
('report.pnacl.{arch}.{suffix}.raw.out'
.format(arch=arch, suffix=suffix)))
return 0
def TestsuiteConfigure(env):
"""Run the LLVM test suite configure script.
Args:
env: The result of SetupEnvironment().
Returns:
0 for success, non-zero integer on failure.
"""
build_path = env['LLVM_TESTSUITE_BUILD']
if not os.path.isdir(build_path):
os.makedirs(build_path)
with remember_cwd():
os.chdir(build_path)
args = ['{LLVM_TESTSUITE_SRC}/configure'.format(**env),
'--with-llvmcc=clang',
'--with-clang={PNACL_BIN}/pnacl-clang'.format(**env),
'--with-llvmsrc={TC_SRC_LLVM}'.format(**env),
'--with-llvmobj={TC_BUILD_LLVM}'.format(**env)]
result = subprocess.call(args)
return result
def TestsuiteClean(env):
"""Clean the LLVM test suite build directory.
Args:
env: The result of SetupEnvironment().
Returns:
0 always
Raises:
OSError: The LLVM_TESTSUITE_BUILD directory couldn't be removed
for some reason.
"""
if os.path.isdir(env['LLVM_TESTSUITE_BUILD']):
shutil.rmtree(env['LLVM_TESTSUITE_BUILD'])
elif os.path.isfile(env['LLVM_TESTSUITE_BUILD']):
os.remove(env['LLVM_TESTSUITE_BUILD'])
return 0
def TestsuiteReport(env, config, options):
"""Generate a report from the prior LLVM test suite run.
Args:
env: The result of SetupEnvironment().
config: A dict that was the result of ParseConfig(). This
determines the specific optimization levels.
options: The result of OptionParser().parse_args().
Returns:
0 for success, non-zero integer on failure.
"""
arch = options.arch or Fatal("Error: missing --arch argument")
suffix = GetConfigSuffix(config)
report_file = ('{LLVM_TESTSUITE_BUILD}/report.pnacl.{arch}.{suffix}.csv'
.format(arch=arch, suffix=suffix, **env))
failures1 = '{PNACL_SCRIPTS}/testsuite_known_failures_base.txt'.format(**env)
failures2 = '{PNACL_SCRIPTS}/testsuite_known_failures_pnacl.txt'.format(**env)
parse_options = vars(options)
parse_options['excludes'].extend([failures1, failures2])
parse_options['buildpath'] = env['LLVM_TESTSUITE_BUILD']
parse_options['attributes'].extend([arch,
config['frontend_attr'],
config['backend_attr']])
parse_options['testsuite'] = True
return parse_llvm_test_report.Report(parse_options, filename=report_file)
def main(argv):
env = SetupEnvironment()
options, args = ParseCommandLine(argv[1:])
if len(args):
Fatal("Unknown arguments: " + ', '.join(args))
config = ParseConfig(options)
result = 0
# Run each specified test in sequence, and return on the first failure.
if options.run_llvm_regression:
result = result or RunLitTest(env['TC_BUILD_LLVM'], 'check-all',
env, options)
if options.run_libcxx_tests:
result = result or RunLitTest(env['TC_BUILD_LIBCXX'], 'check-libcxx',
env, options)
if options.testsuite_all or options.testsuite_prereq:
result = result or TestsuitePrereq(env, options)
if options.testsuite_all or options.testsuite_clean:
result = result or TestsuiteClean(env)
if options.testsuite_all or options.testsuite_configure:
result = result or TestsuiteConfigure(env)
if options.testsuite_all or options.testsuite_run:
result = result or TestsuiteRun(env, config, options)
if options.testsuite_all or options.testsuite_report:
result = result or TestsuiteReport(env, config, options)
return result
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
cvsuser-chromium/native_client
|
pnacl/scripts/llvm-test.py
|
Python
|
bsd-3-clause
| 17,712
|
"""Support for Xiaomi curtain."""
import logging
from homeassistant.components.cover import ATTR_POSITION, CoverDevice
from . import PY_XIAOMI_GATEWAY, XiaomiDevice
_LOGGER = logging.getLogger(__name__)
ATTR_CURTAIN_LEVEL = "curtain_level"
DATA_KEY_PROTO_V1 = "status"
DATA_KEY_PROTO_V2 = "curtain_status"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Perform the setup for Xiaomi devices."""
devices = []
for (_, gateway) in hass.data[PY_XIAOMI_GATEWAY].gateways.items():
for device in gateway.devices["cover"]:
model = device["model"]
if model == "curtain":
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = DATA_KEY_PROTO_V1
else:
data_key = DATA_KEY_PROTO_V2
devices.append(XiaomiGenericCover(device, "Curtain", data_key, gateway))
add_entities(devices)
class XiaomiGenericCover(XiaomiDevice, CoverDevice):
"""Representation of a XiaomiGenericCover."""
def __init__(self, device, name, data_key, xiaomi_hub):
"""Initialize the XiaomiGenericCover."""
self._data_key = data_key
self._pos = 0
XiaomiDevice.__init__(self, device, name, xiaomi_hub)
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self._pos
@property
def is_closed(self):
"""Return if the cover is closed."""
return self.current_cover_position <= 0
def close_cover(self, **kwargs):
"""Close the cover."""
self._write_to_hub(self._sid, **{self._data_key: "close"})
def open_cover(self, **kwargs):
"""Open the cover."""
self._write_to_hub(self._sid, **{self._data_key: "open"})
def stop_cover(self, **kwargs):
"""Stop the cover."""
self._write_to_hub(self._sid, **{self._data_key: "stop"})
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
position = kwargs.get(ATTR_POSITION)
if self._data_key == DATA_KEY_PROTO_V2:
self._write_to_hub(self._sid, **{ATTR_CURTAIN_LEVEL: position})
else:
self._write_to_hub(self._sid, **{ATTR_CURTAIN_LEVEL: str(position)})
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if ATTR_CURTAIN_LEVEL in data:
self._pos = int(data[ATTR_CURTAIN_LEVEL])
return True
return False
|
fbradyirl/home-assistant
|
homeassistant/components/xiaomi_aqara/cover.py
|
Python
|
apache-2.0
| 2,535
|
#!/usr/bin/env python
from setuptools import setup
entry_points = {'console_scripts': [
'vst-pawplot = surveytools.quicklook:vst_pawplot_main',
'vphas-quicklook = surveytools.quicklook:vphas_quicklook_main',
'vphas-filenames = surveytools.footprint:vphas_filenames_main',
'vphas-offset-catalogue = surveytools.catalogue:vphas_offset_catalogue_main',
'vphas-index-offset-catalogues = surveytools.catalogue:vphas_index_offset_catalogues_main',
'vphas-tile-merge = surveytools.tiling:vphas_tile_merge_main'
]}
setup(name='surveytools',
version='0.1',
description='Tools for the VPHAS+ astronomy survey.',
author='Geert Barentsen',
license='MIT',
url='http://www.vphas.eu',
packages=['surveytools'],
install_requires=['numpy',
'matplotlib',
'imageio>=1.0',
'astropy',
'photutils',
'pyraf'],
entry_points=entry_points,
)
|
barentsen/surveytools
|
setup.py
|
Python
|
mit
| 1,016
|
# noinspection PyPackageRequirements
import wx
import gui.globalEvents as GE
import gui.mainFrame
from gui.contextMenu import ContextMenuUnconditional
from service.settings import GraphSettings
_t = wx.GetTranslation
class GraphDmgIgnoreResistsMenu(ContextMenuUnconditional):
def __init__(self):
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
self.settings = GraphSettings.getInstance()
def display(self, callingWindow, srcContext):
return srcContext == 'dmgStatsGraph'
def getText(self, callingWindow, itmContext):
return _t('Ignore Target Resists')
def activate(self, callingWindow, fullContext, i):
self.settings.set('ignoreResists', not self.settings.get('ignoreResists'))
wx.PostEvent(self.mainFrame, GE.GraphOptionChanged(refreshAxeLabels=True, refreshColumns=True))
def isChecked(self, i):
return self.settings.get('ignoreResists')
GraphDmgIgnoreResistsMenu.register()
|
pyfa-org/Pyfa
|
gui/builtinContextMenus/graphDmgIgnoreResists.py
|
Python
|
gpl-3.0
| 972
|
import os
import sys
import unittest
from test.support import run_unittest, import_module
# Skip tests if we don't have threading.
import_module('threading')
# Skip tests if we don't have concurrent.futures.
import_module('concurrent.futures')
def suite():
tests = unittest.TestSuite()
loader = unittest.TestLoader()
for fn in os.listdir(os.path.dirname(__file__)):
if fn.startswith("test") and fn.endswith(".py"):
mod_name = 'test.test_asyncio.' + fn[:-3]
try:
__import__(mod_name)
except unittest.SkipTest:
pass
else:
mod = sys.modules[mod_name]
tests.addTests(loader.loadTestsFromModule(mod))
return tests
def test_main():
run_unittest(suite())
|
PennartLoettring/Poettrix
|
rootfs/usr/lib/python3.4/test/test_asyncio/__init__.py
|
Python
|
gpl-2.0
| 793
|
# Copyright (c) 2019 Red Hat, Inc.
#
# This file is part of ARA Records Ansible.
#
# ARA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ARA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ARA. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import os
from distutils.sysconfig import get_python_lib
from . import action_plugins, callback_plugins, lookup_plugins
exports = """
export ANSIBLE_CALLBACK_PLUGINS=${{ANSIBLE_CALLBACK_PLUGINS:-}}${{ANSIBLE_CALLBACK_PLUGINS+:}}{}
export ANSIBLE_ACTION_PLUGINS=${{ANSIBLE_ACTION_PLUGINS:-}}${{ANSIBLE_ACTION_PLUGINS+:}}{}
export ANSIBLE_LOOKUP_PLUGINS=${{ANSIBLE_LOOKUP_PLUGINS:-}}${{ANSIBLE_LOOKUP_PLUGINS+:}}{}
""".format(
callback_plugins, action_plugins, lookup_plugins
)
if "VIRTUAL_ENV" in os.environ:
""" PYTHONPATH may be exported when 'ara' module is installed in a
virtualenv and ansible is installed on system python to avoid ansible
failure to find ara module.
"""
# inspired by https://stackoverflow.com/a/122340/99834
lib = get_python_lib()
if "PYTHONPATH" in os.environ:
python_paths = os.environ["PYTHONPATH"].split(os.pathsep)
else:
python_paths = []
if lib not in python_paths:
python_paths.append(lib)
exports += "export PYTHONPATH=${PYTHONPATH:-}${PYTHONPATH+:}%s\n" % os.pathsep.join(python_paths)
if __name__ == "__main__":
print(exports.strip())
|
dmsimard/ara
|
ara/setup/env.py
|
Python
|
gpl-3.0
| 1,891
|
# -*- coding: utf-8 -*-
#
# Codimension - Python 3 experimental IDE
# Copyright (C) 2010-2017 Sergey Satskiy <sergey.satskiy@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Setup script for the Codimension IDE"""
import os.path
import sys
import io
from setuptools import setup
def getVersion():
"""The version is coming from a file in the source tree"""
verFileName = 'codimension/cdmverspec.py'
if not os.path.exists(verFileName):
print('Cannot find the IDE version file. Expected here: ' +
verFileName, file=sys.stderr)
sys.exit(1)
with open(verFileName) as version_file:
for line in version_file:
line = line.strip()
if line.startswith('version'):
return line.split('=')[1].strip()[1:-1]
print('Cannot find a version line in the ' + verFileName,
file=sys.stderr)
sys.exit(1)
def getDescription():
"""Provides a short description"""
return 'Experimental Python 3 IDE which aims at both textual and ' \
'graphical representation of a code. The graphics is ' \
'automatically re-generated while the code is typed'
def getLongDescription():
"""Provides the long description"""
try:
import pypandoc
converted = pypandoc.convert('README.md', 'rst').splitlines()
no_travis = [line for line in converted if 'travis-ci.org' not in line]
long_description = '\n'.join(no_travis)
# Pypi index does not like this link
long_description = long_description.replace('|Build Status|', '')
except Exception as exc:
print('pypandoc package is not installed: the markdown '
              'README.md conversion to rst failed: ' + str(exc), file=sys.stderr)
# pandoc is not installed, fallback to using raw contents
with io.open('README.md', encoding='utf-8') as f:
long_description = f.read()
return long_description
def getRequirements():
"""Provides the requirements list"""
if not os.path.exists('requirements.txt'):
print('Could not find requirements.txt', file=sys.stderr)
sys.exit(1)
with open('requirements.txt') as f:
required = f.read().splitlines()
return required
def getDataFiles():
"""Provides the data files"""
result = [('share/applications', ['resources/codimension.desktop']),
('share/pixmaps', ['resources/codimension.png']),
('share/metainfo', ['resources/codimension.appdata.xml'])]
return result
def getPackageData():
"""Provides the data files"""
extensions = ['.png', '.svg', '.svgz', '.json', '.css', '.md', '.jar',
'README', 'COPYING']
package_data = [('codimension.pixmaps',
'codimension/pixmaps/'),
('codimension.skins',
'codimension/skins/'),
('doc',
'doc/'),
('doc.md',
'doc/md'),
('doc.cml',
'doc/cml'),
('doc.plugins',
'doc/plugins'),
('doc.technology',
'doc/technology'),
('doc.smartzoom',
'doc/smartzoom'),
('doc.grouping',
'doc/grouping'),
('doc.deadcode',
'doc/deadcode'),
('doc.complexity',
'doc/complexity'),
('doc.pyflakes',
'doc/pyflakes'),
('doc.disassembling',
'doc/disassembling'),
('doc.colorschemes',
'doc/colorschemes'),
('doc.editorsettings',
'doc/editorsettings'),
('doc.dependencies',
'doc/dependencies'),
('doc.project',
'doc/project'),
('plantuml',
'plantuml/')]
# If a skin needs to be added, then the following item should be also
# appended:
# package_data.append(('codimension.skins.myskin',
# 'codimension/skins/myskin/'))
result = {}
for item in package_data:
package = item[0]
matchFiles = []
for fName in os.listdir(item[1]):
for ext in extensions:
if fName.endswith(ext):
matchFiles.append(fName)
break
if matchFiles:
result[package] = matchFiles
return result
def getPackages():
"""Provides packages"""
return ['codimension',
'codimension.analysis',
'codimension.autocomplete',
'codimension.diagram',
'codimension.editor',
'codimension.flowui',
'codimension.profiling',
'codimension.ui',
'codimension.utils',
'codimension.search',
'codimension.debugger', 'codimension.debugger.client',
'codimension.plugins',
'codimension.plugins.categories',
'codimension.plugins.manager',
'codimension.plugins.vcssupport',
'codimension.pixmaps',
'codimension.skins',
'doc', 'doc.cml', 'doc.plugins', 'doc.technology', 'doc.md',
'doc.smartzoom', 'doc.grouping', 'doc.deadcode',
'doc.complexity', 'doc.pyflakes', 'doc.disassembling',
'doc.colorschemes', 'doc.editorsettings',
'doc.dependencies', 'doc.project',
'plantuml']
# If a myskin skin is to be added as well, then one more package should
# be mentioned: ..., 'codimension.skins.myskin']
# install_requires=['pypandoc'] could be added, but it is really only needed
# at the time of submitting a package to PyPI, so it is excluded from the
# dependencies
setup(name='codimension',
description=getDescription(),
python_requires='>=3.5, <3.9',
long_description=getLongDescription(),
# long_description_content_type does not really work so far
# long_description_content_type='text/markdown',
version=getVersion(),
author='Sergey Satskiy',
author_email='sergey.satskiy@gmail.com',
url='https://github.com/SergeySatskiy/codimension',
license='GPLv3',
classifiers=['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3'],
platforms=['any'],
packages=getPackages(),
package_data=getPackageData(),
install_requires=getRequirements(),
data_files=getDataFiles(),
entry_points={'gui_scripts':
['codimension = codimension.codimension:main']})
|
SergeySatskiy/codimension
|
setup.py
|
Python
|
gpl-3.0
| 7,612
|
# Copyright (C) 2010 Wil Mahan <wmahan+fatics@gmail.com>
#
# This file is part of FatICS.
#
# FatICS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FatICS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with FatICS. If not, see <http://www.gnu.org/licenses/>.
#
import time
from datetime import datetime
from test import *
class TestChannel(Test):
def test_channel_guest(self):
t = self.connect_as_guest()
# guests should be in 53 by default
t.write('+ch 53\n')
self.expect('is already on your channel list', t)
t.write('+ch 1\n')
self.expect("[1] added to your channel list", t)
t.write('t 1 foo bar baz\n')
self.expect("(1): foo bar baz", t)
t.write('=ch\n')
self.expect('channel list: 3 channels', t)
self.expect('1 4 53', t)
t.write('-ch 1\n')
self.expect("[1] removed from your channel list", t)
t.write('-ch 1\n')
self.expect("[1] is not on your channel list", t)
t.write('t 1 foo bar baz\n')
self.expect("not in channel 1", t)
t.write('+ch 0\n')
self.expect("Only admins can join channel 0.", t)
self.close(t)
def test_channel_admin(self):
t = self.connect_as_admin()
t.write(', foo bar\n')
self.expect("No previous channel", t)
t.write('-ch 100\n')
t.write('+ch 100\n')
self.expect("[100] added to your channel list", t)
t.write('t 100 foo bar baz\n')
self.expect("(100): foo bar baz", t)
t.write(', a b c d\n')
self.expect("(100): a b c d", t)
t.write('+ch foo\n')
self.expect("must be a number", t)
t.write('+ch -1\n')
self.expect("Invalid channel", t)
t.write('+ch 10000000000\n')
self.expect("Invalid channel", t)
t.write('-ch 10000000000\n')
self.expect("Invalid channel", t)
self.close(t)
t = self.connect_as_admin()
t.write('=ch\n')
self.expect('channel list: 1 channel', t)
t.write('+ch 100\n')
self.expect('is already on your channel list', t)
t.write('-ch 100\n')
self.expect("[100] removed from your channel list", t)
self.close(t)
t = self.connect_as_admin()
t.write('-ch 100\n')
self.expect("is not on your channel list", t)
self.close(t)
def test_channel_admin_ch0(self):
t = self.connect_as_admin()
t.write('+ch 0\n')
self.expect("[0] added to your channel list", t)
t.write('inch 0\n')
self.expect_re(r'0: .*admin', t)
self.close(t)
# XXX want to do a server restart here to check whether
# the value is stored correctly in the DB
t = self.connect_as_admin()
t.write('inch 0\n')
self.expect_re(r'0: .*admin', t)
t.write('-ch 0\n')
self.expect("[0] removed from your channel list", t)
self.close(t)
def test_chanoff_var(self):
t = self.connect_as_guest()
t2 = self.connect_as_guest()
t.write('+ch 1000\n')
self.expect('[1000] added to your channel list.', t)
t2.write('+ch 1000\n')
self.expect('[1000] added to your channel list.', t2)
t.write('t 1000 test 1\n')
self.expect('test 1', t2)
t2.write('set chanoff 1\n')
self.expect('You will not hear channel tells.', t2)
t.write('t 1000 test 2\n')
self.expect_not('test 2', t2)
t2.write('set chanoff 0\n')
self.expect('You will now hear channel tells.', t2)
t.write('t 1000 test 3\n')
self.expect('test 3', t2)
self.close(t)
self.close(t2)
class TestChannelOwnership(Test):
def test_channel_owner(self):
t = self.connect_as_admin()
t.write('+ch 1024\n')
self.expect('You are now the owner of channel 1024.', t)
self.expect('[1024] added to your channel list.', t)
t.write('-ch 1024\n')
self.expect('You are no longer an owner of channel 1024.', t)
self.expect('[1024] removed from your channel list.', t)
self.close(t)
@with_player('TestPlayer')
def test_channel_ownership_limit(self):
t = self.connect_as('TestPlayer')
for i in range(5000, 5008):
t.write('+ch %d\n' % i)
self.expect('You are now the owner of channel %d.' % i, t)
self.expect('[%d] added to your channel list.' % i, t)
t.write('+ch 5008\n')
self.expect('You cannot own more than 8 channels.', t)
for i in range(5000, 5008):
t.write('-ch %d\n' % i)
self.expect('You are no longer an owner of channel %d.' % i, t)
self.expect('[%d] removed from your channel list.' % i, t)
self.close(t)
def test_channel_owner_guests(self):
t = self.connect_as_guest()
t.write('+ch 1024\n')
self.expect('Only registered players can join channels 1024 and above.', t)
self.close(t)
class TestKick(Test):
@with_player('TestPlayer')
def test_kick(self):
t = self.connect_as_admin()
t2 = self.connect_as('testplayer')
t.write('=ch\n')
t.write('+ch 1024\n')
self.expect('You are now the owner of channel 1024.', t)
self.expect('[1024] added to your channel list.', t)
t.write('chkick 1024 testplayer\n')
self.expect('TestPlayer is not in channel 1024.', t)
t2.write('+ch 1024\n')
self.expect('[1024] added to your channel list.', t2)
t.write('chkick 1024 testplayer\n')
self.expect('admin(*)(1024): *** Kicked out TestPlayer. ***', t)
self.expect('*** You have been kicked out of channel 1024 by admin. ***', t2)
t.write('-ch 1024\n')
self.expect('You are no longer an owner of channel 1024.', t)
self.expect('[1024] removed from your channel list.', t)
self.close(t)
self.close(t2)
def test_kick_guest(self):
t = self.connect_as_guest('GuestARST')
t2 = self.connect_as_admin()
t.write('+ch 1\n')
self.expect('[1] added', t)
t2.write('+ch 1\n')
self.expect('[1] added', t2)
t2.write('chkick 1 guestarst\n')
self.expect('You have been kicked out of channel 1 by', t)
self.expect('Kicked out GuestARST', t2)
t2.write('-ch 1\n')
self.expect("[1] removed from your channel list", t2)
self.close(t)
self.close(t2)
@with_player('TestPlayer')
def test_kick_admin(self):
t = self.connect_as_admin()
t2 = self.connect_as('TestPlayer')
t2.write('+ch 5000\n')
self.expect('You are now the owner of channel 5000.', t2)
t.write('chkick 5000 testplayer\n')
self.expect('You are not in channel 5000.', t)
t.write('+ch 5000\n')
self.expect('[5000] added', t)
t2.write('chkick 5000 admin\n')
self.expect('You cannot kick out an admin.', t2)
t.write('chkick 5000 testplayer\n')
self.expect('admin(*)(5000): *** Kicked out TestPlayer. ***', t)
self.expect('*** You have been kicked out of channel 5000 by admin. ***', t2)
t.write('-ch 5000\n')
self.expect('[5000] removed from your channel list.', t)
self.close(t)
self.close(t2)
@with_player('TestPlayer')
def test_kick_offline(self):
t2 = self.connect_as('testplayer')
t2.write('+ch 1024\n')
self.expect('You are now the owner of channel 1024.', t2)
self.expect('[1024] added to your channel list.', t2)
self.close(t2)
t = self.connect_as_admin()
t.write('+ch 1024\n')
self.expect('[1024] added to your channel list.', t)
t.write('chkick 1024 testplayer\n')
self.expect('admin(*)(1024): *** Kicked out TestPlayer. ***', t)
t.write('-ch 1024\n')
self.expect('[1024] removed from your channel list.', t)
self.close(t)
t2 = self.connect_as('testplayer')
t2.write('=ch\n')
self.expect('-- channel list: 1 channel --\r\n1\r\n', t2)
t2.write('inch 1024\n')
self.expect('0 players', t2)
self.close(t2)
@with_player('testone')
@with_player('testtwo')
def test_kick_bad(self):
t = self.connect_as('testone')
t2 = self.connect_as('testtwo')
t.write('+ch 2000\n')
self.expect('You are now the owner of channel 2000.', t)
self.expect('[2000] added to your channel list.', t)
t2.write('+ch 2000\n')
self.expect('[2000] added to your channel list.', t2)
t2.write('chkick 2000 testone\n')
self.expect("You don't have permission to do that.", t2)
t.write('chkick 2000\n')
self.expect('Usage:', t)
t.write('chkick 2000 testtwo\n')
self.expect('*** You have been kicked out of channel 2000 by testone. ***', t2)
self.expect('testone(2000): *** Kicked out testtwo. ***', t)
t.write('-ch 2000\n')
self.expect('You are no longer an owner of channel 2000.', t)
self.expect('[2000] removed from your channel list.', t)
self.close(t)
self.close(t2)
class TestInchannel(Test):
def test_inchannel(self):
t = self.connect_as_guest('GuestTest')
t.write('inch\n')
self.expect_re("4: .*GuestTest", t)
t.write('inch -1\n')
self.expect('Invalid channel', t)
t.write('inch 9999999999\n')
self.expect('Invalid channel', t)
t.write('+ch 1\n')
t.write('inch 1\n')
self.expect_re('1 "help": .*GuestTest', t)
self.expect('in channel 1.', t)
t.write('inch 28741\n') # XXX somebody could join
self.expect('There are 0 players in channel 28741.', t)
self.close(t)
class TestCtellVar(Test):
def test_ctell_var(self):
t = self.connect_as_admin()
t2 = self.connect_as_guest()
t.write('set ctell 0\n')
self.expect('You will not hear channel tells from unregistered', t)
t.write('+ch 1\n')
self.expect("[1] added to your channel list", t)
t2.write('+ch 1\n')
self.expect("[1] added to your channel list", t2)
t2.write('tell 1 Channel 1 test\n')
self.expect_not('Channel 1 test', t)
t.write('set ctell 1\n')
self.expect('You will now hear channel tells from unregistered', t)
t2.write('tell 1 Another channel 1 test\n')
self.expect('Another channel 1 test', t)
t.write('-ch 1\n')
self.expect("[1] removed from your channel list", t)
self.close(t2)
self.close(t)
class TestTopic(Test):
@with_player('TestPlayer')
def test_topic(self):
t = self.connect_as_admin()
t2 = self.connect_as('TestPlayer')
today = datetime.utcnow().date()
t.write('chtopic\n')
self.expect('Usage:', t)
t.write('chtopic 10 test\n')
self.expect('You are not in channel 10.', t)
t.write('+ch 10\n')
self.expect('[10] added to your channel list.', t)
t2.write('+ch 10\n')
self.expect('[10] added to your channel list.', t2)
t.write('chtopic 10\n')
self.expect('There is no topic for channel 10.', t)
t.write('chtopic 10 This is an example topic.\n')
self.expect('TOPIC(10): *** This is an example topic. (admin at %s' % today, t)
self.expect(') ***', t)
self.expect('TOPIC(10): *** This is an example topic. (admin at %s' % today, t2)
self.expect(') ***', t2)
t2.write('chtopic 10\n')
self.expect('TOPIC(10): *** This is an example topic. (admin at %s' % today, t2)
self.expect(') ***', t2)
# joining channel displays topic
t2.write('-ch 10\n')
self.expect('[10] removed from your channel list.', t2)
t2.write('+ch 10\n')
self.expect('TOPIC(10): *** This is an example topic. (admin at %s' % today, t2)
self.expect(') ***', t2)
self.expect('[10] added to your channel list.', t2)
# Leaving and rejoining does not display the topic...
time.sleep(1)
self.close(t2)
t2 = self.connect()
t2.write('testplayer\n%s\n' % tpasswd)
self.expect('**** Starting FICS session as TestPlayer ****', t2)
self.expect_not('TOPIC', t2)
self.close(t2)
# ...unless it has been modified.
t.write('chtopic 10 A new topic.\n')
self.expect('TOPIC(10): *** A new topic. (admin at %s' % today, t)
time.sleep(1)
t2 = self.connect()
t2.write('testplayer\n%s\n' % tpasswd)
self.expect('TOPIC(10): *** A new topic. (admin at %s' % today, t2)
# clear topic
t.write('chtopic 10 -\n')
self.expect('admin(*)(10): *** Cleared topic. ***', t)
self.expect('admin(*)(10): *** Cleared topic. ***', t2)
t2.write('chtopic 10\n')
self.expect('There is no topic for channel 10.', t2)
t.write('-ch 10\n')
self.expect('[10] removed from your channel list.', t)
t2.write('-ch 10\n')
self.expect('[10] removed from your channel list.', t2)
t2.write('chtopic 10\n')
self.expect('There is no topic for channel 10.', t2)
self.close(t)
self.close(t2)
#def test_topic_userchannel(self):
# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 smarttab autoindent
| ecolitan/fatics | test/test_channel.py | Python | agpl-3.0 | 14,009 |
#!/usr/bin/python2
import os
import sys
import codecs
import pyauparser
def main():
    # Tokenize an input string with the lexer, using a loaded grammar.
    # This shows how to drive the lexer manually.
g = pyauparser.Grammar.load_file("data/operator.egt")
lexer = pyauparser.Lexer(g)
lexer.load_file("data/operator_sample_1.txt")
while True:
token = lexer.read_token()
print (token.symbol.name, token.lexeme, token.position)
if token.symbol.type == pyauparser.SymbolType.END_OF_FILE:
break
elif token.symbol.type == pyauparser.SymbolType.ERROR:
print "ERROR({0}:{1}): Unknown Token '{0}'".format(
token.position[0], token.position[1], token.lexeme)
return
print "done", lexer.position
if __name__ == "__main__":
main()
| veblush/PyAuParser | sample/sample-lexer.py | Python | mit | 857 |
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use cubic-interpolation (Mar. 2004)
# Minimization routines
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
'line_search', 'check_grad', 'Result', 'show_options',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
import numpy
from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, \
squeeze, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf
from linesearch import \
line_search_BFGS, line_search_wolfe1, line_search_wolfe2, \
line_search_wolfe2 as line_search
# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
'maxfev' : 'Maximum number of function evaluations has '
'been exceeded.',
'maxiter': 'Maximum number of iterations has been '
'exceeded.',
'pr_loss': 'Desired error not necessarily achieved due '
'to precision loss.'}
class MemoizeJac(object):
""" Decorator that caches the value gradient of function each time it
is called. """
def __init__(self, fun):
self.fun = fun
self.jac = None
self.x = None
def __call__(self, x, *args):
self.x = numpy.asarray(x).copy()
fg = self.fun(x, *args)
self.jac = fg[1]
return fg[0]
def derivative(self, x, *args):
if self.jac is not None and numpy.alltrue(x == self.x):
return self.jac
else:
self(x, *args)
return self.jac
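# Illustrative sketch (added for this excerpt, not part of the original
# module): how MemoizeJac lets a combined (value, gradient) function be used
# where separate value and gradient callables are expected. The helper name
# _example_memoize_jac is hypothetical.
def _example_memoize_jac():
    def f_and_grad(x):
        x = numpy.asarray(x)
        return numpy.sum(x**2), 2*x
    fun = MemoizeJac(f_and_grad)
    x0 = numpy.array([1.0, 2.0])
    # fun(x0) returns the value; fun.derivative(x0) reuses the cached gradient
    return fun(x0), fun.derivative(x0)  # -> (5.0, array([ 2.,  4.]))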
class Result(dict):
""" Represents the optimization result.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
message : str
Description of the cause of the termination.
fun, jac, hess : ndarray
Values of objective function, Jacobian and Hessian (if available).
nfev, njev, nhev : int
Number of evaluations of the objective functions and of its
Jacobian and Hessian.
nit : int
Number of iterations performed by the optimizer.
maxcv : float
The maximum constraint violation.
Notes
-----
There may be additional attributes not listed above depending of the
specific solver. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, self.keys())) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in self.iteritems()])
else:
return self.__class__.__name__ + "()"
class OptimizeWarning(UserWarning):
pass
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in Scipy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
# These have been copied from Numeric's MLab.py
# I don't think they made the transition to scipy_core
def max(m, axis=0):
"""max(m,axis=0) returns the maximum of m along dimension axis.
"""
m = asarray(m)
return numpy.maximum.reduce(m, axis)
def min(m, axis=0):
"""min(m,axis=0) returns the minimum of m along dimension axis.
"""
m = asarray(m)
return numpy.minimum.reduce(m, axis)
def is_array_scalar(x):
"""Test whether `x` is either a scalar or an array scalar.
"""
    return len(atleast_1d(x)) == 1
abs = absolute
import __builtin__
pymin = __builtin__.min
pymax = __builtin__.max
__version__ = "0.7"
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
if ord == Inf:
return numpy.amax(abs(x))
elif ord == -Inf:
return numpy.amin(abs(x))
else:
return numpy.sum(abs(x)**ord, axis=0)**(1.0 / ord)
def rosen(x):
"""The Rosenbrock function.
The function computed is
sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0
Parameters
----------
x : array_like, 1D
The point at which the Rosenbrock function is to be computed.
Returns
-------
f : float
The value of the Rosenbrock function
See Also
--------
rosen_der, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
return numpy.sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0, axis=0)
def rosen_der(x):
"""The derivative (i.e. gradient) of the Rosenbrock function.
Parameters
----------
x : array_like, 1D
The point at which the derivative is to be computed.
Returns
-------
der : 1D numpy array
The gradient of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = numpy.zeros_like(x)
der[1:-1] = 200*(xm - xm_m1**2) - 400*(xm_p1 - xm**2)*xm - 2*(1 - xm)
der[0] = -400*x[0]*(x[1] - x[0]**2) - 2*(1 - x[0])
der[-1] = 200*(x[-1] - x[-2]**2)
return der
def rosen_hess(x):
"""The Hessian matrix of the Rosenbrock function.
Parameters
----------
x : array_like, 1D
The point at which the Hessian matrix is to be computed.
Returns
-------
hess : 2D numpy array
The Hessian matrix of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_der, rosen_hess_prod
"""
x = atleast_1d(x)
H = numpy.diag(-400*x[:-1], 1) - numpy.diag(400*x[:-1], -1)
diagonal = numpy.zeros(len(x), dtype=x.dtype)
diagonal[0] = 1200*x[0]**2 - 400*x[1] + 2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200*x[1:-1]**2 - 400*x[2:]
H = H + numpy.diag(diagonal)
return H
def rosen_hess_prod(x, p):
"""Product of the Hessian matrix of the Rosenbrock function with a vector.
Parameters
----------
x : array_like, 1D
The point at which the Hessian matrix is to be computed.
p : array_like, 1D, same size as `x`.
The vector to be multiplied by the Hessian matrix.
Returns
-------
v : 1D numpy array
The Hessian matrix of the Rosenbrock function at `x` multiplied
by the vector `p`.
See Also
--------
rosen, rosen_der, rosen_hess
"""
x = atleast_1d(x)
Hp = numpy.zeros(len(x), dtype=x.dtype)
Hp[0] = (1200*x[0]**2 - 400*x[1] + 2)*p[0] - 400*x[0]*p[1]
Hp[1:-1] = -400*x[:-2]*p[:-2] + (202 + 1200*x[1:-1]**2 - 400*x[2:])*p[1:-1] \
- 400*x[1:-1]*p[2:]
Hp[-1] = -400*x[-2]*p[-2] + 200*p[-1]
return Hp
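# Illustrative sketch (added for this excerpt, not part of the original
# module): the Rosenbrock helpers above agree with each other at the global
# minimum x = (1, ..., 1). The helper name _example_rosen is hypothetical.
def _example_rosen():
    x = numpy.ones(4)
    p = numpy.array([0.5, -0.3, 0.2, 0.1])
    # value and gradient vanish at the minimum; the Hessian-vector product
    # matches an explicit Hessian times the vector
    return rosen(x), rosen_der(x), numpy.dot(rosen_hess(x), p) - rosen_hess_prod(x, p)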
def wrap_function(function, args):
ncalls = [0]
def function_wrapper(x):
ncalls[0] += 1
return function(x, *args)
return ncalls, function_wrapper
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using the downhill simplex algorithm.
This algorithm only uses function values, not derivatives or second
derivatives.
Parameters
----------
func : callable func(x,*args)
The objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple
Extra arguments passed to func, i.e. ``f(x,*args)``.
callback : callable
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
Returns
-------
xopt : ndarray
Parameter that minimizes function.
fopt : float
Value of function at minimum: ``fopt = func(xopt)``.
iter : int
Number of iterations performed.
funcalls : int
Number of function calls made.
warnflag : int
1 : Maximum number of function evaluations made.
2 : Maximum number of iterations reached.
allvecs : list
Solution at each iteration.
Other parameters
----------------
xtol : float
Relative error in xopt acceptable for convergence.
ftol : number
Relative error in func(xopt) acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfun : number
Maximum number of function evaluations to make.
full_output : bool
Set to True if fopt and warnflag outputs are desired.
disp : bool
Set to True to print convergence messages.
retall : bool
Set to True to return list of solutions at each iteration.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Nelder-Mead' `method` in particular.
Notes
-----
Uses a Nelder-Mead simplex algorithm to find the minimum of function of
one or more variables.
This algorithm has a long history of successful use in applications.
But it will usually be slower than an algorithm that uses first or
second derivative information. In practice it can have poor
performance in high-dimensional problems and is not robust to
minimizing complicated functions. Additionally, there currently is no
complete theory describing when the algorithm will successfully
converge to the minimum, or how fast it will if it does.
References
----------
Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
Respectable", in Numerical Analysis 1995, Proceedings of the
1995 Dundee Biennial Conference in Numerical Analysis, D.F.
Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
Harlow, UK, pp. 191-208.
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'return_all': retall}
res = _minimize_neldermead(func, x0, args, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
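# Illustrative sketch (added for this excerpt, not part of the original
# module): minimizing the Rosenbrock function with the Nelder-Mead wrapper
# defined above. The helper name _example_fmin is hypothetical.
def _example_fmin():
    x0 = numpy.array([1.3, 0.7, 0.8, 1.9, 1.2])
    xopt = fmin(rosen, x0, xtol=1e-8, disp=0)
    return xopt  # expected to be close to numpy.ones(5)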
def _minimize_neldermead(func, x0, args=(), callback=None,
xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options for the Nelder-Mead algorithm are:
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfev : int
Maximum number of function evaluations to make.
This function is called by the `minimize` function with
`method=Nelder-Mead`. It is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
fcalls, func = wrap_function(func, args)
x0 = asfarray(x0).flatten()
N = len(x0)
rank = len(x0.shape)
if not -1 < rank < 2:
raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
if maxiter is None:
maxiter = N * 200
if maxfun is None:
maxfun = N * 200
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
one2np1 = range(1, N + 1)
if rank == 0:
sim = numpy.zeros((N + 1,), dtype=x0.dtype)
else:
sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
fsim = numpy.zeros((N + 1,), float)
sim[0] = x0
if retall:
allvecs = [sim[0]]
fsim[0] = func(x0)
nonzdelt = 0.05
zdelt = 0.00025
for k in range(0, N):
y = numpy.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt)*y[k]
else:
y[k] = zdelt
sim[k + 1] = y
f = func(y)
fsim[k + 1] = f
ind = numpy.argsort(fsim)
fsim = numpy.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = numpy.take(sim, ind, 0)
iterations = 1
while (fcalls[0] < maxfun and iterations < maxiter):
if (max(numpy.ravel(abs(sim[1:] - sim[0]))) <= xtol \
and max(abs(fsim[0] - fsim[1:])) <= ftol):
break
xbar = numpy.add.reduce(sim[:-1], 0) / N
xr = (1 + rho)*xbar - rho*sim[-1]
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho*chi)*xbar - rho*chi*sim[-1]
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi*rho)*xbar - psi*rho*sim[-1]
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi)*xbar + psi*sim[-1]
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma*(sim[j] - sim[0])
fsim[j] = func(sim[j])
ind = numpy.argsort(fsim)
sim = numpy.take(sim, ind, 0)
fsim = numpy.take(fsim, ind, 0)
if callback is not None:
callback(sim[0])
iterations += 1
if retall:
allvecs.append(sim[0])
x = sim[0]
fval = min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print 'Warning: ' + msg
elif iterations >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print 'Warning: ' + msg
else:
msg = _status_message['success']
if disp:
print msg
print " Current function value: %f" % fval
print " Iterations: %d" % iterations
print " Function evaluations: %d" % fcalls[0]
result = Result(fun=fval, nit=iterations, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0), message=msg,
x=x)
if retall:
result['allvecs'] = allvecs
return result
def approx_fprime(xk, f, epsilon, *args):
"""Finite-difference approximation of the gradient of a scalar function.
Parameters
----------
xk : array_like
The coordinate vector at which to determine the gradient of `f`.
f : callable
The function of which to determine the gradient (partial derivatives).
Should take `xk` as first argument, other arguments to `f` can be
supplied in ``*args``. Should return a scalar, the value of the
function at `xk`.
epsilon : array_like
Increment to `xk` to use for determining the function gradient.
If a scalar, uses the same finite difference delta for all partial
derivatives. If an array, should contain one value per element of
`xk`.
\*args : args, optional
Any other arguments that are to be passed to `f`.
Returns
-------
grad : ndarray
        The partial derivatives of `f` with respect to `xk`.
See Also
--------
check_grad : Check correctness of gradient function against approx_fprime.
Notes
-----
The function gradient is determined by the forward finite difference
formula::
f(xk[i] + epsilon[i]) - f(xk[i])
f'[i] = ---------------------------------
epsilon[i]
The main use of `approx_fprime` is in scalar function optimizers like
`fmin_bfgs`, to determine numerically the Jacobian of a function.
Examples
--------
>>> from scipy import optimize
>>> def func(x, c0, c1):
... "Coordinate vector `x` should be an array of size two."
... return c0 * x[0]**2 + c1*x[1]**2
>>> x = np.ones(2)
>>> c0, c1 = (1, 200)
>>> eps = np.sqrt(np.finfo(np.float).eps)
>>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
array([ 2. , 400.00004198])
"""
f0 = f(*((xk,) + args))
grad = numpy.zeros((len(xk),), float)
ei = numpy.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[k] = (f(*((xk+d,)+args)) - f0) / d[k]
ei[k] = 0.0
return grad
def check_grad(func, grad, x0, *args):
"""Check the correctness of a gradient function by comparing it against a
(forward) finite-difference approximation of the gradient.
Parameters
----------
func : callable func(x0,*args)
Function whose derivative is to be checked.
grad : callable grad(x0, *args)
Gradient of `func`.
x0 : ndarray
Points to check `grad` against forward difference approximation of grad
using `func`.
args : \*args, optional
Extra arguments passed to `func` and `grad`.
Returns
-------
err : float
The square root of the sum of squares (i.e. the 2-norm) of the
difference between ``grad(x0, *args)`` and the finite difference
approximation of `grad` using func at the points `x0`.
See Also
--------
approx_fprime
"""
return sqrt(sum((grad(x0, *args) - approx_fprime(x0, func, _epsilon, *args))**2))
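# Illustrative sketch (added for this excerpt, not part of the original
# module): verifying the analytic Rosenbrock gradient against the forward
# finite-difference approximation. The helper name _example_check_grad is
# hypothetical.
def _example_check_grad():
    x0 = numpy.array([1.1, 0.9, 1.2])
    err = check_grad(rosen, rosen_der, x0)
    return err  # expected to be small (finite-difference error, well below 1e-3)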
def approx_fhess_p(x0, p, fprime, epsilon, *args):
f2 = fprime(*((x0 + epsilon*p,) + args))
f1 = fprime(*((x0,) + args))
return (f2 - f1) / epsilon
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
retall=0, callback=None):
"""
Minimize a function using the BFGS algorithm.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable f'(x,*args), optional
Gradient of f.
args : tuple, optional
Extra arguments passed to f and fprime.
gtol : float, optional
        Gradient norm must be less than gtol before successful termination.
norm : float, optional
Order of norm (Inf is max, -Inf is min)
epsilon : int or ndarray, optional
If fprime is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function to call after each
iteration. Called as callback(xk), where xk is the
current parameter vector.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. f(xopt) == fopt.
fopt : float
Minimum value.
gopt : ndarray
Value of gradient at minimum, f'(xopt), which should be near 0.
Bopt : ndarray
Value of 1/f''(xopt), i.e. the inverse hessian matrix.
func_calls : int
Number of function_calls made.
grad_calls : int
Number of gradient calls made.
warnflag : integer
1 : Maximum number of iterations exceeded.
2 : Gradient and/or function calls not changing.
allvecs : list
Results at each iteration. Only returned if retall is True.
Other Parameters
----------------
maxiter : int
Maximum number of iterations to perform.
full_output : bool
If True,return fopt, func_calls, grad_calls, and warnflag
in addition to xopt.
disp : bool
Print convergence message if True.
retall : bool
Return a list of results at each iteration if True.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'BFGS' `method` in particular.
Notes
-----
Optimize the function, f, whose gradient is given by fprime
using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
and Shanno (BFGS)
References
----------
Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['jac'], res['hess'], \
res['nfev'], res['njev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
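# Illustrative sketch (added for this excerpt, not part of the original
# module): BFGS on the Rosenbrock function with its analytic gradient.
# The helper name _example_fmin_bfgs is hypothetical.
def _example_fmin_bfgs():
    x0 = numpy.array([1.3, 0.7, 0.8, 1.9, 1.2])
    xopt = fmin_bfgs(rosen, x0, fprime=rosen_der, disp=0)
    return xopt  # expected to be close to numpy.ones(5)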
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
BFGS algorithm.
Options for the BFGS algorithm are:
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
This function is called by the `minimize` function with `method=BFGS`.
It is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if x0.ndim == 0:
x0.shape = (1,)
if maxiter is None:
maxiter = len(x0)*200
func_calls, f = wrap_function(f, args)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
N = len(x0)
I = numpy.eye(N, dtype=int)
Hk = I
old_fval = f(x0)
old_old_fval = old_fval + 5000
xk = x0
if retall:
allvecs = [x0]
sk = [2*gtol]
warnflag = 0
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
pk = -numpy.dot(Hk, gfk)
alpha_k, fc, gc, old_fval2, old_old_fval2, gfkp1 = \
line_search_wolfe1(f, myfprime, xk, pk, gfk,
old_fval, old_old_fval)
if alpha_k is not None:
old_fval = old_fval2
old_old_fval = old_old_fval2
else:
# line search failed: try different one.
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
line_search_wolfe2(f, myfprime, xk, pk, gfk,
old_fval, old_old_fval)
if alpha_k is None:
# This line search also failed to find a better solution.
warnflag = 2
break
xkp1 = xk + alpha_k * pk
if retall:
allvecs.append(xkp1)
sk = xkp1 - xk
xk = xkp1
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
gfk = gfkp1
if callback is not None:
callback(xk)
k += 1
gnorm = vecnorm(gfk, ord=norm)
if (gnorm <= gtol):
break
if not numpy.isfinite(old_fval):
# We correctly found +-Inf as optimal value, or something went
# wrong.
warnflag = 2
break
        try:  # this was handled in Numeric; keep it here for extra safety
rhok = 1.0 / (numpy.dot(yk, sk))
except ZeroDivisionError:
rhok = 1000.0
print "Divide-by-zero encountered: rhok assumed large"
        if isinf(rhok):  # this is a patch for numpy
rhok = 1000.0
print "Divide-by-zero encountered: rhok assumed large"
A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + rhok * sk[:, numpy.newaxis] \
* sk[numpy.newaxis, :]
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print "Warning: " + msg
print " Current function value: %f" % fval
print " Iterations: %d" % k
print " Function evaluations: %d" % func_calls[0]
print " Gradient evaluations: %d" % grad_calls[0]
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print "Warning: " + msg
print " Current function value: %f" % fval
print " Iterations: %d" % k
print " Function evaluations: %d" % func_calls[0]
print " Gradient evaluations: %d" % grad_calls[0]
else:
msg = _status_message['success']
if disp:
print msg
print " Current function value: %f" % fval
print " Iterations: %d" % k
print " Function evaluations: %d" % func_calls[0]
print " Gradient evaluations: %d" % grad_calls[0]
result = Result(fun=fval, jac=gfk, hess=Hk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk)
if retall:
result['allvecs'] = allvecs
return result
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
maxiter=None, full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using a nonlinear conjugate gradient algorithm.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable f'(x,*args), optional
Function which computes the gradient of f.
args : tuple, optional
Extra arguments passed to f and fprime.
gtol : float, optional
Stop when norm of gradient is less than gtol.
norm : float, optional
Order of vector norm to use. -Inf is min, Inf is max.
epsilon : float or ndarray, optional
If fprime is approximated, use this value for the step
size (can be scalar or vector).
callback : callable, optional
An optional user-supplied function, called after each
iteration. Called as callback(xk), where xk is the
current parameter vector.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. f(xopt) == fopt.
fopt : float
Minimum value found, f(xopt).
func_calls : int
The number of function_calls made.
grad_calls : int
The number of gradient calls made.
warnflag : int
1 : Maximum number of iterations exceeded.
2 : Gradient and/or function calls not changing.
allvecs : ndarray
If retall is True (see other parameters below), then this
vector containing the result at each iteration is returned.
Other Parameters
----------------
maxiter : int
Maximum number of iterations to perform.
full_output : bool
If True then return fopt, func_calls, grad_calls, and
warnflag in addition to xopt.
disp : bool
Print convergence message if True.
retall : bool
Return a list of results at each iteration if True.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'CG' `method` in particular.
Notes
-----
Optimize the function, f, whose gradient is given by fprime
using the nonlinear conjugate gradient algorithm of Polak and
Ribiere. See Wright & Nocedal, 'Numerical Optimization',
1999, pg. 120-122.
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
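# Illustrative sketch (added for this excerpt, not part of the original
# module): nonlinear conjugate gradients on the Rosenbrock function with its
# analytic gradient. The helper name _example_fmin_cg is hypothetical.
def _example_fmin_cg():
    x0 = numpy.array([1.3, 0.7, 0.8, 1.9, 1.2])
    xopt = fmin_cg(rosen, x0, fprime=rosen_der, disp=0)
    return xopt  # expected to be close to numpy.ones(5)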
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
conjugate gradient algorithm.
Options for the conjugate gradient algorithm are:
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
This function is called by the `minimize` function with `method=CG`. It
is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if maxiter is None:
maxiter = len(x0)*200
func_calls, f = wrap_function(f, args)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
N = len(x0)
xk = x0
old_fval = f(xk)
old_old_fval = old_fval + 5000
if retall:
allvecs = [xk]
sk = [2*gtol]
warnflag = 0
pk = -gfk
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
deltak = numpy.dot(gfk, gfk)
# These values are modified by the line search, even if it fails
old_fval_backup = old_fval
old_old_fval_backup = old_old_fval
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
line_search_wolfe1(f, myfprime, xk, pk, gfk, old_fval,
old_old_fval, c2=0.4)
if alpha_k is None: # line search failed -- use different one.
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
line_search_wolfe2(f, myfprime, xk, pk, gfk,
old_fval_backup, old_old_fval_backup)
if alpha_k is None or alpha_k == 0:
# This line search also failed to find a better solution.
warnflag = 2
break
xk = xk + alpha_k * pk
if retall:
allvecs.append(xk)
if gfkp1 is None:
gfkp1 = myfprime(xk)
yk = gfkp1 - gfk
beta_k = pymax(0, numpy.dot(yk, gfkp1) / deltak)
pk = -gfkp1 + beta_k * pk
gfk = gfkp1
gnorm = vecnorm(gfk, ord=norm)
if callback is not None:
callback(xk)
k += 1
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print "Warning: " + msg
print " Current function value: %f" % fval
print " Iterations: %d" % k
print " Function evaluations: %d" % func_calls[0]
print " Gradient evaluations: %d" % grad_calls[0]
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print "Warning: " + msg
print " Current function value: %f" % fval
print " Iterations: %d" % k
print " Function evaluations: %d" % func_calls[0]
print " Gradient evaluations: %d" % grad_calls[0]
else:
msg = _status_message['success']
if disp:
print msg
print " Current function value: %f" % fval
print " Iterations: %d" % k
print " Function evaluations: %d" % func_calls[0]
print " Gradient evaluations: %d" % grad_calls[0]
result = Result(fun=fval, jac=gfk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk)
if retall:
result['allvecs'] = allvecs
return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
callback=None):
"""
Unconstrained minimization of a function using the Newton-CG method.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable f'(x,*args)
Gradient of f.
fhess_p : callable fhess_p(x,p,*args), optional
Function which computes the Hessian of f times an
arbitrary vector, p.
fhess : callable fhess(x,*args), optional
Function to compute the Hessian matrix of f.
args : tuple, optional
Extra arguments passed to f, fprime, fhess_p, and fhess
(the same set of extra arguments is supplied to all of
these functions).
epsilon : float or ndarray, optional
If fhess is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function which is called after
each iteration. Called as callback(xk), where xk is the
current parameter vector.
Returns
-------
xopt : ndarray
        Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float
Value of the function at xopt, i.e. ``fopt = f(xopt)``.
fcalls : int
Number of function calls made.
gcalls : int
Number of gradient calls made.
hcalls : int
Number of hessian calls made.
warnflag : int
Warnings generated by the algorithm.
1 : Maximum number of iterations exceeded.
allvecs : list
The result at each iteration, if retall is True (see below).
Other Parameters
----------------
avextol : float
Convergence is assumed when the average relative error in
the minimizer falls below this amount.
maxiter : int
Maximum number of iterations to perform.
full_output : bool
If True, return the optional outputs.
disp : bool
If True, print convergence message.
retall : bool
If True, return a list of results at each iteration.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Newton-CG' `method` in particular.
Notes
-----
Only one of `fhess_p` or `fhess` need to be given. If `fhess`
is provided, then `fhess_p` will be ignored. If neither `fhess`
nor `fhess_p` is provided, then the hessian product will be
approximated using finite differences on `fprime`. `fhess_p`
must compute the hessian times an arbitrary vector. If it is not
given, finite-differences on `fprime` are used to compute
it.
Newton-CG methods are also called truncated Newton methods. This
function differs from scipy.optimize.fmin_tnc because
1. scipy.optimize.fmin_ncg is written purely in python using numpy
and scipy while scipy.optimize.fmin_tnc calls a C function.
2. scipy.optimize.fmin_ncg is only for unconstrained minimization
while scipy.optimize.fmin_tnc is for unconstrained minimization
or box constrained minimization. (Box constraints give
           lower and upper bounds for each variable separately.)
References
----------
Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
"""
opts = {'xtol': avextol,
'eps': epsilon,
'maxiter': maxiter,
'disp': disp,
'return_all': retall}
res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nfev'], res['njev'], \
res['nhev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
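# Illustrative sketch (added for this excerpt, not part of the original
# module): Newton-CG on the Rosenbrock function, supplying both the gradient
# and the full Hessian defined above. The helper name _example_fmin_ncg is
# hypothetical.
def _example_fmin_ncg():
    x0 = numpy.array([1.3, 0.7, 0.8, 1.9, 1.2])
    xopt = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, disp=0)
    return xopt  # expected to be close to numpy.ones(5)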
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Options for the Newton-CG algorithm are:
disp : bool
Set to True to print convergence messages.
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
maxiter : int
Maximum number of iterations to perform.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
This function is called by the `minimize` function with
`method=Newton-CG`. It is not supposed to be called directly.
Also note that the `jac` parameter (Jacobian) is required.
"""
_check_unknown_options(unknown_options)
    if jac is None:
raise ValueError('Jacobian is required for Newton-CG method')
f = fun
fprime = jac
fhess_p = hessp
fhess = hess
avextol = xtol
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
fcalls, f = wrap_function(f, args)
gcalls, fprime = wrap_function(fprime, args)
hcalls = 0
if maxiter is None:
maxiter = len(x0)*200
xtol = len(x0)*avextol
update = [2*xtol]
xk = x0
if retall:
allvecs = [xk]
k = 0
old_fval = f(x0)
while (numpy.add.reduce(abs(update)) > xtol) and (k < maxiter):
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - grad f(xk) starting from 0.
b = -fprime(xk)
maggrad = numpy.add.reduce(abs(b))
eta = min([0.5, numpy.sqrt(maggrad)])
termcond = eta * maggrad
xsupi = zeros(len(x0), dtype=x0.dtype)
ri = -b
psupi = -ri
i = 0
dri0 = numpy.dot(ri, ri)
        if fhess is not None:  # compute the Hessian only once per outer iteration
A = fhess(*(xk,) + args)
hcalls = hcalls + 1
while numpy.add.reduce(abs(ri)) > termcond:
if fhess is None:
if fhess_p is None:
Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
else:
Ap = fhess_p(xk, psupi, *args)
hcalls = hcalls + 1
else:
Ap = numpy.dot(A, psupi)
# check curvature
Ap = asarray(Ap).squeeze() # get rid of matrices...
curv = numpy.dot(psupi, Ap)
if 0 <= curv <= 3*numpy.finfo(numpy.float64).eps:
break
elif curv < 0:
if (i > 0):
break
else:
xsupi = xsupi + dri0 / curv * psupi
break
alphai = dri0 / curv
xsupi = xsupi + alphai * psupi
ri = ri + alphai * Ap
dri1 = numpy.dot(ri, ri)
betai = dri1 / dri0
psupi = -ri + betai * psupi
i = i + 1
dri0 = dri1 # update numpy.dot(ri,ri) for next time.
pk = xsupi # search direction is solution to system.
gfk = -b # gradient at xk
alphak, fc, gc, old_fval = line_search_BFGS(f, xk, pk, gfk, old_fval)
update = alphak * pk
xk = xk + update # upcast if necessary
if callback is not None:
callback(xk)
if retall:
allvecs.append(xk)
k += 1
fval = old_fval
if k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print "Warning: " + msg
print " Current function value: %f" % fval
print " Iterations: %d" % k
print " Function evaluations: %d" % fcalls[0]
print " Gradient evaluations: %d" % gcalls[0]
print " Hessian evaluations: %d" % hcalls
else:
warnflag = 0
msg = _status_message['success']
if disp:
print msg
print " Current function value: %f" % fval
print " Iterations: %d" % k
print " Function evaluations: %d" % fcalls[0]
print " Gradient evaluations: %d" % gcalls[0]
print " Hessian evaluations: %d" % hcalls
result = Result(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0],
nhev=hcalls, status=warnflag, success=(warnflag == 0),
message=msg, x=xk)
if retall:
result['allvecs'] = allvecs
return result
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
full_output=0, disp=1):
"""Bounded minimization for scalar functions.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized (must accept and return scalars).
x1, x2 : float or array scalar
The optimization bounds.
args : tuple, optional
Extra arguments passed to function.
xtol : float, optional
The convergence tolerance.
maxfun : int, optional
Maximum number of function evaluations allowed.
full_output : bool, optional
If True, return optional outputs.
disp : int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
Returns
-------
xopt : ndarray
Parameters (over given interval) which minimize the
objective function.
fval : number
The function value at the minimum point.
ierr : int
An error flag (0 if converged, 1 if maximum number of
function calls reached).
numfunc : int
The number of function calls made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Bounded' `method` in particular.
Notes
-----
Finds a local minimizer of the scalar function `func` in the
interval x1 < xopt < x2 using Brent's method. (See `brent`
for auto-bracketing).
"""
options = {'xtol': xtol,
'maxiter': maxfun,
'disp': disp}
res = _minimize_scalar_bounded(func, (x1, x2), args, **options)
if full_output:
return res['x'], res['fun'], res['status'], res['nfev']
else:
return res['x']
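# Illustrative sketch (added for this excerpt, not part of the original
# module): bounded scalar minimization of a simple parabola. The helper name
# _example_fminbound is hypothetical.
def _example_fminbound():
    # minimize (x - 2)**2 on the closed interval [0, 5]
    xopt = fminbound(lambda x: (x - 2.0)**2, 0.0, 5.0, disp=0)
    return xopt  # expected to be close to 2.0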
def _minimize_scalar_bounded(func, bounds, args=(),
xtol=1e-5, maxiter=500, disp=0,
**unknown_options):
_check_unknown_options(unknown_options)
maxfun = maxiter
# Test bounds are of correct form
if len(bounds) != 2:
raise ValueError('bounds must have two elements.')
x1, x2 = bounds
if not (is_array_scalar(x1) and is_array_scalar(x2)):
raise ValueError("Optimisation bounds must be scalars"
" or array scalars.")
if x1 > x2:
raise ValueError("The lower bound exceeds the upper bound.")
flag = 0
header = ' Func-count x f(x) Procedure'
step = ' initial'
sqrt_eps = sqrt(2.2e-16)
golden_mean = 0.5*(3.0 - sqrt(5.0))
a, b = x1, x2
fulc = a + golden_mean*(b - a)
nfc, xf = fulc, fulc
rat = e = 0.0
x = xf
fx = func(x, *args)
num = 1
fmin_data = (1, xf, fx)
ffulc = fnfc = fx
xm = 0.5*(a + b)
tol1 = sqrt_eps*abs(xf) + xtol / 3.0
tol2 = 2.0*tol1
if disp > 2:
print (" ")
print (header)
print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))
while (abs(xf - xm) > (tol2 - 0.5*(b - a))):
golden = 1
# Check for parabolic fit
if abs(e) > tol1:
golden = 0
r = (xf - nfc)*(fx - ffulc)
q = (xf - fulc)*(fx - fnfc)
p = (xf - fulc)*q - (xf - nfc)*r
q = 2.0*(q - r)
if q > 0.0: p = -p
q = abs(q)
r = e
e = rat
# Check for acceptability of parabola
if ((abs(p) < abs(0.5*q*r)) and (p > q*(a - xf)) and \
(p < q*(b - xf))):
rat = (p + 0.0) / q
x = xf + rat
step = ' parabolic'
if ((x - a) < tol2) or ((b - x) < tol2):
si = numpy.sign(xm - xf) + ((xm - xf) == 0)
rat = tol1*si
else: # do a golden section step
golden = 1
if golden: # Do a golden-section step
if xf >= xm:
e = a - xf
else:
e = b - xf
rat = golden_mean*e
step = ' golden'
si = numpy.sign(rat) + (rat == 0)
x = xf + si*max([abs(rat), tol1])
fu = func(x, *args)
num += 1
fmin_data = (num, x, fu)
if disp > 2:
print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))
if fu <= fx:
if x >= xf:
a = xf
else:
b = xf
fulc, ffulc = nfc, fnfc
nfc, fnfc = xf, fx
xf, fx = x, fu
else:
if x < xf:
a = x
else:
b = x
if (fu <= fnfc) or (nfc == xf):
fulc, ffulc = nfc, fnfc
nfc, fnfc = x, fu
elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
fulc, ffulc = x, fu
xm = 0.5*(a + b)
tol1 = sqrt_eps*abs(xf) + xtol / 3.0
tol2 = 2.0*tol1
if num >= maxfun:
flag = 1
break
fval = fx
if disp > 0:
_endprint(x, flag, fval, maxfun, xtol, disp)
result = Result(fun=fval, status=flag, success=(flag == 0),
message={0: 'Solution found.',
1: 'Maximum number of function calls '
'reached.'}.get(flag, ''),
x=xf, nfev=num)
return result
class Brent:
#need to rethink design of __init__
def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
full_output=0):
self.func = func
self.args = args
self.tol = tol
self.maxiter = maxiter
self._mintol = 1.0e-11
self._cg = 0.3819660
self.xmin = None
self.fval = None
self.iter = 0
self.funcalls = 0
#need to rethink design of set_bracket (new options, etc)
def set_bracket(self, brack=None):
self.brack = brack
def get_bracket_info(self):
#set up
func = self.func
args = self.args
brack = self.brack
### BEGIN core bracket_info code ###
### carefully DOCUMENT any CHANGES in core ##
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
dum = xa; xa = xc; xc = dum
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be " \
"length 2 or 3 sequence.")
### END core bracket_info code ###
return xa, xb, xc, fa, fb, fc, funcalls
def optimize(self):
#set up for optimization
func = self.func
xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
_mintol = self._mintol
_cg = self._cg
#################################
#BEGIN CORE ALGORITHM
#we are making NO CHANGES in this
#################################
x = w = v = xb
fw = fv = fx = func(*((x,) + self.args))
if (xa < xc):
a = xa
b = xc
else:
a = xc
b = xa
deltax = 0.0
funcalls = 1
iter = 0
while (iter < self.maxiter):
tol1 = self.tol*abs(x) + _mintol
tol2 = 2.0*tol1
xmid = 0.5*(a + b)
if abs(x - xmid) < (tol2 - 0.5*(b - a)): # check for convergence
xmin = x
fval = fx
break
if (abs(deltax) <= tol1):
if (x >= xmid): deltax = a - x # do a golden section step
else: deltax = b - x
rat = _cg*deltax
else: # do a parabolic step
tmp1 = (x - w)*(fx - fv)
tmp2 = (x - v)*(fx - fw)
p = (x - v)*tmp2 - (x - w)*tmp1;
tmp2 = 2.0*(tmp2 - tmp1)
if (tmp2 > 0.0):
p = -p
tmp2 = abs(tmp2)
dx_temp = deltax
deltax = rat
# check parabolic fit
if ((p > tmp2*(a - x)) and (p < tmp2*(b - x)) and (abs(p) < abs(0.5*tmp2*dx_temp))):
rat = p*1.0 / tmp2 # if parabolic step is useful.
u = x + rat
if ((u - a) < tol2 or (b - u) < tol2):
if xmid - x >= 0: rat = tol1
else: rat = -tol1
else:
if (x >= xmid): deltax = a - x # if it's not do a golden section step
else: deltax = b - x
rat = _cg*deltax
if (abs(rat) < tol1): # update by at least tol1
if rat >= 0: u = x + tol1
else: u = x - tol1
else:
u = x + rat
fu = func(*((u,) + self.args)) # calculate new output value
funcalls += 1
if (fu > fx): # if it's bigger than current
if (u < x): a = u
else: b = u
if (fu <= fw) or (w == x):
v = w; w = u; fv = fw; fw = fu
elif (fu <= fv) or (v == x) or (v == w):
v = u; fv = fu
else:
if (u >= x): a = x
else: b = x
v = w; w = x; x = u
fv = fw; fw = fx; fx = fu
iter += 1
#################################
#END CORE ALGORITHM
#################################
self.xmin = x
self.fval = fx
self.iter = iter
self.funcalls = funcalls
def get_result(self, full_output=False):
if full_output:
return self.xmin, self.fval, self.iter, self.funcalls
else:
return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
"""Given a function of one-variable and a possible bracketing interval,
return the minimum of the function isolated to a fractional precision of
tol.
Parameters
----------
func : callable f(x,*args)
Objective function.
args
Additional arguments (if present).
brack : tuple
Triple (a,b,c) where (a<b<c) and func(b) <
func(a),func(c). If bracket consists of two numbers (a,c)
then they are assumed to be a starting interval for a
downhill bracket search (see `bracket`); it doesn't always
mean that the obtained solution will satisfy a<=x<=c.
full_output : bool
If True, return all output args (xmin, fval, iter,
funcalls).
Returns
-------
xmin : ndarray
Optimum point.
fval : float
Optimum value.
iter : int
Number of iterations.
funcalls : int
Number of objective function evaluations made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Brent' `method` in particular.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of golden section method.
"""
options = {'xtol': tol,
'maxiter': maxiter}
res = _minimize_scalar_brent(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nit'], res['nfev']
else:
return res['x']
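# Illustrative sketch (added for this excerpt, not part of the original
# module): Brent's method on a parabola with an explicit bracketing triple.
# The helper name _example_brent is hypothetical.
def _example_brent():
    # (0, 1, 4) is a valid bracket: f(1) < f(0) and f(1) < f(4)
    xmin = brent(lambda x: (x - 2.0)**2, brack=(0.0, 1.0, 4.0))
    return xmin  # expected to be close to 2.0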
def _minimize_scalar_brent(func, brack=None, args=(),
xtol=1.48e-8, maxiter=500,
**unknown_options):
_check_unknown_options(unknown_options)
tol = xtol
brent = Brent(func=func, args=args, tol=tol,
full_output=True, maxiter=maxiter)
brent.set_bracket(brack)
brent.optimize()
x, fval, nit, nfev = brent.get_result(full_output=True)
return Result(fun=fval, x=x, nit=nit, nfev=nfev)
def golden(func, args=(), brack=None, tol=_epsilon, full_output=0):
""" Given a function of one-variable and a possible bracketing interval,
return the minimum of the function isolated to a fractional precision of
tol.
Parameters
----------
func : callable func(x,*args)
Objective function to minimize.
args : tuple
Additional arguments (if present), passed to func.
brack : tuple
Triple (a,b,c), where (a<b<c) and func(b) <
func(a),func(c). If bracket consists of two numbers (a,
c), then they are assumed to be a starting interval for a
downhill bracket search (see `bracket`); it doesn't always
        mean that the obtained solution will satisfy a<=x<=c.
tol : float
x tolerance stop criterion
full_output : bool
If True, return optional outputs.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Golden' `method` in particular.
Notes
-----
Uses analog of bisection method to decrease the bracketed
interval.
"""
options = {'xtol': tol}
res = _minimize_scalar_golden(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nfev']
else:
return res['x']
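# Illustrative sketch (added for this excerpt, not part of the original
# module): golden-section search on the same parabola and bracket used for
# `brent` above. The helper name _example_golden is hypothetical.
def _example_golden():
    xmin = golden(lambda x: (x - 2.0)**2, brack=(0.0, 1.0, 4.0))
    return xmin  # expected to be close to 2.0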
def _minimize_scalar_golden(func, brack=None, args=(),
xtol=_epsilon, **unknown_options):
_check_unknown_options(unknown_options)
tol = xtol
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
dum = xa; xa = xc; xc = dum
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
_gR = 0.61803399
_gC = 1.0 - _gR
x3 = xc
x0 = xa
if (abs(xc - xb) > abs(xb - xa)):
x1 = xb
x2 = xb + _gC*(xc - xb)
else:
x2 = xb
x1 = xb - _gC*(xb - xa)
f1 = func(*((x1,) + args))
f2 = func(*((x2,) + args))
funcalls += 2
while (abs(x3 - x0) > tol*(abs(x1) + abs(x2))):
if (f2 < f1):
x0 = x1; x1 = x2; x2 = _gR*x1 + _gC*x3
f1 = f2; f2 = func(*((x2,) + args))
else:
x3 = x2; x2 = x1; x1 = _gR*x2 + _gC*x0
f2 = f1; f1 = func(*((x1,) + args))
funcalls += 1
if (f1 < f2):
xmin = x1
fval = f1
else:
xmin = x2
fval = f2
return Result(fun=fval, nfev=funcalls, x=xmin)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
"""
Bracket the minimum of the function.
Given a function and distinct initial points, search in the
    downhill direction (as defined by the initial points) and return
    new points xa, xb, xc that bracket the minimum of the function:
    f(xa) > f(xb) < f(xc). It doesn't always mean that the obtained
    solution will satisfy xa <= x <= xb.
Parameters
----------
func : callable f(x,*args)
Objective function to minimize.
xa, xb : float, optional
Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
args : tuple, optional
Additional arguments (if present), passed to `func`.
grow_limit : float, optional
Maximum grow limit. Defaults to 110.0
maxiter : int, optional
Maximum number of iterations to perform. Defaults to 1000.
Returns
-------
xa, xb, xc : float
Bracket.
fa, fb, fc : float
Objective function values in bracket.
funcalls : int
Number of function evaluations made.
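Examples
--------
A minimal sketch: starting from two arbitrary points, the search walks
downhill until it brackets the minimum of a parabola at ``x = 1``.
>>> def f(x):
...     return (x - 1.0) ** 2
>>> xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=-3.0, xb=-2.0)
>>> fa > fb < fc
True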
"""
_gold = 1.618034
_verysmall_num = 1e-21
fa = func(*(xa,) + args)
fb = func(*(xb,) + args)
if (fa < fb): # Switch so fa > fb
dum = xa; xa = xb; xb = dum
dum = fa; fa = fb; fb = dum
xc = xb + _gold*(xb - xa)
fc = func(*((xc,) + args))
funcalls = 3
iter = 0
while (fc < fb):
tmp1 = (xb - xa)*(fb - fc)
tmp2 = (xb - xc)*(fb - fa)
val = tmp2 - tmp1
if abs(val) < _verysmall_num:
denom = 2.0*_verysmall_num
else:
denom = 2.0*val
w = xb - ((xb - xc)*tmp2 - (xb - xa)*tmp1) / denom
wlim = xb + grow_limit*(xc - xb)
if iter > maxiter:
raise RuntimeError("Too many iterations.")
iter += 1
if (w - xc)*(xb - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xa = xb; xb = w; fa = fb; fb = fw
return xa, xb, xc, fa, fb, fc, funcalls
elif (fw > fb):
xc = w; fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
w = xc + _gold*(xc - xb)
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(wlim - xc) >= 0.0:
w = wlim
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(xc - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xb = xc; xc = w; w = xc + _gold*(xc - xb)
fb = fc; fc = fw; fw = func(*((w,) + args))
funcalls += 1
else:
w = xc + _gold*(xc - xb)
fw = func(*((w,) + args))
funcalls += 1
xa = xb; xb = xc; xc = w
fa = fb; fb = fc; fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
def _linesearch_powell(func, p, xi, tol=1e-3):
"""Line-search algorithm using fminbound.
Find the minimium of the function ``func(x0+ alpha*direc)``.
"""
def myfunc(alpha):
return func(p + alpha*xi)
alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
xi = alpha_min*xi
return squeeze(fret), p + xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
maxfun=None, full_output=0, disp=1, retall=0, callback=None,
direc=None):
"""
Minimize a function using modified Powell's method. This method
only uses function values, not derivatives.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple
Extra arguments passed to func.
callback : callable
An optional user-supplied function, called after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
current parameter vector.
direc : ndarray
Initial direction set.
Returns
-------
xopt : ndarray
Parameter which minimizes `func`.
fopt : number
Value of function at minimum: ``fopt = func(xopt)``.
direc : ndarray
Current direction set.
iter : int
Number of iterations.
funcalls : int
Number of function calls made.
warnflag : int
Integer warning flag:
1 : Maximum number of function evaluations.
2 : Maximum number of iterations.
allvecs : list
List of solutions at each iteration.
Other Parameters
----------------
xtol : float
Line-search error tolerance.
ftol : float
Relative error in ``func(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfun : int
Maximum number of function evaluations to make.
full_output : bool
If True, fopt, xi, direc, iter, funcalls, and
warnflag are returned.
disp : bool
If True, print convergence messages.
retall : bool
If True, return a list of the solution at each iteration.
See also
--------
minimize: Interface to unconstrained minimization algorithms for
multivariate functions. See the 'Powell' `method` in particular.
Notes
-----
Uses a modification of Powell's method to find the minimum of
a function of N variables. Powell's method is a conjugate
direction method.
The algorithm has two loops. The outer loop
merely iterates over the inner loop. The inner loop minimizes
over each current direction in the direction set. At the end
of the inner loop, if certain conditions are met, the direction
that gave the largest decrease is dropped and replaced with
the difference between the current estimated x and the estimated
x from the beginning of the inner loop.
The technical conditions for replacing the direction of greatest
increase amount to checking that
1. No further gain can be made along the direction of greatest increase
from that iteration.
2. The direction of greatest increase accounted for a sufficiently
large fraction of the decrease in the function value from that
iteration of the inner loop.
References
----------
Powell M.J.D. (1964) An efficient method for finding the minimum of a
function of several variables without calculating derivatives,
Computer Journal, 7 (2):155-162.
Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
Numerical Recipes (any edition), Cambridge University Press
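Examples
--------
A minimal sketch: minimizing a separable quadratic whose minimum is at
``(1, 2)``; ``disp=0`` suppresses the convergence report.
>>> def f(x):
...     return (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2
>>> xopt = fmin_powell(f, x0=[0.0, 0.0], disp=0)
>>> [round(v, 4) for v in xopt]
[1.0, 2.0]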
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'direc': direc,
'return_all': retall}
res = _minimize_powell(func, x0, args, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['direc'], res['nit'], \
res['nfev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_powell(func, x0, args=(), callback=None,
xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
disp=False, direc=None, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
modified Powell algorithm.
Options for the Powell algorithm are:
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfev : int
Maximum number of function evaluations to make.
direc : ndarray
Initial set of direction vectors for the Powell method.
This function is called by the `minimize` function with
`method=Powell`. It is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
# we need to use a mutable object here that we can update in the
# wrapper function
fcalls, func = wrap_function(func, args)
x = asarray(x0).flatten()
if retall:
allvecs = [x]
N = len(x)
rank = len(x.shape)
if not -1 < rank < 2:
raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
if maxiter is None:
maxiter = N * 1000
if maxfun is None:
maxfun = N * 1000
if direc is None:
direc = eye(N, dtype=float)
else:
direc = asarray(direc, dtype=float)
fval = squeeze(func(x))
x1 = x.copy()
iter = 0
ilist = range(N)
while True:
fx = fval
bigind = 0
delta = 0.0
for i in ilist:
direc1 = direc[i]
fx2 = fval
fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100)
if (fx2 - fval) > delta:
delta = fx2 - fval
bigind = i
iter += 1
if callback is not None:
callback(x)
if retall:
allvecs.append(x)
if (2.0*(fx - fval) <= ftol*(abs(fx) + abs(fval)) + 1e-20): break
if fcalls[0] >= maxfun: break
if iter >= maxiter: break
# Construct the extrapolated point
direc1 = x - x1
x2 = 2*x - x1
x1 = x.copy()
fx2 = squeeze(func(x2))
if (fx > fx2):
t = 2.0*(fx + fx2 - 2.0*fval)
temp = (fx - fval - delta)
t *= temp*temp
temp = fx - fx2
t -= delta*temp*temp
if t < 0.0:
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol*100)
direc[bigind] = direc[-1]
direc[-1] = direc1
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print "Warning: " + msg
elif iter >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print "Warning: " + msg
else:
msg = _status_message['success']
if disp:
print msg
print " Current function value: %f" % fval
print " Iterations: %d" % iter
print " Function evaluations: %d" % fcalls[0]
x = squeeze(x)
result = Result(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0), message=msg,
x=x)
if retall:
result['allvecs'] = allvecs
return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print "\nOptimization terminated successfully;\n" \
"The returned value satisfies the termination criteria\n" \
"(using xtol = ", xtol, ")"
if flag == 1:
print "\nMaximum number of function evaluations exceeded --- " \
"increase maxfun argument.\n"
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin):
"""Minimize a function over a given range by brute force.
Parameters
----------
func : callable ``f(x,*args)``
Objective function to be minimized.
ranges : tuple
Each element is a tuple of parameters or a slice object to
be handed to ``numpy.mgrid``.
args : tuple
Extra arguments passed to function.
Ns : int
Default number of samples, if those are not provided.
full_output : bool
If True, return the evaluation grid.
finish : callable, optional
An optimization function that is called with the result of brute force
minimization as initial guess. `finish` should take the initial guess
as positional argument, and take `args`, `full_output` and `disp`
as keyword arguments. See Notes for more details.
Returns
-------
x0 : ndarray
Value of arguments to `func`, giving minimum over the grid.
fval : float
Function value at minimum.
grid : tuple
Representation of the evaluation grid. It has the same
length as x0.
Jout : ndarray
Function values over grid: ``Jout = func(*grid)``.
Notes
-----
The range is respected by the brute force minimization, but if the `finish`
keyword specifies another optimization function (including the default
`fmin`), the returned value may still be (just) outside the range. To
ensure the result stays within the range, use ``finish=None``.
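Examples
--------
A minimal sketch: a coarse 1-D grid search over ``[-2, 2)`` with
``finish=None`` so the returned value stays on the grid.
>>> def f(x):
...     return (x - 1.0) ** 2
>>> brute(f, (slice(-2.0, 2.0, 0.25),), finish=None)
1.0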
"""
N = len(ranges)
if N > 40:
raise ValueError("Brute Force not possible with more " \
"than 40 variables.")
lrange = list(ranges)
for k in range(N):
if type(lrange[k]) is not type(slice(None)):
if len(lrange[k]) < 3:
lrange[k] = tuple(lrange[k]) + (complex(Ns),)
lrange[k] = slice(*lrange[k])
if (N == 1):
lrange = lrange[0]
def _scalarfunc(*params):
params = squeeze(asarray(params))
return func(params, *args)
vecfunc = vectorize(_scalarfunc)
grid = mgrid[lrange]
if (N == 1):
grid = (grid,)
Jout = vecfunc(*grid)
Nshape = shape(Jout)
indx = argmin(Jout.ravel(), axis=-1)
Nindx = zeros(N, int)
xmin = zeros(N, float)
for k in range(N - 1, -1, -1):
thisN = Nshape[k]
Nindx[k] = indx % Nshape[k]
indx = indx // thisN
for k in range(N):
xmin[k] = grid[k][tuple(Nindx)]
Jmin = Jout[tuple(Nindx)]
if (N == 1):
grid = grid[0]
xmin = xmin[0]
if callable(finish):
vals = finish(func, xmin, args=args, full_output=1, disp=0)
xmin = vals[0]
Jmin = vals[1]
if vals[-1] > 0:
print "Warning: Final optimization did not succeed"
if full_output:
return xmin, Jmin, grid, Jout
else:
return xmin
def show_options(solver, method=None):
"""Show documentation for additional options of optimization solvers.
These are method-specific options that can be supplied through the
``options`` dict.
Parameters
----------
solver : str
Type of optimization solver. One of {`minimize`, `root`}.
method : str, optional
If not given, shows all methods of the specified solver. Otherwise,
show only the options for the specified method. Valid values
corresponds to methods' names of respective solver (e.g. 'BFGS' for
'minimize').
Notes
-----
** minimize options
* BFGS options:
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
* Nelder-Mead options:
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxfev : int
Maximum number of function evaluations to make.
* Newton-CG options:
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
* CG options:
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
* Powell options:
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxfev : int
Maximum number of function evaluations to make.
direc : ndarray
Initial set of direction vectors for the Powell method.
* Anneal options:
ftol : float
Relative error in ``fun(x)`` acceptable for convergence.
schedule : str
Annealing schedule to use. One of: 'fast', 'cauchy' or
'boltzmann'.
T0 : float
Initial Temperature (estimated as 1.2 times the largest
cost-function deviation over random points in the range).
Tf : float
Final goal temperature.
maxfev : int
Maximum number of function evaluations to make.
maxaccept : int
Maximum changes to accept.
boltzmann : float
Boltzmann constant in acceptance test (increase for less
stringent test at each temperature).
learn_rate : float
Scale constant for adjusting guesses.
quench, m, n : float
Parameters to alter fast_sa schedule.
lower, upper : float or ndarray
Lower and upper bounds on `x`.
dwell : int
The number of times to search the space at each temperature.
* L-BFGS-B options:
ftol : float
The iteration stops when ``(f^k -
f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
gtol : float
The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
<= gtol`` where ``pg_i`` is the i-th component of the
projected gradient.
maxcor : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many terms
in an approximation to it.)
maxiter : int
Maximum number of function evaluations.
* TNC options:
ftol : float
Precision goal for the value of f in the stopping criterion.
If ftol < 0.0, ftol is set to 0.0. Defaults to -1.
xtol : float
Precision goal for the value of x in the stopping
criterion (after applying x scaling factors). If xtol <
0.0, xtol is set to sqrt(machine_precision). Defaults to
-1.
gtol : float
Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
scale : list of floats
Scaling factors to apply to each variable. If None, the
factors are up-low for interval bounded variables and
1+|x| for the others. Defaults to None.
offset : float
Value to subtract from each variable. If None, the
offsets are (up+low)/2 for interval bounded variables
and x for the others.
maxCGit : int
Maximum number of hessian*vector evaluations per main
iteration. If maxCGit == 0, the direction chosen is
-gradient. If maxCGit < 0, maxCGit is set to
max(1, min(50, n/2)). Defaults to -1.
maxiter : int
Maximum number of function evaluations. If None, `maxiter` is
set to max(100, 10*len(x0)). Defaults to None.
eta : float
Severity of the line search. If < 0 or > 1, set to 0.25.
Defaults to -1.
stepmx : float
Maximum step for the line search. May be increased during
call. If too small, it will be set to 10.0. Defaults to 0.
accuracy : float
Relative precision for finite difference calculations. If
<= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
minfev : float
Minimum function value estimate. Defaults to 0.
rescale : float
Scaling factor (in log10) used to trigger f value
rescaling. If 0, rescale at each iteration. If a large
value, never rescale. If < 0, rescale is set to 1.3.
* COBYLA options:
tol : float
Final accuracy in the optimization (not precisely guaranteed).
This is a lower bound on the size of the trust region.
rhobeg : float
Reasonable initial changes to the variables.
maxfev : int
Maximum number of function evaluations.
* SLSQP options:
ftol : float
Precision goal for the value of f in the stopping criterion.
eps : float
Step size used for numerical approximation of the jacobian.
maxiter : int
Maximum number of iterations.
** root options
* hybrd options:
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between
two consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : sequence
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float
A suitable step length for the forward-difference approximation
of the Jacobian (for ``fprime=None``). If `epsfcn` is less than
the machine precision, it is assumed that the relative errors
in the functions are of the order of the machine precision.
factor : float
A parameter determining the initial step bound (``factor * ||
diag * x||``). Should be in the interval ``(0.1, 100)``.
diag : sequence
N positive entries that serve as scale factors for the
variables.
* LM options:
col_deriv : bool
Non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float
Relative error desired in the sum of squares.
xtol : float
Relative error desired in the approximate solution.
gtol : float
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int
The maximum number of calls to the function. If zero, then 100*(N+1) is
the maximum where N is the number of elements in x0.
epsfcn : float
A suitable step length for the forward-difference approximation of the
Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
it is assumed that the relative errors in the functions are of the
order of the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence
N positive entries that serve as scale factors for the variables.
* Broyden1 options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no
extra parameters.
- ``simple``: drop oldest matrix column. Has no
extra parameters.
- ``svd``: keep only the most significant SVD components.
Extra parameters:
- ``to_retain``: number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (i.e., no rank reduction).
* Broyden2 options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no
extra parameters.
- ``simple``: drop oldest matrix column. Has no
extra parameters.
- ``svd``: keep only the most significant SVD components.
Extra parameters:
- ``to_retain``: number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (i.e., no rank reduction).
* Anderson options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
* LinearMixing options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
initial guess for the jacobian is (-1/alpha).
* DiagBroyden options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
initial guess for the jacobian is (-1/alpha).
* ExcitingMixing options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
* Krylov options:
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=jac.inverse)
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
"""
solver = solver.lower()
if solver not in ('minimize', 'root'):
raise ValueError('Unknown solver.')
solver_header = (' ' * 4 + solver + "\n" + ' ' * 4 + '~' * len(solver))
notes_header = "Notes\n -----"
all_doc = show_options.__doc__.split(notes_header)[1:]
solvers_doc = [s.strip()
for s in show_options.__doc__.split('** ')[1:]]
solver_doc = [s for s in solvers_doc
if s.lower().startswith(solver)]
if method is None:
doc = solver_doc
else:
doc = solver_doc[0].split('* ')[1:]
doc = [s.strip() for s in doc]
doc = [s for s in doc if s.lower().startswith(method.lower())]
print '\n'.join(doc)
return
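# Illustrative sketch: print only the Powell-specific options that can be
# supplied through the ``options`` dict of `minimize`:
#     show_options('minimize', method='Powell')
# An unrecognised solver name raises ``ValueError('Unknown solver.')``.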
def main():
import time
times = []
algor = []
x0 = [0.8, 1.2, 0.7]
print "Nelder-Mead Simplex"
print "==================="
start = time.time()
x = fmin(rosen, x0)
print x
times.append(time.time() - start)
algor.append('Nelder-Mead Simplex\t')
print
print "Powell Direction Set Method"
print "==========================="
start = time.time()
x = fmin_powell(rosen, x0)
print x
times.append(time.time() - start)
algor.append('Powell Direction Set Method.')
print
print "Nonlinear CG"
print "============"
start = time.time()
x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
print x
times.append(time.time() - start)
algor.append('Nonlinear CG \t')
print
print "BFGS Quasi-Newton"
print "================="
start = time.time()
x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
print x
times.append(time.time() - start)
algor.append('BFGS Quasi-Newton\t')
print
print "BFGS approximate gradient"
print "========================="
start = time.time()
x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
print x
times.append(time.time() - start)
algor.append('BFGS without gradient\t')
print
print "Newton-CG with Hessian product"
print "=============================="
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)
print x
times.append(time.time() - start)
algor.append('Newton-CG with hessian product')
print
print "Newton-CG with full Hessian"
print "==========================="
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)
print x
times.append(time.time() - start)
algor.append('Newton-CG with full hessian')
print
print "\nMinimizing the Rosenbrock function of order 3\n"
print " Algorithm \t\t\t Seconds"
print "===========\t\t\t ========="
for k in range(len(algor)):
print algor[k], "\t -- ", times[k]
if __name__ == "__main__":
main()
|
teoliphant/scipy
|
scipy/optimize/optimize.py
|
Python
|
bsd-3-clause
| 101,264
|
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from konfera.models.abstract import FromToModel
from konfera.models.ticket import Ticket
class DiscountCode(FromToModel):
title = models.CharField(max_length=128)
hash = models.CharField(max_length=64, unique=True)
discount = models.IntegerField(
default=0,
validators=[
MaxValueValidator(100),
MinValueValidator(0)
],
help_text=_('Value is percentage discount from ticket type price.'),
)
usage = models.IntegerField(default=1, help_text=_('Amount of tickets that can be issued.'))
ticket_type = models.ForeignKey('TicketType')
def __str__(self):
return self.title
@property
def issued_tickets(self):
tickets = Ticket.objects.filter(discount_code__hash=self.hash)
return len(tickets)
@property
def is_available(self):
return (self.usage - self.issued_tickets) > 0
def clean(self):
if hasattr(self, 'ticket_type'):
if not self.date_from:
self.date_from = self.ticket_type.date_from
elif self.date_from < self.ticket_type.date_from:
raise ValidationError(
{'date_from': _('Discount code can not be available before ticket type is available for sale.')})
if not self.date_to:
self.date_to = self.ticket_type.date_to
elif self.date_to > self.ticket_type.date_to:
raise ValidationError(
{'date_to': _('Discount code can not be available after ticket type is available for sale.')})
super(DiscountCode, self).clean()
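# Illustrative sketch (hypothetical objects, not shipped with this module):
#     code = DiscountCode(title='Speaker', hash='abc123', discount=50,
#                         usage=1, ticket_type=some_ticket_type)
#     code.full_clean()    # clean() copies missing dates from the ticket type
#     code.is_available    # True while fewer than `usage` tickets use the hash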
|
Matusf/django-konfera
|
konfera/models/discount_code.py
|
Python
|
mit
| 1,834
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class crm_phonecall(osv.osv):
""" Model for CRM phonecalls """
_name = "crm.phonecall"
_description = "Phonecall"
_order = "id desc"
_inherit = ['mail.thread']
_columns = {
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'create_date': fields.datetime('Creation Date' , readonly=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which the case belongs.'),
'user_id': fields.many2one('res.users', 'Responsible'),
'partner_id': fields.many2one('res.partner', 'Contact'),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Description'),
'state': fields.selection(
[('open', 'Confirmed'),
('cancel', 'Cancelled'),
('pending', 'Pending'),
('done', 'Held')
], string='Status', readonly=True, track_visibility='onchange',
help='The status is set to Confirmed when a case is created.\n'
'When the call is over, the status is set to Held.\n'
'If the call is not applicable anymore, the status can be set to Cancelled.'),
'email_from': fields.char('Email', size=128, help="These people will receive email."),
'date_open': fields.datetime('Opened', readonly=True),
# phonecall fields
'name': fields.char('Call Summary', size=64, required=True),
'active': fields.boolean('Active', required=False),
'duration': fields.float('Duration', help='Duration in minutes and seconds.'),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',section_id),('section_id','=',False),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_phone': fields.char('Phone', size=32),
'partner_mobile': fields.char('Mobile', size=32),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
'date_closed': fields.datetime('Closed', readonly=True),
'date': fields.datetime('Date'),
'opportunity_id': fields.many2one ('crm.lead', 'Lead/Opportunity'),
}
def _get_default_state(self, cr, uid, context=None):
if context and context.get('default_state'):
return context.get('default_state')
return 'open'
_defaults = {
'date': fields.datetime.now,
'priority': crm.AVAILABLE_PRIORITIES[2][0],
'state': _get_default_state,
'user_id': lambda self, cr, uid, ctx: uid,
'active': 1
}
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
values = {
'partner_phone': partner.phone,
'partner_mobile': partner.mobile,
}
return {'value': values}
def write(self, cr, uid, ids, values, context=None):
""" Override to add case management: open/close dates """
if values.get('state'):
if values.get('state') == 'done':
values['date_closed'] = fields.datetime.now()
self.compute_duration(cr, uid, ids, context=context)
elif values.get('state') == 'open':
values['date_open'] = fields.datetime.now()
values['duration'] = 0.0
return super(crm_phonecall, self).write(cr, uid, ids, values, context=context)
def compute_duration(self, cr, uid, ids, context=None):
for phonecall in self.browse(cr, uid, ids, context=context):
if phonecall.duration <= 0:
duration = datetime.now() - datetime.strptime(phonecall.date, DEFAULT_SERVER_DATETIME_FORMAT)
values = {'duration': duration.seconds/float(60)}
self.write(cr, uid, [phonecall.id], values, context=context)
return True
def schedule_another_phonecall(self, cr, uid, ids, schedule_time, call_summary, \
user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
"""
action :('schedule','Schedule a call'), ('log','Log a call')
"""
model_data = self.pool.get('ir.model.data')
phonecall_dict = {}
if not categ_id:
try:
res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
except ValueError:
pass
for call in self.browse(cr, uid, ids, context=context):
if not section_id:
section_id = call.section_id and call.section_id.id or False
if not user_id:
user_id = call.user_id and call.user_id.id or False
if not schedule_time:
schedule_time = call.date
vals = {
'name' : call_summary,
'user_id' : user_id or False,
'categ_id' : categ_id or False,
'description' : call.description or False,
'date' : schedule_time,
'section_id' : section_id or False,
'partner_id': call.partner_id and call.partner_id.id or False,
'partner_phone' : call.partner_phone,
'partner_mobile' : call.partner_mobile,
'priority': call.priority,
'opportunity_id': call.opportunity_id and call.opportunity_id.id or False,
}
new_id = self.create(cr, uid, vals, context=context)
if action == 'log':
self.write(cr, uid, [new_id], {'state': 'done'}, context=context)
phonecall_dict[call.id] = new_id
return phonecall_dict
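# Illustrative sketch (hypothetical ids and values): log a follow-up call and
# mark it as held straight away:
#     phonecall_obj.schedule_another_phonecall(
#         cr, uid, [call_id], '2014-01-15 10:00:00', 'Follow-up call',
#         action='log')
# With action='log' the new phonecall is created and its state set to 'done'.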
def _call_create_partner(self, cr, uid, phonecall, context=None):
partner = self.pool.get('res.partner')
partner_id = partner.create(cr, uid, {
'name': phonecall.name,
'user_id': phonecall.user_id.id,
'comment': phonecall.description,
'address': []
})
return partner_id
def on_change_opportunity(self, cr, uid, ids, opportunity_id, context=None):
values = {}
if opportunity_id:
opportunity = self.pool.get('crm.lead').browse(cr, uid, opportunity_id, context=context)
values = {
'section_id' : opportunity.section_id and opportunity.section_id.id or False,
'partner_phone' : opportunity.phone,
'partner_mobile' : opportunity.mobile,
'partner_id' : opportunity.partner_id and opportunity.partner_id.id or False,
}
return {'value' : values}
def _call_set_partner(self, cr, uid, ids, partner_id, context=None):
write_res = self.write(cr, uid, ids, {'partner_id' : partner_id}, context=context)
self._call_set_partner_send_note(cr, uid, ids, context)
return write_res
def _call_create_partner_address(self, cr, uid, phonecall, partner_id, context=None):
address = self.pool.get('res.partner')
return address.create(cr, uid, {
'parent_id': partner_id,
'name': phonecall.name,
'phone': phonecall.partner_phone,
})
def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
"""
Handle partner assignation during a lead conversion.
If action is 'create', create a new partner with a contact and assign the lead to the new partner_id.
Otherwise assign the lead to the specified partner_id.
:param list ids: phonecalls ids to process
:param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
:param int partner_id: partner to assign if any
:return dict: dictionary organized as follows: {lead_id: partner_assigned_id}
"""
#TODO this is a duplication of the handle_partner_assignation method of crm_lead
partner_ids = {}
# If a partner_id is given, force this partner for all elements
force_partner_id = partner_id
for call in self.browse(cr, uid, ids, context=context):
# If the action is set to 'create' and no partner_id is set, create a new one
if action == 'create':
partner_id = force_partner_id or self._call_create_partner(cr, uid, call, context=context)
self._call_create_partner_address(cr, uid, call, partner_id, context=context)
self._call_set_partner(cr, uid, [call.id], partner_id, context=context)
partner_ids[call.id] = partner_id
return partner_ids
def redirect_phonecall_view(self, cr, uid, phonecall_id, context=None):
model_data = self.pool.get('ir.model.data')
# Select the view
tree_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_tree_view')
form_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_form_view')
search_view = model_data.get_object_reference(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
value = {
'name': _('Phone Call'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'crm.phonecall',
'res_id' : int(phonecall_id),
'views': [(form_view and form_view[1] or False, 'form'), (tree_view and tree_view[1] or False, 'tree'), (False, 'calendar')],
'type': 'ir.actions.act_window',
'search_view_id': search_view and search_view[1] or False,
}
return value
def convert_opportunity(self, cr, uid, ids, opportunity_summary=False, partner_id=False, planned_revenue=0.0, probability=0.0, context=None):
partner = self.pool.get('res.partner')
opportunity = self.pool.get('crm.lead')
opportunity_dict = {}
default_contact = False
for call in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = call.partner_id and call.partner_id.id or False
if partner_id:
address_id = partner.address_get(cr, uid, [partner_id])['default']
if address_id:
default_contact = partner.browse(cr, uid, address_id, context=context)
opportunity_id = opportunity.create(cr, uid, {
'name': opportunity_summary or call.name,
'planned_revenue': planned_revenue,
'probability': probability,
'partner_id': partner_id or False,
'mobile': default_contact and default_contact.mobile,
'section_id': call.section_id and call.section_id.id or False,
'description': call.description or False,
'priority': call.priority,
'type': 'opportunity',
'phone': call.partner_phone or False,
'email_from': default_contact and default_contact.email,
})
vals = {
'partner_id': partner_id,
'opportunity_id': opportunity_id,
'state': 'done',
}
self.write(cr, uid, [call.id], vals, context=context)
opportunity_dict[call.id] = opportunity_id
return opportunity_dict
def action_make_meeting(self, cr, uid, ids, context=None):
"""
Open meeting's calendar view to schedule a meeting on current phonecall.
:return dict: dictionary value for created meeting view
"""
phonecall = self.browse(cr, uid, ids[0], context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
res['context'] = {
'default_phonecall_id': phonecall.id,
'default_partner_id': phonecall.partner_id and phonecall.partner_id.id or False,
'default_user_id': uid,
'default_email_from': phonecall.email_from,
'default_state': 'open',
'default_name': phonecall.name,
}
return res
def action_button_convert2opportunity(self, cr, uid, ids, context=None):
"""
Convert a phonecall into an opp and then redirect to the opp view.
:param list ids: list of calls ids to convert (typically contains a single id)
:return dict: containing view information
"""
if len(ids) != 1:
raise osv.except_osv(_('Warning!'),_('It\'s only possible to convert one phonecall at a time.'))
opportunity_dict = self.convert_opportunity(cr, uid, ids, context=context)
return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, opportunity_dict[ids[0]], context)
# ----------------------------------------
# OpenChatter
# ----------------------------------------
def _call_set_partner_send_note(self, cr, uid, ids, context=None):
return self.message_post(cr, uid, ids, body=_("Partner has been <b>created</b>."), context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
browseinfo/odoo_saas3_nicolas
|
addons/crm/crm_phonecall.py
|
Python
|
agpl-3.0
| 14,811
|
#!/usr/bin/python
import os
container_name = "goji"
host_port = "8003"
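# Sketch of the flow below: if a container named `container_name` already
# exists, rebuild the Go binary and restart it; otherwise start a fresh
# container that maps `host_port` to port 80 and mounts the CWD at /go/src.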
cmd = "docker ps -a | grep %s"%container_name
container_exists = len(os.popen(cmd).read().split('\n')) > 1
print cmd
print "exists:", container_exists
if container_exists:
cmd = "go build && docker restart %s"%container_name
print cmd
os.system(cmd)
else:
cmd = "docker run -d -p %(port)s:80 -v %(cwd)s:/go/src --name %(name)s dev"%({
"port":host_port,
"cwd":os.getcwd(),
"name":container_name
})
print cmd
os.system(cmd)
|
mikerjacobi/goji-skeleton
|
go.py
|
Python
|
apache-2.0
| 547
|
# A handler to render the index.html template for the MOL AngularJS SPA
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import logging
import os
import random
import webapp2
if 'SERVER_SOFTWARE' in os.environ:
PROD = not os.environ['SERVER_SOFTWARE'].startswith('Development')
else:
PROD = True
PROD = True
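# NOTE: the unconditional assignment above forces production mode regardless
# of the SERVER_SOFTWARE check; remove it to honour dev-server detection.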
class BaseHandler(webapp2.RequestHandler):
def render_template(self, f, template_args):
path = os.path.join(os.path.dirname(__file__), "../templates/html", f)
logging.info(template_args)
self.response.out.write(template.render(path, template_args))
class App(BaseHandler):
def get(self):
self.render_template('index.html',{"prod":PROD,'rand':'010620171140'})
def post(self):
self.render_template('index.html',{"prod":PROD,'rand':'010620171140'})
application = webapp2.WSGIApplication(
[
('/.*',App),
('/', App),
('/location',App),
('/location/.*',App),
])
|
MapofLife/location-module
|
server/handlers/base.py
|
Python
|
bsd-3-clause
| 1,047
|
# -*- coding: utf-8 -*-
import KBEngine
from KBEDebug import *
import d_spaces
import SpaceContext
class GameObject(KBEngine.Entity):
def __init__(self):
KBEngine.Entity.__init__(self)
def getCurrSpaceBase(self):
"""
Get the baseMailbox of the current space entity.
"""
spaceBase = KBEngine.globalData["space_%i" % self.spaceID]
return spaceBase
def getCurrSpace(self):
"""
Get the entity of the current space.
"""
spaceBase = self.getCurrSpaceBase()
return KBEngine.entities[spaceBase.id]
def getSpaceMgr(self):
"""
Get the space (scene) manager.
"""
return KBEngine.globalData["SpaceMgr"]
def teleportSpace(self, spaceUType, position, direction, context):
"""
defined.
Teleport to the specified space.
"""
assert self.base != None
self.getSpaceMgr().teleportSpace(self.base, spaceUType, position, direction, SpaceContext.createContext(self, spaceUType))
def onTeleportSpaceCB(self, spaceCellMailbox, spaceUType, position, direction):
"""
defined.
Callback from baseapp in response to teleportSpace.
"""
DEBUG_MSG("Avatar::onTeleportSpaceCB: %i mb=%s, spaceUType=%i, pos=%s, dir=%s." % \
(self.id, spaceCellMailbox, spaceUType, position, direction))
self.teleport(spaceCellMailbox, position, direction)
|
theheros/kbengine
|
demo/res/scripts/cell/GameObject.py
|
Python
|
lgpl-3.0
| 1,290
|
from src.config import Config
manager_config = Config()
|
wrycu/DiscordGameManager
|
src/run.py
|
Python
|
mit
| 57
|
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import relationship
from estimators.database import Base, DataBase, PrimaryMixin
from estimators.datasets import DataSet
from estimators.estimators import Estimator
class EvaluationMixin:
"""A list of common methods and attributes for evaluations"""
def _get_proxy_object(self, obj, ProxyKlass, proxy_klass_attribute):
""" Returns the proxy object for an input object
If the object is already the proxy object, return it.
Otherwise set the appropriate proxy object to the proxy object's attribute
"""
proxy_object = obj
if not isinstance(obj, ProxyKlass):
proxy_object = ProxyKlass(**{proxy_klass_attribute: obj})
return proxy_object
@property
def estimator(self):
return self._estimator_proxy.estimator
@estimator.setter
def estimator(self, obj):
self._estimator_proxy = self._get_proxy_object(obj, Estimator, 'estimator')
@property
def X_test(self):
return self._X_test_proxy.data
@X_test.setter
def X_test(self, obj):
self._X_test_proxy = self._get_proxy_object(obj, DataSet, 'data')
@property
def y_test(self):
return self._y_test_proxy.data
@y_test.setter
def y_test(self, obj):
self._y_test_proxy = self._get_proxy_object(obj, DataSet, 'data')
@property
def y_predicted(self):
return self._y_predicted_proxy.data
@y_predicted.setter
def y_predicted(self, obj):
self._y_predicted_proxy = self._get_proxy_object(obj, DataSet, 'data')
class Evaluator(EvaluationMixin):
"""Instantiates an evaluation plan.
An Evaluator object takes an estimator, X_test and y_test as parameters.
Each of these can be a DataSet object or the raw data itself.
Once set, the evaluation plan is run with ``.evaluate()``.
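A minimal sketch (``fitted_model``, ``X`` and ``y`` are hypothetical)::
    evaluator = Evaluator(estimator=fitted_model, X_test=X, y_test=y)
    result = evaluator.evaluate()  # returns an EvaluationResult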
"""
def __init__(self, **options):
self.estimator = options.pop('estimator', None)
self.X_test = options.pop('X_test', None)
self.y_test = options.pop('y_test', None)
self.y_predicted = options.pop('y_predicted', None)
self.session = options.pop('session', None)
if not self.session:
db = DataBase()
self.session = db.Session()
def evaluate(self, persist=True):
result = self.estimator.predict(self.X_test)
options = {
'y_predicted': result,
'estimator': self.estimator,
'X_test': self.X_test,
'y_test': self.y_test,
}
er = EvaluationResult(**options)
self.persist_results(er)
return er
def persist_results(self, er):
try:
self.session.add(er._estimator_proxy)
self._estimator_proxy.persist()
self.session.add(er._X_test_proxy)
self._X_test_proxy.persist()
self.session.add(er._y_test_proxy)
self._y_test_proxy.persist()
self.session.add(er._y_predicted_proxy)
self._y_predicted_proxy.persist()
self.session.commit()
except:
self.session.rollback()
finally:
self.session.close()
def __repr__(self):
return '<Evaluator(X_test=%s estimator=%s)>' % (
self.X_test, self.estimator)
class EvaluationResult(EvaluationMixin, PrimaryMixin, Base):
"""A database model for evaluation results.
The EvaluationResult class is the data model for the table `result`.
The EvaluationResult has relationships to an Estimator
object and to 3 DataSet objects: X_test, y_test, y_predicted
"""
__tablename__ = 'result'
estimator_id = Column(Integer, ForeignKey('estimator.id'))
X_test_id = Column(Integer, ForeignKey('dataset.id'), nullable=False)
y_test_id = Column(Integer, ForeignKey('dataset.id'))
y_predicted_id = Column(Integer, ForeignKey('dataset.id'))
_estimator_proxy = relationship("Estimator", backref="EvaluationResult")
_X_test_proxy = relationship("DataSet", foreign_keys=X_test_id)
_y_test_proxy = relationship("DataSet", foreign_keys=y_test_id)
_y_predicted_proxy = relationship("DataSet", foreign_keys=y_predicted_id)
def __repr__(self):
return '<EvaluationResult(id=%s X_test=%s estimator=%s)>' % (
self.id, self.X_test, self.estimator)
|
fridiculous/estimators
|
estimators/evaluations.py
|
Python
|
mit
| 4,396
|
# -*- coding: utf-8 -*-
"""
lists
~~~~~
Collections based on list interface.
"""
import collections
from .base import RedisCollection
class List(RedisCollection, collections.MutableSequence):
"""Mutable **sequence** collection aiming to have the same API as the
standard sequence type, :class:`list`. See `list
<http://docs.python.org/2/library/functions.html#list>`_ for
further details. The Redis implementation is based on the
`list <http://redis.io/commands#list>`_ type.
.. warning::
Compared with the original :class:`list` type, :class:`List` does not
implement the :func:`sort` and :func:`reverse` methods.
.. note::
Some operations, which are usually not used so often, can be more
efficient than their "popular" equivalents. Some operations are
shortened in their capabilities and can raise
:exc:`NotImplementedError` for some inputs (e.g. most of the slicing
functionality).
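Example (a sketch; it needs a reachable Redis server)::
    l = List([1, 2, 3], key='example-list')
    l.append(4)
    list(l)  # [1, 2, 3, 4]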
"""
def __init__(self, *args, **kwargs):
"""
:param data: Initial data.
:type data: iterable
:param redis: Redis client instance. If not provided, default Redis
connection is used.
:type redis: :class:`redis.StrictRedis`
:param key: Redis key of the collection. Collections with the same key
point to the same data. If not provided, default random
string is generated.
:type key: str
:param pickler: Implementation of data serialization. Object with two
methods is expected: :func:`dumps` for conversion
of data to string and :func:`loads` for the opposite
direction. Examples::
import json, pickle
Dict(pickler=json)
Dict(pickler=pickle) # default
Of course, you can construct your own pickling object
(it can be class, module, whatever). Default
serialization implementation uses :mod:`pickle`.
.. note::
:func:`uuid.uuid4` is used for default key generation.
If you are not satisfied with its `collision
probability <http://stackoverflow.com/a/786541/325365>`_,
make your own implementation by subclassing and overriding
internal method :func:`_create_key`.
"""
super(List, self).__init__(*args, **kwargs)
def __len__(self):
"""Length of the sequence."""
return self.redis.llen(self.key)
def _data(self, pipe=None):
redis = pipe if pipe is not None else self.redis
values = redis.lrange(self.key, 0, -1)
return (self._unpickle(v) for v in values)
def __iter__(self):
"""Return an iterator over the sequence."""
return self._data()
def __reversed__(self):
"""Returns iterator for the sequence in reversed order."""
values = self.redis.lrange(self.key, 0, -1)
return (self._unpickle(v) for v in reversed(values))
def _recalc_slice(self, start, stop):
"""Slicing in Redis takes also the item at 'stop' index, so there is
some recalculation to be done. Method returns tuple ``(start, stop)``
where both values are recalculated to numbers in Redis terms.
:param start: Index starting the range (in Python terms).
:param stop: Index where the range ends (in Python terms).
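For example, the Python slice ``l[1:3]`` maps to Redis indexes ``(1, 2)``.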
"""
start = start or 0
if stop is None:
stop = -1
else:
stop = stop - 1 if stop != 0 else stop
return start, stop
def _calc_overflow(self, size, index):
"""Index overflow detection. Returns :obj:`True` if *index* is out of
range for the given *size*. Otherwise returns :obj:`False`.
:param size: Size of the collection.
:param index: Index to be examined.
"""
return (index >= size) if (index >= 0) else (abs(index) > size)
def _get_slice(self, index):
"""Helper for getting a slice."""
assert isinstance(index, slice)
def slice_trans(pipe):
start, stop = self._recalc_slice(index.start, index.stop)
values = pipe.lrange(self.key, start, stop)
if index.step:
# step implemented by pure Python slicing
values = values[::index.step]
values = map(self._unpickle, values)
pipe.multi()
return self._create_new(values, pipe=pipe)
return self._transaction(slice_trans)
def __getitem__(self, index):
"""Returns item of sequence on *index*.
Origin of indexes is 0. Accepts also slicing.
.. note::
Due to implementation on Redis side, ``l[index]`` is not very
efficient operation. If possible, use :func:`get`. Slicing without
steps is efficient. Steps are implemented only on Python side.
"""
if isinstance(index, slice):
return self._get_slice(index)
with self.redis.pipeline() as pipe:
pipe.llen(self.key)
pipe.lindex(self.key, index)
size, value = pipe.execute()
if self._calc_overflow(size, index):
raise IndexError(index)
return self._unpickle(value)
def get(self, index, default=None):
"""Return the value for *index* if *index* is not out of range, else
*default*. If *default* is not given, it defaults to :obj:`None`, so
        that this method never raises an :exc:`IndexError`.
.. note::
Due to implementation on Redis side, this method of retrieving
items is more efficient than classic approach over using the
:func:`__getitem__` protocol.
"""
value = self.redis.lindex(self.key, index)
return self._unpickle(value) or default
def _set_slice(self, index, value):
"""Helper for setting a slice."""
assert isinstance(index, slice)
if value:
# assigning anything else than empty lists not supported
raise NotImplementedError(self.not_impl_msg)
self.__delitem__(index)
def __setitem__(self, index, value):
"""Item of *index* is replaced by *value*.
.. warning::
Slicing is generally not supported. Only empty lists are accepted
if the operation leads into trimming::
l[2:] = []
l[:2] = []
l[:] = []
"""
if isinstance(index, slice):
self._set_slice(index, value)
else:
def set_trans(pipe):
size = pipe.llen(self.key)
if self._calc_overflow(size, index):
raise IndexError(index)
pipe.multi()
pipe.lset(self.key, index, self._pickle(value))
self._transaction(set_trans)
def _del_slice(self, index):
"""Helper for deleting a slice."""
assert isinstance(index, slice)
begin = 0
end = -1
if index.step:
# stepping not supported
raise NotImplementedError(self.not_impl_msg)
start, stop = self._recalc_slice(index.start, index.stop)
if start == begin and stop == end:
# trim from beginning to end
self.clear()
return
with self.redis.pipeline() as pipe:
if start != begin and stop == end:
# right trim
pipe.ltrim(self.key, begin, start - 1)
elif start == begin and stop != end:
# left trim
pipe.ltrim(self.key, stop + 1, end)
else:
# only trimming is supported
raise NotImplementedError(self.not_impl_msg)
pipe.execute()
def __delitem__(self, index):
"""Item of *index* is deleted.
.. warning::
Slicing is generally not supported. Only empty lists are accepted
if the operation leads into trimming::
del l[2:]
del l[:2]
del l[:]
"""
begin = 0
end = -1
if isinstance(index, slice):
self._del_slice(index)
else:
if index == begin:
self.redis.lpop(self.key)
elif index == end:
self.redis.rpop(self.key)
else:
raise NotImplementedError(self.not_impl_msg)
def remove(self, value):
"""Remove the first occurence of *value*."""
self.redis.lrem(self.key, 1, self._pickle(value))
def index(self, value, start=None, stop=None):
"""Returns index of the first occurence of *value*.
If *start* or *stop* are provided, returns the smallest
index such that ``s[index] == value`` and ``start <= index < stop``.
"""
start, stop = self._recalc_slice(start, stop)
values = self.redis.lrange(self.key, start, stop)
for i, v in enumerate(self._unpickle(v) for v in values):
if v == value:
return i + start
raise ValueError(value)
def count(self, value):
"""Returns number of occurences of *value*.
.. note::
Implemented only on Python side.
"""
return list(self._data()).count(value)
def insert(self, index, value):
"""Insert *value* before *index*. Can only work with index == 0.
"""
if index != 0:
# Redis has no commands for *inserting* into a list by index.
# LINSERT requires assumptions about contents of the list values.
raise NotImplementedError(self.not_impl_msg)
self.redis.lpush(self.key, self._pickle(value))
def append(self, value):
"""Insert *value* at end of list.
"""
self.redis.rpush(self.key, self._pickle(value))
def _update(self, data, pipe=None):
super(List, self)._update(data, pipe)
redis = pipe if pipe is not None else self.redis
values = map(self._pickle, data)
redis.rpush(self.key, *values)
def extend(self, values):
"""*values* are appended at the end of the list. Any iterable
is accepted.
"""
if isinstance(values, RedisCollection):
# wrap into transaction
def extend_trans(pipe):
d = values._data(pipe=pipe) # retrieve
pipe.multi()
self._update(d, pipe=pipe) # store
self._transaction(extend_trans)
else:
self._update(values)
def pop(self, index=-1):
"""Item on *index* is removed and returned.
.. warning::
Only indexes ``0`` and ``-1`` (default) are supported, otherwise
:exc:`NotImplementedError` is raised.
"""
if index == 0:
value = self.redis.lpop(self.key)
elif index == -1:
value = self.redis.rpop(self.key)
else:
raise NotImplementedError(self.not_impl_msg)
return self._unpickle(value)
def __add__(self, values):
"""Returns concatenation of the list and given iterable. New
:class:`List` instance is returned.
"""
def add_trans(pipe):
d1 = list(self._data(pipe=pipe)) # retrieve
if isinstance(values, RedisCollection):
d2 = list(values._data(pipe=pipe)) # retrieve
else:
d2 = list(values)
pipe.multi()
return self._create_new(d1 + d2, pipe=pipe) # store
return self._transaction(add_trans)
def __radd__(self, values):
return self.__add__(values)
def __mul__(self, n):
"""Returns *n* copies of the list, concatenated. New :class:`List`
instance is returned.
"""
if not isinstance(n, int):
raise TypeError('Cannot multiply sequence by non-int.')
def mul_trans(pipe):
data = list(self._data(pipe=pipe)) # retrieve
pipe.multi()
return self._create_new(data * n, pipe=pipe) # store
return self._transaction(mul_trans)
def __rmul__(self, n):
return self.__mul__(n)
def _repr_data(self, data):
return repr(list(data))
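# --- Illustrative usage sketch (not part of the original module) ---
# A rough idea of how the List collection defined above behaves, assuming a
# Redis server is reachable with the default connection settings; the key
# name is an arbitrary choice made for this example.
if __name__ == '__main__':
    l = List([1, 2, 3], key='example:list')    # backed by a Redis list
    l.append(4)                                # RPUSH under the hood
    l.insert(0, 0)                             # only index 0 is supported (LPUSH)
    print(list(l))                             # -> [0, 1, 2, 3, 4] on a fresh key
    print(l.get(100, 'missing'))               # out-of-range index -> default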
|
burakbostancioglu/redis-collections
|
redis_collections/lists.py
|
Python
|
isc
| 12,464
|
"""
* Experimental *
Like the map function, but can use a pool of threads.
Really easy to use threads, e.g. tmap(f, alist).
If you know how to use the map function, you can use threads.
"""
__author__ = "Rene Dudfield"
__version__ = "0.3.0"
__license__ = 'Python license'
import traceback, sys
from pygame.compat import geterror
if sys.version_info[0] == 3:
from queue import Queue
from queue import Empty
elif (sys.version_info[0] == 2 and sys.version_info[1] < 5):
from Py25Queue import Queue
from Py25Queue import Empty
else:
# use up to date version
from Queue import Queue
from Queue import Empty
import threading
Thread = threading.Thread
STOP = object()
FINISH = object()
# DONE_ONE = object()
# DONE_TWO = object()
# a default worker queue.
_wq = None
# if we are using threads or not. This is the number of workers.
_use_workers = 0
# Set this to the maximum for the amount of Cores/CPUs
# Note, that the tests early out.
# So it should only test the best number of workers +2
MAX_WORKERS_TO_TEST = 64
def init(number_of_workers = 0):
""" Does a little test to see if threading is worth it.
Sets up a global worker queue if it's worth it.
Calling init() is not required, but is generally better to do.
"""
global _wq, _use_workers
if number_of_workers:
_use_workers = number_of_workers
else:
_use_workers = benchmark_workers()
# if it is best to use zero workers, then use that.
_wq = WorkerQueue(_use_workers)
def quit():
""" cleans up everything.
"""
global _wq, _use_workers
_wq.stop()
_wq = None
_use_workers = False
def benchmark_workers(a_bench_func = None, the_data = None):
""" does a little test to see if workers are at all faster.
Returns the number of workers which works best.
Takes a little bit of time to run, so you should only really call
it once.
You can pass in benchmark data, and functions if you want.
a_bench_func - f(data)
the_data - data to work on.
"""
global _use_workers
#TODO: try and make this scale better with slower/faster cpus.
# first find some variables so that using 0 workers takes about 1.0 seconds.
# then go from there.
# note, this will only work with pygame 1.8rc3+
# replace the doit() and the_data with something that releases the GIL
import pygame
import pygame.transform
import time
if not a_bench_func:
def doit(x):
return pygame.transform.scale(x, (544, 576))
else:
doit = a_bench_func
if not the_data:
thedata = []
for x in range(10):
thedata.append(pygame.Surface((155,155), 0, 32))
else:
thedata = the_data
best = time.time() + 100000000
best_number = 0
last_best = -1
for num_workers in range(0, MAX_WORKERS_TO_TEST):
wq = WorkerQueue(num_workers)
t1 = time.time()
for xx in range(20):
print ("active count:%s" % threading.activeCount())
results = tmap(doit, thedata, worker_queue = wq)
t2 = time.time()
wq.stop()
total_time = t2 - t1
print ("total time num_workers:%s: time:%s:" % (num_workers, total_time))
if total_time < best:
last_best = best_number
            best_number = num_workers
best = total_time
if num_workers - best_number > 1:
# We tried to add more, but it didn't like it.
# so we stop with testing at this number.
break
return best_number
class WorkerQueue(object):
def __init__(self, num_workers = 20):
self.queue = Queue()
self.pool = []
self._setup_workers(num_workers)
def _setup_workers(self, num_workers):
""" Sets up the worker threads
NOTE: undefined behaviour if you call this again.
"""
self.pool = []
for _ in range(num_workers):
self.pool.append(Thread(target=self.threadloop))
for a_thread in self.pool:
a_thread.setDaemon(True)
a_thread.start()
def do(self, f, *args, **kwArgs):
""" puts a function on a queue for running later.
"""
self.queue.put((f, args, kwArgs))
def stop(self):
""" Stops the WorkerQueue, waits for all of the threads to finish up.
"""
self.queue.put(STOP)
for thread in self.pool:
thread.join()
def threadloop(self): #, finish = False):
""" Loops until all of the tasks are finished.
"""
while True:
args = self.queue.get()
if args is STOP:
self.queue.put(STOP)
self.queue.task_done()
break
else:
try:
args[0](*args[1], **args[2])
finally:
# clean up the queue, raise the exception.
self.queue.task_done()
#raise
def wait(self):
""" waits until all tasks are complete.
"""
self.queue.join()
class FuncResult:
""" Used for wrapping up a function call so that the results are stored
inside the instances result attribute.
"""
def __init__(self, f, callback = None, errback = None):
""" f - is the function we that we call
callback(result) - this is called when the function(f) returns
errback(exception) - this is called when the function(f) raises
an exception.
"""
self.f = f
self.exception = None
self.callback = callback
self.errback = errback
def __call__(self, *args, **kwargs):
#we try to call the function here. If it fails we store the exception.
try:
self.result = self.f(*args, **kwargs)
if self.callback:
self.callback(self.result)
except Exception:
self.exception = geterror()
if self.errback:
self.errback(self.exception)
def tmap(f, seq_args, num_workers = 20, worker_queue = None, wait = True, stop_on_error = True):
""" like map, but uses a thread pool to execute.
num_workers - the number of worker threads that will be used. If pool
is passed in, then the num_workers arg is ignored.
worker_queue - you can optionally pass in an existing WorkerQueue.
wait - True means that the results are returned when everything is finished.
False means that we return the [worker_queue, results] right away instead.
results, is returned as a list of FuncResult instances.
        stop_on_error - if True (and wait is True), the first exception raised
                        by any of the calls is re-raised after all tasks finish.
"""
if worker_queue:
wq = worker_queue
else:
# see if we have a global queue to work with.
if _wq:
wq = _wq
else:
if num_workers == 0:
return map(f, seq_args)
wq = WorkerQueue(num_workers)
# we short cut it here if the number of workers is 0.
# normal map should be faster in this case.
if len(wq.pool) == 0:
return map(f, seq_args)
#print ("queue size:%s" % wq.queue.qsize())
#TODO: divide the data (seq_args) into even chunks and
# then pass each thread a map(f, equal_part(seq_args))
# That way there should be less locking, and overhead.
results = []
for sa in seq_args:
results.append(FuncResult(f))
wq.do(results[-1], sa)
#wq.stop()
if wait:
#print ("wait")
wq.wait()
#print ("after wait")
#print ("queue size:%s" % wq.queue.qsize())
if wq.queue.qsize():
raise Exception("buggy threadmap")
# if we created a worker queue, we need to stop it.
if not worker_queue and not _wq:
#print ("stoping")
wq.stop()
if wq.queue.qsize():
um = wq.queue.get()
            if um is not STOP:
raise Exception("buggy threadmap")
# see if there were any errors. If so raise the first one. This matches map behaviour.
# TODO: the traceback doesn't show up nicely.
# NOTE: TODO: we might want to return the results anyway? This should be an option.
if stop_on_error:
error_ones = list(filter(lambda x:x.exception, results))
if error_ones:
raise error_ones[0].exception
return map(lambda x:x.result, results)
else:
return [wq, results]
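# --- Illustrative usage sketch (not part of the original module) ---
# tmap is meant as a drop-in replacement for map that runs each call on a
# thread pool.  This tiny example is an assumption about typical use; passing
# an explicit worker count to init() avoids the pygame-based benchmark.
if __name__ == '__main__':
    init(4)                                           # set up 4 worker threads
    squares = list(tmap(lambda x: x * x, range(10)))  # same result as map()
    print(squares)
    quit()                                            # stop the global worker queue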
|
mark-me/Pi-Jukebox
|
venv/Lib/site-packages/pygame/threads/__init__.py
|
Python
|
agpl-3.0
| 8,709
|
import sys
sys.path.append('../..')
import codestudio
z = codestudio.load('s1level83')
def draw_square():
for count in range(4):
z.move_forward(100)
z.turn_right(90)
def draw_triangle():
for count in range(3):
z.move(100)
z.right(120)
draw_triangle()
z.check()
|
skilstak/code-dot-org-python
|
solutions/stage15-artist4/s1level83.py
|
Python
|
unlicense
| 305
|
import pytest
def test_unknown_virtual_host_is_503(docker_compose, nginxproxy):
r = nginxproxy.get("http://unknown.nginx-proxy.tld/port")
assert r.status_code == 503
def test_webA_is_forwarded(docker_compose, nginxproxy):
r = nginxproxy.get("http://webA.nginx-proxy.tld/port")
assert r.status_code == 200
assert r.text == "answer from port 81\n"
def test_webB_is_forwarded(docker_compose, nginxproxy):
r = nginxproxy.get("http://webB.nginx-proxy.tld/port")
assert r.status_code == 200
assert r.text == "answer from port 81\n"
|
jwilder/nginx-proxy
|
test/test_multiple-hosts.py
|
Python
|
mit
| 562
|
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shoop.utils import update_module_attributes
from ._context import TaxingContext
from ._line_tax import LineTax, SourceLineTax
from ._module import get_tax_module, TaxModule
from ._price import TaxedPrice
from ._tax_summary import TaxSummary
from ._taxable import TaxableItem
__all__ = [
"LineTax",
"SourceLineTax",
"TaxModule",
"TaxSummary",
"TaxableItem",
"TaxedPrice",
"TaxingContext",
"get_tax_module",
]
update_module_attributes(__all__, __name__)
|
akx/shoop
|
shoop/core/taxing/__init__.py
|
Python
|
agpl-3.0
| 712
|
"""
problem59.py
https://projecteuler.net/problem=59
Each character on a computer is assigned a unique code and the preferred
standard is ASCII (American Standard Code for Information Interchange). For
example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.
A modern encryption method is to take a text file, convert the bytes to ASCII,
then XOR each byte with a given value, taken from a secret key. The advantage
with the XOR function is that using the same encryption key on the cipher text,
restores the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.
For unbreakable encryption, the key is the same length as the plain text
message, and the key is made up of random bytes. The user would keep the
encrypted message and the encryption key in different locations, and without
both "halves", it is impossible to decrypt the message.
Unfortunately, this method is impractical for most users, so the modified
method is to use a password as a key. If the password is shorter than the
message, which is likely, the key is repeated cyclically throughout the
message. The balance for this method is using a sufficiently long password key
for security, but short enough to be memorable.
Your task has been made easy, as the encryption key consists of three lower
case characters. Using cipher.txt, a file containing the encrypted ASCII codes,
and the knowledge that the plain text must contain common English words,
decrypt the message and find the sum of the ASCII values in the original text.
"""
from collections import Counter
from itertools import cycle
def find_key(cipher):
for i in range(3):
# The most common item of the sublist starting at i, taking every 3rd element.
item, _ = Counter(cipher[i::3]).most_common()[0]
# We assume it's an int representing the ord of the space character
yield item ^ ord(' ')
def problem59():
with open("data/cipher.txt", "r") as f:
cipher = [int(x) for x in f.read().split(',')]
key = find_key(cipher)
decrypted = (c ^ k for c, k in zip(cipher, cycle(key)))
return sum(decrypted)
if __name__ == "__main__":
print(problem59())
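# --- Illustrative sketch (not part of the original solution) ---
# The XOR property the problem relies on, checked on a made-up key and
# message: applying the same repeating key twice restores the original bytes.
# find_key() above exploits the fact that ' ' is by far the most common
# character in English text, so the most frequent byte at each key position
# is almost certainly ' ' XOR that key byte.
def _xor_roundtrip_demo():
    assert 65 ^ 42 == 107 and 107 ^ 42 == 65   # the example from the problem text
    key = [ord(c) for c in "abc"]
    message = [ord(c) for c in "the quick brown fox"]
    encrypted = [m ^ k for m, k in zip(message, cycle(key))]
    assert [c ^ k for c, k in zip(encrypted, cycle(key))] == message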
|
mjwestcott/projecteuler
|
python/problem59.py
|
Python
|
mit
| 2,167
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import types
from google.appengine.ext import db
from django import VERSION
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields import Field
from django.db.models.options import Options
from django.db.models.loading import register_models, get_model
class ModelManager(object):
"""Replacement for the default Django model manager."""
def __init__(self, owner):
self.owner = owner
def __getattr__(self, name):
"""Pass all attribute requests through to the real model"""
return getattr(self.owner, name)
class ModelOptions(object):
"""Replacement for the default Django options class.
This class sits at ._meta of each model. The primary information supplied by
this class that needs to be stubbed out is the list of fields on the model.
"""
def __init__(self, cls):
self.object_name = cls.__name__
self.module_name = self.object_name.lower()
model_module = sys.modules[cls.__module__]
self.app_label = model_module.__name__.split('.')[-2]
self.abstract = False
class pk:
"""Stub the primary key to always be 'key_name'"""
name = "key_name"
def __str__(self):
return "%s.%s" % (self.app_label, self.module_name)
@property
def many_to_many(self):
"""The datastore does not support many to many relationships."""
return []
class Relation(object):
def __init__(self, to):
self.field_name = "key_name"
def PropertyWrapper(prop):
"""Wrapper for db.Property to make it look like a Django model Property"""
if isinstance(prop, db.Reference):
prop.rel = Relation(prop.reference_class)
else:
prop.rel = None
prop.serialize = True
return prop
class PropertiedClassWithDjango(db.PropertiedClass):
"""Metaclass for the combined Django + App Engine model class.
This metaclass inherits from db.PropertiedClass in the appengine library.
This metaclass has two additional purposes:
1) Register each model class created with Django (the parent class will take
care of registering it with the appengine libraries).
2) Add the (minimum number) of attributes and methods to make Django believe
the class is a normal Django model.
The resulting classes are still not generally useful as Django classes and
are intended to be used by Django only in limited situations such as loading
and dumping fixtures.
"""
def __new__(cls, name, bases, attrs):
"""Creates a combined appengine and Django model.
The resulting model will be known to both the appengine libraries and
Django.
"""
if name == 'BaseModel':
# This metaclass only acts on subclasses of BaseModel.
return super(PropertiedClassWithDjango, cls).__new__(cls, name,
bases, attrs)
new_class = super(PropertiedClassWithDjango, cls).__new__(cls, name,
bases, attrs)
new_class._meta = ModelOptions(new_class)
new_class.objects = ModelManager(new_class)
new_class._default_manager = new_class.objects
new_class.DoesNotExist = types.ClassType('DoesNotExist',
(ObjectDoesNotExist,), {})
m = get_model(new_class._meta.app_label, name, False)
if m:
return m
register_models(new_class._meta.app_label, new_class)
return get_model(new_class._meta.app_label, name, False)
def __init__(cls, name, bases, attrs):
"""Initialises the list of Django properties.
This method takes care of wrapping the properties created by the superclass
so that they look like Django properties and installing them into the
._meta object of the class so that Django can find them at the appropriate
time.
"""
super(PropertiedClassWithDjango, cls).__init__(name, bases, attrs)
if name == 'BaseModel':
# This metaclass only acts on subclasses of BaseModel.
return
fields = [PropertyWrapper(p) for p in cls._properties.values()]
cls._meta.local_fields = fields
class BaseModel(db.Model):
"""Combined appengine and Django model.
All models used in the application should derive from this class.
"""
__metaclass__ = PropertiedClassWithDjango
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def _get_pk_val(self):
"""Return the string representation of the model's key"""
return unicode(self.key())
def __repr__(self):
# Returns a string representation that can be used to construct an
# equivalent object. First, creates a dictionary of property names and
        # values. Note that property values, not property objects, have to be
        # passed in to the constructor.
d = dict([(k, self.__getattribute__(k)) for k in self.properties()])
return "%s(**%s)" % (self.__class__.__name__, repr(d))
class RegistrationTestModel(BaseModel):
"""Used to check registration with Django is working correctly.
Django 0.96 only recognises models defined within an applications models
module when get_models() is called so this definition must be here rather
than within the associated test (tests/model_test.py).
"""
pass
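# --- Illustrative sketch (assumption, not part of the original module) ---
# What an application model built on BaseModel might look like; the class and
# property names are invented for this example.  The metaclass registers the
# class with both the App Engine datastore and Django, so instances can be
# used for things like loading and dumping fixtures.
class ExampleArticle(BaseModel):
    title = db.StringProperty()
    body = db.TextProperty()
# ExampleArticle(key_name='hello-world', title='Hello', body='...').put()
# (the put() call requires a running App Engine environment)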
|
kailIII/geraldo
|
site/newsite/site-geraldo/appengine_django/models.py
|
Python
|
lgpl-3.0
| 5,908
|
__author__ = 'mpetyx'
from rdflib import Graph, ConjunctiveGraph
# import rdflib.plugin
# from django.conf import settings
import datetime
import os
# register('SQLite', Store, 'rdflib.store.SQLite', 'SQLite')
def random_file_generating():
basename = "deepGraphFile"
suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
middle = os.urandom(16).encode('hex')
filename = "_".join([basename, middle, suffix])
return filename
class DeepGraphStore():
store_name = 'SQLite'
def __init__(self, create=False, parse=None):
self.parse = parse
self.create = create
self.graph = None
def setUp(self):
self.path = "" + random_file_generating()
self.graph = Graph(store=self.store_name)
self.graph.open(self.path, create=self.create)
if self.create:
if not self.parse:
self.graph.parse("http://njh.me/foaf.rdf", format='xml')
else:
self.graph.parse(self.parse)
self.graph.commit()
def open(self, path):
self.graph = ConjunctiveGraph(self.store_name)
self.path = path
self.graph.open(self.path, create=False)
def query(self, sparql_query):
return self.graph.query(sparql_query)
def parse(self, path_to_file_):
self.graph.parse(path_to_file_)
def load(self, triples):
self.graph.load(triples)
def close(self):
self.graph.close()
def size(self):
        size = len(self.graph)
# self.close()
return size
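# --- Illustrative usage sketch (assumption, not part of the original module) ---
# Rough idea of how DeepGraphStore is meant to be driven: create a store,
# populate it, then query it with SPARQL.  The SPARQL string is generic;
# setUp() needs the rdflib SQLite store plugin and, by default, network
# access to fetch the FOAF document.
# store = DeepGraphStore(create=True)
# store.setUp()
# for row in store.query("SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 5"):
#     print(row)
# store.close()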
|
LinDA-tools/LindaWorkbench
|
linda/graphdb/dgraphdbstore.py
|
Python
|
mit
| 1,595
|
#!/usr/bin/env python3
class Prepend(object):
# Add the methods of the class here
def __init__(self, some_string):
self.text = some_string
def write(self, new_string):
print(self.text + new_string)
def main():
p = Prepend("+++ ")
p.write("Hello");
if __name__ == "__main__":
main()
|
mohanprasath/Course-Work
|
data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part02-e08_prepend/src/prepend.py
|
Python
|
gpl-3.0
| 302
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-23 11:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ja_social', '0013_auto_20161023_1015'),
]
operations = [
migrations.RenameField(
model_name='photorecord',
old_name='uploader',
new_name='user_profile',
),
migrations.AlterField(
model_name='photorecord',
name='photo',
field=models.ImageField(upload_to='uploads/gallery_pictures/'),
),
]
|
Bryconc/JA-Social
|
ja_social/migrations/0014_auto_20161023_1102.py
|
Python
|
mit
| 633
|
#!/usr/bin/env python
# Copyright (c) 2002-2008 ActiveState Software
# Author: Trent Mick (trentm@gmail.com)
"""Quick directory changing (super-cd)
'go' is a simple command line script to simplify jumping between
directories in the shell. You can create shortcut names for commonly
used directories and invoke 'go <shortcut>' to switch to that directory
-- among other little features.
"""
import os
import sys
from distutils.core import setup
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "lib"))
try:
import go
finally:
del sys.path[0]
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2
Topic :: Software Development :: Libraries :: Python Modules
"""
if sys.version_info < (2, 3):
# Distutils before Python 2.3 doesn't accept classifiers.
_setup = setup
def setup(**kwargs):
if kwargs.has_key("classifiers"):
del kwargs["classifiers"]
_setup(**kwargs)
doclines = __doc__.split("\n")
setup(
name="go",
version=go.__version__,
maintainer="Trent Mick",
maintainer_email="trentm@gmail.com",
url="http://code.google.com/p/go-tool/",
license="http://www.opensource.org/licenses/mit-license.php",
platforms=["any"],
py_modules=["go"],
package_dir={"": "lib"},
description=doclines[0],
classifiers=filter(None, classifiers.split("\n")),
long_description="\n".join(doclines[2:]),
)
|
trentm/go-tool
|
setup.py
|
Python
|
mit
| 1,571
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# A simple and fast sub domains brute tool for pentesters
# my[at]lijiejie.com (http://www.lijiejie.com)
import Queue
import sys
import dns.resolver
import threading
import time
import optparse
import os
from lib.consle_width import getTerminalSize
class DNSBrute(object):
def __init__(self, target, names_file, ignore_intranet, threads_num, output):
self.target = target.strip()
self.names_file = names_file
self.ignore_intranet = ignore_intranet
self.thread_count = self.threads_num = threads_num
self.scan_count = self.found_count = 0
self.lock = threading.Lock()
        self.console_width = getTerminalSize()[0] - 2  # calculate terminal width at startup
self.resolvers = [dns.resolver.Resolver() for _ in range(threads_num)]
self._load_dns_servers()
self._load_sub_names()
self._load_next_sub()
outfile = target + '.txt' if not output else output
self.outfile = open(outfile, 'w') # won't close manually
self.ip_dict = {}
self.STOP_ME = False
def _load_dns_servers(self):
dns_servers = []
with open('dict/dns_servers.txt') as f:
for line in f:
server = line.strip()
if server.count('.') == 3 and server not in dns_servers:
dns_servers.append(server)
self.dns_servers = dns_servers
self.dns_count = len(dns_servers)
def _load_sub_names(self):
self.queue = Queue.Queue()
file = 'dict/' + self.names_file if not os.path.exists(self.names_file) else self.names_file
with open(file) as f:
for line in f:
sub = line.strip()
if sub: self.queue.put(sub)
def _load_next_sub(self):
next_subs = []
with open('dict/next_sub.txt') as f:
for line in f:
sub = line.strip()
if sub and sub not in next_subs:
next_subs.append(sub)
self.next_subs = next_subs
def _update_scan_count(self):
self.lock.acquire()
self.scan_count += 1
self.lock.release()
def _print_progress(self):
self.lock.acquire()
msg = '%s found | %s remaining | %s scanned in %.2f seconds' % (
self.found_count, self.queue.qsize(), self.scan_count, time.time() - self.start_time)
sys.stdout.write('\r' + ' ' * (self.console_width - len(msg)) + msg)
sys.stdout.flush()
self.lock.release()
@staticmethod
def is_intranet(ip):
ret = ip.split('.')
if not len(ret) == 4:
return True
if ret[0] == '10':
return True
        if ret[0] == '172' and 16 <= int(ret[1]) <= 31:
return True
if ret[0] == '192' and ret[1] == '168':
return True
return False
def _scan(self):
thread_id = int(threading.currentThread().getName())
self.resolvers[thread_id].nameservers.insert(0, self.dns_servers[thread_id % self.dns_count])
self.resolvers[thread_id].lifetime = self.resolvers[thread_id].timeout = 10.0
while self.queue.qsize() > 0 and not self.STOP_ME and self.found_count < 4000: # limit found count to 4000
sub = self.queue.get(timeout=1.0)
for _ in range(6):
try:
cur_sub_domain = sub + '.' + self.target
                    answers = self.resolvers[thread_id].query(cur_sub_domain)
is_wildcard_record = False
if answers:
for answer in answers:
self.lock.acquire()
if answer.address not in self.ip_dict:
self.ip_dict[answer.address] = 1
else:
self.ip_dict[answer.address] += 1
if self.ip_dict[answer.address] > 2: # a wildcard DNS record
is_wildcard_record = True
self.lock.release()
if is_wildcard_record:
self._update_scan_count()
self._print_progress()
continue
ips = ', '.join([answer.address for answer in answers])
if (not self.ignore_intranet) or (not DNSBrute.is_intranet(answers[0].address)):
self.lock.acquire()
self.found_count += 1
msg = cur_sub_domain.ljust(30) + ips
sys.stdout.write('\r' + msg + ' ' * (self.console_width - len(msg)) + '\n\r')
sys.stdout.flush()
self.outfile.write(cur_sub_domain.ljust(30) + '\t' + ips + '\n')
self.lock.release()
for i in self.next_subs:
self.queue.put(i + '.' + sub)
break
except dns.resolver.NoNameservers, e:
break
except Exception, e:
pass
self._update_scan_count()
self._print_progress()
self._print_progress()
self.lock.acquire()
self.thread_count -= 1
self.lock.release()
def run(self):
self.start_time = time.time()
for i in range(self.threads_num):
t = threading.Thread(target=self._scan, name=str(i))
t.setDaemon(True)
t.start()
while self.thread_count > 1:
try:
time.sleep(1.0)
except KeyboardInterrupt, e:
                msg = '[WARNING] User aborted, waiting for all slave threads to exit...'
sys.stdout.write('\r' + msg + ' ' * (self.console_width - len(msg)) + '\n\r')
sys.stdout.flush()
self.STOP_ME = True
if __name__ == '__main__':
parser = optparse.OptionParser('usage: %prog [options] target.com')
    parser.add_option('-t', '--threads', dest='threads_num',
                      default=60, type='int',
                      help='Number of threads. default = 60')
parser.add_option('-f', '--file', dest='names_file', default='dict/subnames.txt',
type='string', help='Dict file used to brute sub names')
parser.add_option('-i', '--ignore-intranet', dest='i', default=False, action='store_true',
help='Ignore domains pointed to private IPs.')
parser.add_option('-o', '--output', dest='output', default=None,
type='string', help='Output file name. default is {target}.txt')
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_help()
sys.exit(0)
d = DNSBrute(target=args[0], names_file=options.names_file,
ignore_intranet=options.i,
threads_num=options.threads_num,
output=options.output)
d.run()
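# --- Illustrative sketch (assumption, not part of the original script) ---
# The class can also be driven programmatically instead of via the command
# line; the arguments mirror the optparse options above, and the dictionary
# files under dict/ must exist exactly as for the CLI.
# brute = DNSBrute(target='example.com', names_file='dict/subnames.txt',
#                  ignore_intranet=True, threads_num=30, output=None)
# brute.run()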
|
RicterZ/moescan
|
tools/subDomainsBrute/subDomainsBrute.py
|
Python
|
mit
| 7,122
|
#!/usr/bin/env python
##############################################################################
# Addon Releng Script
#
# Prepares an addon for release.
# Depends on version numbers being in the x.y.z
##############################################################################
import time
import argparse
import sys
import os.path
import xml.etree.ElementTree as ET
import zipfile
import tempfile
import shutil
parser = argparse.ArgumentParser(description='Addon Releng Script')
parser.add_argument('--addon', help='Root directory of addon', dest='addon', required=True)
parser.add_argument('--repo', help='Repo directory', dest='repo', required=True)
parser.add_argument('--patch', help='Increase patch version', dest='patch', action='store_true')
parser.add_argument('--minor', help='Increase minor version', dest='minor', action='store_true')
parser.add_argument('--major', help='Increase major version', dest='major', action='store_true')
def log(msg):
sys.stdout.write("[%s] %s\n" % (time.strftime("%c"), msg))
args = parser.parse_args()
repo_dir = os.path.abspath(args.repo)
addon_dir = os.path.abspath(args.addon)
tree = ET.parse("%s/addon.xml" % addon_dir)
root = tree.getroot()
version = root.attrib['version']
id = root.attrib['id']
log("Found addon: %s v%s" % (id, version))
ver_parts = version.split('.')
if (len(ver_parts) == 1):
    ver_parts.extend([0, 0])
elif (len(ver_parts) == 2):
    ver_parts.append(0)
if (args.patch):
ver_parts[2] = int(ver_parts[2]) + 1
if (args.minor):
ver_parts[1] = int(ver_parts[1]) + 1
if (args.major):
ver_parts[0] = int(ver_parts[0]) + 1
new_version = '.'.join("{0}".format(n) for n in ver_parts)
root.attrib['version'] = new_version
tree.write("%s/addon.xml" % addon_dir)
log("Addon %s updated to version %s" % (id, new_version))
temp_zip = tempfile.gettempdir() + "/psikon-xbmc/releng/%s-%s.zip" % (id, new_version)
dest_zip = "%s/%s/%s-%s.zip" % (repo_dir, id, id, new_version)
log("Creating zipfile at %s" % temp_zip)
if not os.path.exists(os.path.dirname(temp_zip)):
os.makedirs(os.path.dirname(temp_zip))
if not os.path.exists(os.path.dirname(dest_zip)):
os.makedirs(os.path.dirname(dest_zip))
if os.path.isfile(temp_zip):
os.remove(temp_zip)
zf = zipfile.ZipFile(temp_zip, mode='w')
try:
for root, dirs, files in os.walk(addon_dir):
zip_dir = root.replace(addon_dir, id)
for f in files:
zf.write(os.path.join(root, f), arcname=zip_dir + "/" + f)
shutil.move(temp_zip, dest_zip)
finally:
zf.close()
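# --- Illustrative sketch (not part of the original script) ---
# The same bump rule as above, reproduced on a throwaway version string so the
# behaviour is easy to see: each flag increments its own field, and lower
# fields are *not* reset.
def _bump_example(version, patch=False, minor=False, major=False):
    parts = [int(p) for p in version.split('.')]
    while len(parts) < 3:
        parts.append(0)
    if patch:
        parts[2] += 1
    if minor:
        parts[1] += 1
    if major:
        parts[0] += 1
    return '.'.join(str(p) for p in parts)
# _bump_example('1.2.3', patch=True)  -> '1.2.4'
# _bump_example('1.2.3', minor=True)  -> '1.3.3'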
|
alex-dow/xbmc
|
addon_release.py
|
Python
|
mit
| 2,568
|
# -*- coding: utf-8 -*-
#
# Python Fedora Module documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 9 08:12:44 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os, re
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import pkgdb.release
# If your extensions are in another directory, add it here.
#sys.path.append(os.path.dirname(__file__))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = pkgdb.release.NAME
copyright = pkgdb.release.COPYRIGHT
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = pkgdb.release.VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps page names to templates.
#html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = pkgdb.release.URL
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sphinxdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Fedora Package Database.tex', 'Fedora Package Database Documentation', 'Toshio Kuratomi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
automodule_skip_lines = 4
|
fedora-infra/packagedb
|
docs/conf.py
|
Python
|
gpl-2.0
| 5,223
|
"""Support for Minut Point binary sensors."""
import logging
from homeassistant.components.binary_sensor import DOMAIN, BinarySensorDevice
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import MinutPointEntity
from .const import DOMAIN as POINT_DOMAIN, POINT_DISCOVERY_NEW, SIGNAL_WEBHOOK
_LOGGER = logging.getLogger(__name__)
EVENTS = {
"battery": ("battery_low", ""), # On means low, Off means normal
"button_press": ( # On means the button was pressed, Off means normal
"short_button_press",
"",
),
"cold": ( # On means cold, Off means normal
"temperature_low",
"temperature_risen_normal",
),
"connectivity": ( # On means connected, Off means disconnected
"device_online",
"device_offline",
),
"dry": ( # On means too dry, Off means normal
"humidity_low",
"humidity_risen_normal",
),
"heat": ( # On means hot, Off means normal
"temperature_high",
"temperature_dropped_normal",
),
"moisture": ( # On means wet, Off means dry
"humidity_high",
"humidity_dropped_normal",
),
"sound": ( # On means sound detected, Off means no sound (clear)
"avg_sound_high",
"sound_level_dropped_normal",
),
"tamper": ("tamper", ""), # On means the point was removed or attached
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a Point's binary sensors based on a config entry."""
async def async_discover_sensor(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[POINT_DOMAIN][config_entry.entry_id]
async_add_entities(
(
MinutPointBinarySensor(client, device_id, device_class)
for device_class in EVENTS
),
True,
)
async_dispatcher_connect(
hass, POINT_DISCOVERY_NEW.format(DOMAIN, POINT_DOMAIN), async_discover_sensor
)
class MinutPointBinarySensor(MinutPointEntity, BinarySensorDevice):
"""The platform class required by Home Assistant."""
def __init__(self, point_client, device_id, device_class):
"""Initialize the binary sensor."""
super().__init__(point_client, device_id, device_class)
self._async_unsub_hook_dispatcher_connect = None
self._events = EVENTS[device_class]
self._is_on = None
async def async_added_to_hass(self):
"""Call when entity is added to HOme Assistant."""
await super().async_added_to_hass()
self._async_unsub_hook_dispatcher_connect = async_dispatcher_connect(
self.hass, SIGNAL_WEBHOOK, self._webhook_event
)
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
await super().async_will_remove_from_hass()
if self._async_unsub_hook_dispatcher_connect:
self._async_unsub_hook_dispatcher_connect()
async def _update_callback(self):
"""Update the value of the sensor."""
if not self.is_updated:
return
if self._events[0] in self.device.ongoing_events:
self._is_on = True
else:
self._is_on = None
self.async_schedule_update_ha_state()
@callback
def _webhook_event(self, data, webhook):
"""Process new event from the webhook."""
if self.device.webhook != webhook:
return
_type = data.get("event", {}).get("type")
_device_id = data.get("event", {}).get("device_id")
if _type not in self._events or _device_id != self.device.device_id:
return
_LOGGER.debug("Received webhook: %s", _type)
if _type == self._events[0]:
self._is_on = True
if _type == self._events[1]:
self._is_on = None
self.async_schedule_update_ha_state()
@property
def is_on(self):
"""Return the state of the binary sensor."""
if self.device_class == "connectivity":
# connectivity is the other way around.
return not self._is_on
return self._is_on
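# --- Illustrative sketch (assumption, not part of the original platform) ---
# How the EVENTS table above drives state: the first event name in each pair
# turns the sensor on, the second clears it, and 'connectivity' is reported
# inverted by the is_on property.
# on_event, off_event = EVENTS["cold"]
# on_event  == "temperature_low"             # webhook with this type -> on
# off_event == "temperature_risen_normal"    # webhook with this type -> cleared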
|
fbradyirl/home-assistant
|
homeassistant/components/point/binary_sensor.py
|
Python
|
apache-2.0
| 4,219
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from github3.models import BaseComment
from github3.users import User
class IssueComment(BaseComment):
"""The :class:`IssueComment <IssueComment>` object. This structures and
handles the comments on issues specifically.
Two comment instances can be checked like so::
c1 == c2
c1 != c2
And is equivalent to::
c1.id == c2.id
c1.id != c2.id
See also: http://developer.github.com/v3/issues/comments/
"""
def __init__(self, comment, session=None):
super(IssueComment, self).__init__(comment, session)
user = comment.get('user')
#: :class:`User <github3.users.User>` who made the comment
self.user = User(user, self) if user else None
#: Issue url (not a template)
self.issue_url = comment.get('issue_url')
def _repr(self):
return '<Issue Comment [{0}]>'.format(self.user.login)
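# --- Illustrative sketch (assumption, not part of the original module) ---
# The equality described in the class docstring, spelled out: comments compare
# by id, so two instances built from the same API payload are equal.  The
# payload below is a hand-written stand-in for a real GitHub response.
# payload = {'id': 1, 'body': 'hi', 'user': {'login': 'octocat'},
#            'issue_url': 'https://api.github.com/repos/o/r/issues/1'}
# c1 = IssueComment(payload)
# c2 = IssueComment(payload)
# assert c1 == c2 and not (c1 != c2)   # both reduce to c1.id == c2.id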
|
liresearchgroup/submtr
|
submtr/lib/github3/issues/comment.py
|
Python
|
mit
| 968
|
"""rippl URL Configuration"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from .registration.forms import RecaptchaRegView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^accounts/register/$', RecaptchaRegView.as_view()),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'go', TemplateView.as_view(template_name='app.html'), name='go'),
url(
r'^$',
TemplateView.as_view(template_name='mission_statement.html'),
name='mission',
),
url(
r'^privacy_terms',
TemplateView.as_view(template_name='tos.html'),
name='tos',
),
url(r'^legislature/', include('legislature.urls', namespace='leg')),
url(r'^rippl/', include('questing.urls', namespace='rippl')),
url(r'^laws/', include('bills.urls', namespace='laws')),
]
|
gnmerritt/dailyrippl
|
rippl/rippl/urls.py
|
Python
|
mit
| 911
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000011'
addresses_name = 'parl.2017-06-08/Version 1/Huntingdonshire Democracy_Club__08June2017.tsv'
stations_name = 'parl.2017-06-08/Version 1/Huntingdonshire Democracy_Club__08June2017.tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
|
chris48s/UK-Polling-Stations
|
polling_stations/apps/data_collection/management/commands/import_huntingdonshire.py
|
Python
|
bsd-3-clause
| 416
|
"""
Example: parse URLs.
"""
from peglet import *
# N.B. The grammar at
# http://www.udacity.com/view#Course/cs212/CourseRev/apr2012/Unit/207010/Nugget/152008
# uses . for '.' and this accepts any character there, right? Looks like a bug.
# Another: ftpaddress's (; ftptype) should be optional.
# Another: port: digits should not have (| path)
# Another?: path: void | ... (wrong order for PEG parsing)
def maker(name):
return lambda value: (name, value)
mk_host = maker('host')
mk_path = maker('path')
mk_search = maker('search')
mk_fragment = maker('fragment')
mk_port = maker('port')
# Following http://www.w3.org/Addressing/URL/5_BNF.html
# but leaving out some of the alternatives for url.
# This clearly isn't the modern definition of an httpaddress
# (see the 'right=wrong' example below).
url_parse = Parser(r"""
start = url $
url = httpaddress | ftpaddress | mailtoaddress
ftpaddress = ftp:// login [/] path opt_ftptype
opt_ftptype = ; ftptype |
login = opt_userpass hostport
opt_userpass = user : password @
| user @
|
user = alphanum2 user
| alphanum2
password = alphanum2 password
| alphanum2
alphanum2 = alpha | digit | [-_.+]
ftptype = [AE] formcode
| [I]
| [L] digits
formcode = [NTC]
mailtoaddress = mailto: xalphas @ hostname
httpaddress = http:// hostport opt_path opt_search opt_fragment
opt_path = / path join mk_path |
opt_search = [?] search join mk_search |
opt_fragment = # fragmentid join mk_fragment |
hostport = host : port
| host
host = host_syntax join mk_host
host_syntax = hostname | hostnumber
hostname = ialpha ([.]) hostname
| ialpha
hostnumber = digits ([.]) digits ([.]) digits ([.]) digits
port = digits join mk_port
path = segment (/) path
| segment
|
segment = xpalphas
search = xalphas ([+]) search
| xalphas
fragmentid = xalphas
xalpha = alpha | digit | safe | extra | escape
xalphas = xalpha xalphas
| xalpha
xpalpha = xalpha | [+]
xpalphas = xpalpha xpalphas
| xpalpha
ialpha = alpha xalphas
| alpha
alpha = ([a-zA-Z])
digit = (\d)
safe = ([$_@.&+-])
extra = ([!*"'(),])
escape = (%) hex hex
hex = ([\dA-Fa-f])
digits = (\d+)
alphanum = alpha | digit
alphanums = alphanum alphanums
| alphanum
reserved = [=;/#?: ]
national = [{}|\[\]\\^~]
punctuation = [<>]
""", **globals())
## attempt(url_parse, 'true')
## attempt(url_parse, 'http://google.com')
#. (('host', 'google.com'),)
## url_parse('http://en.wikipedia.org/wiki/Uniform_resource_locator')
#. (('host', 'en.wikipedia.org'), ('path', 'wiki/Uniform_resource_locator'))
## attempt(url_parse, 'http://wry.me/fun/toys/yes.html?right=wrong#fraggle')
## url_parse('http://wry.me/fun/toys/yes.html?rightwrong#fraggle')
#. (('host', 'wry.me'), ('path', 'fun/toys/yes.html'), ('search', 'rightwrong'), ('fragment', 'fraggle'))
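# --- Illustrative addition (not part of the original example) ---
# Since the parser returns (tag, value) pairs, individual pieces can be read
# back with an ordinary dict() over the result; this reuses the Wikipedia URL
# already exercised above.
## dict(url_parse('http://en.wikipedia.org/wiki/Uniform_resource_locator'))['host']
#. 'en.wikipedia.org'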
|
JaDogg/__py_playground
|
reference/peglet/examples/url.py
|
Python
|
mit
| 3,221
|
from __future__ import print_function, division
import os
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,floatX=float32"
import theano
from theano import tensor
import numpy as np
import scipy as sp
import dnntoolkit
import unittest
import h5py
import lasagne
# ===========================================================================
# Model
# ===========================================================================
def test():
l_in = lasagne.layers.InputLayer(shape=(None, 28, 28))
l_in = lasagne.layers.FlattenLayer(l_in)
l_in = lasagne.layers.DropoutLayer(l_in, p=0.3)
l_hid = lasagne.layers.DenseLayer(l_in, num_units=128)
l_hid = lasagne.layers.DropoutLayer(l_hid, p=0.3)
l_out = lasagne.layers.DenseLayer(l_hid, num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
return l_out
# ===========================================================================
# Test
# ===========================================================================
ds = dnntoolkit.dataset.load_mnist()
print(ds)
m = dnntoolkit.model('/volumes/backup/model/mnist.ai')
m.set_model(test, 'lasagne')
ai = m.create_model()
input_var = [i.input_var
for i in lasagne.layers.find_layers(ai, types=lasagne.layers.InputLayer)]
y = tensor.matrix('y')
y_pred_stoch = lasagne.layers.get_output(ai, deterministic=False)
y_pred_deter = lasagne.layers.get_output(ai, deterministic=True)
cost_monitor = lasagne.objectives.categorical_accuracy(y_pred_deter, y).mean()
cost_train = lasagne.objectives.categorical_crossentropy(y_pred_stoch, y).mean()
regu = lasagne.regularization.L1L2(l2=dnntoolkit.dnn.calc_weights_decay(m.get_nweights()))
cost_regu = regu.apply_network(ai)
cost_train += cost_regu
params = lasagne.layers.get_all_params(ai, trainable=True)
grad = tensor.grad(cost_train, params)
print('Suggest learning rate: %f' % dnntoolkit.dnn.calc_lr(m.get_nweights(), m.get_nlayers()))
lr = dnntoolkit.tensor.shared_scalar(
dnntoolkit.dnn.calc_lr(m.get_nweights(), m.get_nlayers()))
updates = lasagne.updates.rmsprop(grad, params,
learning_rate=lr)
f_cost = theano.function(
inputs=input_var + [y],
outputs=cost_monitor,
allow_input_downcast=True)
f_update = theano.function(
inputs=input_var + [y],
outputs=cost_train,
updates=updates,
allow_input_downcast=True)
# ===========================================================================
# Callback
# ===========================================================================
# => valid Stats: Mean:0.9354 Var:0.00 Med:0.94 Min:0.91 Max:0.97
stopAllow = 3
def validend(trainer):
m.record(np.mean(trainer.cost), 'validend')
cost = [1. - i for i in m.select('validend')]
save, stop = dnntoolkit.dnn.earlystop(cost, generalization_loss=True, threshold=3)
if save:
print('\nSaving !!!')
m.save()
if stop:
global stopAllow
if stopAllow > 0:
print('\nDecreasing lr !!!')
lr.set_value(lr.get_value() / 2)
# m.rollback()
else:
print('\nStopping !!!')
trainer.stop()
stopAllow -= 1
# ===========================================================================
# TRainer
# ===========================================================================
Xcross = np.random.rand(1000, 28, 28)
ycross = dnntoolkit.tensor.to_categorical(np.random.randint(0, 10, size=1000))
def cross_it(*args):
# print('**cross_it**')
idx = range(1000)
for i, j in zip(idx, idx[1:]):
yield Xcross[i:j], ycross[i:j]
def Xcross_it(*args):
# print('**IT**')
idx = range(1000)
for i, j in zip(idx, idx[1:]):
yield Xcross[i:j]
def ycross_it(*args):
idx = range(1000)
for i, j in zip(idx, idx[1:]):
yield ycross[i:j]
def Xcross_it_new(size, shuffle, seed, mode):
# print('IT new')
np.random.seed(seed)
batches = dnntoolkit.mpi.segment_job(range(1000), int(1000 / size))
np.random.shuffle(batches)
for i in batches:
yield Xcross[i[0]:i[-1]]
def ycross_it_new(size, shuffle, seed, mode):
np.random.seed(seed)
batches = dnntoolkit.mpi.segment_job(range(1000), int(1000 / size))
np.random.shuffle(batches)
for i in batches:
yield ycross[i[0]:i[-1]]
trainer = dnntoolkit.trainer()
trainer.set_callback(valid_end=validend)
trainer.set_dataset(ds,
valid=['X_valid', ds['y_valid'].value],
test=['X_test', 'y_test'],
cross=[cross_it],
pcross=0.1
)
trainer.set_model(f_cost, f_update)
trainer.set_strategy(
task='train',
epoch=100,
batch=128,
validfreq=0.6,
shuffle=True,
data=[ds['X_train'].value, ds['y_train']],
seed=12082518,
# cross=[Xcross_it, ycross_it],
pcross=0.2).set_strategy(
task='test',
batch=128)
print(trainer)
trainer.run()
|
trungnt13/dnntoolkit
|
tests/mnist_crosstraining.py
|
Python
|
apache-2.0
| 4,825
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from .forms import (
AdminUserCreationForm,
AdminUserChangeForm
)
User = get_user_model()
def ban_users(modeladmin, request, queryset):
queryset.update(is_banned=True)
ban_users.short_description = "Ban selected Users"
class UserAdmin(BaseUserAdmin):
form = AdminUserChangeForm
add_form = AdminUserCreationForm
list_display = ['username', 'email', 'date_joined', 'last_login', 'is_banned', 'is_admin']
list_filter = ['last_login', 'date_joined', 'is_banned', 'is_admin']
fieldsets = (
(None, {'fields': ('username', 'email', 'password')}),
('Permissions', {'fields': ('is_banned','is_admin')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ( 'username', 'email', 'password1', 'password2')}
),
)
search_fields = ['username', 'email']
actions = [ban_users]
ordering = ['username']
filter_horizontal = ()
admin.site.register(User, UserAdmin)
admin.site.unregister(Group)
|
kasper190/SPAforum
|
accounts/admin.py
|
Python
|
mit
| 1,194
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sistema', '0010_deuda_sociodeuda'),
]
operations = [
migrations.AddField(
model_name='lote',
name='socio',
field=models.ForeignKey(default=1, to='sistema.Socio'),
preserve_default=False,
),
]
|
gabrielf10/webAmpunc
|
sistema/migrations/0011_lote_socio.py
|
Python
|
bsd-3-clause
| 448
|