repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
biocore/qiime | refs/heads/master | scripts/split_libraries_fastq.py | 11 | #!/usr/bin/env python
# File created on 07 Jun 2011
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso", "Emily TerAvest", "Yoshiki Vazquez Baeza",
"Rob Knight"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from os import rename
from skbio.util import safe_md5, create_dir
from skbio.sequence import DNA
from skbio.format.sequences import format_fastq_record
from qiime.util import parse_command_line_parameters, make_option, gzip_open
from qiime.parse import parse_mapping_file, parse_items
from qiime.split_libraries_fastq import (process_fastq_single_end_read_file,
BARCODE_DECODER_LOOKUP, process_fastq_single_end_read_file_no_barcode)
from qiime.split_libraries import check_map
from qiime.split_libraries_fastq import get_illumina_qual_chars
from qiime.golay import get_invalid_golay_barcodes
# Command-line interface definition consumed by
# qiime.util.parse_command_line_parameters() in main() below.
script_info = {}
script_info['brief_description'] = ("This script performs demultiplexing of "
    "Fastq sequence data where barcodes and sequences are contained in two "
    "separate fastq files (common on Illumina runs).")
script_info['script_description'] = ""

# Usage examples: (short description, long description, example command).
script_info['script_usage'] = []
script_info['script_usage'].append(("Demultiplex and quality filter "
    "(at Phred >= Q20) one lane of Illumina fastq data and write results to "
    "./slout_q20.", "", "%prog -i lane1_read1.fastq.gz "
    "-b lane1_barcode.fastq.gz --rev_comp_mapping_barcodes -o slout_q20/ "
    "-m map.txt -q 19"))
script_info['script_usage'].append(("Demultiplex and quality filter "
    "(at Phred >= Q20) one lane of Illumina fastq data and write results to "
    "./slout_q20. Store trimmed quality scores in addition to sequence data.",
    "", "%prog -i lane1_read1.fastq.gz -b lane1_barcode.fastq.gz "
    "--rev_comp_mapping_barcodes -o slout_q20/ -m map.txt "
    "--store_qual_scores -q 19"))
script_info['script_usage'].append(("Demultiplex and quality filter "
    "(at Phred >= Q20) two lanes of Illumina fastq data and write results to "
    "./slout_q20.", "", "%prog -i lane1_read1.fastq.gz,lane2_read1.fastq.gz "
    "-b lane1_barcode.fastq.gz,lane2_barcode.fastq.gz "
    "--rev_comp_mapping_barcodes -o slout_q20/ -m map.txt,map.txt "
    "--store_qual_scores -q 19"))
script_info['script_usage'].append(("Quality filter (at Phred >= Q20) one "
    "non-multiplexed lane of Illumina fastq data and write results "
    "to ./slout_single_sample_q20.", "", "%prog -i lane1_read1.fastq.gz "
    "--sample_ids my.sample.1 -o slout_single_sample_q20/ "
    "-q 19 --barcode_type 'not-barcoded'"))
script_info['script_usage'].append(("Quality filter (at Phred >= Q20) two "
    "non-multiplexed lanes of Illumina fastq data with different samples in "
    "each and write results to ./slout_not_multiplexed_q20.", "",
    "%prog -i lane1_read1.fastq.gz,lane2_read1.fastq.gz "
    "--sample_ids my.sample.1,my.sample.2 -o slout_not_multiplexed_q20/ "
    "-q 19 --barcode_type 'not-barcoded'"))
script_info['output_description'] = ""

# Options the user must always supply.
script_info['required_options'] = [
    make_option('-i', '--sequence_read_fps', type="existing_filepaths",
        help='the sequence read fastq files (comma-separated if more than '
        'one)'),
    make_option('-o', '--output_dir', type="new_dirpath", help='directory to '
        'store output files'),
]

# Optional options; defaults are interpolated into the help via %default.
script_info['optional_options'] = [
    make_option('-m', '--mapping_fps', type="existing_filepaths",
        help='metadata mapping files (comma-separated if more than'
        ' one) [default: %default]', default=None),
    make_option('-b', '--barcode_read_fps', type="existing_filepaths",
        default=None, help='the barcode read fastq files (comma-separated '
        'if more than one) [default: %default]'),
    make_option("--store_qual_scores", default=False, action='store_true',
        help='store qual strings in .qual files [default: %default]'),
    make_option("--sample_ids", default=None, help='comma-separated list of '
        'samples ids to be applied to all sequences, must be one per input '
        'file path (used when data is not multiplexed) [default: %default]'),
    make_option("--store_demultiplexed_fastq", default=False,
        action='store_true', help='write demultiplexed fastq files '
        '[default: %default]'),
    make_option("--retain_unassigned_reads", default=False,
        action='store_true', help="retain sequences which don't map to a "
        'barcode in the mapping file (sample ID will be "Unassigned") '
        '[default: %default]'),
    make_option('-r', '--max_bad_run_length', type='int', help='max number '
        'of consecutive low quality base calls allowed before truncating a '
        'read [default: %default]', default=3),
    make_option('-p', '--min_per_read_length_fraction', type='float',
        default=0.75, help='min number of consecutive high quality base calls '
        'to include a read (per single end read) as a fraction of the input '
        'read length [default: %default]'),
    make_option('-n', '--sequence_max_n', type='int', help='maximum number '
        'of N characters allowed in a sequence to retain it -- this is '
        'applied after quality trimming, and is total over combined paired '
        'end reads if applicable [default: %default]', default=0),
    make_option('-s', '--start_seq_id', type='int', help='start seq_ids as '
        'ascending integers beginning with start_seq_id [default: %default]',
        default=0),
    make_option('--rev_comp_barcode', action='store_true', help='reverse '
        'complement barcode reads before lookup [default: %default]',
        default=False),
    make_option('--rev_comp_mapping_barcodes', action='store_true',
        help='reverse complement barcode in mapping before lookup (useful if '
        'barcodes in mapping file are reverse complements of golay codes) '
        '[default: %default]', default=False),
    make_option('--rev_comp', action='store_true', help='reverse complement '
        'sequence before writing to output file (useful for reverse-'
        'orientation reads) [default: %default]', default=False),
    make_option('-q', '--phred_quality_threshold', type='int', help='the '
        'maximum unacceptable Phred quality score (e.g., for Q20 and better, '
        'specify -q 19) [default: %default]', default=3),
    make_option('--last_bad_quality_char', help='DEPRECATED: use -q instead. '
        'This method of setting is not robust to different versions of '
        'CASAVA.'),
    make_option('--barcode_type', type='string', help='The type of barcode '
        'used. This can be an integer, e.g. for length 6 barcodes, or '
        '"golay_12" for golay error-correcting barcodes. Error correction will '
        'only be applied for "golay_12" barcodes. If data is not barcoded, pass '
        '"not-barcoded". [default: %default]',
        default='golay_12'),
    make_option('--max_barcode_errors', default=1.5, type='float',
        help='maximum number of errors in barcode [default: %default]'),
    make_option('--phred_offset', default=None, type="choice",
        choices=['33', '64'], help="the ascii offset to use when "
        "decoding phred scores (either 33 or 64). Warning: in most "
        "cases you don't need to pass this value "
        "[default: determined automatically]"),
    make_option('--read_arguments_from_file', default=False,
        action='store_true', help='If this flag is enabled, then the '
        'inputs to "-i" or "--sequence_read_fps", "-b" or '
        '"--barcode_read_fps", "-m" or "--mapping_fps" and '
        '"--sample_ids" will each be interpreted as a single text file'
        ', where the contents are one file-path or sample identifier '
        'per line (depending on the flag). NOTE: In most cases regular'
        ' users don\'t need to use this flag, as it is intended for '
        'use in multiple_split_libraries_fastq.py [default: %default]')
    # NEED TO FIX THIS FUNCTIONALITY - CURRENTLY READING THE WRONG FIELD
    # make_option('--filter_bad_illumina_qual_digit',
    #  action='store_true',
    #  help='filter sequences which are tagged as not passing the Illumina'+
    #  ' quality filter [default: %default]',
    #  default=False),
]
script_info['version'] = __version__
def main():
    """Demultiplex and quality-filter the input fastq file(s).

    Parses the command line per script_info, validates the option
    combinations, then streams each lane through the appropriate qiime
    fastq processor, writing seqs.fna (and optionally seqs.qual /
    seqs.fastq) plus a log and histogram file into --output_dir.
    """
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    read_arguments_from_file = opts.read_arguments_from_file

    # these arguments can optionally be read from a file, reasoning is to
    # allow arguments that would span over hundreds of samples and would be
    # prohibitive to execute as a command line call
    if read_arguments_from_file:
        # sample_ids is the only one of these arguments that's returned as a
        # string, the rest of them are lists
        if opts.sample_ids:
            opts.sample_ids = ','.join(parse_items(opts.sample_ids))
        if opts.sequence_read_fps:
            opts.sequence_read_fps = parse_items(opts.sequence_read_fps[0])
        if opts.barcode_read_fps:
            opts.barcode_read_fps = parse_items(opts.barcode_read_fps[0])
        if opts.mapping_fps:
            opts.mapping_fps = parse_items(opts.mapping_fps[0])

    # Unpack options into locals for readability below.
    sequence_read_fps = opts.sequence_read_fps
    barcode_read_fps = opts.barcode_read_fps

    sample_ids = None
    if opts.sample_ids is not None:
        sample_ids = opts.sample_ids.split(',')

    mapping_fps = opts.mapping_fps
    phred_quality_threshold = opts.phred_quality_threshold
    retain_unassigned_reads = opts.retain_unassigned_reads
    min_per_read_length_fraction = opts.min_per_read_length_fraction
    max_bad_run_length = opts.max_bad_run_length
    rev_comp = opts.rev_comp
    rev_comp_barcode = opts.rev_comp_barcode
    rev_comp_mapping_barcodes = opts.rev_comp_mapping_barcodes
    seq_max_N = opts.sequence_max_n
    start_seq_id = opts.start_seq_id
    # NEED TO FIX THIS FUNCTIONALITY - CURRENTLY READING THE WRONG FIELD
    # opts.filter_bad_illumina_qual_digit
    filter_bad_illumina_qual_digit = False
    store_qual_scores = opts.store_qual_scores
    store_demultiplexed_fastq = opts.store_demultiplexed_fastq
    barcode_type = opts.barcode_type
    max_barcode_errors = opts.max_barcode_errors

    # if this is not a demultiplexed run,
    if barcode_type == 'not-barcoded':
        if sample_ids is None:
            option_parser.error("If not providing barcode reads (because "
                "your data is not multiplexed), must provide --sample_ids.")
        if len(sample_ids) != len(sequence_read_fps):
            option_parser.error("If providing --sample_ids (because "
                "your data is not multiplexed), must provide the same number "
                "of sample ids as sequence read filepaths.")
        # Pad the per-lane lists with None so the zip-style loop below works.
        barcode_read_fps = [None] * len(sequence_read_fps)
        mapping_fps = [None] * len(sequence_read_fps)
    elif barcode_read_fps is None:
        option_parser.error("Must provide --barcode_read_fps if "
                            "--barcode_type is not 'not-barcoded'")
    elif mapping_fps is None:
        option_parser.error("Must provide --mapping_fps if "
                            "--barcode_type is not 'not-barcoded'")

    phred_offset = opts.phred_offset
    if phred_offset is not None:
        try:
            phred_offset = int(phred_offset)
        except ValueError:
            # shouldn't be able to get here... (the option is a 'choice'
            # restricted to '33'/'64')
            option_parser.error(
                "If --phred_offset is provided, it must be a valid integer.")

    if opts.last_bad_quality_char is not None:
        option_parser.error('--last_bad_quality_char is no longer supported. '
            'Use -q instead (see option help text by passing -h)')

    if not (0 < min_per_read_length_fraction <= 1):
        option_parser.error('--min_per_read_length_fraction must be greater '
            'than 0 and less than or equal to 1. You passed '
            '%1.5f.' % min_per_read_length_fraction)

    # None when barcode_type has no registered decoder (e.g. plain lengths).
    barcode_correction_fn = BARCODE_DECODER_LOOKUP.get(barcode_type, None)

    # A single mapping file may be shared across all lanes.
    if len(mapping_fps) == 1 and len(sequence_read_fps) > 1:
        mapping_fps = mapping_fps * len(sequence_read_fps)

    if len(set([len(sequence_read_fps), len(barcode_read_fps),
                len(mapping_fps)])) > 1:
        option_parser.error("Same number of sequence, barcode, and mapping "
                            "files must be provided.")

    output_dir = opts.output_dir
    create_dir(output_dir)

    # Write to *.incomplete paths first; rename on success so a crashed run
    # never leaves a partial file under the final name.
    output_fp_temp = '%s/seqs.fna.incomplete' % output_dir
    output_fp = '%s/seqs.fna' % output_dir
    output_f = open(output_fp_temp, 'w')
    # NOTE(review): the temp name is qual.fna.incomplete but the final name
    # is seqs.qual -- inconsistent, though harmless since it is renamed.
    qual_fp_temp = '%s/qual.fna.incomplete' % output_dir
    qual_fp = '%s/seqs.qual' % output_dir
    output_fastq_fp_temp = '%s/seqs.fastq.incomplete' % output_dir
    output_fastq_fp = '%s/seqs.fastq' % output_dir

    if store_qual_scores:
        qual_f = open(qual_fp_temp, 'w')

        # define a qual writer whether we're storing
        # qual strings or not so we don't have to check
        # every time through the for loop below
        def qual_writer(h, q):
            qual_f.write('>%s\n%s\n' % (h, q))
    else:
        def qual_writer(h, q):
            pass

    if store_demultiplexed_fastq:
        output_fastq_f = open(output_fastq_fp_temp, 'w')

        # define a fastq writer whether we're storing
        # qual strings or not so we don't have to check
        # every time through the for loop below
        def fastq_writer(h, s, q):
            output_fastq_f.write(format_fastq_record(h, s, q))
    else:
        def fastq_writer(h, s, q):
            pass

    log_fp = '%s/split_library_log.txt' % output_dir
    log_f = open(log_fp, 'w')
    histogram_fp = '%s/histograms.txt' % output_dir
    histogram_f = open(histogram_fp, 'w')

    # Process each lane (sequence/barcode/mapping triple) in turn.
    for i in range(len(sequence_read_fps)):
        sequence_read_fp = sequence_read_fps[i]
        barcode_read_fp = barcode_read_fps[i]
        mapping_fp = mapping_fps[i]
        if mapping_fp is not None:
            mapping_f = open(mapping_fp, 'U')
            # Only the barcode -> sample-id map is needed from check_map.
            _, _, barcode_to_sample_id, _, _, _, _ = check_map(mapping_f,
                disable_primer_check=True,
                has_barcodes=barcode_read_fp is not None)
        else:
            mapping_f = None
            barcode_to_sample_id = {}

        if rev_comp_mapping_barcodes:
            barcode_to_sample_id = {str(DNA(k).rc()): v for k, v in
                                    barcode_to_sample_id.iteritems()}

        if barcode_type == 'golay_12':
            # Validate up front; error correction is meaningless otherwise.
            invalid_golay_barcodes = get_invalid_golay_barcodes(
                barcode_to_sample_id.keys())
            if len(invalid_golay_barcodes) > 0:
                option_parser.error("Some or all barcodes are not valid golay "
                    "codes. Do they need to be reverse complemented? If these "
                    "are not golay barcodes pass --barcode_type 12 to disable "
                    "barcode error correction, or pass --barcode_type # if "
                    "the barcodes are not 12 base pairs, where # is the size "
                    "of the barcodes. Invalid codes:\n\t%s" %
                    ' '.join(invalid_golay_barcodes))

        # Record input provenance (paths + md5) in the log.
        log_f.write("Input file paths\n")
        if mapping_fp is not None:
            log_f.write('Mapping filepath: %s (md5: %s)\n' %
                        (mapping_fp, safe_md5(open(mapping_fp)).hexdigest()))
        log_f.write('Sequence read filepath: %s (md5: %s)\n' %
                    (sequence_read_fp,
                     str(safe_md5(open(sequence_read_fp)).hexdigest())))

        if sequence_read_fp.endswith('.gz'):
            sequence_read_f = gzip_open(sequence_read_fp)
        else:
            sequence_read_f = open(sequence_read_fp, 'U')

        seq_id = start_seq_id

        if barcode_read_fp is not None:
            log_f.write('Barcode read filepath: %s (md5: %s)\n\n' %
                        (barcode_read_fp,
                         safe_md5(open(barcode_read_fp)).hexdigest()))
            if barcode_read_fp.endswith('.gz'):
                barcode_read_f = gzip_open(barcode_read_fp)
            else:
                barcode_read_f = open(barcode_read_fp, 'U')
            seq_generator = process_fastq_single_end_read_file(
                sequence_read_f, barcode_read_f, barcode_to_sample_id,
                store_unassigned=retain_unassigned_reads,
                max_bad_run_length=max_bad_run_length,
                phred_quality_threshold=phred_quality_threshold,
                min_per_read_length_fraction=min_per_read_length_fraction,
                rev_comp=rev_comp, rev_comp_barcode=rev_comp_barcode,
                seq_max_N=seq_max_N, start_seq_id=start_seq_id,
                filter_bad_illumina_qual_digit=filter_bad_illumina_qual_digit,
                log_f=log_f, histogram_f=histogram_f,
                barcode_correction_fn=barcode_correction_fn,
                max_barcode_errors=max_barcode_errors,
                phred_offset=phred_offset)
        else:
            # Non-multiplexed lane: every read belongs to sample_ids[i].
            seq_generator = process_fastq_single_end_read_file_no_barcode(
                sequence_read_f, sample_ids[i],
                store_unassigned=retain_unassigned_reads,
                max_bad_run_length=max_bad_run_length,
                phred_quality_threshold=phred_quality_threshold,
                min_per_read_length_fraction=min_per_read_length_fraction,
                rev_comp=rev_comp, seq_max_N=seq_max_N,
                start_seq_id=start_seq_id,
                filter_bad_illumina_qual_digit=filter_bad_illumina_qual_digit,
                log_f=log_f, histogram_f=histogram_f,
                phred_offset=phred_offset)

        # Stream the lane's reads to the output files.
        for fasta_header, sequence, quality, seq_id in seq_generator:
            output_f.write('>%s\n%s\n' % (fasta_header, sequence))
            qual_writer(fasta_header, quality)
            fastq_writer(fasta_header, sequence, quality)
        # Continue seq_id numbering across lanes.
        start_seq_id = seq_id + 1
        log_f.write('\n---\n\n')

    output_f.close()
    rename(output_fp_temp, output_fp)

    # process the optional output files, as necessary
    if store_qual_scores:
        qual_f.close()
        rename(qual_fp_temp, qual_fp)
    if store_demultiplexed_fastq:
        output_fastq_f.close()
        rename(output_fastq_fp_temp, output_fastq_fp)


if __name__ == "__main__":
    main()
|
mariusbaumann/pyload | refs/heads/stable | module/plugins/hoster/DdlstorageCom.py | 5 | # -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class DdlstorageCom(DeadHoster):
    # Plugin metadata read by pyLoad's plugin loader; DeadHoster marks the
    # service as permanently offline, so no download logic is needed.
    __name__ = "DdlstorageCom"
    __type__ = "hoster"
    __version__ = "1.02"
    __pattern__ = r'https?://(?:www\.)?ddlstorage\.com/\w+'
    __description__ = """DDLStorage.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
                   ("stickell", "l.stickell@yahoo.it")]


# Module-level hook expected by pyLoad; DeadHoster's factory produces a
# getInfo that reports every matching link as offline.
getInfo = create_getInfo(DdlstorageCom)
|
zhaogaolong/oneFinger | refs/heads/master | openstack/test/log.py | 1 | #!/usr/bin/env python
# coding:utf8
import logging
# logging.basicConfig(level=logging.DEBUG,
#                     format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
#                     datefmt='%a, %d %b %Y %H:%M:%S',
#                     filename='myapp.log',
#                     filemode='w')

# These go to the root logger; with its default WARNING level only the
# warning message is emitted.
logging.debug('This is debug message')
logging.info('This is info message')
logging.warning('This is warning message')

logger = logging.getLogger('one_finger')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('spam.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
# 'application' code
logger.debug('debug message')
logger.info('info message')
# NOTE(review): logger.warn is deprecated; logger.warning is preferred.
logger.warn('warn message')
logger.error('error message')
logger.critical('critical message')

# Second, independent logger wired the same way.
logger = logging.getLogger("test_user1")  # logger name, shown as %(name)s in the formatter below
logger.setLevel(logging.DEBUG)  # global level for this logger
# console output
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)  # level for messages shown on the console
# file output
fh = logging.FileHandler("log2.log")
fh.setLevel(logging.DEBUG)  # level for messages written to the log file
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)  # apply the format to the console handler
fh.setFormatter(formatter)  # apply the format to the file handler
# hand the console and file handlers to the logger
logger.addHandler(ch)  # emit to console
logger.addHandler(fh)  # emit to file
|
goldsborough/.emacs | refs/heads/master | .emacs.d/.python-environments/default/lib/python3.5/site-packages/setuptools/ssl_support.py | 100 | import os
import socket
import atexit
import re
from setuptools.extern.six.moves import urllib, http_client, map
import pkg_resources
from pkg_resources import ResolutionError, ExtractionError
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
# Candidate locations of the system CA bundle across common Unix platforms;
# find_ca_bundle() below returns the first of these that exists on disk.
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
/usr/local/share/certs/ca-root-nss.crt
""".strip().split()
try:
HTTPSHandler = urllib.request.HTTPSHandler
HTTPSConnection = http_client.HTTPSConnection
except AttributeError:
HTTPSHandler = HTTPSConnection = object
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    # Collect the names that were checked, for the error message below.
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # Nothing matched: raise with as much detail as we gathered.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
            "doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
            "doesn't match %r"
            % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
            "subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
    """Simple verifying handler: no auth, subclasses, timeouts, etc."""

    def __init__(self, ca_bundle):
        # Path of the CA bundle used to verify server certificates.
        self.ca_bundle = ca_bundle
        HTTPSHandler.__init__(self)

    def https_open(self, req):
        # Open the request over a connection that verifies the peer's
        # certificate against self.ca_bundle.
        return self.do_open(
            lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
        )
class VerifyingHTTPSConn(HTTPSConnection):
    """Simple verifying connection: no auth, subclasses, timeouts, etc."""

    def __init__(self, host, ca_bundle, **kw):
        HTTPSConnection.__init__(self, host, **kw)
        # CA bundle used as the trust root when wrapping the socket.
        self.ca_bundle = ca_bundle

    def connect(self):
        # Open the TCP connection, wrap it in TLS, and verify the peer's
        # certificate hostname; raises CertificateError on mismatch.
        sock = socket.create_connection(
            (self.host, self.port), getattr(self, 'source_address', None)
        )

        # Handle the socket if a (proxy) tunnel is present
        if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
            self.sock = sock
            self._tunnel()
            # http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
            # change self.host to mean the proxy server host when tunneling is
            # being used. Adapt, since we are interested in the destination
            # host for the match_hostname() comparison.
            actual_host = self._tunnel_host
        else:
            actual_host = self.host

        # NOTE(review): ssl.wrap_socket is deprecated since Python 3.7 and
        # removed in 3.12; migrating to an SSLContext would change
        # verification behavior, so it is only flagged here, not replaced.
        self.sock = ssl.wrap_socket(
            sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
        )
        try:
            match_hostname(self.sock.getpeercert(), actual_host)
        except CertificateError:
            # Close hard on mismatch so the unverified socket can't be reused.
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
            raise
def opener_for(ca_bundle=None):
    """Get a urlopen() replacement that uses ca_bundle for verification"""
    # Fall back to a discovered system CA bundle when none is supplied.
    bundle = ca_bundle or find_ca_bundle()
    handler = VerifyingHTTPSHandler(bundle)
    opener = urllib.request.build_opener(handler)
    return opener.open
# Lazily-created wincertstore.CertFile holding the Windows system CA certs;
# populated on first call to get_win_certfile().
_wincerts = None


def get_win_certfile():
    """Return the path of a file holding Windows' CA certs, or None.

    Returns None when the optional wincertstore package is unavailable.
    The CertFile is created once and cached in the module-level _wincerts.
    """
    global _wincerts
    if _wincerts is not None:
        return _wincerts.name
    try:
        from wincertstore import CertFile
    except ImportError:
        return None

    class MyCertFile(CertFile):
        def __init__(self, stores=(), certs=()):
            CertFile.__init__(self)
            for store in stores:
                self.addstore(store)
            self.addcerts(certs)
            # Remove the backing temp file at interpreter exit.
            atexit.register(self.close)

        def close(self):
            try:
                super(MyCertFile, self).close()
            except OSError:
                # Ignore a second close / already-removed file.
                pass

    _wincerts = MyCertFile(stores=['CA', 'ROOT'])
    return _wincerts.name
def find_ca_bundle():
    """Return an existing CA bundle path, or None"""
    if os.name == 'nt':
        # On Windows the bundle is materialized from the system cert stores
        # (may be None if wincertstore is unavailable).
        return get_win_certfile()
    # Elsewhere, probe the well-known filesystem locations first.
    existing = next((p for p in cert_paths if os.path.isfile(p)), None)
    if existing is not None:
        return existing
    # Fall back to the bundle shipped with certifi, when installed.
    try:
        return pkg_resources.resource_filename('certifi', 'cacert.pem')
    except (ImportError, ResolutionError, ExtractionError):
        return None
|
drwyrm/Flexget | refs/heads/develop | flexget/plugins/sites/rarbg.py | 1 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.requests import Session, get, TimedLimiter, RequestException
from flexget.utils.search import normalize_scene
log = logging.getLogger('rarbg')

requests = Session()
# torrentapi.org rate-limits clients; space requests 3 seconds apart to stay
# safely under the limit.
requests.add_domain_limiter(TimedLimiter('torrentapi.org', '3 seconds'))
# Maps the human-friendly category names accepted in the plugin config to
# torrentapi numeric category ids (used in the 'category' request param).
CATEGORIES = {
    'all': 0,

    # Movies
    'x264': 17,
    'x264 720p': 45,
    'x264 1080p': 44,
    'x264 3D': 47,
    'XviD': 14,
    'XviD 720p': 48,
    'Full BD': 42,

    # TV
    'HDTV': 41,
    'SDTV': 18,

    # Adult
    'XXX': 4,

    # Music
    'MusicMP3': 23,
    'MusicFLAC': 25,

    # Games
    'Games/PC ISO': 27,
    'Games/PC RIP': 28,
    'Games/PS3': 40,
    'Games/XBOX-360': 32,
    'Software/PC ISO': 33,

    # E-Books
    'e-Books': 35
}
class SearchRarBG(object):
    """
    RarBG search plugin.

    To perform search against single category:

    rarbg:
        category: x264 720p

    To perform search against multiple categories:

    rarbg:
        category:
            - x264 720p
            - x264 1080p

    Movie categories accepted: x264 720p, x264 1080p, XviD, Full BD
    TV categories accepted: HDTV, SDTV

    You can use also use category ID manually if you so desire (eg. x264 720p is actually category id '45')
    """

    # Config schema validated by flexget; category accepts a name from
    # CATEGORIES or a raw torrentapi id, singly or as a list.
    schema = {
        'type': 'object',
        'properties': {
            'category': one_or_more({
                'oneOf': [
                    {'type': 'integer'},
                    {'type': 'string', 'enum': list(CATEGORIES)},
                ]}),
            'sorted_by': {'type': 'string', 'enum': ['seeders', 'leechers', 'last'], 'default': 'last'},
            # min_seeders and min_leechers seem to be working again
            'min_seeders': {'type': 'integer', 'default': 0},
            'min_leechers': {'type': 'integer', 'default': 0},
            'limit': {'type': 'integer', 'enum': [25, 50, 100], 'default': 25},
            'ranked': {'type': 'boolean', 'default': True},
            'use_tvdb': {'type': 'boolean', 'default': False},
        },
        "additionalProperties": False
    }

    base_url = 'https://torrentapi.org/pubapi_v2.php'

    def get_token(self):
        """Fetch a short-lived torrentapi session token, or None on failure."""
        # Don't use a session as tokens are not affected by domain limit
        try:
            r = get(self.base_url, params={'get_token': 'get_token', 'format': 'json'}).json()
            token = r.get('token')
            log.debug('RarBG token: %s' % token)
            return token
        except RequestException as e:
            log.debug('Could not retrieve RarBG token: %s', e.args[0])

    @plugin.internet(log)
    def search(self, task, entry, config):
        """
        Search for entries on RarBG
        """
        categories = config.get('category', 'all')
        # Ensure categories a list
        if not isinstance(categories, list):
            categories = [categories]
        # Convert named category to its respective category id number
        categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
        category_url_fragment = ';'.join(str(c) for c in categories)

        entries = set()

        token = self.get_token()
        if not token:
            log.error('Could not retrieve token. Abandoning search.')
            return entries

        # Base query parameters shared by every search string below.
        params = {'mode': 'search', 'token': token, 'ranked': int(config['ranked']),
                  'min_seeders': config['min_seeders'], 'min_leechers': config['min_leechers'],
                  'sort': config['sorted_by'], 'category': category_url_fragment, 'format': 'json_extended',
                  'app_id': 'flexget'}

        for search_string in entry.get('search_strings', [entry['title']]):
            # Clear per-iteration search keys so a previous string's values
            # don't leak into this request.
            params.pop('search_string', None)
            params.pop('search_imdb', None)
            params.pop('search_tvdb', None)
            if entry.get('movie_name'):
                # Movies are looked up by imdb id instead of title text.
                params['search_imdb'] = entry.get('imdb_id')
            else:
                query = normalize_scene(search_string)
                query_url_fragment = query.encode('utf8')
                params['search_string'] = query_url_fragment
                if config['use_tvdb']:
                    plugin.get_plugin_by_name('thetvdb_lookup').instance.lazy_series_lookup(entry, 'en')
                    params['search_tvdb'] = entry.get('tvdb_id')
                    log.debug('Using tvdb id %s', entry.get('tvdb_id'))
            try:
                page = requests.get(self.base_url, params=params)
                log.debug('requesting: %s', page.url)
            except RequestException as e:
                log.error('RarBG request failed: %s' % e.args[0])
                continue
            r = page.json()
            # error code 10 and 20 just mean no results were found
            if r.get('error_code') in [10, 20]:
                searched_string = params.get('search_string') or 'imdb={0}'.format(params.get('search_imdb')) or \
                    'tvdb={0}'.format(params.get('tvdb_id'))
                log.debug('No results found for %s. Message from rarbg: %s', searched_string, r.get('error'))
                continue
            elif r.get('error'):
                log.error('Error code %s: %s', r.get('error_code'), r.get('error'))
                continue
            else:
                for result in r.get('torrent_results'):
                    e = Entry()
                    e['title'] = result.get('title')
                    e['url'] = result.get('download')
                    e['torrent_seeds'] = int(result.get('seeders'))
                    e['torrent_leeches'] = int(result.get('leechers'))
                    # presumably 'size' is bytes, converted here to MB --
                    # TODO confirm against the torrentapi response format
                    e['content_size'] = int(result.get('size')) / 1024 / 1024
                    episode_info = result.get('episode_info')
                    if episode_info:
                        e['imdb_id'] = episode_info.get('imdb')
                        e['tvdb_id'] = episode_info.get('tvdb')
                        e['tvrage_id'] = episode_info.get('tvrage')
                    entries.add(e)
        return entries
@event('plugin.register')
def register_plugin():
    # Register this class with flexget under the name 'rarbg' in the
    # 'search' plugin group (plugin API version 2).
    plugin.register(SearchRarBG, 'rarbg', groups=['search'], api_ver=2)
|
mckerrj/ansible | refs/heads/devel | contrib/inventory/spacewalk.py | 16 | #!/usr/bin/env python
"""
Spacewalk external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is dependent upon the spacealk-reports package being installed
on the same machine. It is basically a CSV-to-JSON converter from the
output of "spacewalk-report system-groups-systems|inventory".
Tested with Ansible 1.9.2 and spacewalk 2.3
"""
#
# Author:: Jon Miller <jonEbird@gmail.com>
# Copyright:: Copyright (c) 2013, Jon Miller
#
# Extended for support of multiple organizations and
# adding the "_meta" dictionary to --list output by
# Bernhard Lichtinger <bernhard.lichtinger@lrz.de> 2015
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function

import ConfigParser
import csv
import os
import subprocess
import sys
import time
from optparse import OptionParser

from six import iteritems
try:
import json
except:
import simplejson as json
base_dir = os.path.dirname(os.path.realpath(__file__))
# CLI provided by the spacewalk-reports package; required for all queries.
SW_REPORT = '/usr/bin/spacewalk-report'
# Report output is cached beside this script and considered fresh for
# CACHE_AGE seconds (overridable via spacewalk.ini below).
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
INI_FILE = os.path.join(base_dir, "spacewalk.ini")
# Sanity check
if not os.path.exists(SW_REPORT):
    print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr)
    sys.exit(1)
# Pre-startup work
if not os.path.exists(CACHE_DIR):
    os.mkdir(CACHE_DIR)
    # setgid + group-writable so multiple admins can share one cache dir.
    os.chmod(CACHE_DIR, 0o2775)
# Helper functions
#------------------------------
def spacewalk_report(name):
    """Yield a dictionary form of each CSV row produced by the specified
    spacewalk-report.

    The report output is cached under CACHE_DIR and regenerated once it is
    older than CACHE_AGE seconds.  Each yielded dict maps
    'spacewalk_<column>' -> value for one data row; rows whose field count
    does not match the header are skipped.
    """
    cache_filename = os.path.join(CACHE_DIR, name)
    if not os.path.exists(cache_filename) or \
            (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
        # Update the cache.  "with" guarantees the handle is closed even if
        # spawning the report command raises (the original leaked it).
        with open(cache_filename, 'w') as fh:
            p = subprocess.Popen([SW_REPORT, name], stdout=fh)
            p.wait()
    # Read via a context manager; the original left this handle open.
    with open(cache_filename, 'r') as fh:
        lines = fh.readlines()
    if not lines:
        # Empty report output: nothing to yield (previously raised IndexError).
        return
    # First CSV line holds the column names; add the 'spacewalk_' prefix.
    keys = ['spacewalk_' + key for key in lines[0].strip().split(',')]
    for line in lines[1:]:
        values = line.strip().split(',')
        if len(keys) == len(values):
            yield dict(zip(keys, values))
# Options
#------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
# --list: emit the whole inventory as JSON (groups plus the "_meta" hostvars).
parser.add_option('--list', default=False, dest="list", action="store_true",
                  help="Produce a JSON consumable grouping of servers for Ansible")
# --host: emit details for one machine only.
parser.add_option('--host', default=None, dest="host",
                  help="Generate additional host specific details for given host for Ansible")
parser.add_option('-H', '--human', dest="human",
                  default=False, action="store_true",
                  help="Produce a friendlier version of either server list or host detail")
# Organization filtering/prefixing; both may also come from spacewalk.ini.
parser.add_option('-o', '--org', default=None, dest="org_number",
                  help="Limit to spacewalk organization number")
parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
                  help="Prefix the group name with the organization number")
(options, args) = parser.parse_args()
# read spacewalk.ini if present
#------------------------------
if os.path.exists(INI_FILE):
    config = ConfigParser.SafeConfigParser()
    config.read(INI_FILE)
    if config.has_option('spacewalk' , 'cache_age'):
        # getint(): plain get() returns a *string*, and comparing the float
        # file age against a string never behaves as intended, so the cache
        # freshness check in spacewalk_report() would silently break.
        CACHE_AGE = config.getint('spacewalk' , 'cache_age')
    # Command-line values take precedence over the ini file.
    if not options.org_number and config.has_option('spacewalk' , 'org_number'):
        options.org_number = config.get('spacewalk' , 'org_number')
    if not options.prefix_org_name and config.has_option('spacewalk' , 'prefix_org_name'):
        options.prefix_org_name = config.getboolean('spacewalk' , 'prefix_org_name')
# Generate dictionary for mapping group_id to org_id
#------------------------------
org_groups = {}
try:
    for group in spacewalk_report('system-groups'):
        org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
except (OSError) as e:
    print('Problem executing the command "%s system-groups": %s' %
          (SW_REPORT, str(e)), file=sys.stderr)
    sys.exit(2)
# List out the known server from Spacewalk
#------------------------------
if options.list:
    # to build the "_meta"-Group with hostvars first create dictionary for later use
    host_vars = {}
    try:
        for item in spacewalk_report('inventory'):
            # Semicolon-separated report fields become lists in hostvars.
            host_vars[ item['spacewalk_profile_name'] ] = dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in item.items() )
    except (OSError) as e:
        print('Problem executing the command "%s inventory": %s' %
              (SW_REPORT, str(e)), file=sys.stderr)
        sys.exit(2)
    groups = {}
    meta = { "hostvars" : {} }
    try:
        for system in spacewalk_report('system-groups-systems'):
            # first get org_id of system
            org_id = org_groups[ system['spacewalk_group_id'] ]
            # shall we add the org_id as prefix to the group name:
            if options.prefix_org_name:
                prefix = org_id + "-"
                group_name = prefix + system['spacewalk_group_name']
            else:
                group_name = system['spacewalk_group_name']
            # if we are limited to one organization:
            if options.org_number:
                if org_id == options.org_number:
                    if group_name not in groups:
                        groups[group_name] = set()
                    groups[group_name].add(system['spacewalk_server_name'])
                    # Only record hostvars once per server.
                    if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
                        meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
            # or we list all groups and systems:
            else:
                if group_name not in groups:
                    groups[group_name] = set()
                groups[group_name].add(system['spacewalk_server_name'])
                if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
                    meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
    except (OSError) as e:
        print('Problem executing the command "%s system-groups-systems": %s' %
              (SW_REPORT, str(e)), file=sys.stderr)
        sys.exit(2)
    if options.human:
        for group, systems in iteritems(groups):
            print('[%s]\n%s\n' % (group, '\n'.join(systems)))
    else:
        # Sets are not JSON-serializable; convert each group to a list first.
        final = dict( [ (k, list(s)) for k, s in iteritems(groups) ] )
        final["_meta"] = meta
        print(json.dumps( final ))
        #print(json.dumps(groups))
    sys.exit(0)
# Return a details information concerning the spacewalk server
#------------------------------
elif options.host:
    host_details = {}
    try:
        for system in spacewalk_report('inventory'):
            if system['spacewalk_hostname'] == options.host:
                host_details = system
                break
    except (OSError) as e:
        print('Problem executing the command "%s inventory": %s' %
              (SW_REPORT, str(e)), file=sys.stderr)
        sys.exit(2)
    if options.human:
        print('Host: %s' % options.host)
        for k, v in iteritems(host_details):
            print(' %s: %s' % (k, '\n '.join(v.split(';'))))
    else:
        print( json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) ) )
    sys.exit(0)
else:
    # Neither --list nor --host given: show usage and fail.
    parser.print_help()
    sys.exit(1)
|
amith01994/intellij-community | refs/heads/master | python/helpers/pydev/_pydev_getopt.py | 108 |
#=======================================================================================================================
# getopt code copied since gnu_getopt is not available on jython 2.1
#=======================================================================================================================
class GetoptError(Exception):
    """Raised when option parsing fails.

    Carries the error text in ``msg`` and the offending option name in
    ``opt``.
    """
    # Class-level fallbacks so both attributes exist even before __init__.
    msg = ''
    opt = ''
    def __init__(self, msg, opt=''):
        """Record the error message and the option that triggered it."""
        self.msg, self.opt = msg, opt
        Exception.__init__(self, msg, opt)
    def __str__(self):
        """The printable form is just the message text."""
        return self.msg
def gnu_getopt(args, shortopts, longopts=[]):
    """getopt(args, options[, long_options]) -> opts, args

    GNU-style scanning: options and non-option arguments may be freely
    intermixed.  If `shortopts` begins with '+', POSIX behaviour is used
    instead and scanning stops at the first non-option argument.
    """
    parsed = []
    leftovers = []
    # Accept a single long-option string as well as any sequence of them.
    if type('') == type(longopts):
        longopts = [longopts]
    else:
        longopts = list(longopts)
    # A leading '+' requests POSIX mode: stop at the first non-option.
    stop_at_non_option = shortopts.startswith('+')
    if stop_at_non_option:
        shortopts = shortopts[1:]
    while args:
        current = args[0]
        if current == '--':
            # Explicit end-of-options marker: the rest is positional.
            leftovers += args[1:]
            break
        if current[:2] == '--':
            parsed, args = do_longs(parsed, current[2:], longopts, args[1:])
        elif current[:1] == '-':
            parsed, args = do_shorts(parsed, current[1:], shortopts, args[1:])
        elif stop_at_non_option:
            leftovers += args
            break
        else:
            leftovers.append(current)
            args = args[1:]
    return parsed, leftovers
def do_longs(opts, opt, longopts, args):
    """Consume one --long option (`opt` is the text after the '--').

    Appends the parsed ('--name', value) pair to `opts` and returns the
    updated (opts, remaining_args) pair.
    """
    # Split an inline "--opt=value" form, if present.
    eq = opt.find('=')
    if eq < 0:
        optarg = None
    else:
        opt, optarg = opt[:eq], opt[eq + 1:]
    has_arg, opt = long_has_args(opt, longopts)
    if has_arg:
        # The argument may be inline ("=value") or the next word in args.
        if optarg is None:
            if not args:
                raise GetoptError('option --%s requires argument' % opt, opt)
            optarg, args = args[0], args[1:]
    elif optarg:
        raise GetoptError('option --%s must not have an argument' % opt, opt)
    opts.append(('--' + opt, optarg or ''))
    return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
    """Resolve `opt` against `longopts` by (possibly abbreviated) prefix.

    Returns a (takes_argument, canonical_name) tuple.  Entries in
    `longopts` ending with '=' declare that the option takes an argument.
    """
    candidates = [candidate for candidate in longopts if candidate.startswith(opt)]
    if not candidates:
        raise GetoptError('option --%s not recognized' % opt, opt)
    # An exact match (with or without the '=' marker) wins outright.
    if opt in candidates:
        return False, opt
    if opt + '=' in candidates:
        return True, opt
    # Otherwise the abbreviation must identify exactly one option.
    if len(candidates) > 1:
        # XXX since candidates contains all valid continuations, might be
        # nice to work them into the error msg
        raise GetoptError('option --%s not a unique prefix' % opt, opt)
    assert len(candidates) == 1
    match = candidates[0]
    takes_arg = match.endswith('=')
    if takes_arg:
        match = match[:-1]
    return takes_arg, match
def do_shorts(opts, optstring, shortopts, args):
    """Consume a clustered short-option string (`optstring` is the text
    after the '-').

    Appends one ('-x', value) pair per flag to `opts`.  A flag that takes
    an argument swallows the rest of the cluster, or failing that the next
    word in `args`.  Returns the updated (opts, args) pair.
    """
    while optstring != '':
        opt = optstring[0]
        optstring = optstring[1:]
        if short_has_arg(opt, shortopts):
            if optstring == '':
                # Argument was not attached; take the next word instead.
                if not args:
                    raise GetoptError('option -%s requires argument' % opt,
                                      opt)
                optstring, args = args[0], args[1:]
            optarg = optstring
            optstring = ''
        else:
            optarg = ''
        opts.append(('-' + opt, optarg))
    return opts, args
def short_has_arg(opt, shortopts):
    """Return True when short option `opt` takes an argument, i.e. when it
    appears in `shortopts` immediately followed by a ':'."""
    for position, candidate in enumerate(shortopts):
        # ':' is the argument marker, never an option name itself.
        if candidate != ':' and opt == candidate:
            return shortopts.startswith(':', position + 1)
    raise GetoptError('option -%s not recognized' % opt, opt)
#=======================================================================================================================
# End getopt code
#=======================================================================================================================
|
coolsee/node-gyp | refs/heads/master | gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    # All rules accumulate under one 'Rules' element.
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies,
                         outputs, extensions):
    """Adds a rule to the tool file.

    Args:
      name: Name of the rule.
      description: Description of the rule.
      cmd: Command line of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    # List-valued inputs are serialized as semicolon-delimited strings,
    # which is the MSVS attribute convention.
    attributes = {
        'Name': name,
        'ExecutionDescription': description,
        'CommandLine': cmd,
        'Outputs': ';'.join(outputs),
        'FileExtensions': ';'.join(extensions),
        'AdditionalDependencies': ';'.join(additional_dependencies),
    }
    self.rules_section.append(['CustomBuildRule', attributes])

  def WriteIfChanged(self):
    """Writes the tool file, only touching disk when content differs."""
    document = ['VisualStudioToolFile',
                {'Version': '8.00',
                 'Name': self.name
                },
                self.rules_section
               ]
    easy_xml.WriteXmlIfChanged(document, self.tool_file_path,
                               encoding="Windows-1252")
|
RuudBurger/CouchPotatoV1 | refs/heads/master | cherrypy/_cptree.py | 82 | """CherryPy Application and Tree objects."""
import os
import sys
import cherrypy
from cherrypy._cpcompat import ntou, py3k
from cherrypy import _cpconfig, _cplogging, _cprequest, _cpwsgi, tools
from cherrypy.lib import httputil
class Application(object):
    """A CherryPy Application.
    Servers and gateways should not instantiate Request objects directly.
    Instead, they should ask an Application object for a request object.
    An instance of this class may also be used as a WSGI callable
    (WSGI application object) for itself.
    """
    root = None
    """The top-most container of page handlers for this app. Handlers should
    be arranged in a hierarchy of attributes, matching the expected URI
    hierarchy; the default dispatcher then searches this hierarchy for a
    matching handler. When using a dispatcher other than the default,
    this value may be None."""
    config = {}
    """A dict of {path: pathconf} pairs, where 'pathconf' is itself a dict
    of {key: value} pairs."""
    namespaces = _cpconfig.NamespaceSet()
    toolboxes = {'tools': cherrypy.tools}
    log = None
    """A LogManager instance. See _cplogging."""
    wsgiapp = None
    """A CPWSGIApp instance. See _cpwsgi."""
    request_class = _cprequest.Request
    response_class = _cprequest.Response
    relative_urls = False
    def __init__(self, root, script_name="", config=None):
        # Each Application gets its own logger keyed by id(self), so several
        # apps in one process do not share log configuration.
        self.log = _cplogging.LogManager(id(self), cherrypy.log.logger_root)
        self.root = root
        self.script_name = script_name
        self.wsgiapp = _cpwsgi.CPWSGIApp(self)
        # Copy the class-level namespace set so per-instance handlers do not
        # mutate the shared class attribute.
        self.namespaces = self.namespaces.copy()
        self.namespaces["log"] = lambda k, v: setattr(self.log, k, v)
        self.namespaces["wsgi"] = self.wsgiapp.namespace_handler
        # Likewise copy class-level config before merging instance config.
        self.config = self.__class__.config.copy()
        if config:
            self.merge(config)
    def __repr__(self):
        return "%s.%s(%r, %r)" % (self.__module__, self.__class__.__name__,
                                  self.root, self.script_name)
    script_name_doc = """The URI "mount point" for this app. A mount point is that portion of
    the URI which is constant for all URIs that are serviced by this
    application; it does not include scheme, host, or proxy ("virtual host")
    portions of the URI.
    For example, if script_name is "/my/cool/app", then the URL
    "http://www.example.com/my/cool/app/page1" might be handled by a
    "page1" method on the root object.
    The value of script_name MUST NOT end in a slash. If the script_name
    refers to the root of the URI, it MUST be an empty string (not "/").
    If script_name is explicitly set to None, then the script_name will be
    provided for each call from request.wsgi_environ['SCRIPT_NAME'].
    """
    def _get_script_name(self):
        if self._script_name is None:
            # None signals that the script name should be pulled from WSGI environ.
            return cherrypy.serving.request.wsgi_environ['SCRIPT_NAME'].rstrip("/")
        return self._script_name
    def _set_script_name(self, value):
        # Normalize away any trailing slash; "" (not "/") denotes the root.
        if value:
            value = value.rstrip("/")
        self._script_name = value
    script_name = property(fget=_get_script_name, fset=_set_script_name,
                           doc=script_name_doc)
    def merge(self, config):
        """Merge the given config into self.config."""
        _cpconfig.merge(self.config, config)
        # Handle namespaces specified in config.
        self.namespaces(self.config.get("/", {}))
    def find_config(self, path, key, default=None):
        """Return the most-specific value for key along path, or default."""
        trail = path or "/"
        # Walk upward from the full path toward "/", returning the first
        # node whose config defines the key.
        while trail:
            nodeconf = self.config.get(trail, {})
            if key in nodeconf:
                return nodeconf[key]
            lastslash = trail.rfind("/")
            if lastslash == -1:
                break
            elif lastslash == 0 and trail != "/":
                trail = "/"
            else:
                trail = trail[:lastslash]
        return default
    def get_serving(self, local, remote, scheme, sproto):
        """Create and return a Request and Response object."""
        req = self.request_class(local, remote, scheme, sproto)
        req.app = self
        for name, toolbox in self.toolboxes.items():
            req.namespaces[name] = toolbox
        resp = self.response_class()
        # Bind the new pair to the current thread's "serving" slot and let
        # engine subscribers know a request is starting.
        cherrypy.serving.load(req, resp)
        cherrypy.engine.publish('acquire_thread')
        cherrypy.engine.publish('before_request')
        return req, resp
    def release_serving(self):
        """Release the current serving (request and response)."""
        req = cherrypy.serving.request
        cherrypy.engine.publish('after_request')
        try:
            req.close()
        except:
            # Cleanup must never propagate; log at severity 40 (ERROR).
            cherrypy.log(traceback=True, severity=40)
        cherrypy.serving.clear()
    def __call__(self, environ, start_response):
        # WSGI entry point: delegate to the wrapped CPWSGIApp.
        return self.wsgiapp(environ, start_response)
class Tree(object):
    """A registry of CherryPy applications, mounted at diverse points.
    An instance of this class may also be used as a WSGI callable
    (WSGI application object), in which case it dispatches to all
    mounted apps.
    """
    apps = {}
    """
    A dict of the form {script name: application}, where "script name"
    is a string declaring the URI mount point (no trailing slash), and
    "application" is an instance of cherrypy.Application (or an arbitrary
    WSGI callable if you happen to be using a WSGI server)."""
    def __init__(self):
        # Per-instance registry; shadows the class attribute above.
        self.apps = {}
    def mount(self, root, script_name="", config=None):
        """Mount a new app from a root object, script_name, and config.
        root
            An instance of a "controller class" (a collection of page
            handler methods) which represents the root of the application.
            This may also be an Application instance, or None if using
            a dispatcher other than the default.
        script_name
            A string containing the "mount point" of the application.
            This should start with a slash, and be the path portion of the
            URL at which to mount the given root. For example, if root.index()
            will handle requests to "http://www.example.com:8080/dept/app1/",
            then the script_name argument would be "/dept/app1".
            It MUST NOT end in a slash. If the script_name refers to the
            root of the URI, it MUST be an empty string (not "/").
        config
            A file or dict containing application config.
        """
        if script_name is None:
            # Fixed typo in this user-facing message: "inpect" -> "inspect".
            raise TypeError(
                "The 'script_name' argument may not be None. Application "
                "objects may, however, possess a script_name of None (in "
                "order to inspect the WSGI environ for SCRIPT_NAME upon each "
                "request). You cannot mount such Applications on this Tree; "
                "you must pass them to a WSGI server interface directly.")
        # Next line both 1) strips trailing slash and 2) maps "/" -> "".
        script_name = script_name.rstrip("/")
        if isinstance(root, Application):
            app = root
            if script_name != "" and script_name != app.script_name:
                raise ValueError("Cannot specify a different script name and "
                                 "pass an Application instance to cherrypy.mount")
            script_name = app.script_name
        else:
            app = Application(root, script_name)
            # If mounted at "", add favicon.ico
            if (script_name == "" and root is not None
                    and not hasattr(root, "favicon_ico")):
                favicon = os.path.join(os.getcwd(), os.path.dirname(__file__),
                                       "favicon.ico")
                root.favicon_ico = tools.staticfile.handler(favicon)
        if config:
            app.merge(config)
        self.apps[script_name] = app
        return app
    def graft(self, wsgi_callable, script_name=""):
        """Mount a wsgi callable at the given script_name."""
        # Next line both 1) strips trailing slash and 2) maps "/" -> "".
        script_name = script_name.rstrip("/")
        self.apps[script_name] = wsgi_callable
    def script_name(self, path=None):
        """The script_name of the app at the given path, or None.
        If path is None, cherrypy.request is used.
        """
        if path is None:
            try:
                request = cherrypy.serving.request
                path = httputil.urljoin(request.script_name,
                                        request.path_info)
            except AttributeError:
                return None
        # Walk from the most specific path up toward "" (the root mount).
        while True:
            if path in self.apps:
                return path
            if path == "":
                return None
            # Move one node up the tree and try again.
            path = path[:path.rfind("/")]
    def __call__(self, environ, start_response):
        # If you're calling this, then you're probably setting SCRIPT_NAME
        # to '' (some WSGI servers always set SCRIPT_NAME to '').
        # Try to look up the app using the full path.
        env1x = environ
        if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
            env1x = _cpwsgi.downgrade_wsgi_ux_to_1x(environ)
        path = httputil.urljoin(env1x.get('SCRIPT_NAME', ''),
                                env1x.get('PATH_INFO', ''))
        sn = self.script_name(path or "/")
        if sn is None:
            start_response('404 Not Found', [])
            return []
        app = self.apps[sn]
        # Correct the SCRIPT_NAME and PATH_INFO environ entries.
        environ = environ.copy()
        if not py3k:
            if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
                # Python 2/WSGI u.0: all strings MUST be of type unicode
                enc = environ[ntou('wsgi.url_encoding')]
                environ[ntou('SCRIPT_NAME')] = sn.decode(enc)
                environ[ntou('PATH_INFO')] = path[len(sn.rstrip("/")):].decode(enc)
            else:
                # Python 2/WSGI 1.x: all strings MUST be of type str
                environ['SCRIPT_NAME'] = sn
                environ['PATH_INFO'] = path[len(sn.rstrip("/")):]
        else:
            if environ.get(ntou('wsgi.version')) == (ntou('u'), 0):
                # Python 3/WSGI u.0: all strings MUST be full unicode
                environ['SCRIPT_NAME'] = sn
                environ['PATH_INFO'] = path[len(sn.rstrip("/")):]
            else:
                # Python 3/WSGI 1.x: all strings MUST be ISO-8859-1 str
                environ['SCRIPT_NAME'] = sn.encode('utf-8').decode('ISO-8859-1')
                environ['PATH_INFO'] = path[len(sn.rstrip("/")):].encode('utf-8').decode('ISO-8859-1')
        return app(environ, start_response)
|
egeriicw/Django_Tutorials | refs/heads/master | A_Complete_Beginners_Guide_to_Django/myproject/myproject/urls.py | 1 | """myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from boards import views
urlpatterns = [
    # Site root renders the boards app's home view.
    url(r'^$', views.home, name='home'),
    # Django admin interface under /admin/.
    url(r'^admin/', admin.site.urls),
]
|
JoeMighty/shouldly | refs/heads/master | docs/conf.py | 5 | # -*- coding: utf-8 -*-
#
# Shouldly documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 01 06:42:41 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Shouldly'
copyright = u'2015, Dave Newman, Xerxes Battiwalla, Anthony Egerton, Peter van der Woude, Jake Ginnivan'
author = u'Dave Newman, Xerxes Battiwalla, Anthony Egerton, Peter van der Woude, Jake Ginnivan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.6.0'
# The full version, including alpha/beta/rc tags.
release = '2.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
# The READTHEDOCS environment variable is injected by the RTD build machines.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Shouldlydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Shouldly.tex', u'Shouldly Documentation',
u'Dave Newman, Xerxes Battiwalla, Anthony Egerton, Peter van der Woude, Jake Ginnivan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'shouldly', u'Shouldly Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Shouldly', u'Shouldly Documentation',
author, 'Shouldly', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
Suninus/NewsBlur | refs/heads/master | vendor/oauth2client/appengine.py | 20 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import base64
import httplib2
import logging
import pickle
import time
import clientsecrets
from google.appengine.api import app_identity
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
from oauth2client import util
from oauth2client.anyjson import simplejson
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import AssertionCredentials
from oauth2client.client import Credentials
from oauth2client.client import Flow
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import Storage
# Module-level logger for this file.
logger = logging.getLogger(__name__)

# Namespace constant for oauth2client cache/datastore keys.
# NOTE(review): consumers of this constant are not visible in this chunk --
# confirm usage before changing.
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
class InvalidClientSecretsError(Exception):
    """Raised when client_secrets.json is malformed or missing required fields."""
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to Google
and other OAuth 2.0 servers that can verify assertions. It can be used for
the purpose of accessing data stored under an account assigned to the App
Engine application itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
@util.positional(2)
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or list of strings, scope(s) of the credentials being
requested.
"""
if type(scope) is list:
scope = ' '.join(scope)
self.scope = scope
super(AppAssertionCredentials, self).__init__(
'ignored' # assertion_type is ignore in this subclass.
)
@classmethod
def from_json(cls, json):
data = simplejson.loads(json)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its own
caching we can skip all the storage hoops and just to a refresh using the
API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
(token, _) = app_identity.get_access_token(self.scope)
except app_identity.Error, e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
class FlowProperty(db.Property):
    """App Engine datastore property holding an oauth2client.Flow.

    The Flow is pickled into a db.Blob on write and unpickled on read,
    making storage and retrieval transparent to the model.
    """

    # The user-facing value type held by this property.
    data_type = Flow

    def get_value_for_datastore(self, model_instance):
        """Pickle the Flow into a Blob for datastore storage."""
        flow = super(FlowProperty, self).get_value_for_datastore(model_instance)
        return db.Blob(pickle.dumps(flow))

    def make_value_from_datastore(self, value):
        """Unpickle the stored Blob back into a Flow (or None when unset)."""
        return None if value is None else pickle.loads(value)

    def validate(self, value):
        """Accept None or Flow instances; reject everything else."""
        if value is None or isinstance(value, Flow):
            return super(FlowProperty, self).validate(value)
        raise db.BadValueError('Property %s must be convertible '
                               'to a FlowThreeLegged instance (%s)' %
                               (self.name, value))

    def empty(self, value):
        """A property value counts as empty when it is falsy."""
        return not value
class CredentialsProperty(db.Property):
    """App Engine datastore property for oauth2client.Credentials.

    Stores the credentials as their JSON serialization in a db.Blob and
    rebuilds the Credentials object on read.
    """

    # The user-facing value type held by this property.
    data_type = Credentials

    def get_value_for_datastore(self, model_instance):
        """Serialize the Credentials to JSON for datastore storage.

        Returns:
            db.Blob containing the JSON form ('' when no credentials are set).
        """
        # Lazy %-args: message is only formatted if this level is enabled.
        logger.info("get: Got type %s", type(model_instance))
        cred = super(CredentialsProperty,
                     self).get_value_for_datastore(model_instance)
        if cred is None:
            cred = ''
        else:
            cred = cred.to_json()
        return db.Blob(cred)

    def make_value_from_datastore(self, value):
        """Rebuild Credentials from the stored JSON blob.

        Returns:
            Credentials, or None when the blob is missing, empty, or cannot
            be parsed.
        """
        logger.info("make: Got type %s", type(value))
        # Covers both None and the empty-string blob written for unset creds.
        if not value:
            return None
        try:
            return Credentials.new_from_json(value)
        except ValueError:
            # Corrupt or legacy blob: behave as if no credentials are stored.
            return None

    def validate(self, value):
        """Accept None or Credentials instances; reject everything else."""
        value = super(CredentialsProperty, self).validate(value)
        logger.info("validate: Got type %s", type(value))
        if value is not None and not isinstance(value, Credentials):
            raise db.BadValueError('Property %s must be convertible '
                                   'to a Credentials instance (%s)' %
                                   (self.name, value))
        return value
class StorageByKeyName(Storage):
    """Store and retrieve a single credential to and from
    the App Engine datastore.

    This Storage helper presumes the Credentials
    have been stored as a CredentialsProperty
    on a datastore model class, and that entities
    are stored by key_name.
    """

    @util.positional(4)
    def __init__(self, model, key_name, property_name, cache=None):
        """Constructor for Storage.

        Args:
            model: db.Model, model class
            key_name: string, key name for the entity that has the credentials
            property_name: string, name of the property that is a CredentialsProperty
            cache: memcache, a write-through cache to put in front of the datastore
        """
        self._model = model
        self._key_name = key_name
        self._property_name = property_name
        self._cache = cache

    def locked_get(self):
        """Retrieve Credential from datastore.

        Returns:
            oauth2client.Credentials
        """
        # Fast path: serve directly from the write-through cache when present.
        if self._cache:
            json = self._cache.get(self._key_name)
            if json:
                return Credentials.new_from_json(json)
        credential = None
        entity = self._model.get_by_key_name(self._key_name)
        if entity is not None:
            credential = getattr(entity, self._property_name)
            # Attach this storage to the credential so refreshed tokens get
            # persisted back through locked_put(); also warm the cache.
            if credential and hasattr(credential, 'set_store'):
                credential.set_store(self)
                if self._cache:
                    self._cache.set(self._key_name, credential.to_json())
        return credential

    def locked_put(self, credentials):
        """Write a Credentials to the datastore.

        Args:
            credentials: Credentials, the credentials to store.
        """
        entity = self._model.get_or_insert(self._key_name)
        setattr(entity, self._property_name, credentials)
        entity.put()
        # Keep the write-through cache in sync with the datastore.
        if self._cache:
            self._cache.set(self._key_name, credentials.to_json())

    def locked_delete(self):
        """Delete Credential from datastore."""
        # Evict from cache first so a concurrent get cannot resurrect it.
        if self._cache:
            self._cache.delete(self._key_name)
        entity = self._model.get_by_key_name(self._key_name)
        if entity is not None:
            entity.delete()
class CredentialsModel(db.Model):
    """Storage for OAuth 2.0 Credentials

    Storage of the model is keyed by the user.user_id().
    """
    # JSON-serialized oauth2client.Credentials for a single user.
    credentials = CredentialsProperty()
class OAuth2Decorator(object):
    """Utility for making OAuth 2.0 easier.

    Instantiate and then use with oauth_required or oauth_aware
    as decorators on webapp.RequestHandler methods.

    Example:

        decorator = OAuth2Decorator(
            client_id='837...ent.com',
            client_secret='Qh...wwI',
            scope='https://www.googleapis.com/auth/plus')

        class MainHandler(webapp.RequestHandler):

            @decorator.oauth_required
            def get(self):
                http = decorator.http()
                # http is authorized with the user's Credentials and can be used
                # in API calls
    """

    @util.positional(4)
    def __init__(self, client_id, client_secret, scope,
                 auth_uri='https://accounts.google.com/o/oauth2/auth',
                 token_uri='https://accounts.google.com/o/oauth2/token',
                 user_agent=None,
                 message=None,
                 callback_path='/oauth2callback',
                 **kwargs):
        """Constructor for OAuth2Decorator

        Args:
            client_id: string, client identifier.
            client_secret: string client secret.
            scope: string or list of strings, scope(s) of the credentials being
                requested.
            auth_uri: string, URI for authorization endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider can
                be used.
            token_uri: string, URI for token endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider can
                be used.
            user_agent: string, User agent of your application, default to None.
            message: Message to display if there are problems with the OAuth 2.0
                configuration. The message may contain HTML and will be
                presented on the web interface for any method that uses the
                decorator.
            callback_path: string, The absolute path to use as the callback
                URI. Note that this must match up with the URI given when
                registering the application in the APIs Console.
            **kwargs: dict, Keyword arguments are passed along as kwargs to the
                OAuth2WebServerFlow constructor.
        """
        # flow and credentials are per-request state, populated lazily by the
        # decorator wrappers below.
        self.flow = None
        self.credentials = None
        self._client_id = client_id
        self._client_secret = client_secret
        self._scope = scope
        self._auth_uri = auth_uri
        self._token_uri = token_uri
        self._user_agent = user_agent
        self._kwargs = kwargs
        self._message = message
        # When True, decorated handlers render _message instead of running.
        self._in_error = False
        self._callback_path = callback_path

    def _display_error_message(self, request_handler):
        # Render the configured error message in place of the handler output.
        request_handler.response.out.write('<html><body>')
        request_handler.response.out.write(self._message)
        request_handler.response.out.write('</body></html>')

    def oauth_required(self, method):
        """Decorator that starts the OAuth 2.0 dance.

        Starts the OAuth dance for the logged in user if they haven't already
        granted access for this application.

        Args:
            method: callable, to be decorated method of a webapp.RequestHandler
                instance.
        """

        def check_oauth(request_handler, *args, **kwargs):
            if self._in_error:
                self._display_error_message(request_handler)
                return

            user = users.get_current_user()
            # Don't use @login_decorator as this could be used in a POST request.
            if not user:
                request_handler.redirect(users.create_login_url(
                    request_handler.request.uri))
                return

            self._create_flow(request_handler)

            # Store the request URI in 'state' so we can use it later
            self.flow.params['state'] = request_handler.request.url
            self.credentials = StorageByKeyName(
                CredentialsModel, user.user_id(), 'credentials').get()

            if not self.has_credentials():
                return request_handler.redirect(self.authorize_url())
            try:
                method(request_handler, *args, **kwargs)
            except AccessTokenRefreshError:
                # Stored token could not be refreshed; restart the dance.
                return request_handler.redirect(self.authorize_url())

        return check_oauth

    def _create_flow(self, request_handler):
        """Create the Flow object.

        The Flow is calculated lazily since we don't know where this app is
        running until it receives a request, at which point redirect_uri can be
        calculated and then the Flow object can be constructed.

        Args:
            request_handler: webapp.RequestHandler, the request handler.
        """
        if self.flow is None:
            redirect_uri = request_handler.request.relative_url(
                self._callback_path)  # Usually /oauth2callback
            self.flow = OAuth2WebServerFlow(self._client_id, self._client_secret,
                                            self._scope, redirect_uri=redirect_uri,
                                            user_agent=self._user_agent,
                                            auth_uri=self._auth_uri,
                                            token_uri=self._token_uri, **self._kwargs)

    def oauth_aware(self, method):
        """Decorator that sets up for OAuth 2.0 dance, but doesn't do it.

        Does all the setup for the OAuth dance, but doesn't initiate it.
        This decorator is useful if you want to create a page that knows
        whether or not the user has granted access to this application.
        From within a method decorated with @oauth_aware the has_credentials()
        and authorize_url() methods can be called.

        Args:
            method: callable, to be decorated method of a webapp.RequestHandler
                instance.
        """

        def setup_oauth(request_handler, *args, **kwargs):
            if self._in_error:
                self._display_error_message(request_handler)
                return

            user = users.get_current_user()
            # Don't use @login_decorator as this could be used in a POST request.
            if not user:
                request_handler.redirect(users.create_login_url(
                    request_handler.request.uri))
                return

            self._create_flow(request_handler)

            self.flow.params['state'] = request_handler.request.url
            self.credentials = StorageByKeyName(
                CredentialsModel, user.user_id(), 'credentials').get()
            # Unlike oauth_required, run the handler even without credentials.
            method(request_handler, *args, **kwargs)

        return setup_oauth

    def has_credentials(self):
        """True if for the logged in user there are valid access Credentials.

        Must only be called from within a webapp.RequestHandler subclassed
        method that had been decorated with either @oauth_required or
        @oauth_aware.
        """
        return self.credentials is not None and not self.credentials.invalid

    def authorize_url(self):
        """Returns the URL to start the OAuth dance.

        Must only be called from within a webapp.RequestHandler subclassed
        method that had been decorated with either @oauth_required or
        @oauth_aware.
        """
        url = self.flow.step1_get_authorize_url()
        return str(url)

    def http(self):
        """Returns an authorized http instance.

        Must only be called from within an @oauth_required decorated method, or
        from within an @oauth_aware decorated method where has_credentials()
        returns True.
        """
        return self.credentials.authorize(httplib2.Http())

    @property
    def callback_path(self):
        """The absolute path where the callback will occur.

        Note this is the absolute path, not the absolute URI, that will be
        calculated by the decorator at runtime. See callback_handler() for how
        this should be used.

        Returns:
            The callback path as a string.
        """
        return self._callback_path

    def callback_handler(self):
        """RequestHandler for the OAuth 2.0 redirect callback.

        Usage:
            app = webapp.WSGIApplication([
                ('/index', MyIndexHandler),
                ...,
                (decorator.callback_path, decorator.callback_handler())
            ])

        Returns:
            A webapp.RequestHandler that handles the redirect back from the
            server during the OAuth 2.0 dance.
        """
        decorator = self

        class OAuth2Handler(webapp.RequestHandler):
            """Handler for the redirect_uri of the OAuth 2.0 dance."""

            @login_required
            def get(self):
                error = self.request.get('error')
                if error:
                    errormsg = self.request.get('error_description', error)
                    self.response.out.write(
                        'The authorization request failed: %s' % errormsg)
                else:
                    user = users.get_current_user()
                    decorator._create_flow(self)
                    credentials = decorator.flow.step2_exchange(self.request.params)
                    StorageByKeyName(
                        CredentialsModel, user.user_id(), 'credentials').put(credentials)
                    # 'state' carries the original request URL (set by the
                    # decorators above); return the user to where they started.
                    self.redirect(str(self.request.get('state')))

        return OAuth2Handler

    def callback_application(self):
        """WSGI application for handling the OAuth 2.0 redirect callback.

        If you need finer grained control use `callback_handler` which returns
        just the webapp.RequestHandler.

        Returns:
            A webapp.WSGIApplication that handles the redirect back from the
            server during the OAuth 2.0 dance.
        """
        return webapp.WSGIApplication([
            (self.callback_path, self.callback_handler())
        ])
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
    """An OAuth2Decorator that builds from a clientsecrets file.

    Uses a clientsecrets file as the source for all the information when
    constructing an OAuth2Decorator.

    Example:

        decorator = OAuth2DecoratorFromClientSecrets(
            os.path.join(os.path.dirname(__file__), 'client_secrets.json')
            scope='https://www.googleapis.com/auth/plus')

        class MainHandler(webapp.RequestHandler):

            @decorator.oauth_required
            def get(self):
                http = decorator.http()
                # http is authorized with the user's Credentials and can be used
                # in API calls
    """

    @util.positional(3)
    def __init__(self, filename, scope, message=None, cache=None):
        """Constructor

        Args:
            filename: string, File name of client secrets.
            scope: string or list of strings, scope(s) of the credentials being
                requested.
            message: string, A friendly string to display to the user if the
                clientsecrets file is missing or invalid. The message may
                contain HTML and will be presented on the web interface for
                any method that uses the decorator.
            cache: An optional cache service client that implements get() and
                set() methods. See clientsecrets.loadfile() for details.

        Raises:
            InvalidClientSecretsError: if the secrets file describes a flow
                type other than 'web' or 'installed'.
        """
        try:
            client_type, client_info = clientsecrets.loadfile(filename, cache=cache)
            if client_type not in [clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
                raise InvalidClientSecretsError('OAuth2Decorator doesn\'t support this OAuth 2.0 flow.')
            super(OAuth2DecoratorFromClientSecrets,
                  self).__init__(
                      client_info['client_id'],
                      client_info['client_secret'],
                      scope,
                      auth_uri=client_info['auth_uri'],
                      token_uri=client_info['token_uri'],
                      message=message)
        except clientsecrets.InvalidClientSecretsError:
            # Enter error mode: decorated handlers will render _message
            # instead of running (see OAuth2Decorator._in_error).
            self._in_error = True
        if message is not None:
            self._message = message
        else:
            self._message = "Please configure your application for OAuth 2.0"
@util.positional(2)
def oauth2decorator_from_clientsecrets(filename, scope,
                                       message=None, cache=None):
    """Creates an OAuth2Decorator populated from a clientsecrets file.

    Thin convenience wrapper around OAuth2DecoratorFromClientSecrets.

    Args:
        filename: string, File name of client secrets.
        scope: string or list of strings, scope(s) of the credentials being
            requested.
        message: string, A friendly string to display to the user if the
            clientsecrets file is missing or invalid. The message may contain
            HTML and will be presented on the web interface for any method
            that uses the decorator.
        cache: An optional cache service client that implements get() and
            set() methods. See clientsecrets.loadfile() for details.

    Returns:
        An OAuth2Decorator.
    """
    return OAuth2DecoratorFromClientSecrets(
        filename, scope, message=message, cache=cache)
|
PeteW/luigi | refs/heads/master | luigi/contrib/sqla.py | 5 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Gouthaman Balaraman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""
Support for SQLAlchemy. Provides SQLAlchemyTarget for storing in databases
supported by SQLAlchemy. The user would be responsible for installing the
required database driver to connect using SQLAlchemy.
Minimal example of a job to copy data to database using SQLAlchemy is as shown
below:
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
class SQLATask(sqla.CopyToTable):
# columns defines the table schema, with each element corresponding
# to a column in the format (args, kwargs) which will be sent to
# the sqlalchemy.Column(*args, **kwargs)
columns = [
(["item", String(64)], {"primary_key": True}),
(["property", String(64)], {})
]
connection_string = "sqlite://" # in memory SQLite database
table = "item_property" # name of the table to store data
def rows(self):
for row in [("item1", "property1"), ("item2", "property2")]:
yield row
if __name__ == '__main__':
task = SQLATask()
luigi.build([task], local_scheduler=True)
If the target table where the data needs to be copied already exists, then
the column schema definition can be skipped and instead the reflect flag
can be set as True. Here is a modified version of the above example:
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
class SQLATask(sqla.CopyToTable):
# If database table is already created, then the schema can be loaded
# by setting the reflect flag to True
reflect = True
connection_string = "sqlite://" # in memory SQLite database
table = "item_property" # name of the table to store data
def rows(self):
for row in [("item1", "property1"), ("item2", "property2")]:
yield row
if __name__ == '__main__':
task = SQLATask()
luigi.build([task], local_scheduler=True)
In the above examples, the data that needs to be copied was directly provided by
overriding the rows method. Alternately, if the data comes from another task, the
modified example would look as shown below:
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
from luigi.mock import MockTarget
class BaseTask(luigi.Task):
def output(self):
return MockTarget("BaseTask")
def run(self):
out = self.output().open("w")
TASK_LIST = ["item%d\\tproperty%d\\n" % (i, i) for i in range(10)]
for task in TASK_LIST:
out.write(task)
out.close()
class SQLATask(sqla.CopyToTable):
# columns defines the table schema, with each element corresponding
# to a column in the format (args, kwargs) which will be sent to
# the sqlalchemy.Column(*args, **kwargs)
columns = [
(["item", String(64)], {"primary_key": True}),
(["property", String(64)], {})
]
connection_string = "sqlite://" # in memory SQLite database
table = "item_property" # name of the table to store data
def requires(self):
return BaseTask()
if __name__ == '__main__':
task1, task2 = SQLATask(), BaseTask()
luigi.build([task1, task2], local_scheduler=True)
In the above example, the output from `BaseTask` is copied into the
database. Here we did not have to implement the `rows` method because
by default `rows` implementation assumes every line is a row with
column values separated by a tab. One can define `column_separator`
option for the task if the values are say comma separated instead of
tab separated.
You can pass in database specific connection arguments by setting the connect_args
dictionary. The options will be passed directly to the DBAPI's connect method as
keyword arguments.
The other option to `sqla.CopyToTable` that can be of help with performance aspect is the
`chunk_size`. The default is 5000. This is the number of rows that will be inserted in
a transaction at a time. Depending on the size of the inserts, this value can be tuned
for performance.
See here for a `tutorial on building task pipelines using luigi
<http://gouthamanbalaraman.com/blog/building-luigi-task-pipeline.html>`_ and
using `SQLAlchemy in workflow pipelines <http://gouthamanbalaraman.com/blog/sqlalchemy-luigi-workflow-pipeline.html>`_.
Author: Gouthaman Balaraman
Date: 01/02/2015
"""
import abc
import collections
import datetime
import itertools
import logging
import luigi
import os
import sqlalchemy
class SQLAlchemyTarget(luigi.Target):
    """
    Database target using SQLAlchemy.

    This will rarely have to be directly instantiated by the user.
    Typical usage would be to override `luigi.contrib.sqla.CopyToTable` class
    to create a task to write to the database.
    """
    marker_table = None

    # Cache of engines keyed by connection string. An engine is rebuilt when
    # it was created in a different process (engines must not survive fork()).
    _engine_dict = {}  # dict of sqlalchemy engine instances
    Connection = collections.namedtuple("Connection", "engine pid")

    def __init__(self, connection_string, target_table, update_id, echo=False, connect_args=None):
        """
        Constructor for the SQLAlchemyTarget.

        :param connection_string: SQLAlchemy connection string
        :type connection_string: str
        :param target_table: The table name for the data
        :type target_table: str
        :param update_id: An identifier for this data set
        :type update_id: str
        :param echo: Flag to setup SQLAlchemy logging
        :type echo: bool
        :param connect_args: A dictionary of connection arguments passed to
            the DBAPI's connect method
        :type connect_args: dict
        """
        if connect_args is None:
            connect_args = {}

        self.target_table = target_table
        self.update_id = update_id
        self.connection_string = connection_string
        self.echo = echo
        self.connect_args = connect_args
        # Bound marker table object; populated lazily by create_marker_table.
        self.marker_table_bound = None

    @property
    def engine(self):
        """
        Return an engine instance, creating it if it doesn't exist.

        Recreate the engine connection if it wasn't originally created
        by the current process.
        """
        pid = os.getpid()
        conn = SQLAlchemyTarget._engine_dict.get(self.connection_string)
        if not conn or conn.pid != pid:
            # create and reset connection
            engine = sqlalchemy.create_engine(
                self.connection_string,
                connect_args=self.connect_args,
                echo=self.echo
            )
            SQLAlchemyTarget._engine_dict[self.connection_string] = self.Connection(engine, pid)
        return SQLAlchemyTarget._engine_dict[self.connection_string].engine

    def touch(self):
        """
        Mark this update as complete by inserting/refreshing the marker row.
        """
        if self.marker_table_bound is None:
            self.create_marker_table()
        table = self.marker_table_bound
        id_exists = self.exists()
        with self.engine.begin() as conn:
            if not id_exists:
                ins = table.insert().values(update_id=self.update_id, target_table=self.target_table,
                                            inserted=datetime.datetime.now())
            else:
                ins = table.update().where(sqlalchemy.and_(table.c.update_id == self.update_id,
                                                           table.c.target_table == self.target_table)).\
                    values(update_id=self.update_id, target_table=self.target_table,
                           inserted=datetime.datetime.now())
            conn.execute(ins)
        assert self.exists()

    def exists(self):
        """Return True when a marker row exists for this update_id/table pair."""
        row = None
        if self.marker_table_bound is None:
            self.create_marker_table()
        with self.engine.begin() as conn:
            table = self.marker_table_bound
            s = sqlalchemy.select([table]).where(sqlalchemy.and_(table.c.update_id == self.update_id,
                                                                 table.c.target_table == self.target_table)).limit(1)
            row = conn.execute(s).fetchone()
        return row is not None

    def create_marker_table(self):
        """
        Create marker table if it doesn't exist.

        Using a separate connection since the transaction might have to be reset.
        """
        if self.marker_table is None:
            self.marker_table = luigi.configuration.get_config().get('sqlalchemy', 'marker-table', 'table_updates')

        engine = self.engine
        with engine.begin() as con:
            metadata = sqlalchemy.MetaData()
            if not con.dialect.has_table(con, self.marker_table):
                self.marker_table_bound = sqlalchemy.Table(
                    self.marker_table, metadata,
                    sqlalchemy.Column("update_id", sqlalchemy.String(128), primary_key=True),
                    sqlalchemy.Column("target_table", sqlalchemy.String(128)),
                    # BUGFIX: pass the callable, not its result. The original
                    # `default=datetime.datetime.now()` was evaluated once at
                    # table-definition time, so every row defaulted to the
                    # table-creation timestamp.
                    sqlalchemy.Column("inserted", sqlalchemy.DateTime, default=datetime.datetime.now))
                metadata.create_all(engine)
            else:
                metadata.reflect(only=[self.marker_table], bind=engine)
                self.marker_table_bound = metadata.tables[self.marker_table]

    def open(self, mode):
        raise NotImplementedError("Cannot open() SQLAlchemyTarget")
class CopyToTable(luigi.Task):
    """
    An abstract task for inserting a data set into SQLAlchemy RDBMS

    Usage:

    * subclass and override the required `connection_string`, `table` and
      `columns` attributes.
    * optionally override the `schema` attribute to use a different schema for
      the target table.
    """
    _logger = logging.getLogger('luigi-interface')

    echo = False
    connect_args = {}

    @abc.abstractproperty
    def connection_string(self):
        return None

    @abc.abstractproperty
    def table(self):
        return None

    # specify the columns that define the schema. The format for the columns is a list
    # of tuples. For example :
    # columns = [
    #     (["id", sqlalchemy.Integer], dict(primary_key=True)),
    #     (["name", sqlalchemy.String(64)], {}),
    #     (["value", sqlalchemy.String(64)], {})
    # ]
    # The tuple (args_list, kwargs_dict) here is the args and kwargs
    # that need to be passed to sqlalchemy.Column(*args, **kwargs).
    # If the tables have already been setup by another process, then you can
    # completely ignore the columns. Instead set the reflect value to True below
    columns = []

    # Specify the database schema of the target table, if supported by the
    # RDBMS. Note that this doesn't change the schema of the marker table.
    # The schema MUST already exist in the database, or this task will fail.
    schema = ''

    # options
    column_separator = "\t"  # how columns are separated in the file copied into postgres
    chunk_size = 5000  # default chunk size for insert
    reflect = False  # Set this to true only if the table has already been created by alternate means

    def create_table(self, engine):
        """
        Override to provide code for creating the target table.

        By default it will be created using types specified in columns.
        If the table exists, then it binds to the existing table.

        If overridden, use the provided connection object for setting up the
        table in order to create the table and insert data using the same
        transaction.

        :param engine: The sqlalchemy engine instance
        :type engine: object
        """
        def construct_sqla_columns(columns):
            retval = [sqlalchemy.Column(*c[0], **c[1]) for c in columns]
            return retval

        needs_setup = (len(self.columns) == 0) or (False in [len(c) == 2 for c in self.columns]) if not self.reflect else False
        if needs_setup:
            # only names of columns specified, no types
            raise NotImplementedError("create_table() not implemented for %r and columns types not specified" % self.table)
        else:
            # if columns is specified as (name, type) tuples
            with engine.begin() as con:
                if self.schema:
                    metadata = sqlalchemy.MetaData(schema=self.schema)
                else:
                    metadata = sqlalchemy.MetaData()
                try:
                    if not con.dialect.has_table(con, self.table, self.schema or None):
                        sqla_columns = construct_sqla_columns(self.columns)
                        self.table_bound = sqlalchemy.Table(self.table, metadata, *sqla_columns)
                        metadata.create_all(engine)
                    else:
                        full_table = '.'.join([self.schema, self.table]) if self.schema else self.table
                        metadata.reflect(only=[self.table], bind=engine)
                        self.table_bound = metadata.tables[full_table]
                except Exception as e:
                    self._logger.exception(self.table + str(e))
                    # BUGFIX: re-raise instead of silently swallowing;
                    # otherwise run() fails later with a confusing
                    # AttributeError because self.table_bound was never set.
                    raise

    def update_id(self):
        """
        This update id will be a unique identifier for this insert on this table.
        """
        return self.task_id

    def output(self):
        """Return the SQLAlchemyTarget used to mark this insert as done."""
        return SQLAlchemyTarget(
            connection_string=self.connection_string,
            target_table=self.table,
            update_id=self.update_id(),
            connect_args=self.connect_args,
            echo=self.echo)

    def rows(self):
        """
        Return/yield tuples or lists corresponding to each row to be inserted.

        This method can be overridden for custom file types or formats.
        """
        with self.input().open('r') as fobj:
            for line in fobj:
                yield line.strip("\n").split(self.column_separator)

    def _next_chunk(self, rows):
        """Pull up to chunk_size rows, keyed '_<column>' for bindparam usage."""
        return [dict(zip(("_" + c.key for c in self.table_bound.c), row))
                for row in itertools.islice(rows, self.chunk_size)]

    def run(self):
        self._logger.info("Running task copy to table for update id %s for table %s", self.update_id(), self.table)
        output = self.output()
        engine = output.engine
        self.create_table(engine)
        total = 0
        with engine.begin() as conn:
            rows = iter(self.rows())
            ins_rows = self._next_chunk(rows)
            while ins_rows:
                self.copy(conn, ins_rows, self.table_bound)
                total += len(ins_rows)
                ins_rows = self._next_chunk(rows)
        output.touch()
        # BUGFIX: the original logged len() of the terminating (empty) chunk,
        # so it always reported 0 rows; report the true running total.
        self._logger.info("Finished inserting %d rows into SQLAlchemy target", total)

    def copy(self, conn, ins_rows, table_bound):
        """
        This method does the actual insertion of the rows of data given by
        ins_rows into the database. A task that needs row updates instead of
        insertions should overload this method.

        :param conn: The sqlalchemy connection object
        :param ins_rows: The dictionary of rows with the keys in the format
            _<column_name>. For example if you have a table with a column name
            "property", then the key in the dictionary would be "_property".
            This format is consistent with the bindparam usage in sqlalchemy.
        :param table_bound: The object referring to the table
        :return:
        """
        bound_cols = dict((c, sqlalchemy.bindparam("_" + c.key)) for c in table_bound.columns)
        ins = table_bound.insert().values(bound_cols)
        conn.execute(ins, ins_rows)
|
def global_func():
    """Call a nested function, then print a marker the test can observe."""
    def inner_func():
        # Intentionally empty: exists only as a call for the debugger to
        # step over (this file serves a StepOverBreakpoint test).
        pass
    inner_func()
    print('ok')

# Module-level call so running the script produces the 'ok' marker.
global_func()
|
okolisny/integration_tests | refs/heads/master | scripts/latest_template_tester_report.py | 7 | #!/usr/bin/env python2
import argparse
import datetime
import re
import sys
from contextlib import closing
from jinja2 import Environment, FileSystemLoader
from urllib2 import urlopen, HTTPError
from cfme.utils import trackerbot
from cfme.utils.conf import cfme_data
from cfme.utils.path import template_path, log_path
from cfme.utils.providers import list_provider_keys
from cfme.utils.ssh import SSHClient
from cfme.utils.wait import wait_for
# Jinja2 environment that loads report templates from the project's
# template directory.
template_env = Environment(
    loader=FileSystemLoader(template_path.strpath)
)
def parse_cmd_line():
    """Parse command-line options for the template-tester report script.

    Returns:
        argparse.Namespace with trackerbot_url, stream, appliance_template,
        provider and output attributes.
    """
    parser = argparse.ArgumentParser(argument_default=None)
    # BUGFIX: the flag was misspelled '--tracketbot-url'. Keep the old
    # spelling as an alias for backward compatibility with existing jobs,
    # and add the correctly spelled '--trackerbot-url'.
    parser.add_argument("--trackerbot-url", "--tracketbot-url",
                        dest="trackerbot_url",
                        help="tracker bot url to make api call",
                        default='http://10.16.4.32/trackerbot/api')
    parser.add_argument("--stream", dest="stream",
                        help="stream to generate the template test result")
    parser.add_argument("--template", dest="appliance_template",
                        help="appliance/latest template name")
    parser.add_argument("--provider", dest="provider",
                        help="provider under test")
    parser.add_argument("--output", dest="output", help="target file name",
                        default=log_path.join('template_tester_results.log').strpath)
    args = parser.parse_args()
    return args
# TODO is this completely unused?
def make_ssh_client(rhevip, sshname, sshpass):
    """Build an SSHClient configured for the given RHEV host and credentials."""
    return SSHClient(
        hostname=rhevip,
        username=sshname,
        password=sshpass,
    )
def get_latest_tested_template_on_stream(api, template_stream_name, template_name):
    """Collect pass/fail provider info for the newest tested template of a stream.

    Args:
        api: trackerbot API client.
        template_stream_name: stream group name (e.g. 'downstream-55z').
        template_name: appliance/latest template name.

    Returns:
        dict with template_name, passed_on_providers, failed_on_providers,
        group_name and datestamp keys; empty dict when no tested template
        matches.
    """
    stream = {}
    try:
        # NOTE(review): wait_for_images_on_web_repo and
        # wait_for_templates_on_providers are not defined in this module --
        # presumably provided elsewhere; confirm before reuse.
        wait_for_images_on_web_repo(template_stream_name, template_name)
        wait_for_templates_on_providers(api, template_stream_name, template_name)
    except Exception as e:
        # Best-effort: report generation continues even when not every
        # provider image made it to the latest directory.
        print(e)
        print("less than three provider images are uploaded to latest directory")

    for temp in api.template.get(
            limit=1, tested=True, group__name=template_stream_name).get('objects', []):
        stream['template_name'] = temp['name']
        passed_on_providers = []
        failed_on_providers = []
        usable_providers = temp['usable_providers']
        all_providers = temp['providers']
        # Partition providers into passed/failed based on usability results.
        if len(usable_providers) == len(all_providers):
            passed_on_providers = all_providers
        elif not usable_providers:
            failed_on_providers = all_providers
        else:
            passed_on_providers = usable_providers
            failed_on_providers = list(set(all_providers) - set(usable_providers))
        stream['passed_on_providers'] = passed_on_providers
        stream['failed_on_providers'] = failed_on_providers
        stream['group_name'] = temp['group']['name']
        stream['datestamp'] = temp['datestamp']
    return stream
def images_uploaded(stream):
    """Checks for the uploaded build images at the latest directory.

    The stream name in the weburl for the latest directory is formatted
    differently on trackerbot, so the caller passes the trackerbot-style
    name (e.g. downstream-55z, downstream-nightly, upstream).

    Args:
        stream: stream name in trackerbot stream name format.

    Returns:
        dict mapping 'template_<provider-type>' to the first matching image
        link found in the directory listing, or None when the listing could
        not be fetched (HTTPError).
    """
    dir_url = cfme_data['basic_info']['cfme_images_url'][stream]
    try:
        with closing(urlopen(dir_url)) as urlpath:
            string_from_url = urlpath.read()
    except HTTPError as e:
        print(str(e))
        return None

    # result key -> regex matching that provider type's image link
    patterns = (
        ('template_rhevm', r'<a href="?\'?([^"\']*(?:rhevm|ovirt)[^"\'>]*)'),
        ('template_rhos', r'<a href="?\'?([^"\']*(?:rhos|openstack|rhelosp)[^"\'>]*)'),
        ('template_scvmm', r'<a href="?\'?([^"\']*hyperv[^"\'>]*)'),
        ('template_vsphere', r'<a href="?\'?([^"\']*vsphere[^"\'>]*)'),
    )
    name_dict = {}
    for key, pattern in patterns:
        matches = re.findall(pattern, string_from_url)
        # truthiness test replaces the original `len(x) is not 0`, which
        # identity-compared an int to a literal and only worked by accident
        # of CPython small-int caching
        if matches:
            name_dict[key] = matches[0]
    return name_dict
def all_images_uploaded(stream, template=None):
    """Return True when images for all four provider types are in the latest dir.

    Exits the process when untested templates still exist for *stream*.
    """
    # NOTE(review): relies on the module-level `api` bound under __main__,
    # same as the original — confirm this is only called from this script.
    if get_untested_templates(api, stream, template):
        print('report will not be generated, proceed with the next untested provider')
        sys.exit()
    # Fetch the directory listing once instead of once per provider type
    # (each images_uploaded call is an HTTP round-trip).  images_uploaded
    # returns None on HTTPError; the original would then raise TypeError on
    # `'x' not in None` — treat a failed fetch as "nothing uploaded".
    uploaded = images_uploaded(stream) or {}
    required = ('template_rhevm', 'template_rhos', 'template_vsphere', 'template_scvmm')
    return all(key in uploaded for key in required)
def wait_for_images_on_web_repo(stream, template):
    """Block until every provider image shows up in the latest directory.

    Returns True on success, False when the wait times out or errors.
    """
    try:
        print('wait for images upload to latest directory')
        wait_for(all_images_uploaded, [stream, template],
                 fail_condition=False, delay=5, timeout='30m')
    except Exception as e:
        print(e)
        return False
    return True
def templates_uploaded_on_providers(api, stream, template):
    """Return True when every uploaded image has reached its providers.

    Exits the process when untested templates still exist for *stream*.
    """
    if get_untested_templates(api, stream, template):
        print('report will not be generated, proceed with the next untested provider')
        sys.exit()
    # One HTTP fetch of the latest-directory listing instead of one per
    # provider type; a failed fetch (None) is treated as "nothing uploaded"
    # rather than raising TypeError as the original did.
    uploaded = images_uploaded(stream) or {}
    # image key -> provider type whose providers must already carry the template
    checks = (('template_rhevm', 'rhevm'),
              ('template_rhos', 'openstack'),
              ('template_vsphere', 'virtualcenter'),
              ('template_scvmm', 'scvmm'))
    for temp in api.template.get(
            limit=1, tested=False, group__name=stream).get('objects', []):
        for image_key, provider_type in checks:
            if image_key in uploaded and not provider_in_the_list(
                    list_provider_keys(provider_type), temp['providers']):
                return False
    return True
def wait_for_templates_on_providers(api, stream, template):
    """Block until templates have been uploaded to all expected providers.

    Returns True on success, False on timeout/error — now consistent with
    wait_for_images_on_web_repo (the original fell through returning None).
    """
    try:
        print('wait for templates upload to providers')
        wait_for(templates_uploaded_on_providers,
                 [api, stream, template], fail_condition=False, delay=5, timeout='40m')
        return True
    except Exception as e:
        print(e)
        return False
def get_untested_templates(api, stream_group, appliance_template=None):
    """Return trackerbot's untested-template records for *stream_group*.

    When *appliance_template* is given, the query is narrowed to it.
    """
    response = api.untestedtemplate.get(
        template__group__name=stream_group, template=appliance_template)
    return response.get('objects', [])
def provider_in_the_list(provider_list, list_criteria):
    """Return the providers from *provider_list* that also appear in *list_criteria*."""
    criteria = set(list_criteria)
    return [candidate for candidate in provider_list if candidate in criteria]
def generate_html_report(api, stream, filename, appliance_template):
    """Write a pass/fail report for the latest tested template of *stream*.

    Renders an HTML report when *filename* contains 'html'; otherwise appends
    one plain-text section per provider type (rhos/rhevm/vsphere/scvmm) to
    *filename*.  Calls sys.exit() early when untested templates remain, when
    *appliance_template* is not the latest tested template, or when required
    images are missing with nothing passed or failed.

    NOTE(review): this function uses Python-2-only syntax (tuple-parameter
    lambdas, list-returning map/filter) and must run under Python 2.
    """
    status = 'PASSED'
    # snapshot the image count so a mid-run upload can be detected below
    number_of_images_before = len(images_uploaded(stream))
    if get_untested_templates(api, stream, appliance_template):
        print('report will not be generated, proceed with the next untested provider')
        sys.exit()
    stream_data = get_latest_tested_template_on_stream(api, stream, appliance_template)
    # if new images appeared while gathering data, wait for provider upload
    if len(images_uploaded(stream)) > number_of_images_before:
        print("new images are uploaded on latest directory, wait for upload on providers")
        wait_for_templates_on_providers(api, stream, appliance_template)
    if appliance_template and appliance_template != stream_data['template_name']:
        print("the report will be generated only for the latest templates")
        sys.exit()
    if stream_data and not get_untested_templates(api, stream_data['group_name'],
                                                  appliance_template):
        print("Found tested template for {}".format(stream))
        print("Gathering tested template data for {}".format(stream))
        print("Updating the template log")
        stream_html = [stream_data['template_name'], stream_data['passed_on_providers'],
                       stream_data['failed_on_providers'], stream_data['group_name'],
                       stream_data['datestamp']]
        if 'html' in filename:
            data = template_env.get_template('template_tester_report.html').render(
                upstream=stream_html)
            with open(filename, 'w') as report:
                report.write(data)
        else:
            with open(filename, 'a+') as report:
                # --- OpenStack (rhos) section ---
                if 'template_rhos' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for OpenStack in latest directory')
                    report.write('\n\nMISSING: Image for OpenStack in latest directory')
                elif provider_in_the_list(list_provider_keys('openstack'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(images_uploaded(stream)['template_rhos']))
                    # map() used for its side effect; py2 tuple-parameter lambda
                    map(lambda (x): report.write('\n{}: Passed'.format(x)), provider_in_the_list(
                        list_provider_keys('openstack'), stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('openstack'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(images_uploaded(stream)['template_rhos']))
                    map(lambda (x): report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('openstack'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: OpenStack template is not available on any '
                          'rhos providers yet')
                    report.write('\n\nMISSING: OpenStack template is not available on any '
                                 'rhos providers yet')
                # --- RHEV-M section ---
                if 'template_rhevm' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for RHEVM in latest directory')
                    report.write('\n\nMISSING: Image for RHEVM in latest directory')
                elif provider_in_the_list(list_provider_keys('rhevm'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_rhevm']))
                    map(lambda(x): report.write('\n{}: Passed'.format(x)), provider_in_the_list(
                        list_provider_keys('rhevm'), stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('rhevm'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_rhevm']))
                    map(lambda(x): report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('rhevm'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: RHEVM template is not available on any '
                          'rhevm providers yet')
                    report.write('\n\nMISSING: RHEVM template is not available on any '
                                 'rhevm providers yet')
                # --- vSphere section ---
                if 'template_vsphere' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for VIRTUALCENTER in latest directory')
                    report.write('\n\nMISSING: Image for VIRTUALCENTER in latest directory')
                elif provider_in_the_list(list_provider_keys('virtualcenter'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_vsphere']))
                    map(lambda (x): report.write('\n{}: Passed'.format(x)), provider_in_the_list(
                        list_provider_keys('virtualcenter'), stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('virtualcenter'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_vsphere']))
                    map(lambda (x): report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('virtualcenter'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: VIRTUALCENTER template is not available on any '
                          'vmware providers yet')
                    report.write('\n\nMISSING: VIRTUALCENTER template is not available on any '
                                 'vmware providers yet')
                # --- SCVMM section ---
                if 'template_scvmm' not in images_uploaded(stream):
                    print('\n\nMISSING: Image for SCVMM in latest directory')
                    report.write('\n\nMISSING: Image for SCVMM in latest directory')
                elif provider_in_the_list(list_provider_keys('scvmm'),
                                          stream_data['passed_on_providers']):
                    report.write('\n\nPASSED: {}'.format(
                        images_uploaded(stream)['template_scvmm']))
                    map(lambda (x): report.write('\n{}: Passed'.format(x)), provider_in_the_list(
                        list_provider_keys('scvmm'), stream_data['passed_on_providers']))
                elif provider_in_the_list(list_provider_keys('scvmm'),
                                          stream_data['failed_on_providers']):
                    report.write('\n\nFAILED: {}'.format(
                        images_uploaded(stream)['template_scvmm']))
                    map(lambda (x): report.write('\n{}: Failed'.format(x)),
                        provider_in_the_list(list_provider_keys('scvmm'),
                                             stream_data['failed_on_providers']))
                else:
                    print('\n\nMISSING: SCVMM template is not available on any '
                          'scvmm providers yet')
                    report.write('\n\nMISSING: SCVMM template is not available on any '
                                 'scvmm providers yet')
                # re-read everything written so far to derive overall status
                # (py2 filter returns lists, so truthiness checks work below)
                report.seek(0, 0)
                lines = report.readlines()
                template_missing = filter(lambda (x): "MISSING" in x, lines)
                template_passed = filter(lambda (x): "PASSED" in x, lines)
                template_failed = filter(lambda (x): "FAILED" in x, lines)
                if template_failed:
                    status = "FAILED"
                if template_missing and not (template_passed or template_failed):
                    report.close()
                    sys.exit("Template is MISSING....Please verify uploads....")
        print("template_tester_results report generated:{}".format(status))
    else:
        print("No Templates tested on: {}".format(datetime.datetime.now()))
if __name__ == '__main__':
    # Entry point: parse CLI options, build the trackerbot API client and
    # emit the report for the requested stream/template.
    args = parse_cmd_line()
    api = trackerbot.api(args.trackerbot_url)
    # Both stream and template are required; fail fast with usage guidance.
    if not args.stream or not args.appliance_template:
        sys.exit("stream and appliance_template "
                 "cannot be None, specify the stream as --stream <stream-name>"
                 "and template as --template <template-name>")
    generate_html_report(api, args.stream, args.output, args.appliance_template)
|
WQuanfeng/wagtail | refs/heads/master | wagtail/wagtailimages/formats.py | 31 | from django.utils.html import escape
from wagtail.utils.apps import get_app_submodules
from wagtail.wagtailimages.models import SourceImageIOError
class Format(object):
    """A named image display format (alignment classes + rendition filter spec).

    ``filter_spec`` (e.g. ``'width-500'``) selects the rendition size;
    ``classnames`` are emitted on the rendered ``<img>`` tag.
    """
    def __init__(self, name, label, classnames, filter_spec):
        self.name = name          # unique registry key
        self.label = label        # human-readable label shown in the editor
        self.classnames = classnames
        self.filter_spec = filter_spec

    def editor_attributes(self, image, alt_text):
        """
        Return string of additional attributes to go on the HTML element
        when outputting this image within a rich text editor field
        """
        # escape() the alt text: it is user-supplied, and unescaped quotes
        # would break out of the data-alt attribute (XSS vector)
        return 'data-embedtype="image" data-id="%d" data-format="%s" data-alt="%s" ' % (
            image.id, self.name, escape(alt_text)
        )

    def image_to_editor_html(self, image, alt_text):
        """Render *image* for the rich text editor, with editor data attributes."""
        return self.image_to_html(
            image, alt_text, self.editor_attributes(image, alt_text)
        )

    def image_to_html(self, image, alt_text, extra_attributes=''):
        """Render *image* as an <img> tag using this format's filter spec."""
        try:
            rendition = image.get_rendition(self.filter_spec)
        except SourceImageIOError:
            # Image file is (probably) missing from /media/original_images - generate a dummy
            # rendition so that we just output a broken image, rather than crashing out completely
            # during rendering
            Rendition = image.renditions.model  # pick up any custom Image / Rendition classes that may be in use
            rendition = Rendition(image=image, width=0, height=0)
            rendition.file.name = 'not-found'

        if self.classnames:
            class_attr = 'class="%s" ' % escape(self.classnames)
        else:
            class_attr = ''

        # alt text is escaped for the same reason as in editor_attributes
        return '<img %s%ssrc="%s" width="%d" height="%d" alt="%s">' % (
            extra_attributes, class_attr,
            escape(rendition.url), rendition.width, rendition.height, escape(alt_text)
        )
# Global registries: ordered list of formats, plus a name -> Format index.
FORMATS = []
FORMATS_BY_NAME = {}


def register_image_format(format):
    """Add *format* to the global registries, keyed by its unique name."""
    if format.name in FORMATS_BY_NAME:
        raise KeyError("Image format '%s' is already registered" % format.name)
    FORMATS.append(format)
    FORMATS_BY_NAME[format.name] = format
def unregister_image_format(format_name):
    """Remove a format from the registries, given its name or the Format itself."""
    global FORMATS
    # handle being passed a format object rather than a format name string
    format_name = getattr(format_name, 'name', format_name)
    try:
        del FORMATS_BY_NAME[format_name]
    except KeyError:
        raise KeyError("Image format '%s' is not registered" % format_name)
    else:
        FORMATS = [fmt for fmt in FORMATS if fmt.name != format_name]
def get_image_formats():
    """Return all registered formats, discovering app-defined ones first."""
    search_for_image_formats()
    return FORMATS
def get_image_format(name):
    """Look up one registered format by name; raises KeyError if unknown."""
    search_for_image_formats()
    return FORMATS_BY_NAME[name]
# one-shot flag so the app scan below only ever runs once per process
_searched_for_image_formats = False


def search_for_image_formats():
    """Import each installed app's image_formats module once, so that
    module-level register_image_format() calls take effect."""
    global _searched_for_image_formats
    if _searched_for_image_formats:
        return
    # get_app_submodules is a generator; drain it to trigger the imports
    list(get_app_submodules('image_formats'))
    _searched_for_image_formats = True
# Define default image formats
# These are the built-in options offered in the rich text editor's image
# chooser; the filter spec (e.g. 'width-800') controls the rendition size.
register_image_format(Format('fullwidth', 'Full width', 'richtext-image full-width', 'width-800'))
register_image_format(Format('left', 'Left-aligned', 'richtext-image left', 'width-500'))
register_image_format(Format('right', 'Right-aligned', 'richtext-image right', 'width-500'))
|
JeyZeta/Dangerous | refs/heads/master | Dangerous/Golismero/tools/sqlmap/waf/jiasule.py | 8 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTP_HEADER
from lib.core.settings import WAF_ATTACK_VECTORS
# Vendor/product string reported by sqlmap when this WAF is detected
__product__ = "Jiasule Web Application Firewall (Jiasule)"
def detect(get_page):
    """Return True when responses match the Jiasule WAF fingerprint.

    Probes each attack vector and checks the Server header and the page body
    for Jiasule markers.
    """
    for vector in WAF_ATTACK_VECTORS:
        page, headers, code = get_page(get=vector)
        server = headers.get(HTTP_HEADER.SERVER, "")
        matched = re.search(r"jiasule-WAF", server, re.I) is not None
        matched |= re.search(r"static\.jiasule\.com/static/js/http_error\.js", page, re.I) is not None
        if matched:
            return True
    return False
|
hkawasaki/kawasaki-aio8-1 | refs/heads/gacco2/master | cms/djangoapps/contentstore/views/__init__.py | 46 | # pylint: disable=W0401, W0511
"All view functions for contentstore, broken out into submodules"
# Disable warnings about import from wildcard
# All files below declare exports with __all__
from .assets import *
from .checklist import *
from .component import *
from .course import *
from .error import *
from .helpers import *
from .item import *
from .import_export import *
from .preview import *
from .public import *
from .export_git import *
from .user import *
from .tabs import *
from .transcripts_ajax import *
# Developer-only views are optional; the .dev module is absent in
# production deployments, so a failed import is deliberately ignored.
try:
    from .dev import *
except ImportError:
    pass
|
marcoantoniooliveira/labweb | refs/heads/master | oscar/lib/python2.7/site-packages/sphinx/util/texescape.py | 11 | # -*- coding: utf-8 -*-
"""
sphinx.util.texescape
~~~~~~~~~~~~~~~~~~~~~
TeX escaping helper.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# (character, replacement) pairs applied when escaping text for LaTeX output.
# NOTE(review): the ur'...' raw-unicode literals are Python-2-only syntax.
tex_replacements = [
    # map TeX special chars
    (u'$', ur'\$'),
    (u'%', ur'\%'),
    (u'&', ur'\&'),
    (u'#', ur'\#'),
    (u'_', ur'\_'),
    (u'{', ur'\{'),
    (u'}', ur'\}'),
    (u'[', ur'{[}'),
    (u']', ur'{]}'),
    (u'`', ur'{}`'),
    (u'\\',ur'\textbackslash{}'),
    (u'~', ur'\textasciitilde{}'),
    (u'<', ur'\textless{}'),
    (u'>', ur'\textgreater{}'),
    (u'^', ur'\textasciicircum{}'),
    # map special Unicode characters to TeX commands
    (u'¶', ur'\P{}'),
    (u'§', ur'\S{}'),
    (u'€', ur'\texteuro{}'),
    (u'∞', ur'\(\infty\)'),
    (u'±', ur'\(\pm\)'),
    (u'→', ur'\(\rightarrow\)'),
    (u'‣', ur'\(\rightarrow\)'),
    # used to separate -- in options
    (u'', ur'{}'),
    # map some special Unicode characters to similar ASCII ones
    (u'─', ur'-'),
    (u'⎽', ur'\_'),
    (u'╲', ur'\textbackslash{}'),
    (u'|', ur'\textbar{}'),
    (u'│', ur'\textbar{}'),
    (u'ℯ', ur'e'),
    (u'ⅈ', ur'i'),
    (u'₁', ur'1'),
    (u'₂', ur'2'),
    # map Greek alphabet
    (u'α', ur'\(\alpha\)'),
    (u'β', ur'\(\beta\)'),
    (u'γ', ur'\(\gamma\)'),
    (u'δ', ur'\(\delta\)'),
    (u'ε', ur'\(\epsilon\)'),
    (u'ζ', ur'\(\zeta\)'),
    (u'η', ur'\(\eta\)'),
    (u'θ', ur'\(\theta\)'),
    (u'ι', ur'\(\iota\)'),
    (u'κ', ur'\(\kappa\)'),
    (u'λ', ur'\(\lambda\)'),
    (u'μ', ur'\(\mu\)'),
    (u'ν', ur'\(\nu\)'),
    (u'ξ', ur'\(\xi\)'),
    (u'ο', ur'o'),
    (u'π', ur'\(\pi\)'),
    (u'ρ', ur'\(\rho\)'),
    (u'σ', ur'\(\sigma\)'),
    (u'τ', ur'\(\tau\)'),
    (u'υ', u'\\(\\upsilon\\)'),
    (u'φ', ur'\(\phi\)'),
    (u'χ', ur'\(\chi\)'),
    (u'ψ', ur'\(\psi\)'),
    (u'ω', ur'\(\omega\)'),
    (u'Α', ur'A'),
    (u'Β', ur'B'),
    (u'Γ', ur'\(\Gamma\)'),
    (u'Δ', ur'\(\Delta\)'),
    (u'Ε', ur'E'),
    (u'Ζ', ur'Z'),
    (u'Η', ur'H'),
    (u'Θ', ur'\(\Theta\)'),
    (u'Ι', ur'I'),
    (u'Κ', ur'K'),
    (u'Λ', ur'\(\Lambda\)'),
    (u'Μ', ur'M'),
    (u'Ν', ur'N'),
    (u'Ξ', ur'\(\Xi\)'),
    (u'Ο', ur'O'),
    (u'Π', ur'\(\Pi\)'),
    (u'Ρ', ur'P'),
    (u'Σ', ur'\(\Sigma\)'),
    (u'Τ', ur'T'),
    (u'Υ', u'\\(\\Upsilon\\)'),
    (u'Φ', ur'\(\Phi\)'),
    (u'Χ', ur'X'),
    (u'Ψ', ur'\(\Psi\)'),
    (u'Ω', ur'\(\Omega\)'),
    # U+2126 OHM SIGN — visually identical to Greek Omega above but a
    # distinct code point, hence the (apparent) duplicate entry
    (u'Ω', ur'\(\Omega\)'),
]

# lookup tables populated by init() from tex_replacements:
# ord(char) -> replacement string
tex_escape_map = {}
# ord(char) -> u'_' (every special char collapses to underscore)
tex_replace_map = {}
# like tex_escape_map but omitting [ ] { } \ — presumably for already
# bracket-sensitive (highlighted) output; confirm against callers
tex_hl_escape_map_new = {}
def init():
    """Populate the escape/replace lookup tables from tex_replacements."""
    for char, repl in tex_replacements:
        code = ord(char)
        tex_escape_map[code] = repl
        tex_replace_map[code] = u'_'
    for char, repl in tex_replacements:
        # brackets and backslash keep their literal meaning in the
        # highlighting escape map
        if char in u'[]{}\\':
            continue
        tex_hl_escape_map_new[ord(char)] = repl
|
n1889/gitinspector | refs/heads/master | setup.py | 50 | # coding: utf-8
#
# Copyright © 2013 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from gitinspector.version import __version__
from glob import glob
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory."""
    # context manager closes the handle promptly instead of leaking it
    # until garbage collection (the original used a bare open())
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata handed straight to setuptools; executing this module
# (python setup.py ...) performs the requested build/install action.
setup(
    name = "gitinspector",
    version = __version__,  # single-sourced from gitinspector/version.py
    author = "Ejwa Software",
    author_email = "gitinspector@ejwa.se",
    description = ("A statistical analysis tool for git repositories."),
    license = "GNU GPL v3",
    keywords = "analysis analyzer git python statistics stats vc vcs timeline",
    url = "http://gitinspector.googlecode.com",
    long_description = read("DESCRIPTION.txt"),
    classifiers = [
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Topic :: Software Development :: Version Control",
        "Topic :: Utilities"
    ],
    packages = find_packages(exclude = ['tests']),
    # ship the HTML report assets and gettext translations with the package
    package_data = {"": ["html/*", "translations/*"]},
    data_files = [("share/doc/gitinspector", glob("*.txt"))],
    # console entry point: installs a `gitinspector` executable
    entry_points = {"console_scripts": ["gitinspector = gitinspector.gitinspector:main"]},
    zip_safe = False
)
|
jackjennings/Mechanic | refs/heads/master | Mechanic.roboFontExt/lib/site-packages/requests/packages/chardet/jisfreq.py | 3130 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0  # 25% of the ideal distribution ratio (12.58) per the analysis above

# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368  # declared size of JISCharToFreqOrder below — consumers not visible here; confirm usage
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
|
virus-warnning/HelloDjango | refs/heads/master | HelloDjango/settings.py | 1 | """
Django settings for HelloDjango project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; it must be rotated
# and loaded from the environment before any production deployment.
SECRET_KEY = '!1+^7ps3g3($$hl4-$87dwz(&_1&ua_zzk-*=kg2#^%i87xx=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Empty list is fine while DEBUG is True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'HelloDjango.urls'
WSGI_APPLICATION = 'HelloDjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# Development default: file-based SQLite stored beside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Project-level template directory searched in addition to app templates.
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)
|
araseyuta/Newsstand-analytics | refs/heads/master | gdata/tlslite/integration/AsyncStateMachine.py | 238 | """
A state machine for using TLS Lite with asynchronous I/O.
"""
class AsyncStateMachine:
    """
    This is an abstract class that's used to integrate TLS Lite with
    asyncore and Twisted.

    This class signals wantsReadsEvent() and wantsWriteEvent().  When
    the underlying socket has become readable or writeable, the event
    should be passed to this class by calling inReadEvent() or
    inWriteEvent().  This class will then try to read or write through
    the socket, and will update its state appropriately.

    This class will forward higher-level events to its subclass.  For
    example, when a complete TLS record has been received,
    outReadEvent() will be called with the decrypted data.

    The integrating subclass is expected to provide a ``tlsConnection``
    attribute (a tlslite TLSConnection) -- TODO confirm against the
    asyncore/Twisted integration classes.
    """

    def __init__(self):
        self._clear()

    def _clear(self):
        """Reset to the idle state: no operation active, no pending result."""
        #These store the various asynchronous operations (i.e.
        #generators).  Only one of them, at most, is ever active at a
        #time.
        self.handshaker = None
        self.closer = None
        self.reader = None
        self.writer = None

        #This stores the result from the last call to the
        #currently active operation.  If 0 it indicates that the
        #operation wants to read, if 1 it indicates that the
        #operation wants to write.  If None, there is no active
        #operation.
        self.result = None

    def _checkAssert(self, maxActive=1):
        """Verify internal consistency.

        At most maxActive operations may be active, and self.result
        must agree with the number of active operations.
        """
        activeOps = 0
        if self.handshaker:
            activeOps += 1
        if self.closer:
            activeOps += 1
        if self.reader:
            activeOps += 1
        if self.writer:
            activeOps += 1

        if self.result is None:
            #Idle: no operation may be active.
            if activeOps != 0:
                raise AssertionError()
        elif self.result in (0, 1):
            #A want-read/want-write is pending: exactly one op is active.
            if activeOps != 1:
                raise AssertionError()
        else:
            #Between events, result may only be None, 0, or 1.
            raise AssertionError()
        if activeOps > maxActive:
            raise AssertionError()

    def wantsReadEvent(self):
        """If the state machine wants to read.

        If an operation is active, this returns whether or not the
        operation wants to read from the socket.  If an operation is
        not active, this returns None.

        @rtype: bool or None
        @return: If the state machine wants to read.
        """
        if self.result is not None:
            return self.result == 0
        return None

    def wantsWriteEvent(self):
        """If the state machine wants to write.

        If an operation is active, this returns whether or not the
        operation wants to write to the socket.  If an operation is
        not active, this returns None.

        @rtype: bool or None
        @return: If the state machine wants to write.
        """
        if self.result is not None:
            return self.result == 1
        return None

    def outConnectEvent(self):
        """Called when a handshake operation completes.

        May be overridden in subclass.
        """
        pass

    def outCloseEvent(self):
        """Called when a close operation completes.

        May be overridden in subclass.
        """
        pass

    def outReadEvent(self, readBuffer):
        """Called when a read operation completes.

        May be overridden in subclass."""
        pass

    def outWriteEvent(self):
        """Called when a write operation completes.

        May be overridden in subclass."""
        pass

    def inReadEvent(self):
        """Tell the state machine it can read from the socket."""
        try:
            self._checkAssert()
            if self.handshaker:
                self._doHandshakeOp()
            elif self.closer:
                self._doCloseOp()
            elif self.reader:
                self._doReadOp()
            elif self.writer:
                self._doWriteOp()
            else:
                #Idle: start a fresh asynchronous read of up to 16KB.
                self.reader = self.tlsConnection.readAsync(16384)
                self._doReadOp()
        except:
            #On any failure the connection state is unknown; drop all
            #operations before propagating (intentionally a bare except
            #so cleanup happens for every exception type).
            self._clear()
            raise

    def inWriteEvent(self):
        """Tell the state machine it can write to the socket."""
        try:
            self._checkAssert()
            if self.handshaker:
                self._doHandshakeOp()
            elif self.closer:
                self._doCloseOp()
            elif self.reader:
                self._doReadOp()
            elif self.writer:
                self._doWriteOp()
            else:
                #Idle: let the subclass queue more application data.
                self.outWriteEvent()
        except:
            self._clear()
            raise

    def _doHandshakeOp(self):
        """Advance the handshake generator; fire outConnectEvent() on finish."""
        try:
            self.result = self.handshaker.next()
        except StopIteration:
            self.handshaker = None
            self.result = None
            self.outConnectEvent()

    def _doCloseOp(self):
        """Advance the close generator; fire outCloseEvent() on finish."""
        try:
            self.result = self.closer.next()
        except StopIteration:
            self.closer = None
            self.result = None
            self.outCloseEvent()

    def _doReadOp(self):
        """Advance the read generator.

        The generator yields 0/1 while it still needs socket events;
        any other value is the decrypted data itself.
        """
        self.result = self.reader.next()
        if self.result not in (0, 1):
            readBuffer = self.result
            self.reader = None
            self.result = None
            self.outReadEvent(readBuffer)

    def _doWriteOp(self):
        """Advance the write generator until it is exhausted."""
        try:
            self.result = self.writer.next()
        except StopIteration:
            self.writer = None
            self.result = None

    def setHandshakeOp(self, handshaker):
        """Start a handshake operation.

        @type handshaker: generator
        @param handshaker: A generator created by using one of the
        asynchronous handshake functions (i.e. handshakeServerAsync, or
        handshakeClientxxx(..., async=True).
        """
        try:
            self._checkAssert(0)
            self.handshaker = handshaker
            self._doHandshakeOp()
        except:
            self._clear()
            raise

    def setServerHandshakeOp(self, **args):
        """Start a handshake operation.

        The arguments passed to this function will be forwarded to
        L{tlslite.TLSConnection.TLSConnection.handshakeServerAsync}.
        """
        handshaker = self.tlsConnection.handshakeServerAsync(**args)
        self.setHandshakeOp(handshaker)

    def setCloseOp(self):
        """Start a close operation.
        """
        try:
            self._checkAssert(0)
            self.closer = self.tlsConnection.closeAsync()
            self._doCloseOp()
        except:
            self._clear()
            raise

    def setWriteOp(self, writeBuffer):
        """Start a write operation.

        @type writeBuffer: str
        @param writeBuffer: The string to transmit.
        """
        try:
            self._checkAssert(0)
            self.writer = self.tlsConnection.writeAsync(writeBuffer)
            self._doWriteOp()
        except:
            self._clear()
            raise
|
Juniper/neutron | refs/heads/master | neutron/plugins/hyperv/agent_notifier_api.py | 21 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Alessandro Pilotti, Cloudbase Solutions Srl
from neutron.common import topics
from neutron.openstack.common import log as logging
from neutron.openstack.common.rpc import proxy
from neutron.plugins.hyperv.common import constants
LOG = logging.getLogger(__name__)
class AgentNotifierApi(proxy.RpcProxy):
    '''Plugin-side RPC API for notifying the L2 agents.

    Every notification is fanout-cast on a per-resource topic that is
    precomputed once at construction time.

    API version history:
        1.0 - Initial version.

    '''

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        # Build the fanout topic names up front so each notification
        # method only has to cast.
        self.topic_network_delete = topics.get_topic_name(
            topic, topics.NETWORK, topics.DELETE)
        self.topic_port_update = topics.get_topic_name(
            topic, topics.PORT, topics.UPDATE)
        self.topic_port_delete = topics.get_topic_name(
            topic, topics.PORT, topics.DELETE)
        self.topic_tunnel_update = topics.get_topic_name(
            topic, constants.TUNNEL, topics.UPDATE)

    def network_delete(self, context, network_id):
        '''Broadcast that a network was deleted.'''
        msg = self.make_msg('network_delete', network_id=network_id)
        self.fanout_cast(context, msg, topic=self.topic_network_delete)

    def port_update(self, context, port, network_type, segmentation_id,
                    physical_network):
        '''Broadcast that a port (and its binding details) changed.'''
        msg = self.make_msg('port_update',
                            port=port,
                            network_type=network_type,
                            segmentation_id=segmentation_id,
                            physical_network=physical_network)
        self.fanout_cast(context, msg, topic=self.topic_port_update)

    def port_delete(self, context, port_id):
        '''Broadcast that a port was deleted.'''
        msg = self.make_msg('port_delete', port_id=port_id)
        self.fanout_cast(context, msg, topic=self.topic_port_delete)

    def tunnel_update(self, context, tunnel_ip, tunnel_id):
        '''Broadcast a new/updated tunnel endpoint.'''
        msg = self.make_msg('tunnel_update',
                            tunnel_ip=tunnel_ip,
                            tunnel_id=tunnel_id)
        self.fanout_cast(context, msg, topic=self.topic_tunnel_update)
|
okadate/romspy | refs/heads/master | romspy/reshape_wind.py | 1 | # coding: utf-8
# (c) 2015-11-18 Teruhisa Okada
import pandas as pd
import numpy as np
import math
from pykrige.ok import OrdinaryKriging
from scipy.interpolate import Rbf
import matplotlib.pyplot as plt
import matplotlib as mpl
import netCDF4
import datetime
import romspy
mpl.rc('image', cmap='jet')
def reshape_wind(windfiles, stafiles, grdfile, method='kriging', smooth=1, plot=False):
u, v = {}, {}
timedelta = datetime.timedelta(hours=3)
for windfile in windfiles:
print windfile
if 'mp' in windfile:
names = ['name', 'date', 'direction', 'wind']
df = pd.read_csv(windfile, encoding='Shift_JIS', names=names, skiprows=1, na_values='*', index_col='date')
df['u'] = -np.sin(math.pi * df.direction / 180.0) * df.wind
df['v'] = -np.cos(math.pi * df.direction / 180.0) * df.wind
df.index = df.index.map(lambda t: datetime.datetime.strptime(t, '%Y/%m/%d %H:%M'))
df = df.resample('12H', how='mean', loffset=timedelta)
#df = df.resample('H', how='mean')
station = int(windfile[-26:-24])
u[station] = df.u.dropna()
v[station] = df.v.dropna()
elif 'jma' in windfile:
df = pd.read_csv(windfile, encoding='Shift_JIS', na_values='--', index_col='id')
df['u'] = -np.sin(math.pi * df.wind_direction / 180.0) * df.wind_velocity
df['v'] = -np.cos(math.pi * df.wind_direction / 180.0) * df.wind_velocity
df.index = df.index.map(lambda t: datetime.datetime.strptime(t, '%Y/%m/%d %H:%M'))
df = df.resample('12H', how='mean', loffset=timedelta)
#df = df.resample('H', how='mean')
station = windfile[windfile.find('jma_')+4:-22]
u[station] = df.u.dropna()
v[station] = df.v.dropna()
sta_mp = pd.read_csv(stafiles['mp'], index_col='station')
sta_jma = pd.read_csv(stafiles['jma'], index_col='station')
grd = netCDF4.Dataset(grdfile, 'r')
xmesh = grd.variables['lon_rho'][:,:]
ymesh = grd.variables['lat_rho'][:,:]
grd.close()
xgrid = xmesh[0,:]
ygrid = ymesh[:,0]
td = len(df)
xd = len(xgrid)
yd = len(ygrid)
u3d = np.zeros(shape=[td, yd, xd])
v3d = np.zeros_like(u3d)
time_wind = np.zeros(shape=[td])
for i, t in enumerate(df.index):
X, Y, U, V = [], [], [], []
for station in u.keys():
try:
U.append(u[station][t])
V.append(v[station][t])
if type(station) == int:
X.append(sta_mp.lon[station])
Y.append(sta_mp.lat[station])
elif type(station) == str:
X.append(sta_jma.x[station])
Y.append(sta_jma.y[station])
except:
pass
print i, t, len(Y)
if method == 'kriging_gaussian':
ukrig = OrdinaryKriging(X, Y, U, variogram_model='gaussian', verbose=False, enable_plotting=False)
vkrig = OrdinaryKriging(X, Y, V, variogram_model='gaussian', verbose=False, enable_plotting=False)
umesh, ss = ukrig.execute('grid', xgrid, ygrid)
vmesh, ss = vkrig.execute('grid', xgrid, ygrid)
elif method == 'kriging_linear':
ukrig = OrdinaryKriging(X, Y, U, variogram_model='linear', verbose=False, enable_plotting=False)
vkrig = OrdinaryKriging(X, Y, V, variogram_model='linear', verbose=False, enable_plotting=False)
umesh, ss = ukrig.execute('grid', xgrid, ygrid)
vmesh, ss = vkrig.execute('grid', xgrid, ygrid)
elif method == 'rbf':
urbf = Rbf(X, Y, U)
vrbf = Rbf(X, Y, V)
umesh = urbf(xmesh, ymesh)
vmesh = vrbf(xmesh, ymesh)
elif method == 'rbf_gaussian':
urbf = Rbf(X, Y, U, function='inverse')
vrbf = Rbf(X, Y, V, function='inverse')
umesh = urbf(xmesh, ymesh)
vmesh = vrbf(xmesh, ymesh)
elif method == 'rbf_linear':
urbf = Rbf(X, Y, U, function='inverse')
vrbf = Rbf(X, Y, V, function='inverse')
umesh = urbf(xmesh, ymesh)
vmesh = vrbf(xmesh, ymesh)
elif method == 'rbf_inverse':
urbf = Rbf(X, Y, U, function='inverse')
vrbf = Rbf(X, Y, V, function='inverse')
umesh = urbf(xmesh, ymesh)
vmesh = vrbf(xmesh, ymesh)
elif method == 'rbf_inverse_smooth':
urbf = Rbf(X, Y, U, function='inverse', smooth=smooth)
vrbf = Rbf(X, Y, V, function='inverse', smooth=smooth)
umesh = urbf(xmesh, ymesh)
vmesh = vrbf(xmesh, ymesh)
if plot == 'pcolor' and i == 0:
plt.figure(figsize=[15, 5])
plt.subplot(1,2,1)
plt.pcolor(xmesh, ymesh, umesh, vmin=-1, vmax=1)
plt.colorbar()
plt.subplot(1,2,2)
plt.pcolor(xmesh, ymesh, vmesh, vmin=-1, vmax=1)
plt.colorbar()
plt.show()
elif plot == 'quiver' and i == 0:
plt.figure(figsize=[10, 10])
romspy.basemap('F:/okada/notebook/deg_OsakaBayMap_okada.bln')
plt.quiver(xmesh[::3, ::3], ymesh[::3, ::3], umesh[::3, ::3], vmesh[::3, ::3], units='width', angles='xy', scale=100)
plt.quiver(X, Y, U, V, color='r', units='xy', angles='xy', scale=100)
plt.show()
u3d[i,:,:] = umesh
v3d[i,:,:] = vmesh
time_wind[i] = netCDF4.date2num(t, romspy.JST_days)
return u3d, v3d, time_wind
if __name__ == '__main__':
    # NOTE(review): the original script assigned ``windfiles`` four times in
    # a row (mp-only, jma-only, a 16-file union, then this list); only the
    # final assignment was ever used, so the dead lists were removed.
    windfiles = ['F:/okada/Dropbox/Data/mp/mp_003_C_20120101_20121231.csv',
                 'F:/okada/Dropbox/Data/mp/mp_005_C_20120101_20121231.csv',
                 'F:/okada/Dropbox/Data/mp/mp_006_C_20120101_20121231.csv',
                 'F:/okada/Dropbox/Data/mp/mp_012_C_20120101_20121231.csv',
                 'F:/okada/Dropbox/Data/jma/jma_akashi_20120101_20121231.csv',
                 'F:/okada/Dropbox/Data/jma/jma_gunge_20120101_20121231.csv',
                 'F:/okada/Dropbox/Data/jma/jma_kansaiAP_20120101_20121231.csv',
                 'F:/okada/Dropbox/Data/jma/jma_kobe_20120101_20121231.csv',
                 'F:/okada/Dropbox/Data/jma/jma_kobeAP_20120101_20121231.csv',
                 'F:/okada/Dropbox/Data/jma/jma_tomogashima_20120101_20121231.csv']
    stafiles = {'mp': 'F:/okada/Dropbox/Data/stations13.csv',
                'jma': 'F:/okada/Dropbox/Data/stations_jma.csv'}
    grdfile = 'F:/okada/Dropbox/Data/ob500_grd-10.nc'
    # Alternative interpolation schemes kept for comparison runs:
    #reshape_wind(windfiles, stafiles, grdfile, method='kriging_gaussian')
    #reshape_wind(windfiles, stafiles, grdfile, method='kriging_linear')
    #reshape_wind(windfiles, stafiles, grdfile, method='rbf')
    #reshape_wind(windfiles, stafiles, grdfile, method='rbf_gaussian')
    #reshape_wind(windfiles, stafiles, grdfile, method='rbf_linear')
    reshape_wind(windfiles, stafiles, grdfile, method='rbf_inverse', plot='quiver')
    #reshape_wind(windfiles, stafiles, grdfile, method='rbf_inverse_smooth', smooth=0.5)
    #reshape_wind(windfiles, stafiles, grdfile, method='rbf_inverse_smooth', smooth=1)
    #reshape_wind(windfiles, stafiles, grdfile, method='rbf_inverse_smooth', smooth=2)
|
eeshangarg/oh-mainline | refs/heads/master | vendor/packages/celery/docs/slidesource/slide-example1.py | 23 | from celery.task import Task
class MyTask(Task):
    """Celery task that multiplies two numbers."""

    def run(self, x, y):
        """Return the product of ``x`` and ``y``."""
        product = x * y
        return product
|
hbrunn/OCB | refs/heads/8.0 | addons/website_crm_partner_assign/controllers/main.py | 271 | # -*- coding: utf-8 -*-
import werkzeug
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug, unslug
from openerp.tools.translate import _
class WebsiteCrmPartnerAssign(http.Controller):
    """Public website controller listing published resellers (partners),
    filterable by grade and country, with a per-partner detail page."""

    # Number of partner cards shown per index page.
    _references_per_page = 40

    @http.route([
        '/partners',
        '/partners/page/<int:page>',
        '/partners/grade/<model("res.partner.grade"):grade>',
        '/partners/grade/<model("res.partner.grade"):grade>/page/<int:page>',
        '/partners/country/<model("res.country"):country>',
        '/partners/country/<model("res.country"):country>/page/<int:page>',
        '/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>',
        '/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>/page/<int:page>',
    ], type='http', auth="public", website=True)
    def partners(self, country=None, grade=None, page=0, **post):
        """Render the paginated partner index.

        Only published companies with a published grade are listed; grade
        and country facets are computed with read_group for the sidebar.
        """
        country_all = post.pop('country_all', False)
        partner_obj = request.registry['res.partner']
        country_obj = request.registry['res.country']
        search = post.get('search', '')
        base_partner_domain = [('is_company', '=', True), ('grade_id.website_published', '=', True), ('website_published', '=', True)]
        if search:
            base_partner_domain += ['|', ('name', 'ilike', search), ('website_description', 'ilike', search)]
        # group by grade
        grade_domain = list(base_partner_domain)
        if not country and not country_all:
            # BUGFIX/robustness: 'geoip' may be missing from the session
            # (e.g. geolocation not set up); use .get() instead of indexing
            # to avoid a KeyError on the public page.
            country_code = request.session.get('geoip', {}).get('country_code')
            if country_code:
                country_ids = country_obj.search(request.cr, request.uid, [('code', '=', country_code)], context=request.context)
                if country_ids:
                    country = country_obj.browse(request.cr, request.uid, country_ids[0], context=request.context)
        if country:
            grade_domain += [('country_id', '=', country.id)]
        grades = partner_obj.read_group(
            request.cr, SUPERUSER_ID, grade_domain, ["id", "grade_id"],
            groupby="grade_id", orderby="grade_id DESC", context=request.context)
        grades_partners = partner_obj.search(
            request.cr, SUPERUSER_ID, grade_domain,
            context=request.context, count=True)
        # flag active grade
        for grade_dict in grades:
            grade_dict['active'] = grade and grade_dict['grade_id'][0] == grade.id
        # Synthetic "All Categories" entry at the top of the facet.
        grades.insert(0, {
            'grade_id_count': grades_partners,
            'grade_id': (0, _("All Categories")),
            'active': bool(grade is None),
        })
        # group by country
        country_domain = list(base_partner_domain)
        if grade:
            country_domain += [('grade_id', '=', grade.id)]
        countries = partner_obj.read_group(
            request.cr, SUPERUSER_ID, country_domain, ["id", "country_id"],
            groupby="country_id", orderby="country_id", context=request.context)
        countries_partners = partner_obj.search(
            request.cr, SUPERUSER_ID, country_domain,
            context=request.context, count=True)
        # flag active country
        for country_dict in countries:
            country_dict['active'] = country and country_dict['country_id'] and country_dict['country_id'][0] == country.id
        countries.insert(0, {
            'country_id_count': countries_partners,
            'country_id': (0, _("All Countries")),
            'active': bool(country is None),
        })
        # current search
        if grade:
            base_partner_domain += [('grade_id', '=', grade.id)]
        if country:
            base_partner_domain += [('country_id', '=', country.id)]
        # format pager: base URL mirrors the active grade/country filters
        if grade and not country:
            url = '/partners/grade/' + slug(grade)
        elif country and not grade:
            url = '/partners/country/' + slug(country)
        elif country and grade:
            url = '/partners/grade/' + slug(grade) + '/country/' + slug(country)
        else:
            url = '/partners'
        url_args = {}
        if search:
            url_args['search'] = search
        if country_all:
            url_args['country_all'] = True
        partner_count = partner_obj.search_count(
            request.cr, SUPERUSER_ID, base_partner_domain,
            context=request.context)
        pager = request.website.pager(
            url=url, total=partner_count, page=page, step=self._references_per_page, scope=7,
            url_args=url_args)
        # search partners matching current search parameters
        partner_ids = partner_obj.search(
            request.cr, SUPERUSER_ID, base_partner_domain,
            order="grade_id DESC",
            context=request.context)  # todo in trunk: order="grade_id DESC, implemented_count DESC", offset=pager['offset'], limit=self._references_per_page
        partners = partner_obj.browse(request.cr, SUPERUSER_ID, partner_ids, request.context)
        # remove me in trunk: in-Python sort/slice until the ORM supports
        # ordering on implemented_count (see the todo above)
        partners = sorted(partners, key=lambda x: (x.grade_id.sequence if x.grade_id else 0, len([i for i in x.implemented_partner_ids if i.website_published])), reverse=True)
        partners = partners[pager['offset']:pager['offset'] + self._references_per_page]
        google_map_partner_ids = ','.join(map(str, [p.id for p in partners]))
        values = {
            'countries': countries,
            'current_country': country,
            'grades': grades,
            'current_grade': grade,
            'partners': partners,
            'google_map_partner_ids': google_map_partner_ids,
            'pager': pager,
            'searches': post,
            'search_path': "%s" % werkzeug.url_encode(post),
        }
        return request.website.render("website_crm_partner_assign.index", values)

    # Do not use semantic controller due to SUPERUSER_ID
    @http.route(['/partners/<partner_id>'], type='http', auth="public", website=True)
    def partners_detail(self, partner_id, partner_name='', **post):
        """Render one partner's page; fall back to the index when the id is
        unknown or the partner is not published."""
        _, partner_id = unslug(partner_id)
        current_grade, current_country = None, None
        grade_id = post.get('grade_id')
        country_id = post.get('country_id')
        if grade_id:
            grade_ids = request.registry['res.partner.grade'].exists(request.cr, request.uid, int(grade_id), context=request.context)
            if grade_ids:
                current_grade = request.registry['res.partner.grade'].browse(request.cr, request.uid, grade_ids[0], context=request.context)
        if country_id:
            country_ids = request.registry['res.country'].exists(request.cr, request.uid, int(country_id), context=request.context)
            if country_ids:
                current_country = request.registry['res.country'].browse(request.cr, request.uid, country_ids[0], context=request.context)
        if partner_id:
            partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
            if partner.exists() and partner.website_published:
                values = {
                    'main_object': partner,
                    'partner': partner,
                    'current_grade': current_grade,
                    'current_country': current_country
                }
                return request.website.render("website_crm_partner_assign.partner", values)
        return self.partners(**post)
|
MER-GROUP/intellij-community | refs/heads/master | python/testData/override/singleStar_after.py | 83 | class A:
def f1(self, *, a = 1):
pass
class B(A):
def f1(self, *, a=1):
<selection>super().f1(a=a)</selection>
|
Samael500/pelican-plugins | refs/heads/master | gzip_cache/gzip_cache.py | 40 | '''
Copyright (c) 2012 Matt Layman
Gzip cache
----------
A plugin to create .gz cache files for optimization.
'''
import logging
import os
import zlib
from pelican import signals
logger = logging.getLogger(__name__)
# A list of file types to exclude from possible compression
# (already-compressed containers where gzip brings no benefit).
EXCLUDE_TYPES = [
    # Compressed types
    '.bz2',
    '.gz',
    # Audio types
    '.aac',
    '.flac',
    '.mp3',
    '.wma',
    # Image types
    '.gif',
    '.jpg',
    '.jpeg',
    '.png',
    # Video types
    '.avi',
    '.mov',
    '.mp4',
    '.webm',
    # Internally-compressed fonts. gzip can often shave ~50 more bytes off,
    # but it's not worth it.
    '.woff',
]
COMPRESSION_LEVEL = 9  # Best Compression
""" According to zlib manual: 'Add 16 to
windowBits to write a simple gzip header and trailer around the
compressed data instead of a zlib wrapper. The gzip header will
have no file name, no extra data, no comment, no modification
time (set to zero), no header crc, and the operating system
will be set to 255 (unknown)'
"""
WBITS = zlib.MAX_WBITS | 16  # emit a gzip container (see note above)
def create_gzip_cache(pelican):
    '''Walk the generated site output and gzip every compressible file.

    :param pelican: The Pelican instance
    '''
    overwrite = should_overwrite(pelican.settings)
    for dirpath, _, filenames in os.walk(pelican.settings['OUTPUT_PATH']):
        for name in filenames:
            if not should_compress(name):
                continue
            create_gzip_file(os.path.join(dirpath, name), overwrite)
def should_compress(filename):
    '''Check if the filename is a type of file that should be compressed.

    :param filename: A file name to check against
    '''
    # str.endswith accepts a tuple of suffixes, replacing the manual loop.
    return not filename.endswith(tuple(EXCLUDE_TYPES))
def should_overwrite(settings):
    '''Check if the gzipped files should overwrite the originals.

    :param settings: The pelican instance settings
    '''
    overwrite = settings.get('GZIP_CACHE_OVERWRITE', False)
    return overwrite
def create_gzip_file(filepath, overwrite):
    '''Create a gzipped file in the same directory with a filepath.gz name.

    Skips files whose gzipped form would not be smaller.  When *overwrite*
    is true and the write succeeded, the original is replaced by the
    compressed data under the original name.

    :param filepath: A file to compress
    :param overwrite: Whether the original file should be overwritten
    '''
    compressed_path = filepath + '.gz'
    with open(filepath, 'rb') as uncompressed:
        # Raw zlib with gzip-flavoured WBITS gives a reproducible gzip
        # container (no mtime/filename in the header -- see WBITS note).
        gzip_compress_obj = zlib.compressobj(COMPRESSION_LEVEL,
                                             zlib.DEFLATED, WBITS)
        uncompressed_data = uncompressed.read()
        gzipped_data = gzip_compress_obj.compress(uncompressed_data)
        gzipped_data += gzip_compress_obj.flush()
        if len(gzipped_data) >= len(uncompressed_data):
            # Compression did not help (typically already-compressed data).
            logger.debug('No improvement: %s' % filepath)
            return
        with open(compressed_path, 'wb') as compressed:
            logger.debug('Compressing: %s' % filepath)
            try:
                compressed.write(gzipped_data)
            except Exception as ex:
                # BUGFIX: previously fell through to the overwrite step even
                # after a failed write, deleting the original and renaming a
                # truncated .gz over it.  Bail out instead.
                logger.critical('Gzip compression failed: %s' % ex)
                return
        if overwrite:
            logger.debug('Overwriting: %s with %s' % (filepath, compressed_path))
            os.remove(filepath)
            os.rename(compressed_path, filepath)
def register():
    # Pelican plugin hook: run the gzip pass once the site is fully written.
    signals.finalized.connect(create_gzip_cache)
|
juhnowski/ph_motors | refs/heads/master | test.py | 1 | from multiprocessing import Process, Pipe
#Basic imports
from ctypes import *
import sys
#Phidget specific imports
from Phidgets.Phidget import Phidget
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import AccelerationChangeEventArgs, AttachEventArgs, DetachEventArgs, ErrorEventArgs
from Phidgets.Devices.Accelerometer import Accelerometer
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import AttachEventArgs, DetachEventArgs, ErrorEventArgs, CurrentChangeEventArgs, PositionChangeEventArgs, VelocityChangeEventArgs
from Phidgets.Devices.AdvancedServo import AdvancedServo
from Phidgets.Devices.Servo import ServoTypes
# Dead-band threshold used by M1_Rotate: commands with |pos| below this
# value are ignored.
delta_pos = 0.05
#Create an accelerometer object
try:
    accelerometer = Accelerometer()
except RuntimeError as e:
    print("Runtime Exception: %s" % e.details)
    print("Exiting....")
    exit(1)
# Servo controller for motor M1.
try:
    advancedServo = AdvancedServo()
except RuntimeError as e:
    print("Runtime Exception: %s" % e.details)
    print("Exiting....")
    exit(1)
# Second servo controller (M2); NOTE(review): created here but not
# referenced in the visible part of this file -- verify it is used below.
try:
    advancedServo2 = AdvancedServo()
except RuntimeError as e:
    print("Runtime Exception: %s" % e.details)
    print("Exiting....")
    exit(1)
#========================================================================================================================================================================================
#Information Display Function
def DisplayDeviceInfo():
    # Print an identity/attachment summary table for the module-level
    # accelerometer, followed by its axis count.
    print("|------------|----------------------------------|--------------|------------|")
    print("|- Attached -|- Type -|- Serial No. -|- Version -|")
    print("|------------|----------------------------------|--------------|------------|")
    print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (accelerometer.isAttached(), accelerometer.getDeviceName(), accelerometer.getSerialNum(), accelerometer.getDeviceVersion()))
    print("|------------|----------------------------------|--------------|------------|")
    print("Number of Axes: %i" % (accelerometer.getAxisCount()))
#Event Handler Callback Functions
def AccelerometerAttached(e):
    # Attach callback: report the serial number of the attached device.
    attached = e.device
    print("Accelerometer %i Attached!" % (attached.getSerialNum()))
def AccelerometerDetached(e):
    # Detach callback: report the serial number of the detached device.
    detached = e.device
    print("Accelerometer %i Detached!" % (detached.getSerialNum()))
def AccelerometerError(e):
    # Device error callback: log the Phidget error code and description.
    try:
        source = e.device
        print("Accelerometer %i: Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description))
    except PhidgetException as e:
        print("Phidget Exception %i: %s" % (e.code, e.details))
def AccelerometerAccelerationChanged(e):
    # Forward each sample to the parent process as an "axis:acceleration"
    # string.  ``pconn`` is attached to the accelerometer object in f().
    source = e.device  # unused; kept for parity with the other handlers
    accelerometer.pconn.send("%i:%6f" % (e.index, e.acceleration))
#========================================================================================================================================================================================
def f(conn):
    # Wire up the accelerometer: store the pipe endpoint on the device
    # object (read by AccelerometerAccelerationChanged), register the
    # event handlers, open the phidget, wait for attachment, and set the
    # per-axis change triggers.  Exits the process on unrecoverable errors.
    accelerometer.pconn = conn
    try:
        accelerometer.setOnAttachHandler(AccelerometerAttached)
        accelerometer.setOnDetachHandler(AccelerometerDetached)
        accelerometer.setOnErrorhandler(AccelerometerError)
        accelerometer.setOnAccelerationChangeHandler(AccelerometerAccelerationChanged)
    except PhidgetException as e:
        print("Phidget Exception %i: %s" % (e.code, e.details))
        print("Exiting....")
        exit(1)
    print("Opening phidget object....")
    try:
        accelerometer.openPhidget()
    except PhidgetException as e:
        print("Phidget Exception %i: %s" % (e.code, e.details))
        print("Exiting....")
        exit(1)
    print("Waiting for attach....")
    try:
        accelerometer.waitForAttach(10000)
    except PhidgetException as e:
        # Attach timed out/failed: try to close cleanly, then exit.
        print("Phidget Exception %i: %s" % (e.code, e.details))
        try:
            accelerometer.closePhidget()
        except PhidgetException as e:
            print("Phidget Exception %i: %s" % (e.code, e.details))
            print("Exiting....")
            exit(1)
        print("Exiting....")
        exit(1)
    else:
        try:
            numAxis = accelerometer.getAxisCount()
            # Only report acceleration changes above these thresholds (g).
            accelerometer.setAccelChangeTrigger(0, 0.02)
            accelerometer.setAccelChangeTrigger(1, 0.02)
            if numAxis > 2:
                accelerometer.setAccelChangeTrigger(2, 0.500)
        except PhidgetException as e:
            print("Phidget Exception %i: %s" % (e.code, e.details))
    DisplayDeviceInfo()
if __name__ == '__main__':
    # Create a pipe and hand its child end to the accelerometer setup;
    # samples arrive on parent_conn as "axis:value" strings.
    parent_conn, child_conn = Pipe()
    f(child_conn)
#--------------------------------------------------------------------------------------------------------------------------------------
def M1_Attached(e):
    # Attach callback for the M1 servo controller.
    attached = e.device
    print("Servo %i Attached!" % (attached.getSerialNum()))
def M1_Detached(e):
    # Detach callback for the M1 servo controller.
    detached = e.device
    print("Servo %i Detached!" % (detached.getSerialNum()))
def M1_Error(e):
    # Error callback for the M1 servo: log the error, then close the device.
    try:
        source = e.device
        # BUGFIX: the original format string "Phidget Error %i: %s" had two
        # placeholders for three arguments, so this print raised TypeError
        # whenever an error event fired.  Use the three-placeholder form
        # that AccelerometerError already uses.
        print("Servo %i: Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description))
    except PhidgetException as e:
        print("Phidget Exception %i: %s" % (e.code, e.details))
    advancedServo.closePhidget()
def M1_Init():
try:
advancedServo.openPhidget(119917)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
advancedServo.waitForAttach(10000)
except PhidgetException as e:
print("Exception %i: %s " % (e.code, e.details))
try:
advancedServo.closePhidget()
except PhidgetException as e:
print("Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Error advancedServo.waitForAttach....")
try:
advancedServo.setOnAttachHandler(M1_Attached)
advancedServo.setOnDetachHandler(M1_Detached)
advancedServo.setOnErrorhandler(M1_Error)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
advancedServo.setServoType(0, ServoTypes.PHIDGET_SERVO_HITEC_HS422)
advancedServo.setVelocityLimit(0, advancedServo.getVelocityMax(0))
advancedServo.setAcceleration(0, advancedServo.getAccelerationMax(0))
def M1_Start():
try:
advancedServo.setEngaged(0, True)
except:
print "Can't start M1"
try:
advancedServo.closePhidget()
M1_Init()
except:
print "Can't reopen M1'"
def M1_Stop():
try:
advancedServo.setEngaged(0, False)
except:
print "Can't stop M1"
def M1_Rotate(pos):
if cmd[0:1] == '1':
try:
pos=float(cmd[2:])
if (abs(pos) > delta_pos):
if pos>0:
advancedServo.setPosition(0, advancedServo.getPosition(0)-1) #
else:
advancedServo.setPosition(0, advancedServo.getPosition(0)+1) #
except Exception as e:
print("M1 Exception %i: %s" % (e.code, e.details))
def M1_Close():
advancedServo.closePhidget()
#---------------------------------------------------------------------------------------------------------------------------------------------------
def M2_Attached(e):
attached = e.device
print("Servo %i Attached!" % (attached.getSerialNum()))
def M2_Detached(e):
detached = e.device
print("Servo %i Detached!" % (detached.getSerialNum()))
def M2_Error(e):
try:
source = e.device
print("Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description))
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
advancedServo2.closePhidget()
def M2_Init():
try:
advancedServo2.openPhidget(119567)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
advancedServo2.waitForAttach(10000)
except PhidgetException as e:
print("Exception %i: %s " % (e.code, e.details))
try:
advancedServo2.closePhidget()
except PhidgetException as e:
print("Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Error advancedServo.waitForAttach....")
try:
advancedServo2.setOnAttachHandler(M2_Attached)
advancedServo2.setOnDetachHandler(M2_Detached)
advancedServo2.setOnErrorhandler(M2_Error)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
advancedServo2.setServoType(0, ServoTypes.PHIDGET_SERVO_DEFAULT)
advancedServo2.setVelocityLimit(0, advancedServo2.getVelocityMax(0))
advancedServo2.setAcceleration(0, advancedServo2.getAccelerationMax(0))
def M2_Start():
advancedServo2.setEngaged(0, True)
def M2_Stop():
advancedServo2.setEngaged(0, False)
def M2_Rotate(cmd):
if cmd[0:1] == '0':
try:
pos=float(cmd[2:])
if (abs(pos) > delta_pos):
if pos>0:
advancedServo2.setPosition(0, advancedServo2.getPosition(0)-1) #
else:
advancedServo2.setPosition(0, advancedServo2.getPosition(0)+1) #
except Exception as e:
print("M2 Exception %i: %s" % (e.code, e.details))
def M2_Close():
advancedServo2.closePhidget()
#---------------------------------------------------------------------------------------------------------------------------------------------------
M1_Init()
M1_Start()
M2_Init()
M2_Start()
#----------------------------------------------------------------------------------------------------------------------------------------------------
while(True):
cmd = parent_conn.recv()
M1_Rotate(cmd)
M2_Rotate(cmd)
|
AlessandroCorsi/fibermodes | refs/heads/master | tests/fiber/solver/test_tlsif.py | 2 | # This file is part of FiberModes.
#
# FiberModes is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FiberModes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FiberModes. If not, see <http://www.gnu.org/licenses/>.
"""Test suite for fibermodes.fiber.solver.tlsif module."""
import unittest
from fibermodes import FiberFactory, Mode, Wavelength
from math import sqrt
class TestTLSIF(unittest.TestCase):
"""Test suite for three-layers step-index fibers."""
def setUp(self):
self.f = FiberFactory()
def _compareWithCo(self, fiber, mode, neff):
co = fiber.cutoff(mode)
wl = Wavelength(1550e-9)
n = max(l.maxIndex(wl) for l in fiber.layers)
r = fiber.innerRadius(-1)
nmax = sqrt(n**2 - (co / (r * wl.k0))**2)
self.assertLess(neff, nmax)
ms = Mode(mode.family, mode.nu+1, mode.m)
co = fiber.cutoff(ms)
nmin = sqrt(n**2 - (co / (r * wl.k0))**2)
self.assertGreater(neff, nmin)
def testCase1LP(self):
self.f.addLayer(radius=4e-6, index=1.4489)
self.f.addLayer(radius=10e-6, index=1.4474)
self.f.addLayer(index=1.4444)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('LP', 0, 1), 1.4472309),
(Mode('LP', 1, 1), 1.4457064),
(Mode('LP', 0, 2), 1.4445245)]
lpmodes = fiber.findLPmodes(wl)
self.assertEqual(len(lpmodes), len(sols))
for mode, neff in sols:
self._compareWithCo(fiber, mode, neff)
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-5), neff)
def testCase2LP(self):
"""Annular-core fiber."""
self.f.addLayer(radius=4e-6, index=1.4444)
self.f.addLayer(radius=10e-6, index=1.4489)
self.f.addLayer(index=1.4444)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('LP', 0, 1), 1.4472296),
(Mode('LP', 1, 1), 1.4465947),
(Mode('LP', 2, 1), 1.4452985)]
lpmodes = fiber.findLPmodes(wl)
self.assertEqual(len(lpmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-4), neff)
def testCase3LP(self):
self.f.addLayer(radius=4e-6, index=1.4474)
self.f.addLayer(radius=10e-6, index=1.4489)
self.f.addLayer(index=1.4444)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('LP', 0, 1), 1.44767716),
(Mode('LP', 1, 1), 1.44675879),
(Mode('LP', 2, 1), 1.44534443),
(Mode('LP', 0, 2), 1.44452950)]
lpmodes = fiber.findLPmodes(wl)
self.assertEqual(len(lpmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-5), neff)
def testCase4LP(self):
self.f.addLayer(radius=4e-6, index=1.4444)
self.f.addLayer(radius=10e-6, index=1.4489)
self.f.addLayer(index=1.4474)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('LP', 0, 1), 1.447761788),
(Mode('LP', 1, 1), 1.447424556)]
lpmodes = fiber.findLPmodes(wl)
self.assertEqual(len(lpmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-5), neff)
def testCase5LP(self):
"""W-type fiber."""
self.f.addLayer(radius=10e-6, index=1.4489)
self.f.addLayer(radius=16e-6, index=1.4444)
self.f.addLayer(index=1.4474)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('LP', 0, 1), 1.44809)] # From OptiFiber
lpmodes = fiber.findLPmodes(wl)
self.assertEqual(len(lpmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-5), neff)
def testCase1Vector(self):
self.f.addLayer(radius=4e-6, index=1.4489)
self.f.addLayer(radius=10e-6, index=1.4474)
self.f.addLayer(index=1.4444)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('HE', 1, 1), 1.44722991),
(Mode('TE', 0, 1), 1.44570643),
(Mode('TM', 0, 1), 1.445706197),
(Mode('HE', 2, 1), 1.445704747),
(Mode('EH', 1, 1), 1.44452366)]
vmodes = fiber.findVmodes(wl)
self.assertEqual(len(vmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-5), neff)
def testCase2Vector(self):
"""Annular-core fiber."""
self.f.addLayer(radius=4e-6, index=1.4444)
self.f.addLayer(radius=10e-6, index=1.4489)
self.f.addLayer(index=1.4444)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('HE', 1, 1), 1.4472267686),
(Mode('TE', 0, 1), 1.4465947086),
(Mode('HE', 2, 1), 1.446591650399142),
(Mode('TM', 0, 1), 1.446587672894224),
(Mode('EH', 1, 1), 1.445296246037881),
(Mode('HE', 3, 1), 1.4452944761507711)]
vmodes = fiber.findVmodes(wl)
self.assertEqual(len(vmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-4), neff)
def testCase3Vector(self):
self.f.addLayer(radius=4e-6, index=1.4474)
self.f.addLayer(radius=10e-6, index=1.4489)
self.f.addLayer(index=1.4444)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('HE', 1, 1), 1.447675825578464),
(Mode('TE', 0, 1), 1.44675879173106),
(Mode('HE', 2, 1), 1.4467563516096955),
(Mode('TM', 0, 1), 1.4467544714182625),
(Mode('EH', 1, 1), 1.445343017591462),
(Mode('HE', 3, 1), 1.4453405392005971),
(Mode('HE', 1, 2), 1.4445293834464685)]
vmodes = fiber.findVmodes(wl)
self.assertEqual(len(vmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-5), neff)
def testCase4Vector(self):
self.f.addLayer(radius=4e-6, index=1.4444)
self.f.addLayer(radius=10e-6, index=1.4489)
self.f.addLayer(index=1.4474)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('HE', 1, 1), 1.4477608163543525),
(Mode('TE', 0, 1), 1.447424556045192),
(Mode('HE', 2, 1), 1.4474241401608832),
(Mode('TM', 0, 1), 1.4474235819526378)]
vmodes = fiber.findVmodes(wl)
self.assertEqual(len(vmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-5), neff)
def testCase5Vector(self):
"""Annular-core fiber."""
self.f.addLayer(radius=10e-6, index=1.4489)
self.f.addLayer(radius=16e-6, index=1.4444)
self.f.addLayer(index=1.4474)
fiber = self.f[0]
wl = Wavelength(1550e-9)
sols = [(Mode('HE', 1, 1), 1.448089116517021)]
vmodes = fiber.findVmodes(wl)
self.assertEqual(len(vmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-6), neff)
wl = Wavelength(800e-9)
sols = [(Mode('HE', 1, 1), 1.448638518377151),
(Mode('TE', 0, 1), 1.4482384223480635),
(Mode('TM', 0, 1), 1.448237707949158),
(Mode('EH', 1, 1), 1.4477149), # Values from OptiFiber
(Mode('HE', 1, 2), 1.4475354),
(Mode('HE', 2, 1), 1.4482380),
(Mode('HE', 3, 1), 1.4477146)]
vmodes = fiber.findVmodes(wl)
self.assertEqual(len(vmodes), len(sols))
for mode, neff in sols:
self.assertAlmostEqual(fiber.neff(mode, wl, delta=1e-6), neff)
def _testFiberCutoff(self, rho, n, cutoffs, places=7):
self.setUp()
self.f.addLayer(radius=rho[0], index=n[0])
self.f.addLayer(radius=rho[1], index=n[1])
self.f.addLayer(index=n[2])
fiber = self.f[0]
for mode, co in cutoffs.items():
self.assertAlmostEqual(fiber.cutoff(mode), co,
places=places,
msg=str(mode))
def testLPCutoffA(self):
rho = [4e-6, 6e-6]
n = [1.47, 1.43, 1.44]
cutoffs = {
Mode('LP', 1, 1): 4.034844259728652,
Mode('LP', 2, 1): 6.1486114063146005,
Mode('LP', 3, 1): 8.07126756792508,
Mode('LP', 4, 1): 9.911798124561814,
Mode('LP', 0, 2): 6.568180843774973,
Mode('LP', 1, 2): 8.922361377477307,
Mode('LP', 2, 2): 11.06585974653044,
}
self._testFiberCutoff(rho, n, cutoffs)
def testVCutoffA(self):
rho = [4e-6, 6e-6]
n = [1.47, 1.43, 1.44]
cutoffs = {
Mode('TE', 0, 1): 4.034844259728651,
Mode('HE', 2, 1): 4.071976253449693,
Mode('TM', 0, 1): 4.058192997221014,
Mode('EH', 1, 1): 6.158255614959294,
Mode('HE', 3, 1): 6.189815896708511,
Mode('EH', 2, 1): 8.080052963422796,
Mode('HE', 4, 1): 8.115131183786337,
Mode('EH', 3, 1): 9.91993372343631,
Mode('HE', 5, 1): 9.957649725258843,
Mode('HE', 1, 2): 6.589429513136826,
Mode('TE', 0, 2): 8.922361377477312,
Mode('HE', 2, 2): 8.948985568829624,
Mode('TM', 0, 2): 8.953573638542046,
Mode('EH', 1, 2): 11.078160141775095,
Mode('HE', 3, 2): 11.09621953195914,
}
self._testFiberCutoff(rho, n, cutoffs)
def testLPCutoffB(self):
rho = [4e-6, 6e-6]
n = [1.47, 1.45, 1.44]
cutoffs = {
Mode('LP', 1, 1): 3.1226096356321893,
Mode('LP', 2, 1): 5.096112984974791,
Mode('LP', 3, 1): 6.968066798210773,
Mode('LP', 4, 1): 8.8012241922413,
Mode('LP', 5, 1): 10.61168894514904,
Mode('LP', 0, 2): 4.676313597977374,
Mode('LP', 1, 2): 6.809117963058563,
Mode('LP', 2, 2): 8.743801177466404,
Mode('LP', 3, 2): 10.598944233713851,
Mode('LP', 0, 3): 8.047306845386878,
Mode('LP', 1, 3): 9.953012983126248,
}
self._testFiberCutoff(rho, n, cutoffs)
def testVCutoffB(self):
rho = [4e-6, 6e-6]
n = [1.47, 1.45, 1.44]
cutoffs = {
Mode('TM', 0, 1): 3.111217543593232,
Mode('TE', 0, 1): 3.122609635632189,
Mode('HE', 2, 1): 3.1400200936070846,
Mode('EH', 1, 1): 4.669304720761619,
Mode('HE', 1, 2): 5.088131872468638,
Mode('HE', 3, 1): 5.118129406153233,
Mode('EH', 2, 1): 5.89459696711537,
Mode('TM', 0, 2): 6.7934897736915065,
Mode('HE', 2, 2): 6.80880538983052,
Mode('TE', 0, 2): 6.809117963058563,
Mode('HE', 4, 1): 6.993124138822584,
Mode('EH', 3, 1): 7.08470361250189,
Mode('EH', 1, 2): 8.049518101191492,
Mode('EH', 4, 1): 8.382115427339519,
Mode('HE', 1, 3): 8.735885113376382,
Mode('HE', 3, 2): 8.747374022674864,
Mode('HE', 5, 1): 8.828714775284547,
Mode('EH', 2, 2): 9.416036962761297,
Mode('TE', 0, 3): 9.95301298312625,
Mode('HE', 2, 3): 9.962177458632713,
Mode('TM', 0, 3): 9.962228554278012,
Mode('HE', 4, 2): 10.605263286689778,
Mode('HE', 6, 1): 10.64128881198123,
}
self._testFiberCutoff(rho, n, cutoffs)
def testLPCutoffC(self):
rho = [4e-6, 6e-6]
n = [1.43, 1.47, 1.44]
cutoffs = {
Mode('LP', 1, 1): 3.010347467577181,
Mode('LP', 2, 1): 4.404178238529268,
Mode('LP', 3, 1): 5.631998448700369,
Mode('LP', 4, 1): 6.7965518925242865,
Mode('LP', 5, 1): 7.93118037952865,
Mode('LP', 6, 1): 9.050134813376669,
Mode('LP', 7, 1): 10.160295215952916,
Mode('LP', 0, 2): 10.813986300277824,
}
self._testFiberCutoff(rho, n, cutoffs)
def testVCutoffC(self):
rho = [4e-6, 6e-6]
n = [1.43, 1.47, 1.44]
cutoffs = {
Mode('TE', 0, 1): 3.0103474675771804,
Mode('TM', 0, 1): 3.0732744029480012,
Mode('TE', 0, 2): 11.215674035379953,
Mode('TM', 0, 2): 11.29528661745687,
Mode('EH', 1, 1): 4.43599929326006,
Mode('EH', 2, 1): 5.660787662502081,
Mode('EH', 3, 1): 6.821606789237238,
Mode('EH', 4, 1): 7.952484494712328,
Mode('EH', 5, 1): 9.067961694568465,
Mode('EH', 6, 1): 10.175030484705225,
Mode('HE', 2, 1): 3.0406851062929734,
Mode('HE', 3, 1): 4.438962073092406,
Mode('HE', 4, 1): 5.668294434394004,
Mode('HE', 5, 1): 6.833174601202006,
Mode('HE', 6, 1): 7.967602949136493,
Mode('HE', 7, 1): 9.086138896920177,
Mode('HE', 8, 1): 10.195817896822504,
Mode('HE', 1, 2): 10.844609209283163,
Mode('HE', 2, 2): 11.249595259175186,
Mode('EH', 1, 2): 11.843142062848772,
Mode('HE', 3, 2): 11.832900895486063,
Mode('EH', 2, 2): 12.538738658782329,
Mode('HE', 4, 2): 12.528230836249172,
}
self._testFiberCutoff(rho, n, cutoffs)
def testLPCutoffD(self):
rho = [4e-6, 6e-6]
n = [1.45, 1.47, 1.44]
cutoffs = {
Mode('LP', 1, 1): 2.702968459636167,
Mode('LP', 2, 1): 4.150583195855695,
Mode('LP', 3, 1): 5.430106475704322,
Mode('LP', 4, 1): 6.636532360901636,
Mode('LP', 5, 1): 7.804416891196523,
Mode('LP', 6, 1): 8.949785986163985,
Mode('LP', 7, 1): 10.080981852288588,
Mode('LP', 0, 2): 5.640393617621346,
Mode('LP', 1, 2): 8.008821133207624,
Mode('LP', 2, 2): 9.679408185385487,
Mode('LP', 3, 2): 10.97034948328025,
Mode('LP', 0, 3): 9.684508718046876,
}
self._testFiberCutoff(rho, n, cutoffs)
def testVCutoffD(self):
rho = [4e-6, 6e-6]
n = [1.45, 1.47, 1.44]
cutoffs = {
Mode('TE', 0, 1): 2.7029684596361676,
Mode('HE', 2, 1): 2.7228694802366005,
Mode('TM', 0, 1): 2.727734813318786,
Mode('EH', 1, 1): 4.1655157193520465,
Mode('HE', 3, 1): 4.176283860563018,
Mode('EH', 2, 1): 5.445008118664826,
Mode('HE', 4, 1): 5.458819017982079,
Mode('HE', 1, 2): 5.634608766525465,
Mode('HE', 1, 2): 5.634608766525469,
Mode('EH', 3, 1): 6.650249935726423,
Mode('HE', 5, 1): 6.666904206459127,
Mode('EH', 4, 1): 7.816502257699482,
Mode('HE', 6, 1): 7.835721155652211,
Mode('TM', 0, 2): 7.993329888241878,
Mode('HE', 2, 2): 8.003273407163517,
Mode('TE', 0, 2): 8.008821133207624,
Mode('EH', 5, 1): 8.960139011974638,
Mode('HE', 7, 1): 8.981619572965892,
Mode('EH', 1, 2): 9.680599063398324,
Mode('HE', 3, 2): 9.681017840406332,
Mode('HE', 1, 3): 9.686653009072776,
Mode('EH', 6, 1): 10.089675701100248,
Mode('HE', 8, 1): 10.113121418012547,
Mode('HE', 4, 2): 10.980585429648642,
Mode('EH', 2, 2): 10.98222290317733,
}
self._testFiberCutoff(rho, n, cutoffs)
def testLPCutoffE(self):
rho = [4e-6, 6e-6]
n = [1.44, 1.47, 1.44]
cutoffs = {
Mode('LP', 1, 1): 2.85904035776636975,
Mode('LP', 2, 1): 4.2866039225676404,
Mode('LP', 3, 1): 5.540915061306307,
Mode('LP', 4, 1): 6.725406031775626,
Mode('LP', 5, 1): 7.8752953434136135,
Mode('LP', 6, 1): 9.006117838838101,
Mode('LP', 7, 1): 10.125608397188888,
Mode('LP', 0, 2): 9.482807865823602,
Mode('LP', 1, 2): 10.27844425627377,
}
self._testFiberCutoff(rho, n, cutoffs)
def testVCutoffE(self):
rho = [4e-6, 6e-6]
n = [1.44, 1.47, 1.44]
cutoffs = {
Mode('TE', 0, 1): 2.859040357765955,
Mode('HE', 2, 1): 2.8832370027681815,
Mode('TM', 0, 1): 2.9017337070631224,
Mode('EH', 1, 1): 4.310052598194367,
Mode('HE', 3, 1): 4.316477364814478,
Mode('EH', 2, 1): 5.563044551409131,
Mode('HE', 4, 1): 5.573271721659564,
Mode('EH', 3, 1): 6.745115477350213,
Mode('HE', 5, 1): 6.758852822325253,
Mode('EH', 4, 1): 7.892295201926296,
Mode('HE', 6, 1): 7.9091525519850805,
Mode('EH', 5, 1): 9.020476094070924,
Mode('HE', 7, 1): 9.040050082529529,
Mode('EH', 6, 1): 10.137550450462196,
Mode('HE', 8, 1): 10.159460454232093,
Mode('HE', 1, 2): 9.482807865823602,
Mode('TE', 0, 2): 10.278444256273769,
Mode('HE', 2, 2): 10.289797328577112,
Mode('TM', 0, 2): 10.310990340988402,
}
self._testFiberCutoff(rho, n, cutoffs)
def testCutoffTableIII(self):
"""Values from cutoff acticle, Table III."""
n = (1.444, 1.474, 1.444)
b = 10e-6
rho = (0.25*b, b)
cutoffs = {
Mode('TE', 0, 1): 2.4161,
Mode('HE', 2, 1): 2.4336,
Mode('TM', 0, 1): 2.4257,
# Mode('EH', 1, 1): 3.8330,
Mode('HE', 3, 1): 3.8561,
Mode('HE', 1, 2): 4.4475,
# Mode('EH', 2, 1): 5.1359,
Mode('HE', 4, 1): 5.1603,
Mode('TE', 0, 2): 5.7336,
Mode('HE', 2, 2): 5.7418,
Mode('TM', 0, 2): 5.7610,
}
self._testFiberCutoff(rho, n, cutoffs, 4)
rho = (0.5*b, b)
cutoffs = {
Mode('TE', 0, 1): 2.5544,
Mode('HE', 2, 1): 2.5742,
Mode('TM', 0, 1): 2.5822,
# Mode('EH', 1, 1): 3.9294,
Mode('HE', 3, 1): 3.9648,
Mode('HE', 1, 2): 6.3932,
# Mode('EH', 2, 1): 5.1976,
Mode('HE', 4, 1): 5.2316,
Mode('TE', 0, 2): 7.3236,
Mode('HE', 2, 2): 7.3337,
Mode('TM', 0, 2): 7.3583,
}
self._testFiberCutoff(rho, n, cutoffs, 4)
rho = (0.75*b, b)
cutoffs = {
Mode('TE', 0, 1): 3.1663,
Mode('HE', 2, 1): 3.1943,
Mode('TM', 0, 1): 3.2188,
# Mode('EH', 1, 1): 4.6458,
Mode('HE', 3, 1): 4.7123,
Mode('HE', 1, 2): 12.6056,
# Mode('EH', 2, 1): 5.9360,
Mode('HE', 4, 1): 6.0074,
Mode('TE', 0, 2): 13.3513,
Mode('HE', 2, 2): 13.3631,
Mode('TM', 0, 2): 13.3822,
}
self._testFiberCutoff(rho, n, cutoffs, 4)
def testBuresEx334(self):
self.f.addLayer(material="SiO2GeO2", radius=4.5e-6,
index=1.448918, wl=1550e-9)
self.f.addLayer(material="Silica", radius=62.5e-6)
self.f.addLayer(material="Air")
fiber = self.f[0]
# Fig 3.31
wl = Wavelength(900e-9)
vgc = 1 / fiber.ng(Mode("HE", 1, 1), wl)
self.assertGreater(vgc, 0.680)
self.assertLess(vgc, 0.6805)
vgc = 1 / fiber.ng(Mode("EH", 1, 1), wl)
self.assertGreater(vgc, 0.6825)
self.assertLess(vgc, 0.683)
wl = Wavelength(1600e-9)
vgc = 1 / fiber.ng(Mode("HE", 1, 1), wl)
self.assertGreater(vgc, 0.6805)
self.assertLess(vgc, 0.6815)
vgc = 1 / fiber.ng(Mode("EH", 1, 1), wl)
self.assertGreater(vgc, 0.683)
self.assertLess(vgc, 0.6836)
# Fig 3.32
# wl = Wavelength(900e-9)
# D = fiber.D(Mode("HE", 1, 1), wl)
# self.assertGreater(D, -80)
# self.assertLess(D, -60)
# D = fiber.D(Mode("EH", 1, 1), wl)
# self.assertGreater(D, -80)
# self.assertLess(D, -60)
# wl = Wavelength(1290e-9)
# D = fiber.D(Mode("HE", 1, 1), wl)
# self.assertGreater(D, -10)
# self.assertLess(D, 10)
# D = fiber.D(Mode("EH", 1, 1), wl)
# self.assertGreater(D, -10)
# self.assertLess(D, 10)
# wl = Wavelength(1550e-9)
# D = fiber.D(Mode("HE", 1, 1), wl)
# self.assertGreater(D, 10)
# self.assertLess(D, 20)
# wl = Wavelength(1600e-9)
# D = fiber.D(Mode("HE", 1, 1), wl)
# self.assertGreater(D, 15)
# self.assertLess(D, 30)
# D = fiber.D(Mode("EH", 1, 1), wl)
# self.assertGreater(D, 15)
# self.assertLess(D, 30)
if __name__ == "__main__":
unittest.main()
|
Einsteinish/PyTune3 | refs/heads/master | vendor/feedvalidator/demo/src/tests/config.py | 19 | from os import environ
# This is a test config, used by the runtests script, to ensure check.cgi
# runs without requiring a web server.
HOMEURL = 'http://localhost/check'
PYDIR = '/usr/lib/python/'
WEBDIR = environ['FEEDVALIDATOR_HOME']
SRCDIR = WEBDIR + '/src'
DOCSURL = 'docs'
CSSURL = 'css'
|
leiferikb/bitpop | refs/heads/master | src/tools/telemetry/telemetry/web_perf/metrics/smoothness.py | 1 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.web_perf.metrics import timeline_based_metric
from telemetry.web_perf.metrics import rendering_stats
from telemetry.page.perf_tests_helper import FlattenList
from telemetry.util import statistics
class SmoothnessMetric(timeline_based_metric.TimelineBasedMetric):
def __init__(self):
super(SmoothnessMetric, self).__init__()
def AddResults(self, model, renderer_thread, interaction_records, results):
renderer_process = renderer_thread.parent
stats = rendering_stats.RenderingStats(
renderer_process, model.browser_process,
[r.GetBounds() for r in interaction_records])
if stats.mouse_wheel_scroll_latency:
mean_mouse_wheel_scroll_latency = statistics.ArithmeticMean(
stats.mouse_wheel_scroll_latency)
mouse_wheel_scroll_latency_discrepancy = statistics.DurationsDiscrepancy(
stats.mouse_wheel_scroll_latency)
results.Add('mean_mouse_wheel_scroll_latency', 'ms',
round(mean_mouse_wheel_scroll_latency, 3))
results.Add('mouse_wheel_scroll_latency_discrepancy', 'ms',
round(mouse_wheel_scroll_latency_discrepancy, 4))
if stats.touch_scroll_latency:
mean_touch_scroll_latency = statistics.ArithmeticMean(
stats.touch_scroll_latency)
touch_scroll_latency_discrepancy = statistics.DurationsDiscrepancy(
stats.touch_scroll_latency)
results.Add('mean_touch_scroll_latency', 'ms',
round(mean_touch_scroll_latency, 3))
results.Add('touch_scroll_latency_discrepancy', 'ms',
round(touch_scroll_latency_discrepancy, 4))
if stats.js_touch_scroll_latency:
mean_js_touch_scroll_latency = statistics.ArithmeticMean(
stats.js_touch_scroll_latency)
js_touch_scroll_latency_discrepancy = statistics.DurationsDiscrepancy(
stats.js_touch_scroll_latency)
results.Add('mean_js_touch_scroll_latency', 'ms',
round(mean_js_touch_scroll_latency, 3))
results.Add('js_touch_scroll_latency_discrepancy', 'ms',
round(js_touch_scroll_latency_discrepancy, 4))
# List of raw frame times.
frame_times = FlattenList(stats.frame_times)
results.Add('frame_times', 'ms', frame_times)
# Arithmetic mean of frame times.
mean_frame_time = statistics.ArithmeticMean(frame_times)
results.Add('mean_frame_time', 'ms', round(mean_frame_time, 3))
# Absolute discrepancy of frame time stamps.
frame_discrepancy = statistics.TimestampsDiscrepancy(
stats.frame_timestamps)
results.Add('jank', 'ms', round(frame_discrepancy, 4))
# Are we hitting 60 fps for 95 percent of all frames?
# We use 19ms as a somewhat looser threshold, instead of 1000.0/60.0.
percentile_95 = statistics.Percentile(frame_times, 95.0)
results.Add('mostly_smooth', 'score', 1.0 if percentile_95 < 19.0 else 0.0)
# Mean percentage of pixels approximated (missing tiles, low resolution
# tiles, non-ideal resolution tiles)
results.Add('mean_pixels_approximated', 'percent',
round(statistics.ArithmeticMean(
FlattenList(stats.approximated_pixel_percentages)), 3))
|
cchamberlain/npm-msys2 | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
|
mdole/wustl_rts_benchmarks | refs/heads/master | benchmarks/rayCast/common/runTests.py | 98 | import subprocess
import sys
import random
import os
def onPprocessors(command,p) :
if os.environ.has_key("OPENMP"):
os.putenv("OMP_NUM_THREADS", "%d" %p)
return command
elif os.environ.has_key("CILK"):
return command + " -cilk_set_worker_count " + `p`
elif os.environ.has_key("MKLROOT"):
return "export CILK_NWORKERS="+`p`+"; " + command
return command
def shellGetOutput(str) :
process = subprocess.Popen(str,shell=True,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, err = process.communicate()
if (len(err) > 0):
raise NameError(str+"\n"+output+err)
return output
def stripFloat(val) :
trunc = float(int(val*1000))/1000
return str(trunc).rstrip('0')
def runSingle(runProgram, options, ifile, procs) :
comString = "./"+runProgram+" "+options+" "+ifile
if (procs > 0) :
comString = onPprocessors(comString,procs)
out = shellGetOutput(comString)
#print(out)
try:
times = [float(str[str.index(' ')+1:]) for str in out.split('\n') if str.startswith("PBBS-time: ")]
return times
except (ValueError,IndexError):
raise NameError(comString+"\n"+out)
def runTest(runProgram, checkProgram, dataDir, test, rounds, procs, noOutput) :
random.seed()
outFile="/tmp/ofile%d_%d" %(random.randint(0, 1000000), random.randint(0, 1000000))
[weight, inputFileNames, runOptions, checkOptions] = test
if type(inputFileNames) is str :
inputFileNames = [inputFileNames]
shortInputNames = " ".join(inputFileNames)
if len(dataDir)>0:
out = shellGetOutput("cd " + dataDir + "; make " + shortInputNames)
longInputNames = " ".join(dataDir + "/" + name for name in inputFileNames)
runOptions = runOptions + " -r " + `rounds`
if (noOutput == 0) :
runOptions = runOptions + " -o " + outFile
times = runSingle(runProgram, runOptions, longInputNames, procs)
if (noOutput == 0) :
checkString = ("./" + checkProgram + " " + checkOptions + " "
+ longInputNames + " " + outFile)
checkOut = shellGetOutput(checkString)
# Allow checker output comments. Comments are lines prefixed by '::'
nonCommentLines = [s for s in checkOut.split('\n') if not s.startswith(':') and len(s)>0]
if (len(nonCommentLines) > 0) :
print("CheckOut:", checkOut)
raise NameError(checkString+"\n"+checkOut)
os.remove(outFile)
ptimes = str([stripFloat(time)
for time in times])[1:-1]
outputStr = ""
if (len(runOptions) > 0) :
outputStr = " : " + runOptions
print(`weight` + " : " + shortInputNames + outputStr + " : "
+ ptimes)
return [weight,times]
def averageTime(times) :
return sum(times)/len(times)
def timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput,
addToDatabase, problem) :
totalTime = 0
totalWeight = 0
try:
results = [runTest(runProgram, checkProgram, dataDir, test, rounds, procs,
noOutput)
for test in tests]
totalTimeMean = 0
totalTimeMin = 0
totalTimeMedian = 0
totalWeight = 0
j = 0
for (weight,times) in results:
l = len(times)
if (l == 0):
print("Warning, no timed results for", tests[j])
continue
times = sorted(times)
totalTimeMean = totalTimeMean + weight*sum(times)/l
totalTimeMin = totalTimeMin + weight*times[0]
totalTimeMedian = totalTimeMedian + weight*times[(l-1)/2]
totalWeight = totalWeight + weight
j += 1
print(name + " : " + `procs` +" : " +
"weighted time, min=" + stripFloat(totalTimeMin/totalWeight) +
" median=" + stripFloat(totalTimeMedian/totalWeight) +
" mean=" + stripFloat(totalTimeMean/totalWeight))
if (addToDatabase) :
try:
dbAddResult(problem=problem, program=runProgram, results=results, numProcs=procs, mean=totalTimeMean/totalWeight,
min=totalTimeMin/totalWeight, median=totalTimeMedian/totalWeight, tests=tests)
except:
print("Could not insert result in database. Error:", sys.exc_info()[0])
# if (os.getlogin() == 'akyrola'): raise
return 0
except NameError,v:
x, = v
print "TEST TERMINATED ABNORMALLY:\n["+x + "]"
return 1
except KeyboardInterrupt:
return 1
def getOption(str) :
a = sys.argv
l = len(a)
for i in range(1, l) :
if (a[i] == str) :
return True
return False
def getArg(str, default) :
a = sys.argv
l = len(a)
for i in range(1, l) :
if (a[i] == str and (i+1 != l)) :
return sys.argv[i+1]
return default
def getArgs() :
noOutput = getOption("-x")
addToDatabase = getOption("-d")
processors = int(getArg("-p", 0))
rounds = int(getArg("-r", 1))
return (noOutput, rounds, addToDatabase, processors)
def timeAllArgs(runProgram, problem, checkProgram, dataDir, tests) :
(noOutput, rounds, addToDatabase, procs) = getArgs()
name = os.path.basename(os.getcwd())
timeAll(name, runProgram, checkProgram, dataDir, tests, rounds, procs, noOutput, addToDatabase, problem)
#
# Database insertions
# - akyrola@cs.cmu.edu
import os
def dbInitConnection():
import MySQLdb
global cursor
# TODO: move to a config file
dbconn = MySQLdb.connect (host = "multi6.aladdin.cs.cmu.edu",
user = "pbbs",
passwd = "pbbspasshuuhaa",
db = "pbbsweb")
cursor = dbconn.cursor ()
dbconn.autocommit(1)
def dbAddResult(problem, program, results, numProcs, mean, min, median, tests):
dbInitConnection()
contentHash = computeContentHash(tests)
program = shellGetOutput("pwd").split('/')[-1].replace('\r','').replace('\n', '') + '/' + program
problemId = dbGetProblemId(problem, contentHash)
programId = dbGetProgramId(program, problemId)
hostId = getHostId()
#username = os.getlogin()
# getlogin does not work with some terminals (see various posts on web)
# guyb replaced with the following
username = os.getenv('USER')
if (numProcs == 0): numProcs = detectCPUs()
# Insert run into db
cursor.execute(""" insert into pbbs_runs (problem_id,program_id,numprocs,mean_time,min_time,median_time,username,host_id) values(
%s, %s, %s, %s, %s, %s, %s, %s)
""", (problemId, programId, numProcs, mean, min, median, username, hostId))
cursor.execute(" select last_insert_id()")
runId = cursor.fetchone()[0]
for i in range(0, len(results)):
(weight, times) = results[i]
test = tests[i]
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
for time in times:
cursor.execute(""" insert into pbbs_subruns(run_id, inputfile, time, weight, params, check_params) values(
%s, %s , %s , %s, %s, %s) """,
(runId, inputFileNames, time, weight, runOptions, checkOptions))
def computeContentHash(tests):
hash = ""
for test in tests:
[weight,inputFileNames,runOptions,checkOptions] = test
if type(inputFileNames) is list :
inputFileNames = "+".join(inputFileNames)
hash += ";%f%s%s%s" %(weight,inputFileNames.strip(), runOptions.strip(),checkOptions.strip())
hash = hash.replace(' ', '_')
return hash
def dbGetProblemId(probname, contentHash):
    """Return the db id for (problem name, content hash), inserting a new
    pbbs_problems row the first time this combination is seen."""
    cursor.execute(
        "select id from pbbs_problems where name=%s and content_hash=%s",
        (probname, contentHash))
    existing = cursor.fetchone()
    if existing is None:
        # Unknown problem/content pair: register it and read back the id.
        cursor.execute(
            "insert into pbbs_problems (name,content_hash) values(%s,%s) ",
            (probname, contentHash))
        cursor.execute(" select last_insert_id()")
        existing = cursor.fetchone()
    return existing[0]
def dbGetProgramId(progname, problemId):
    """Return the db id for a program belonging to the given problem,
    inserting a new pbbs_programs row if it is not yet registered."""
    cursor.execute(
        "select id from pbbs_programs where name=%s and problem_id=%s",
        (progname, problemId))
    existing = cursor.fetchone()
    if existing is None:
        # First sighting of this program: create the row, then fetch its id.
        cursor.execute(
            "insert into pbbs_programs (problem_id, name) values(%s, %s) ",
            (problemId, progname))
        cursor.execute(" select last_insert_id()")
        existing = cursor.fetchone()
    return existing[0]
import platform
def getHostId():
    """Return the database id of the current host configuration.

    Hosts are keyed on (hostname, procmodel, version, numprocs); a new
    pbbs_hosts row is inserted on first sight. OPENMP runs are recorded
    under a distinct "<host>[OPENMP]" pseudo-host so their timings are
    kept separate. Requires dbInitConnection() to have set the global
    cursor.

    Fix: ``os.environ.has_key("OPENMP")`` replaced with membership test —
    dict.has_key() does not exist in Python 3 (and ``in`` works in both).
    """
    (procmodel, mhz) = detectCPUModel()
    numprocs = detectCPUs()
    (sysname, nodename, release, version, machine) = os.uname()
    if "OPENMP" in os.environ:
        nodename = nodename + "[OPENMP]"
    cursor.execute("select id from pbbs_hosts where hostname=%s and procmodel=%s and version=%s and numprocs=%s", (nodename, procmodel, version, numprocs))
    row = cursor.fetchone()
    if row is None:
        # Column is "releasen" because "release" is awkward as a SQL name.
        cursor.execute(""" insert into pbbs_hosts(hostname,sysname,releasen,version,machine,numprocs,procmodel,mhz) values
               (%s, %s, %s, %s, %s, %s, %s, %s) """,
               (nodename, sysname, release, version, machine, numprocs, procmodel, mhz))
        cursor.execute(" select last_insert_id()")
        row = cursor.fetchone()
    return row[0]
def detectCPUModel():
    """Best-effort detection of the CPU model name and clock speed.

    Uses system_profiler on Darwin and /proc/cpuinfo elsewhere. Returns
    (model, mhz); on any failure it prints a warning and returns whatever
    was gathered so far (platform.processor() and 0 by default).

    Fixes: the bare ``except:`` is narrowed to ``except Exception``, and
    the handler no longer calls sys.exc_info() — this script does not
    visibly import sys, so the old handler could itself raise NameError.
    """
    mhz = 0
    model = platform.processor()
    try:
        if (platform.system() == "Darwin"):
            model = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Name'")
            mhz = shellGetOutput("system_profiler SPHardwareDataType |grep 'Processor Speed'")
        else:
            model = shellGetOutput('grep "model name" /proc/cpuinfo').split('\n')[0]
            mhz = shellGetOutput('grep "cpu MHz" /proc/cpuinfo').split('\n')[0]
        # Both sources yield "Label : value"; keep only the value.
        model = model.split(':')[-1].strip()
        mhz = mhz.split(':')[-1].strip()
    except Exception as err:
        # Could not get processor model
        print("Could not determine CPU model", err)
    return (model, mhz)
def detectCPUs():
    """
    Detects the number of CPUs on a system. Cribbed from pp.

    Checks, in order: POSIX sysconf (Linux/Unix), sysctl (OSX), the
    NUMBER_OF_PROCESSORS environment variable (Windows), then falls back
    to 1.

    Fixes: dict.has_key() (removed in Python 3) replaced with ``in``
    membership tests, and os.popen2 (also removed in Python 3) replaced
    with os.popen, which behaves identically here on both versions.
    """
    # Linux, Unix and MacOS:
    if hasattr(os, "sysconf"):
        if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
            # Linux & Unix:
            ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
            if isinstance(ncpus, int) and ncpus > 0:
                return ncpus
        else:  # OSX:
            return int(os.popen("sysctl -n hw.ncpu").read())
    # Windows:
    if "NUMBER_OF_PROCESSORS" in os.environ:
        ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
        if ncpus > 0:
            return ncpus
    return 1  # Default
|
spaceone/pyjs | refs/heads/master | pyjswidgets/pyjamas/ui/MenuBarPopupPanel.py | 9 | # Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from pyjamas.ui.PopupPanel import PopupPanel
class MenuBarPopupPanel(PopupPanel):
    """Auto-hiding popup used by a MenuBar to display a MenuItem's submenu."""

    def __init__(self, item, **kwargs):
        self.item = item
        kwargs['Widget'] = item.getSubMenu()
        # autoHide=True, modal=False: the popup dismisses itself on
        # clicks outside of it and does not block other widgets.
        PopupPanel.__init__(self, True, False, **kwargs)
        item.getSubMenu().onShow()

    def onEventPreview(self, event):
        # Renamed local from 'type' to avoid shadowing the builtin.
        event_type = DOM.eventGetType(event)
        if event_type == "click":
            target = DOM.eventGetTarget(event)
            parentMenuElement = self.item.getParentMenu().getElement()
            if DOM.isOrHasChild(parentMenuElement, target):
                # Clicks on the owning menu bar are handled by the menu bar
                # itself (which closes this popup); suppress auto-hide here.
                return False
        return PopupPanel.onEventPreview(self, event)

Factory.registerClass('pyjamas.ui.MenuBarPopupPanel', 'MenuBarPopupPanel', MenuBarPopupPanel)
|
jzoldak/edx-platform | refs/heads/master | openedx/core/djangoapps/ccxcon/migrations/0001_initial_ccxcon_model.py | 65 | """
Initial migration
"""
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """
    Initial migration for CCXCon model
    """
    # NOTE: applied migrations must stay code-frozen; comments only.

    # First migration of the app: no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CCXCon',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Unique endpoint URL identifying the CCXCon service instance.
                ('url', models.URLField(unique=True, db_index=True)),
                # OAuth credentials used to authenticate against that service.
                ('oauth_client_id', models.CharField(max_length=255)),
                ('oauth_client_secret', models.CharField(max_length=255)),
                ('title', models.CharField(max_length=255)),
            ],
        ),
    ]
|
pedro2d10/SickRage-FR | refs/heads/develop | sickbeard/providers/transmitthenet.py | 1 | # coding=utf-8
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import traceback
from requests.utils import dict_from_cookiejar
from urllib import urlencode
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size, try_int
from sickrage.helper.exceptions import AuthException
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class TransmitTheNetProvider(TorrentProvider):  # pylint: disable=too-many-instance-attributes
    """Torrent provider for the TransmitTheNet private tracker.

    Logs in with username/password, scrapes the HTML torrent listing and
    yields (title, url, size, seeders, leechers) result tuples.
    """

    def __init__(self):

        # Provider Init
        TorrentProvider.__init__(self, "TransmitTheNet")

        # Credentials
        self.username = None
        self.password = None

        # Torrent Stats
        self.ratio = None
        self.minseed = None
        self.minleech = None
        self.freeleech = None

        # URLs
        self.url = 'https://transmithe.net/'
        self.urls = {
            'login': 'https://transmithe.net/login.php',
            'search': 'https://transmithe.net/torrents.php',
            'base_url': self.url,
        }

        # Proper Strings

        # Cache
        self.cache = tvcache.TVCache(self)

    def _check_auth(self):
        """Raise AuthException unless both credentials are configured."""
        if not self.username or not self.password:
            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")

        return True

    def login(self):
        """Log in to the tracker; returns True on success.

        An existing session cookie short-circuits the login POST.
        """
        if any(dict_from_cookiejar(self.session.cookies).values()):
            return True

        login_params = {
            'username': self.username,
            'password': self.password,
            'keeplogged': 'on',
            'login': 'Login'
        }

        response = self.get_url(self.urls['login'], post_data=login_params, timeout=30)
        if not response:
            logger.log(u"Unable to connect to provider", logger.WARNING)
            return False

        # The site returns an error page (not an HTTP error) on bad creds.
        if re.search('Username Incorrect', response) or re.search('Password Incorrect', response):
            logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
            return False

        return True

    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
        """Scrape the torrent listing for each search string.

        search_strings maps a mode ('RSS', 'Episode', ...) to a list of
        query strings. Returns result tuples sorted by seeders (desc).
        """
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: {search}".format(search=search_string.decode('utf-8')),
                               logger.DEBUG)

                # (0, 1)[cond] / (a, b)[cond] pick the second element when
                # the condition is true (old-style conditional expression).
                search_params = {
                    'searchtext': search_string,
                    'filter_freeleech': (0, 1)[self.freeleech is True],
                    'order_by': ('seeders', 'time')[mode == 'RSS'],
                    "order_way": "desc"
                }

                if not search_string:
                    del search_params['searchtext']

                # search_url is built only for the debug log; the request
                # itself passes params separately.
                search_url = self.urls['search'] + "?" + urlencode(search_params)
                logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
                data = self.get_url(self.urls['search'], params=search_params)
                if not data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                try:
                    with BS4Parser(data, 'html5lib') as html:
                        torrent_table = html.find('table', {'id': 'torrent_table'})
                        if not torrent_table:
                            logger.log(u"Data returned from %s does not contain any torrents" % self.name, logger.DEBUG)
                            continue

                        torrent_rows = torrent_table.findAll('tr', {'class': 'torrent'})

                        # Continue only if one Release is found
                        if not torrent_rows:
                            logger.log(u"Data returned from %s does not contain any torrents" % self.name, logger.DEBUG)
                            continue

                        for torrent_row in torrent_rows:
                            freeleech = torrent_row.find('img', alt="Freeleech") is not None
                            if self.freeleech and not freeleech:
                                continue

                            download_item = torrent_row.find('a', {'title': 'Download Torrent'})
                            if not download_item:
                                continue

                            download_url = self.urls['base_url'] + download_item['href']

                            # Title comes from the data-src attribute with
                            # its file extension stripped; fall back to the
                            # hover-anchor text, cleaned of bracket markup.
                            temp_anchor = torrent_row.find('a', {"data-src": True})
                            title = temp_anchor['data-src'].rsplit('.', 1)[0]
                            if not title:
                                title = torrent_row.find('a', onmouseout='return nd();').string
                                title = title.replace("[", "").replace("]", "").replace("/ ", "") if title else ''

                            # The cells following the time column hold
                            # seeders, then leechers.
                            temp_anchor = torrent_row.find('span', class_='time').parent.find_next_sibling()
                            if not all([title, download_url]):
                                continue

                            seeders = try_int(temp_anchor.text.strip())
                            leechers = try_int(temp_anchor.find_next_sibling().text.strip())

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log(u"Discarding torrent because it doesn't meet the"
                                               u" minimum seeders or leechers: {} (S:{} L:{})".format(title, seeders, leechers), logger.DEBUG)
                                continue

                            cells = torrent_row.find_all('td')
                            torrent_size = cells[5].text.strip()
                            size = convert_size(torrent_size) or -1

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: {} with {} seeders and {} leechers".format(title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                except Exception:
                    logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders
            items.sort(key=lambda tup: tup[3], reverse=True)
            results += items

        return results

    def seed_ratio(self):
        """Return the configured seed ratio for this provider."""
        return self.ratio
provider = TransmitTheNetProvider()
|
zimmermant/dlvo_lammps | refs/heads/master | examples/dlvo/plot_dlvo.py | 1 | # Plotting DLVO force from pair_dlvo.cpp file with real constants
import numpy as np
import matplotlib.pyplot as plt
# Model parameters (SI units).
x_max=5e-6  # maximum center-to-surface distance plotted [m]
points=10000  # number of sample points along X
espr = 80  # relative permittivity of the solvent (water)
epso =8.85E-12  # vacuum permittivity [F/m]
radius = 3e-6  # particle radius [m]
psi = 7e-2  # surface potential [V]
psisqrd=psi**2
debyeinv = 104022291  # inverse Debye length [1/m]
hamaker =1.3e-20  # Hamaker constant [J]
X=np.linspace(0,x_max,points)
# Surface-to-surface gap; NOTE(review): h <= 0 for X <= radius, so the
# early part of the curve covers non-physical overlap and h == 0 divides
# by zero in the van der Waals term — confirm the intended X range.
h=X-radius
# DLVO force: screened electrostatic repulsion + van der Waals attraction
# (matches the expression in pair_dlvo.cpp, per the header comment).
f_dlvo = espr*epso*radius*psisqrd*debyeinv/(np.exp(-debyeinv*h)+1)*0.5 + radius*hamaker/(12*h**2)
#print f_dlvo
# Corresponding pair potential energy.
evdwl = espr*epso*radius*psisqrd*0.5*np.log(1+np.exp(-debyeinv*h))-radius*hamaker/(12*h)
# Force on a log scale, then the potential on a linear scale.
plt.semilogy(X,f_dlvo)
plt.show()
plt.plot(X,evdwl)
plt.show()
|
petemounce/ansible | refs/heads/devel | lib/ansible/parsing/yaml/loader.py | 74 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Prefer PyYAML's C-accelerated parser (libyaml) when it is installed;
# fall back to the pure-Python implementation otherwise. Both variants
# expose the same AnsibleLoader class.
try:
    from _yaml import CParser, CEmitter
    HAVE_PYYAML_C = True
except ImportError:
    HAVE_PYYAML_C = False

from yaml.resolver import Resolver

from ansible.parsing.yaml.constructor import AnsibleConstructor

if HAVE_PYYAML_C:
    class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
        """YAML loader backed by libyaml's C parser, with Ansible's
        constructor for vault/unicode-aware object construction."""
        def __init__(self, stream, file_name=None, vault_password=None):
            CParser.__init__(self, stream)
            AnsibleConstructor.__init__(self, file_name=file_name, b_vault_password=vault_password)
            Resolver.__init__(self)
else:
    from yaml.composer import Composer
    from yaml.reader import Reader
    from yaml.scanner import Scanner
    from yaml.parser import Parser

    class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
        """Pure-Python YAML loader mirroring yaml.SafeLoader's mixin
        stack, with Ansible's constructor swapped in."""
        def __init__(self, stream, file_name=None, vault_password=None):
            # Each base class is initialized explicitly, in pipeline order.
            Reader.__init__(self, stream)
            Scanner.__init__(self)
            Parser.__init__(self)
            Composer.__init__(self)
            AnsibleConstructor.__init__(self, file_name=file_name, b_vault_password=vault_password)
            Resolver.__init__(self)
|
mgedmin/ansible-modules-core | refs/heads/devel | files/replace.py | 103 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Evan Kaufman <evan@digitalflophouse.com
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import tempfile
DOCUMENTATION = """
---
module: replace
author: "Evan Kaufman (@EvanK)"
extends_documentation_fragment:
- files
- validate
short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression.
description:
- This module will replace all instances of a pattern within a file.
- It is up to the user to maintain idempotence by ensuring that the
same pattern would never match any replacements made.
version_added: "1.6"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The file to modify.
regexp:
required: true
description:
- The regular expression to look for in the contents of the file.
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
Uses multiline mode, which means C(^) and C($) match the beginning
and end respectively of I(each line) of the file.
replace:
required: false
description:
- The string to replace regexp matches. May contain backreferences
that will get expanded with the regexp capture groups if the regexp
matches. If not set, matches are removed entirely.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
"""
EXAMPLES = r"""
- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes
- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644
- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t'
"""
def write_changes(module, contents, dest):
    """Write *contents* to *dest* via a temp file and an atomic rename.

    If the module's 'validate' parameter is set, the command (which must
    contain a %s placeholder for the temp file path) is run first and the
    move only happens when it exits 0; a failed validation aborts the
    module via fail_json.

    Fix: the mkstemp() temp file is now removed before fail_json() when
    validation fails — previously it leaked on disk, since fail_json
    exits the process without cleanup.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    f = os.fdopen(tmpfd, 'wb')
    f.write(contents)
    f.close()

    validate = module.params.get('validate', None)
    valid = not validate
    if validate:
        if "%s" not in validate:
            module.fail_json(msg="validate must contain %%s: %s" % (validate))
        (rc, out, err) = module.run_command(validate % tmpfile)
        valid = rc == 0
        if rc != 0:
            # Clean up the rejected candidate before exiting the module.
            os.unlink(tmpfile)
            module.fail_json(msg='failed to validate: '
                                 'rc:%s error:%s' % (rc, err))
    if valid:
        module.atomic_move(tmpfile, dest)
def check_file_attrs(module, changed, message):
    """Apply the module's file-attribute settings (owner, perms, SELinux
    context) to the destination and fold the outcome into the (message,
    changed) pair that will be reported back to Ansible."""
    file_args = module.load_file_common_arguments(module.params)
    attrs_differed = module.set_file_attributes_if_different(file_args, False)
    if attrs_differed:
        joiner = " and " if changed else ""
        message += joiner + "ownership, perms or SE linux context changed"
        changed = True
    return message, changed
def main():
    """Entry point of the 'replace' module: run the multiline regex
    substitution over the destination file and report/apply the result.
    Exits the process via module.exit_json / fail_json."""
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(required=True, aliases=['name', 'destfile']),
            regexp=dict(required=True),
            replace=dict(default='', type='str'),
            backup=dict(default=False, type='bool'),
            validate=dict(default=None, type='str'),
        ),
        # add_file_common_args also injects params like 'follow', used below.
        add_file_common_args=True,
        supports_check_mode=True
    )

    params = module.params
    dest = os.path.expanduser(params['dest'])

    if os.path.isdir(dest):
        module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)

    if not os.path.exists(dest):
        module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
    else:
        f = open(dest, 'rb')
        contents = f.read()
        f.close()

    # MULTILINE so ^/$ match per line, as documented in DOCUMENTATION.
    mre = re.compile(params['regexp'], re.MULTILINE)
    # result is (new_contents, number_of_substitutions).
    result = re.subn(mre, params['replace'], contents, 0)

    # Only report a change when the text actually differs (a substitution
    # can produce identical output, e.g. replacing a match with itself).
    if result[1] > 0 and contents != result[0]:
        msg = '%s replacements made' % result[1]
        changed = True
    else:
        msg = ''
        changed = False

    if changed and not module.check_mode:
        if params['backup'] and os.path.exists(dest):
            module.backup_local(dest)
        # With follow=yes, write through a symlink to its real target.
        if params['follow'] and os.path.islink(dest):
            dest = os.path.realpath(dest)
        write_changes(module, result[0], dest)

    # File attribute changes (owner/perms/selinux) count as changes too.
    msg, changed = check_file_attrs(module, changed, msg)
    module.exit_json(changed=changed, msg=msg)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
mm1ke/portage | refs/heads/master | pym/portage/dep/dep_check.py | 1 | # Copyright 2010-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
__all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps']
import collections
import logging
import operator
import portage
from portage.dep import Atom, match_from_list, use_reduce
from portage.exception import InvalidDependString, ParseError
from portage.localization import _
from portage.util import writemsg, writemsg_level
from portage.util.SlotObject import SlotObject
from portage.versions import vercmp, _pkg_str
def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
    trees=None, use_mask=None, use_force=None, **kwargs):
    """
    In order to solve bug #141118, recursively expand new-style virtuals so
    as to collapse one or more levels of indirection, generating an expanded
    search space. In dep_zapdeps, new-style virtuals will be assigned
    zero cost regardless of whether or not they are currently installed. Virtual
    blockers are supported but only when the virtual expands to a single
    atom because it wouldn't necessarily make sense to block all the components
    of a compound virtual.  When more than one new-style virtual is matched,
    the matches are sorted from highest to lowest versions and the atom is
    expanded to || ( highest match ... lowest match )."""
    newsplit = []
    mytrees = trees[myroot]
    portdb = mytrees["porttree"].dbapi
    pkg_use_enabled = mytrees.get("pkg_use_enabled")
    # Atoms are stored in the graph as (atom, id(atom)) tuples
    # since each atom is considered to be a unique entity. For
    # example, atoms that appear identical may behave differently
    # in USE matching, depending on their unevaluated form. Also,
    # specially generated virtual atoms may appear identical while
    # having different _orig_atom attributes.
    atom_graph = mytrees.get("atom_graph")
    parent = mytrees.get("parent")
    virt_parent = mytrees.get("virt_parent")
    graph_parent = None
    if parent is not None:
        if virt_parent is not None:
            graph_parent = virt_parent
            parent = virt_parent
        else:
            graph_parent = parent
    repoman = not mysettings.local_config
    if kwargs["use_binaries"]:
        portdb = trees[myroot]["bintree"].dbapi
    pprovideddict = mysettings.pprovideddict
    myuse = kwargs["myuse"]
    for x in mysplit:
        if x == "||":
            newsplit.append(x)
            continue
        elif isinstance(x, list):
            # Nested dependency group: expand it recursively in place.
            newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
                mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
                use_force=use_force, **kwargs))
            continue

        if not isinstance(x, Atom):
            raise ParseError(
                _("invalid token: '%s'") % x)

        if repoman:
            x = x._eval_qa_conditionals(use_mask, use_force)

        mykey = x.cp
        if not mykey.startswith("virtual/"):
            # Non-virtual atoms pass through unchanged.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        if x.blocker:
            # Virtual blockers are no longer expanded here since
            # the un-expanded virtual atom is more useful for
            # maintaining a cache of blocker atoms.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        if repoman or not hasattr(portdb, 'match_pkgs') or \
            pkg_use_enabled is None:
            # Limited-information mode (repoman / plain dbapi): fall back
            # to PROVIDE-based (old-style) virtual expansion.
            if portdb.cp_list(x.cp):
                newsplit.append(x)
            else:
                a = []
                myvartree = mytrees.get("vartree")
                if myvartree is not None:
                    mysettings._populate_treeVirtuals_if_needed(myvartree)
                mychoices = mysettings.getvirtuals().get(mykey, [])
                for y in mychoices:
                    a.append(Atom(x.replace(x.cp, y.cp, 1)))
                if not a:
                    newsplit.append(x)
                elif len(a) == 1:
                    newsplit.append(a[0])
                else:
                    newsplit.append(['||'] + a)
            continue

        pkgs = []
        # Ignore USE deps here, since otherwise we might not
        # get any matches. Choices with correct USE settings
        # will be preferred in dep_zapdeps().
        matches = portdb.match_pkgs(x.without_use)
        # Use descending order to prefer higher versions.
        matches.reverse()
        for pkg in matches:
            # only use new-style matches
            if pkg.cp.startswith("virtual/"):
                pkgs.append(pkg)

        mychoices = []
        if not pkgs and not portdb.cp_list(x.cp):
            myvartree = mytrees.get("vartree")
            if myvartree is not None:
                mysettings._populate_treeVirtuals_if_needed(myvartree)
            mychoices = mysettings.getvirtuals().get(mykey, [])

        if not (pkgs or mychoices):
            # This one couldn't be expanded as a new-style virtual.  Old-style
            # virtuals have already been expanded by dep_virtual, so this one
            # is unavailable and dep_zapdeps will identify it as such.  The
            # atom is not eliminated here since it may still represent a
            # dependency that needs to be satisfied.
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
            continue

        a = []
        for pkg in pkgs:
            # Pin the expansion to this exact virtual version, preserving
            # any USE deps from the original (unevaluated) atom.
            virt_atom = '=' + pkg.cpv
            if x.unevaluated_atom.use:
                virt_atom += str(x.unevaluated_atom.use)
                virt_atom = Atom(virt_atom)
                if parent is None:
                    if myuse is None:
                        virt_atom = virt_atom.evaluate_conditionals(
                            mysettings.get("PORTAGE_USE", "").split())
                    else:
                        virt_atom = virt_atom.evaluate_conditionals(myuse)
                else:
                    virt_atom = virt_atom.evaluate_conditionals(
                        pkg_use_enabled(parent))
            else:
                virt_atom = Atom(virt_atom)

            # Allow the depgraph to map this atom back to the
            # original, in order to avoid distortion in places
            # like display or conflict resolution code.
            virt_atom.__dict__['_orig_atom'] = x

            # According to GLEP 37, RDEPEND is the only dependency
            # type that is valid for new-style virtuals. Repoman
            # should enforce this.
            depstring = pkg._metadata['RDEPEND']
            pkg_kwargs = kwargs.copy()
            pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
            if edebug:
                writemsg_level(_("Virtual Parent: %s\n") \
                    % (pkg,), noiselevel=-1, level=logging.DEBUG)
                writemsg_level(_("Virtual Depstring: %s\n") \
                    % (depstring,), noiselevel=-1, level=logging.DEBUG)

            # Set EAPI used for validation in dep_check() recursion.
            mytrees["virt_parent"] = pkg

            try:
                mycheck = dep_check(depstring, mydbapi, mysettings,
                    myroot=myroot, trees=trees, **pkg_kwargs)
            finally:
                # Restore previous EAPI after recursion.
                if virt_parent is not None:
                    mytrees["virt_parent"] = virt_parent
                else:
                    del mytrees["virt_parent"]

            if not mycheck[0]:
                raise ParseError("%s: %s '%s'" % \
                    (pkg, mycheck[1], depstring))

            # Replace the original atom "x" with "virt_atom" which refers
            # to the specific version of the virtual whose deps we're
            # expanding. The virt_atom._orig_atom attribute is used
            # by depgraph to map virt_atom back to the original atom.
            # We specifically exclude the original atom "x" from the
            # the expanded output here, since otherwise it could trigger
            # incorrect dep_zapdeps behavior (see bug #597752).
            mycheck[1].append(virt_atom)
            a.append(mycheck[1])
            if atom_graph is not None:
                virt_atom_node = (virt_atom, id(virt_atom))
                atom_graph.add(virt_atom_node, graph_parent)
                atom_graph.add(pkg, virt_atom_node)
                atom_graph.add((x, id(x)), graph_parent)

        if not a and mychoices:
            # Check for a virtual package.provided match.
            for y in mychoices:
                new_atom = Atom(x.replace(x.cp, y.cp, 1))
                if match_from_list(new_atom,
                    pprovideddict.get(new_atom.cp, [])):
                    a.append(new_atom)
                    if atom_graph is not None:
                        atom_graph.add((new_atom, id(new_atom)), graph_parent)

        if not a:
            newsplit.append(x)
            if atom_graph is not None:
                atom_graph.add((x, id(x)), graph_parent)
        elif len(a) == 1:
            newsplit.append(a[0])
        else:
            newsplit.append(['||'] + a)

    return newsplit
def dep_eval(deplist):
    """Evaluate a reduced dependency list to 1 (satisfied) or 0.

    Elements are 0/1/2 flags or nested lists; a leading "||" marks an
    OR-group, otherwise the list is an AND-group. An element value of 2
    counts as unsatisfied, like 0.
    """
    if not deplist:
        return 1
    if deplist[0] == "||":
        # OR group: satisfied as soon as any member evaluates true.
        for member in deplist[1:]:
            if isinstance(member, list):
                if dep_eval(member) == 1:
                    return 1
            elif member == 1:
                return 1
        # XXX: unless there's no available atoms in the list
        # in which case we need to assume that everything is
        # okay as some ebuilds are relying on an old bug.
        return 1 if len(deplist) == 1 else 0
    # AND group: any failing member (0 or 2) fails the whole list.
    for member in deplist:
        if isinstance(member, list):
            if dep_eval(member) == 0:
                return 0
        elif member == 0 or member == 2:
            return 0
    return 1
class _dep_choice(SlotObject):
    """Value object describing one alternative of an || dependency group:
    the atoms it pulls in, plus slot/cp bookkeeping and availability flags
    used for preference ordering in dep_zapdeps."""
    __slots__ = ('atoms', 'slot_map', 'cp_map', 'all_available',
        'all_installed_slots')
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
"""
Takes an unreduced and reduced deplist and removes satisfied dependencies.
Returned deplist contains steps that must be taken to satisfy dependencies.
"""
if trees is None:
trees = portage.db
writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
if not reduced or unreduced == ["||"] or dep_eval(reduced):
return []
if unreduced[0] != "||":
unresolved = []
for x, satisfied in zip(unreduced, reduced):
if isinstance(x, list):
unresolved += dep_zapdeps(x, satisfied, myroot,
use_binaries=use_binaries, trees=trees)
elif not satisfied:
unresolved.append(x)
return unresolved
# We're at a ( || atom ... ) type level and need to make a choice
deps = unreduced[1:]
satisfieds = reduced[1:]
# Our preference order is for an the first item that:
# a) contains all unmasked packages with the same key as installed packages
# b) contains all unmasked packages
# c) contains masked installed packages
# d) is the first item
preferred_installed = []
preferred_in_graph = []
preferred_any_slot = []
preferred_non_installed = []
unsat_use_in_graph = []
unsat_use_installed = []
unsat_use_non_installed = []
other_installed = []
other_installed_some = []
other_installed_any_slot = []
other = []
# unsat_use_* must come after preferred_non_installed
# for correct ordering in cases like || ( foo[a] foo[b] ).
choice_bins = (
preferred_in_graph,
preferred_installed,
preferred_any_slot,
preferred_non_installed,
unsat_use_in_graph,
unsat_use_installed,
unsat_use_non_installed,
other_installed,
other_installed_some,
other_installed_any_slot,
other,
)
# Alias the trees we'll be checking availability against
parent = trees[myroot].get("parent")
priority = trees[myroot].get("priority")
graph_db = trees[myroot].get("graph_db")
graph = trees[myroot].get("graph")
pkg_use_enabled = trees[myroot].get("pkg_use_enabled")
want_update_pkg = trees[myroot].get("want_update_pkg")
downgrade_probe = trees[myroot].get("downgrade_probe")
vardb = None
if "vartree" in trees[myroot]:
vardb = trees[myroot]["vartree"].dbapi
if use_binaries:
mydbapi = trees[myroot]["bintree"].dbapi
else:
mydbapi = trees[myroot]["porttree"].dbapi
try:
mydbapi_match_pkgs = mydbapi.match_pkgs
except AttributeError:
def mydbapi_match_pkgs(atom):
return [mydbapi._pkg_str(cpv, atom.repo)
for cpv in mydbapi.match(atom)]
# Sort the deps into installed, not installed but already
# in the graph and other, not installed and not in the graph
# and other, with values of [[required_atom], availablility]
for x, satisfied in zip(deps, satisfieds):
if isinstance(x, list):
atoms = dep_zapdeps(x, satisfied, myroot,
use_binaries=use_binaries, trees=trees)
else:
atoms = [x]
if vardb is None:
# When called by repoman, we can simply return the first choice
# because dep_eval() handles preference selection.
return atoms
all_available = True
all_use_satisfied = True
all_use_unmasked = True
conflict_downgrade = False
slot_atoms = collections.defaultdict(list)
slot_map = {}
cp_map = {}
for atom in atoms:
if atom.blocker:
continue
# Ignore USE dependencies here since we don't want USE
# settings to adversely affect || preference evaluation.
avail_pkg = mydbapi_match_pkgs(atom.without_use)
if avail_pkg:
avail_pkg = avail_pkg[-1] # highest (ascending order)
avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
if not avail_pkg:
all_available = False
all_use_satisfied = False
break
if graph_db is not None and downgrade_probe is not None:
slot_matches = graph_db.match_pkgs(avail_slot)
if (len(slot_matches) > 1 and
avail_pkg < slot_matches[-1] and
not downgrade_probe(avail_pkg)):
# If a downgrade is not desirable, then avoid a
# choice that pulls in a lower version involved
# in a slot conflict (bug #531656).
conflict_downgrade = True
if atom.use:
avail_pkg_use = mydbapi_match_pkgs(atom)
if not avail_pkg_use:
all_use_satisfied = False
if pkg_use_enabled is not None:
# Check which USE flags cause the match to fail,
# so we can prioritize choices that do not
# require changes to use.mask or use.force
# (see bug #515584).
violated_atom = atom.violated_conditionals(
pkg_use_enabled(avail_pkg),
avail_pkg.iuse.is_valid_flag)
# Note that violated_atom.use can be None here,
# since evaluation can collapse conditional USE
# deps that cause the match to fail due to
# missing IUSE (match uses atom.unevaluated_atom
# to detect such missing IUSE).
if violated_atom.use is not None:
for flag in violated_atom.use.enabled:
if flag in avail_pkg.use.mask:
all_use_unmasked = False
break
else:
for flag in violated_atom.use.disabled:
if flag in avail_pkg.use.force and \
flag not in avail_pkg.use.mask:
all_use_unmasked = False
break
else:
# highest (ascending order)
avail_pkg_use = avail_pkg_use[-1]
if avail_pkg_use != avail_pkg:
avail_pkg = avail_pkg_use
avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
slot_map[avail_slot] = avail_pkg
slot_atoms[avail_slot].append(atom)
highest_cpv = cp_map.get(avail_pkg.cp)
all_match_current = None
all_match_previous = None
if (highest_cpv is not None and
highest_cpv.slot == avail_pkg.slot):
# If possible, make the package selection internally
# consistent by choosing a package that satisfies all
# atoms which match a package in the same slot. Later on,
# the package version chosen here is used in the
# has_upgrade/has_downgrade logic to prefer choices with
# upgrades, and a package choice that is not internally
# consistent will lead the has_upgrade/has_downgrade logic
# to produce invalid results (see bug 600346).
all_match_current = all(a.match(avail_pkg)
for a in slot_atoms[avail_slot])
all_match_previous = all(a.match(highest_cpv)
for a in slot_atoms[avail_slot])
if all_match_previous and not all_match_current:
continue
current_higher = (highest_cpv is None or
vercmp(avail_pkg.version, highest_cpv.version) > 0)
if current_higher or (all_match_current and not all_match_previous):
cp_map[avail_pkg.cp] = avail_pkg
this_choice = _dep_choice(atoms=atoms, slot_map=slot_map,
cp_map=cp_map, all_available=all_available,
all_installed_slots=False)
if all_available:
# The "all installed" criterion is not version or slot specific.
# If any version of a package is already in the graph then we
# assume that it is preferred over other possible packages choices.
all_installed = True
for atom in set(Atom(atom.cp) for atom in atoms \
if not atom.blocker):
# New-style virtuals have zero cost to install.
if not vardb.match(atom) and not atom.startswith("virtual/"):
all_installed = False
break
all_installed_slots = False
if all_installed:
all_installed_slots = True
for slot_atom in slot_map:
# New-style virtuals have zero cost to install.
if not vardb.match(slot_atom) and \
not slot_atom.startswith("virtual/"):
all_installed_slots = False
break
this_choice.all_installed_slots = all_installed_slots
if graph_db is None:
if all_use_satisfied:
if all_installed:
if all_installed_slots:
preferred_installed.append(this_choice)
else:
preferred_any_slot.append(this_choice)
else:
preferred_non_installed.append(this_choice)
else:
if not all_use_unmasked:
other.append(this_choice)
elif all_installed_slots:
unsat_use_installed.append(this_choice)
else:
unsat_use_non_installed.append(this_choice)
elif conflict_downgrade:
other.append(this_choice)
else:
all_in_graph = True
for atom in atoms:
# New-style virtuals have zero cost to install.
if atom.blocker or atom.cp.startswith("virtual/"):
continue
# We check if the matched package has actually been
# added to the digraph, in order to distinguish between
# those packages and installed packages that may need
# to be uninstalled in order to resolve blockers.
if not any(pkg in graph for pkg in
graph_db.match_pkgs(atom)):
all_in_graph = False
break
circular_atom = None
if not (parent is None or priority is None) and \
(parent.onlydeps or
(all_in_graph and priority.buildtime and
not (priority.satisfied or priority.optional))):
# Check if the atom would result in a direct circular
# dependency and try to avoid that if it seems likely
# to be unresolvable. This is only relevant for
# buildtime deps that aren't already satisfied by an
# installed package.
cpv_slot_list = [parent]
for atom in atoms:
if atom.blocker:
continue
if vardb.match(atom):
# If the atom is satisfied by an installed
# version then it's not a circular dep.
continue
if atom.cp != parent.cp:
continue
if match_from_list(atom, cpv_slot_list):
circular_atom = atom
break
if circular_atom is not None:
other.append(this_choice)
else:
if all_use_satisfied:
if all_in_graph:
preferred_in_graph.append(this_choice)
elif all_installed:
if all_installed_slots:
preferred_installed.append(this_choice)
elif parent is None or want_update_pkg is None:
preferred_any_slot.append(this_choice)
else:
# When appropriate, prefer a slot that is not
# installed yet for bug #478188.
want_update = True
for slot_atom, avail_pkg in slot_map.items():
if avail_pkg in graph:
continue
# New-style virtuals have zero cost to install.
if slot_atom.startswith("virtual/") or \
vardb.match(slot_atom):
continue
if not want_update_pkg(parent, avail_pkg):
want_update = False
break
if want_update:
preferred_installed.append(this_choice)
else:
preferred_any_slot.append(this_choice)
else:
preferred_non_installed.append(this_choice)
else:
if not all_use_unmasked:
other.append(this_choice)
elif all_in_graph:
unsat_use_in_graph.append(this_choice)
elif all_installed_slots:
unsat_use_installed.append(this_choice)
else:
unsat_use_non_installed.append(this_choice)
else:
all_installed = True
some_installed = False
for atom in atoms:
if not atom.blocker:
if vardb.match(atom):
some_installed = True
else:
all_installed = False
if all_installed:
this_choice.all_installed_slots = True
other_installed.append(this_choice)
elif some_installed:
other_installed_some.append(this_choice)
# Use Atom(atom.cp) for a somewhat "fuzzy" match, since
# the whole atom may be too specific. For example, see
# bug #522652, where using the whole atom leads to an
# unsatisfiable choice.
elif any(vardb.match(Atom(atom.cp)) for atom in atoms
if not atom.blocker):
other_installed_any_slot.append(this_choice)
else:
other.append(this_choice)
# Prefer choices which contain upgrades to higher slots. This helps
# for deps such as || ( foo:1 foo:2 ), where we want to prefer the
# atom which matches the higher version rather than the atom furthest
# to the left. Sorting is done separately for each of choice_bins, so
# as not to interfere with the ordering of the bins. Because of the
# bin separation, the main function of this code is to allow
# --depclean to remove old slots (rather than to pull in new slots).
for choices in choice_bins:
if len(choices) < 2:
continue
# Prefer choices with all_installed_slots for bug #480736.
choices.sort(key=operator.attrgetter('all_installed_slots'),
reverse=True)
for choice_1 in choices[1:]:
cps = set(choice_1.cp_map)
for choice_2 in choices:
if choice_1 is choice_2:
# choice_1 will not be promoted, so move on
break
intersecting_cps = cps.intersection(choice_2.cp_map)
if not intersecting_cps:
continue
has_upgrade = False
has_downgrade = False
for cp in intersecting_cps:
version_1 = choice_1.cp_map[cp]
version_2 = choice_2.cp_map[cp]
difference = vercmp(version_1.version, version_2.version)
if difference != 0:
if difference > 0:
has_upgrade = True
else:
has_downgrade = True
break
if has_upgrade and not has_downgrade:
# promote choice_1 in front of choice_2
choices.remove(choice_1)
index_2 = choices.index(choice_2)
choices.insert(index_2, choice_1)
break
for allow_masked in (False, True):
for choices in choice_bins:
for choice in choices:
if choice.all_available or allow_masked:
return choice.atoms
assert(False) # This point should not be reachable
def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
	use_cache=1, use_binaries=0, myroot=None, trees=None):
	"""
	Takes a depend string, parses it, and selects atoms.
	The myroot parameter is unused (use mysettings['EROOT'] instead).

	@param depstring: a DEPEND-style string, or an already-parsed list
	@param mydbapi: dbapi used by dep_wordreduce to test atom satisfaction
	@param mysettings: config instance supplying EROOT, PORTAGE_USE, masks
	@param use: "yes" to evaluate USE conditionals from PORTAGE_USE/myuse,
		"all" for repoman-style matchall (constrained by use.mask/force),
		anything else to ignore USE settings entirely
	@param myuse: explicit USE flag list overriding PORTAGE_USE
	@returns: [0, error message] on parse failure, or
		[1, selected atom list] on success
	"""
	myroot = mysettings['EROOT']
	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
	#check_config_instance(mysettings)
	if trees is None:
		trees = globals()["db"]
	if use=="yes":
		if myuse is None:
			#default behavior
			myusesplit = mysettings["PORTAGE_USE"].split()
		else:
			myusesplit = myuse
			# We've been given useflags to use.
			#print "USE FLAGS PASSED IN."
			#print myuse
			#if "bindist" in myusesplit:
			#	print "BINDIST is set!"
			#else:
			#	print "BINDIST NOT set."
	else:
		#we are being run by autouse(), don't consult USE vars yet.
		# WE ALSO CANNOT USE SETTINGS
		myusesplit=[]
	mymasks = set()
	useforce = set()
	if use == "all":
		# This is only for repoman, in order to constrain the use_reduce
		# matchall behavior to account for profile use.mask/force. The
		# ARCH/archlist code here may be redundant, since the profile
		# really should be handling ARCH masking/forcing itself.
		arch = mysettings.get("ARCH")
		mymasks.update(mysettings.usemask)
		mymasks.update(mysettings.archlist())
		if arch:
			mymasks.discard(arch)
			useforce.add(arch)
		useforce.update(mysettings.useforce)
		useforce.difference_update(mymasks)
	# eapi code borrowed from _expand_new_virtuals()
	mytrees = trees[myroot]
	parent = mytrees.get("parent")
	virt_parent = mytrees.get("virt_parent")
	current_parent = None
	eapi = None
	if parent is not None:
		# A virtual parent, when present, takes precedence over the real one.
		if virt_parent is not None:
			current_parent = virt_parent
		else:
			current_parent = parent
	if current_parent is not None:
		# Don't pass the eapi argument to use_reduce() for installed packages
		# since previous validation will have already marked them as invalid
		# when necessary and now we're more interested in evaluating
		# dependencies so that things like --depclean work as well as possible
		# in spite of partial invalidity.
		if not current_parent.installed:
			eapi = current_parent.eapi
	if isinstance(depstring, list):
		# Caller already parsed the dep string for us.
		mysplit = depstring
	else:
		try:
			mysplit = use_reduce(depstring, uselist=myusesplit,
				masklist=mymasks, matchall=(use=="all"), excludeall=useforce,
				opconvert=True, token_class=Atom, eapi=eapi)
		except InvalidDependString as e:
			return [0, "%s" % (e,)]
	if mysplit == []:
		#dependencies were reduced to nothing
		return [1,[]]
	# Recursively expand new-style virtuals so as to
	# collapse one or more levels of indirection.
	try:
		mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
			use=use, mode=mode, myuse=myuse,
			use_force=useforce, use_mask=mymasks, use_cache=use_cache,
			use_binaries=use_binaries, myroot=myroot, trees=trees)
	except ParseError as e:
		return [0, "%s" % (e,)]
	# Reduce the dep list to booleans so dep_zapdeps can pick atoms.
	mysplit2 = dep_wordreduce(mysplit,
		mysettings, mydbapi, mode, use_cache=use_cache)
	if mysplit2 is None:
		return [0, _("Invalid token")]
	writemsg("\n\n\n", 1)
	writemsg("mysplit: %s\n" % (mysplit), 1)
	writemsg("mysplit2: %s\n" % (mysplit2), 1)
	selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
		use_binaries=use_binaries, trees=trees)
	return [1, selected_atoms]
def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
	"""Reduces the deplist to ones and zeros.

	Each atom in the (possibly nested) list is replaced by True if it is
	satisfied (via package.provided or a match in mydbapi) and False
	otherwise; "||" markers are kept so callers can still see the choice
	structure. Returns None if an invalid token is encountered.
	"""
	deplist=mydeplist[:]
	for mypos, token in enumerate(deplist):
		if isinstance(deplist[mypos], list):
			#recurse
			deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
		elif deplist[mypos]=="||":
			pass
		elif token[:1] == "!":
			# Blocker atoms are treated as unsatisfied here.
			deplist[mypos] = False
		else:
			mykey = deplist[mypos].cp
			if mysettings and mykey in mysettings.pprovideddict and \
				match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
				deplist[mypos]=True
			elif mydbapi is None:
				# Assume nothing is satisfied. This forces dep_zapdeps to
				# return all of the deps that have been selected
				# (excluding those satisfied by package.provided).
				deplist[mypos] = False
			else:
				if mode:
					x = mydbapi.xmatch(mode, deplist[mypos])
					if mode.startswith("minimum-"):
						# xmatch returns a single best match here; wrap it
						# in a list so the len() check below still works.
						mydep = []
						if x:
							mydep.append(x)
					else:
						mydep = x
				else:
					mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
				if mydep!=None:
					tmp=(len(mydep)>=1)
					if deplist[mypos][0]=="!":
						tmp=False
					deplist[mypos]=tmp
				else:
					#encountered invalid string
					return None
	return deplist
|
kosugi/plurk4ji | refs/heads/master | config.py | 1 | # -*- coding: utf-8 -*-
import os
import yaml
import logging.config
def load(filename):
    """Load a YAML config file, configure logging from it, and return it.

    The optional 'logging' section is passed to logging.config.dictConfig
    (defaulting to a bare version-1 config); the full parsed mapping is
    returned to the caller. `filename` may contain `~` (user expansion).
    """
    with open(os.path.expanduser(filename), 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; the trailing comment suggests the config
        # relies on !!python/tuple for SMTPHandler, so safe_load would break
        # it. Only ever load trusted config files through this function.
        config = yaml.load(f.read()) # SMTPHandler requires tuple
        logging.config.dictConfig(config.get('logging', dict(version=1)))
    return config
|
mi1980/projecthadoop3 | refs/heads/master | udacity/cs101-intro-cs/code/lesson3/problem-set-3/matrix.py | 4 | # By Ashwath from forums
# Given a list of lists representing a n * n matrix as input,
# define a procedure that returns True if the input is an identity matrix
# and False otherwise.
# An IDENTITY matrix is a square matrix in which all the elements
# on the principal/main diagonal are 1 and all the elements outside
# the principal diagonal are 0.
# (A square matrix is a matrix in which the number of rows
# is equal to the number of columns)
def is_identity_matrix(matrix):
    """Return True if `matrix` (a list of lists) is an n*n identity matrix.

    A matrix is an identity matrix when it is square, every element on the
    main diagonal is 1, and every element off the diagonal is 0.

    Fix over the original: the old implementation called e.pop(i), which
    destructively mutated the caller's matrix while checking it. This
    version leaves the input untouched.
    """
    size = len(matrix)
    for i, row in enumerate(matrix):
        # Non-square rows or a wrong diagonal entry disqualify immediately.
        if len(row) != size or row[i] != 1:
            return False
        # Every off-diagonal entry in this row must be exactly 0.
        if any(value != 0 for j, value in enumerate(row) if j != i):
            return False
    return True
# Test Cases:
# Each print is followed by a comment showing the expected output
# (Python 2 print-statement syntax).
matrix1 = [[1,0,0,0],
           [0,1,0,0],
           [0,0,1,0],
           [0,0,0,1]]
print is_identity_matrix(matrix1)
#>>>True
matrix2 = [[1,0,0],
           [0,1,0],
           [0,0,0]]
print is_identity_matrix(matrix2)
#>>>False
matrix3 = [[2,0,0],
           [0,2,0],
           [0,0,2]]
print is_identity_matrix(matrix3)
#>>>False
matrix4 = [[1,0,0,0],
           [0,1,1,0],
           [0,0,0,1]]
print is_identity_matrix(matrix4)
#>>>False
# Not square: a single row of length 9.
matrix5 = [[1,0,0,0,0,0,0,0,0]]
print is_identity_matrix(matrix5)
#>>>False
matrix6 = [[1,0,0,0],
           [0,1,0,2],
           [0,0,1,0],
           [0,0,0,1]]
print is_identity_matrix(matrix6)
#>>>False
matrix7 = [[1, -1, 1],
           [0, 1, 0],
           [0, 0, 1]]
print is_identity_matrix(matrix7)
#>>>False
olivierverdier/sfepy | refs/heads/master | tests/test_input_poisson.py | 1 | input_name = '../examples/diffusion/poisson.py'
output_name = 'test_poisson.vtk'
from testsBasic import TestInput
class Test( TestInput ):
    """Run the generic TestInput suite against the Poisson example input."""
    pass
|
JGrippo/YACS | refs/heads/master | events/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
hyperwd/hwcram | refs/heads/master | rds/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
drayanaindra/inasafe | refs/heads/develop | realtime/test/test_shake_data.py | 2 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Tests Shake Data functionality related to shakemaps.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'tim@linfiniti.com'
__version__ = '0.5.0'
__date__ = '30/07/2012'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import shutil
import unittest
from realtime.shake_data import ShakeData
from realtime.utils import (
shakemap_zip_dir,
purge_working_data,
shakemap_extract_dir)
# Clear away working dirs so we can be sure they are
# actually created
purge_working_data()
class TestShakeMap(unittest.TestCase):
    """Testing for the shakemap class.

    The "cached" tests rely on the fixture zips that setUp copies into the
    cache dir; the "remote" tests fetch real data from the shakemap server.
    """
    #noinspection PyPep8Naming
    def setUp(self):
        """Copy our cached dataset from the fixture dir to the cache dir."""
        output_file = '20120726022003.out.zip'
        input_file = '20120726022003.inp.zip'
        output_path = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '../fixtures',
                output_file))
        input_path = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '../fixtures',
                input_file))
        shutil.copyfile(
            output_path,
            os.path.join(shakemap_zip_dir(),
                         output_file))
        shutil.copyfile(
            input_path,
            os.path.join(shakemap_zip_dir(),
                         input_file))

    #TODO Downloaded data should be removed before each test
    def test_get_shake_map_input(self):
        """Check that we can retrieve a shakemap 'inp' input file."""
        shake_event = '20110413170148'
        shake_data = ShakeData(shake_event)
        shakemap_file = shake_data.fetch_input()
        expected_file = os.path.join(shakemap_zip_dir(),
                                     shake_event + '.inp.zip')
        message = 'Expected path for downloaded shakemap INP not received'
        self.assertEqual(shakemap_file, expected_file, message)

    def test_get_shake_map_output(self):
        """Check that we can retrieve a shakemap 'out' input file."""
        event_id = '20110413170148'
        shake_data = ShakeData(event_id)
        shakemap_file = shake_data.fetch_output()
        expected_file = os.path.join(shakemap_zip_dir(),
                                     event_id + '.out.zip')
        message = 'Expected path for downloaded shakemap OUT not received'
        self.assertEqual(shakemap_file, expected_file, message)

    def test_get_remote_shake_map(self):
        """Check that we can retrieve both input and output from ftp at once.
        """
        shake_event = '20110413170148'
        shake_data = ShakeData(shake_event)
        expected_input_file = os.path.join(
            shakemap_zip_dir(),
            shake_event + '.inp.zip')
        expected_output_file = os.path.join(
            shakemap_zip_dir(),
            shake_event + '.out.zip')
        # Remove any cached copies so a real fetch is exercised.
        if os.path.exists(expected_input_file):
            os.remove(expected_input_file)
        if os.path.exists(expected_output_file):
            os.remove(expected_output_file)
        input_file, output_file = shake_data.fetch_event()
        message = ('Expected path for downloaded shakemap INP not received'
                   '\nExpected: %s\nGot: %s' %
                   (expected_output_file, output_file))
        self.assertEqual(input_file, expected_input_file, message)
        message = ('Expected path for downloaded shakemap OUT not received'
                   '\nExpected: %s\nGot: %s' %
                   (expected_output_file, output_file))
        self.assertEqual(output_file, expected_output_file, message)
        assert os.path.exists(expected_input_file)
        assert os.path.exists(expected_output_file)

    def test_get_cached_shake_map(self):
        """Check that we can retrieve both input and output from ftp at once.
        """
        shake_event = '20120726022003'
        expected_input_file = os.path.join(shakemap_zip_dir(),
                                           shake_event + '.inp.zip')
        expected_output_file = os.path.join(shakemap_zip_dir(),
                                            shake_event + '.out.zip')
        shake_data = ShakeData(shake_event)
        input_file, output_file = shake_data.fetch_event()
        message = ('Expected path for downloaded shakemap INP not received'
                   '\nExpected: %s\nGot: %s' %
                   (expected_output_file, output_file))
        self.assertEqual(input_file, expected_input_file, message)
        message = ('Expected path for downloaded shakemap OUT not received'
                   '\nExpected: %s\nGot: %s' %
                   (expected_output_file, output_file))
        self.assertEqual(output_file, expected_output_file, message)

    def test_get_latest_shake_map(self):
        """Check that we can retrieve the latest shake event."""
        # Simply dont set the event id in the ctor to get the latest
        shake_data = ShakeData()
        input_file, output_file = shake_data.fetch_event()
        event_id = shake_data.event_id
        expected_input_file = os.path.join(shakemap_zip_dir(),
                                           event_id + '.inp.zip')
        expected_output_file = os.path.join(shakemap_zip_dir(),
                                            event_id + '.out.zip')
        message = ('Expected path for downloaded shakemap INP not received'
                   '\nExpected: %s\nGot: %s' %
                   (expected_output_file, output_file))
        self.assertEqual(input_file, expected_input_file, message)
        message = ('Expected path for downloaded shakemap OUT not received'
                   '\nExpected: %s\nGot: %s' %
                   (expected_output_file, output_file))
        self.assertEqual(output_file, expected_output_file, message)

    def test_extract_shake_map(self):
        """Test that we can extract the shakemap inp and out files."""
        shake_event = '20120726022003'
        shake_data = ShakeData(shake_event)
        grid_xml = shake_data.extract(force_flag=True)
        extract_dir = shakemap_extract_dir()
        expected_grid_xml = (os.path.join(extract_dir,
                                          '20120726022003/grid.xml'))
        message = 'Expected: %s\nGot: %s\n' % (expected_grid_xml, grid_xml)
        # Bug fix: the original asserted `expected_grid_xml in
        # expected_grid_xml`, a tautology that could never fail. Compare the
        # expected path against the path actually returned by extract().
        assert expected_grid_xml in grid_xml, message
        assert os.path.exists(grid_xml)

    def test_check_event_is_on_server(self):
        """Test that we can check if an event is on the server."""
        shake_event = '20120726022003'
        shake_data = ShakeData(shake_event)
        self.assertTrue(shake_data.is_on_server(),
                        ('Data for %s is on server' % shake_event))

    #noinspection PyMethodMayBeStatic
    def test_cache_paths(self):
        """Check we compute local cache paths properly."""
        shake_event = '20120726022003'
        shake_data = ShakeData(shake_event)
        expected_input_path = ('/tmp/inasafe/realtime/shakemaps-zipped/'
                               '20120726022003.inp.zip')
        expected_output_path = ('/tmp/inasafe/realtime/shakemaps-zipped/'
                                '20120726022003.out.zip')
        input_path, output_path = shake_data.cache_paths()
        message = 'Expected: %s\nGot: %s' % (expected_input_path, input_path)
        assert input_path == expected_input_path, message
        message = 'Expected: %s\nGot: %s' % (expected_output_path, output_path)
        assert output_path == expected_output_path, message

    #noinspection PyMethodMayBeStatic
    def test_file_names(self):
        """Check we compute file names properly."""
        shake_event = '20120726022003'
        shake_data = ShakeData(shake_event)
        expected_input_file_name = '20120726022003.inp.zip'
        expected_output_file_name = '20120726022003.out.zip'
        input_file_name, output_file_name = shake_data.file_names()
        message = 'Expected: %s\nGot: %s' % (
            expected_input_file_name, input_file_name)
        assert input_file_name == expected_input_file_name, message
        message = 'Expected: %s\nGot: %s' % (
            expected_output_file_name, output_file_name)
        assert output_file_name == expected_output_file_name, message
if __name__ == '__main__':
unittest.main()
|
KinKir/jaikuengine | refs/heads/master | common/views.py | 32 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import logging
import urlparse
from django import http
from django import template
from django.conf import settings
from django.template import loader
from common import api
from common import exception
from common import messages
from common import util
from common import validate
def common_confirm(request):
  """Render a generic "are you sure?" confirmation page.

  Expects 'message' and 'redirect_to' in the request parameters, protected
  by a nonce computed over their concatenation. The redirect target is
  split into action_url / query_dict so the template can rebuild the form.
  On validation failure, message is cleared and the error is handled via
  the project's exception handler; the page still renders.
  """
  message = request.REQUEST['message']
  redirect_to = request.REQUEST['redirect_to']
  try:
    validate.nonce(request, message + redirect_to)
    parts = urlparse.urlparse(redirect_to)
    # parts[2] is the path, parts[4] the query string.
    action_url = parts[2]
    # Flatten single-item value lists so the template sees plain strings.
    query_dict = cgi.parse_qs(parts[4], keep_blank_values=True)
    query_dict = dict([(k, v[0]) for k, v in query_dict.iteritems()])
  except:
    # Bare except is the project-wide pattern; handle_exception logs/flashes.
    message = None
    exception.handle_exception(request)
  # locals() exposes message/action_url/query_dict to the template.
  c = template.RequestContext(request, locals())
  t = loader.get_template('common/templates/confirm.html')
  return http.HttpResponse(t.render(c))
def common_logme(request):
  """Debug endpoint: log the full request, then unconditionally 404."""
  logging.info("REQUEST: %s", request)
  raise http.Http404()
def common_404(request, template_name='404.html'):
  """Render the project-wide 404 page (a 404.html template must exist)."""
  context = template.RequestContext(request, {'request_path': request.path})
  page = loader.get_template(template_name)
  return http.HttpResponseNotFound(page.render(context))
def common_500(request, template_name='500.html'):
  """Render the project-wide 500 page (a 500.html template must exist)."""
  logging.error("An error occurred: %s", str(request))
  # You need to create a 500.html template.
  t = loader.get_template(template_name)
  # NOTE(review): this returns a 200 HttpResponse rather than the
  # commented-out HttpResponseServerError below -- presumably a deliberate
  # workaround, but confirm before relying on the status code.
  # return http.HttpResponseServerError(
  #     t.render(template.RequestContext(request, {})))
  return http.HttpResponse(t.render(template.RequestContext(request, {})))
def common_error(request):
  """Render a generic error page for a flashed error message.

  The 'error' request parameter carries the message, protected by an
  error nonce; if validation fails the message is replaced with a
  generic one so arbitrary text cannot be injected into the page.
  """
  message = request.REQUEST['error']
  try:
    validate.error_nonce(request, message)
  except:
    # Bare except is the project-wide pattern; fall back to a safe message.
    exception.handle_exception(request)
    message = "An error has occurred"
  c = template.RequestContext(request, locals())
  t = loader.get_template('common/templates/error_generic.html')
  return http.HttpResponse(t.render(c))
def common_noslash(request, path=""):
  """Redirect a slash-less URL to its canonical leading-slash form."""
  return http.HttpResponseRedirect("/%s" % path)
def common_photo_upload(request, success="/", nick=None):
  """Handle avatar upload, selection, or deletion for an actor.

  Three mutually exclusive request shapes are handled, each nonce-protected:
  a file upload ('imgfile' in request.FILES), choosing a stock avatar
  ('avatar' in POST), or deleting the current photo ('delete' in the
  request). On success, redirects to `success` with a flash message.
  `nick` defaults to the logged-in user.
  """
  if not nick:
    nick = request.user.nick
  if request.FILES:
    try:
      # we're going to handle a file upload, wee
      validate.nonce(request, 'change_photo')
      img = request.FILES.get('imgfile')
      if not img:
        raise exception.ValidationError('imgfile must be set')
      validate.avatar_photo_size(img)
      img_url = api.avatar_upload(request.user,
                                  nick,
                                  img.read())
      api.avatar_set_actor(request.user, nick, img_url)
      return util.RedirectFlash(success, "Avatar uploaded")
    except:
      # Bare except is the project-wide pattern; errors become flash messages.
      exception.handle_exception(request)
  elif 'avatar' in request.POST:
    try:
      validate.nonce(request, 'change_photo')
      avatar_path = request.POST.get('avatar')
      if not avatar_path:
        raise exception.ValidationError('avatar must be set')
      rv = api.avatar_set_actor(request.user, nick, avatar_path)
      if not rv:
        raise exception.ValidationError('failed to set avatar')
      return util.RedirectFlash(success, "Avatar changed")
    except:
      exception.handle_exception(request)
  if 'delete' in request.REQUEST:
    try:
      validate.nonce(request, 'delete_photo')
      validate.confirm_dangerous(request, 'Delete your photo?')
      rv = api.avatar_clear_actor(request.user, nick)
      return util.RedirectFlash(success, "Avatar deleted")
    except:
      exception.handle_exception(request)
  # NOTE(review): falls through returning None when no branch matched or a
  # handler swallowed the error -- presumably the caller re-renders the
  # page in that case; verify against callers.
def call_api_from_request(request, api_call):
  """Call an API function 'api_call' if it's present in the request parameters.
  The first parameter to the API call is always the logged-in user.
  The rest of the parameters may come in two forms:
    api_call_name=first_param& ... rest of params
  or
    api_call_name=& ... rest of params
  rest_of_params is always turned into Python keyword arguments.
  If the api_call_name has a value, that is turned into Python positional
  params.
  RETURNS:
    (False, None) if it isn't or the call throws an exception,
    (True, return value from call) otherwise.
  """
  # TODO(termie): make this only accept POST once we update javascript
  # to turn the links into POSTs
  for request_dict in (request.POST, request.GET):
    if api_call in request_dict:
      call = getattr(api, api_call)
      try:
        validate.nonce(request, api_call)
        # Dangerous calls require an extra confirmation parameter.
        confirm_msg = messages.confirmation(api_call)
        if not confirm_msg is None:
          validate.confirm_dangerous(
              request, messages.confirmation(api_call))
        kwparams = util.query_dict_to_keywords(request_dict)
        if '' in kwparams:
          del kwparams['']
        # The value of the api_call parameter itself, if any, becomes the
        # single positional argument.
        first_param = kwparams.pop(api_call, '')
        params = list()
        if len(first_param):
          params = (first_param,)
        # NOTE(review): the nonce was already validated above; this second
        # validate.nonce() call looks redundant -- confirm before removing.
        validate.nonce(request, api_call)
        # Strip bookkeeping parameters before forwarding to the API.
        kwparams.pop('_nonce')
        kwparams.pop('confirm', None)
        kwparams.pop('redirect_to', None)
        return (True, call(request.user, *params, **kwparams))
      except:
        # Bare except is the project-wide pattern; errors become flashes.
        exception.handle_exception(request)
  return (False, None)
def handle_view_action(request, actions):
  """Dispatch the first API action present in the request parameters.

  `actions` maps api-call names to post-call redirect targets. If a key
  matches a request parameter the call is performed via
  call_api_from_request and the user is redirected (with a flash message)
  to the corresponding target. Returns None when nothing matched.
  """
  for action, redirect_target in actions.items():
    handled, _unused = call_api_from_request(request, action)
    if handled:
      return util.RedirectFlash(redirect_target, messages.flash(action))
  return None
def common_design_update(request, nick):
  """Update or restore an actor's profile design (background image/color).

  On POST (nonce-protected) uploads an optional background image and sets
  color/repeat; on GET with 'restore' clears the design. Both paths
  redirect back to the actor's design settings page; returns None when
  neither applies (the caller renders the page).
  """
  view = api.actor_get(api.ROOT, nick)
  if request.POST:
    try:
      validate.nonce(request, 'update_design')
      color = request.POST.get('bg_color')
      repeat = request.POST.get('bg_repeat', 'no-repeat')
      if not repeat:
        repeat = ''
      img = request.FILES.get('bg_image')
      img_url = None
      if img:
        img_url = api.background_upload(request.user,
                                        nick,
                                        img.read())
      api.background_set_actor(request.user,
                               nick,
                               img_url,
                               color,
                               repeat)
      return util.RedirectFlash(view.url() + '/settings/design',
                                'design updated')
    except:
      # Bare except is the project-wide pattern; errors become flashes.
      exception.handle_exception(request)
  if request.GET and 'restore' in request.GET:
    api.background_clear_actor(request.user, nick)
    return util.RedirectFlash(view.url() + '/settings/design',
                              'design updated')
  return None
|
MattCCS/PyVault | refs/heads/master | pyvault/old_decorator.py | 1 | def requires_file_loaded(func):
@functools.wraps(func)
def _decorator(self, *args, **kwargs):
if not self.table:
raise errors.PasswordFileNotLoaded()
func(self, *args, **kwargs)
return _decorator
|
michel-slm/0install | refs/heads/master | zeroinstall/0launch-gui/main.py | 6 | # Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os, sys
import logging
import warnings
from optparse import OptionParser
from zeroinstall import _, SafeException
from zeroinstall.injector import requirements
from zeroinstall.injector.driver import Driver
from zeroinstall.injector.config import load_config
from zeroinstall.support import tasks
_recalculate = tasks.Blocker('recalculate')
def recalculate():
	"""Ask the mainloop to recalculate. If we're already recalculating, wait for that to finish
	and then do it again."""
	global _recalculate
	# Wake everything waiting on the current blocker...
	_recalculate.trigger()
	# ...then install a fresh blocker for the next recalculation round.
	_recalculate = tasks.Blocker('recalculate')
def run_gui(args):
	"""Parse command-line options, load the GTK UI and run the 0launch GUI.

	args: argument list (without the program name). The first positional
	argument is the interface URI to solve/download; with no positional
	arguments the preferences dialog is shown instead.
	Exits the process directly for --version, missing GUI (status 100),
	bad usage (status 1) and the preferences-only path (status 0).
	"""
	parser = OptionParser(usage=_("usage: %prog [options] interface"))
	parser.add_option("", "--before", help=_("choose a version before this"), metavar='VERSION')
	parser.add_option("", "--cpu", help=_("target CPU type"), metavar='CPU')
	parser.add_option("", "--command", help=_("command to select"), metavar='COMMAND')
	parser.add_option("-d", "--download-only", help=_("fetch but don't run"), action='store_true')
	parser.add_option("-g", "--force-gui", help=_("display an error if there's no GUI"), action='store_true')
	parser.add_option("", "--message", help=_("message to display when interacting with user"))
	parser.add_option("", "--not-before", help=_("minimum version to choose"), metavar='VERSION')
	parser.add_option("", "--os", help=_("target operation system type"), metavar='OS')
	parser.add_option("-r", "--refresh", help=_("check for updates of all interfaces"), action='store_true')
	parser.add_option("", "--select-only", help=_("only download the feeds"), action='store_true')
	parser.add_option("-s", "--source", help=_("select source code"), action='store_true')
	parser.add_option("", "--systray", help=_("download in the background"), action='store_true')
	parser.add_option("-v", "--verbose", help=_("more verbose output"), action='count')
	parser.add_option("-V", "--version", help=_("display version information"), action='store_true')
	parser.add_option("", "--version-for", help=_("set version constraints for a specific interface"),
			nargs=2, metavar='URI RANGE', action='append')
	parser.add_option("", "--with-store", help=_("add an implementation cache"), action='append', metavar='DIR')
	parser.disable_interspersed_args()
	(options, args) = parser.parse_args(args)
	# -v gives INFO, -vv (or more) gives DEBUG.
	if options.verbose:
		logger = logging.getLogger()
		if options.verbose == 1:
			logger.setLevel(logging.INFO)
		else:
			logger.setLevel(logging.DEBUG)
	if options.version:
		import gui
		print("0launch-gui (zero-install) " + gui.version)
		print("Copyright (C) 2010 Thomas Leonard")
		print(_("This program comes with ABSOLUTELY NO WARRANTY,"
				"\nto the extent permitted by law."
				"\nYou may redistribute copies of this program"
				"\nunder the terms of the GNU Lesser General Public License."
				"\nFor more information about these matters, see the file named COPYING."))
		sys.exit(0)
	def nogui(ex):
		# Warn loudly only when the user explicitly demanded a GUI.
		if options.force_gui:
			fn = logging.warn
		else:
			fn = logging.info
		fn("No GUI available", exc_info = ex)
		sys.exit(100)
	# Import GTK; suppress its warnings unless a GUI was explicitly requested.
	with warnings.catch_warnings():
		if not options.force_gui:
			warnings.filterwarnings("ignore")
		if sys.version_info[0] < 3:
			try:
				import pygtk; pygtk.require('2.0')
			except ImportError as ex:
				nogui(ex)
		import gui
		try:
			if sys.version_info[0] > 2:
				# On Python 3 use the GTK3 compatibility shim.
				from zeroinstall.gtkui import pygtkcompat
				pygtkcompat.enable()
				pygtkcompat.enable_gtk(version = '3.0')
			import gtk
		except (ImportError, ValueError, RuntimeError) as ex:
			nogui(ex)
	if gtk.gdk.get_display() is None:
		try:
			raise SafeException("Failed to connect to display.")
		except SafeException as ex:
			nogui(ex)	# logging needs this as a raised exception
	handler = gui.GUIHandler()
	config = load_config(handler)
	if options.with_store:
		from zeroinstall import zerostore
		for x in options.with_store:
			config.stores.stores.append(zerostore.Store(os.path.abspath(x)))
	# No interface URI given: just show the preferences dialog and exit.
	if len(args) < 1:
		@tasks.async
		def prefs_main():
			import preferences
			box = preferences.show_preferences(config)
			done = tasks.Blocker('close preferences')
			box.connect('destroy', lambda w: done.trigger())
			yield done
		tasks.wait_for_blocker(prefs_main())
		sys.exit(0)
	interface_uri = args[0]
	if len(args) > 1:
		parser.print_help()
		sys.exit(1)
	import mainwindow, dialog
	r = requirements.Requirements(interface_uri)
	r.parse_options(options)
	widgets = dialog.Template('main')
	driver = Driver(config = config, requirements = r)
	root_iface = config.iface_cache.get_interface(interface_uri)
	driver.solver.record_details = True
	window = mainwindow.MainWindow(driver, widgets, download_only = bool(options.download_only), select_only = bool(options.select_only))
	handler.mainwindow = window
	if options.message:
		window.set_message(options.message)
	root = config.iface_cache.get_interface(r.interface_uri)
	window.browser.set_root(root)
	window.window.connect('destroy', lambda w: handler.abort_all_downloads())
	if options.systray:
		window.use_systray_icon()
	# Main solve/refresh loop: re-solves whenever the user clicks Refresh
	# or something triggers the global _recalculate blocker.
	@tasks.async
	def main():
		force_refresh = bool(options.refresh)
		while True:
			window.refresh_button.set_sensitive(False)
			window.browser.set_update_icons(force_refresh)
			solved = driver.solve_with_downloads(force = force_refresh, update_local = True)
			if not window.systray_icon:
				window.show()
			yield solved
			try:
				window.refresh_button.set_sensitive(True)
				window.browser.highlight_problems()
				tasks.check(solved)
			except Exception as ex:
				window.report_exception(ex)
			if window.systray_icon and window.systray_icon.get_visible() and \
			   window.systray_icon.is_embedded():
				if driver.solver.ready:
					window.systray_icon.set_tooltip(_('Downloading updates for %s') % root_iface.get_name())
					window.run_button.set_active(True)
				else:
					# Should already be reporting an error, but
					# blink it again just in case
					window.systray_icon.set_blinking(True)
			refresh_clicked = dialog.ButtonClickedBlocker(window.refresh_button)
			yield refresh_clicked, _recalculate
			if refresh_clicked.happened:
				force_refresh = True
	tasks.wait_for_blocker(main())
|
trashkalmar/omim | refs/heads/master | tools/python/transit/transit_color_palette.py | 3 | import math
def to_rgb(color_str):
    """Parse an 'rrggbb' hex string into an (r, g, b) tuple of ints.

    Input of any other length yields black, (0, 0, 0).
    """
    if len(color_str) != 6:
        return (0, 0, 0)
    return tuple(int(color_str[pos:pos + 2], 16) for pos in (0, 2, 4))
def blend_colors(rgb_array1, rgb_array2, k):
    """Linearly interpolate two RGB triples.

    k=0.0 returns the first color, k=1.0 the second; components are floats.
    """
    return tuple(a * (1.0 - k) + b * k
                 for a, b in zip(rgb_array1, rgb_array2))
def rgb_pivot(n):
    """Map one sRGB component (0..1) to linear light, scaled to [0, 100]."""
    if n > 0.04045:
        linear = ((n + 0.055) / 1.055) ** 2.4
    else:
        linear = n / 12.92
    return linear * 100.0
def to_xyz(rgb_array):
    """Convert an (r, g, b) triple (components 0-255) to CIE XYZ."""
    r, g, b = (rgb_pivot(component / 255.0) for component in rgb_array)
    return (r * 0.4124 + g * 0.3576 + b * 0.1805,
            r * 0.2126 + g * 0.7152 + b * 0.0722,
            r * 0.0193 + g * 0.1192 + b * 0.9505)
#https://en.wikipedia.org/wiki/Lab_color_space#CIELAB
def lab_pivot(n):
    """The f(t) helper of the XYZ -> L*a*b* conversion: cube root with a
    linear segment below the 0.008856 threshold."""
    if n > 0.008856:
        return n ** (1.0 / 3.0)
    return (903.3 * n + 16.0) / 116.0
def to_lab(rgb_array):
    """Convert an (r, g, b) triple (components 0-255) to CIE L*a*b*."""
    xyz = to_xyz(rgb_array)
    fx = lab_pivot(xyz[0] / 95.047)
    fy = lab_pivot(xyz[1] / 100.0)
    fz = lab_pivot(xyz[2] / 108.883)
    # Clamp lightness at zero; a*/b* are signed color-opponent axes.
    lightness = max(0.0, 116.0 * fy - 16.0)
    return (lightness, 500.0 * (fx - fy), 200.0 * (fy - fz))
def lum_distance(ref_color, src_color):
    """Squared RGB distance with 30/59/11 channel weights."""
    weights = (30, 59, 11)
    return sum(w * (a - b) ** 2
               for w, a, b in zip(weights, ref_color, src_color))
def is_bluish(rgb_array):
    """True when the color is strictly closer to pure blue than to pure red
    (by the weighted RGB metric)."""
    to_red = lum_distance((255, 0, 0), rgb_array)
    to_blue = lum_distance((0, 0, 255), rgb_array)
    return to_blue < to_red
#http://en.wikipedia.org/wiki/Color_difference#CIE94
def cie94(ref_color, src_color):
    """CIE94 color difference between two RGB triples.

    ref_color acts as the reference sample: its chroma scales the SC/SH
    weighting terms, so the function is not symmetric in its arguments.
    Uses the graphic-arts constants (kL=1, K1=0.045, K2=0.015).
    """
    lab_ref = to_lab(ref_color)
    lab_src = to_lab(src_color)
    deltaL = lab_ref[0] - lab_src[0]
    deltaA = lab_ref[1] - lab_src[1]
    deltaB = lab_ref[2] - lab_src[2]
    # Chroma is C* = sqrt(a*^2 + b*^2). The previous code built it from
    # L* and a* (indices 0 and 1), which disagrees with the CIE94 formula
    # referenced above; fixed to use a* and b* (indices 1 and 2).
    c1 = math.sqrt(lab_ref[1] * lab_ref[1] + lab_ref[2] * lab_ref[2])
    c2 = math.sqrt(lab_src[1] * lab_src[1] + lab_src[2] * lab_src[2])
    deltaC = c1 - c2
    # Hue difference derived from the identity dH^2 = dA^2 + dB^2 - dC^2,
    # clamped so rounding error cannot make the radicand negative.
    deltaH = deltaA * deltaA + deltaB * deltaB - deltaC * deltaC
    if deltaH < 0.0:
        deltaH = 0.0
    else:
        deltaH = math.sqrt(deltaH)
    Kl = 1.0
    K1 = 0.045
    K2 = 0.015
    sc = 1.0 + K1 * c1
    sh = 1.0 + K2 * c1
    deltaLKlsl = deltaL / Kl
    deltaCkcsc = deltaC / sc
    deltaHkhsh = deltaH / sh
    i = deltaLKlsl * deltaLKlsl + deltaCkcsc * deltaCkcsc + deltaHkhsh * deltaHkhsh
    if i < 0:
        return 0.0
    return math.sqrt(i)
class Palette:
    """Maps arbitrary transit-line colors to the nearest palette color."""

    def __init__(self, colors):
        """colors: parsed palette file, shaped {'colors': {name: {'clear': 'rrggbb'}}}."""
        self.colors = {}
        # dict.items() works on both Python 2 and 3; the previous
        # iteritems() call raised AttributeError under Python 3.
        for name, color_info in colors['colors'].items():
            self.colors[name] = to_rgb(color_info['clear'])

    def get_default_color(self):
        """Name of the fallback palette entry."""
        return 'default'

    def get_nearest_color(self, color_str, casing_color_str, excluded_names):
        """Returns the nearest color from the palette.

        color_str / casing_color_str are 'rrggbb' hex strings; when a casing
        color is present the two are blended 50/50 before matching.
        """
        nearest_color_name = self.get_default_color()
        color = to_rgb(color_str)
        if (casing_color_str is not None and len(casing_color_str) != 0):
            color = blend_colors(color, to_rgb(casing_color_str), 0.5)
        min_diff = None
        bluish = is_bluish(color)
        for name, palette_color in self.colors.items():
            # Uncomment if you want to exclude duplicates.
            #if name in excluded_names:
            #    continue
            # Bluish colors use the weighted RGB metric; everything else
            # uses the perceptual CIE94 metric.
            if bluish:
                diff = lum_distance(palette_color, color)
            else:
                diff = cie94(palette_color, color)
            if min_diff is None or diff < min_diff:
                min_diff = diff
                nearest_color_name = name
        return nearest_color_name
|
molobrakos/home-assistant | refs/heads/master | homeassistant/components/zha/core/gateway.py | 2 | """
Virtual gateway for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import asyncio
import collections
import itertools
import logging
import os
import traceback
from homeassistant.components.system_log import LogEntry, _figure_out_source
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_component import EntityComponent
from ..api import async_get_device_info
from .channels import MAINS_POWERED, ZDOChannel
from .const import (
ADD_DEVICE_RELAY_LOGGERS, ATTR_MANUFACTURER, BELLOWS, CONF_BAUDRATE,
CONF_DATABASE, CONF_RADIO_TYPE, CONF_USB_PATH, CONTROLLER, CURRENT,
DATA_ZHA, DATA_ZHA_BRIDGE_ID, DATA_ZHA_CORE_COMPONENT, DATA_ZHA_GATEWAY,
DEBUG_LEVELS, DEFAULT_BAUDRATE, DEFAULT_DATABASE_NAME, DEVICE_FULL_INIT,
DEVICE_INFO, DEVICE_JOINED, DEVICE_REMOVED, DOMAIN, IEEE, LOG_ENTRY,
LOG_OUTPUT, MODEL, NWK, ORIGINAL, RADIO, RADIO_DESCRIPTION, RAW_INIT,
SIGNAL_REMOVE, SIGNATURE, TYPE, ZHA, ZHA_GW_MSG, ZIGPY, ZIGPY_DECONZ,
ZIGPY_XBEE)
from .device import DeviceStatus, ZHADevice
from .discovery import (
async_create_device_entity, async_dispatch_discovery_info,
async_process_endpoint)
from .patches import apply_application_controller_patch
from .registries import RADIO_TYPES
from .store import async_get_registry
_LOGGER = logging.getLogger(__name__)
# Bookkeeping record tying a hass entity reference id to its ZHA device,
# its cluster channels and the device-info payload used by the frontend.
EntityReference = collections.namedtuple(
    'EntityReference', 'reference_id zha_device cluster_channels device_info')
class ZHAGateway:
    """Gateway that handles events that happen on the ZHA Zigbee network."""
    def __init__(self, hass, config):
        """Initialize the gateway."""
        self._hass = hass
        self._config = config
        self._component = EntityComponent(_LOGGER, DOMAIN, hass)
        self._devices = {}  # ieee -> ZHADevice
        self._device_registry = collections.defaultdict(list)  # ieee -> [EntityReference]
        self.zha_storage = None
        self.application_controller = None
        self.radio_description = None
        hass.data[DATA_ZHA][DATA_ZHA_CORE_COMPONENT] = self._component
        hass.data[DATA_ZHA][DATA_ZHA_GATEWAY] = self
        # Snapshot logger levels twice so debug mode can restore ORIGINAL later.
        self._log_levels = {
            ORIGINAL: async_capture_log_levels(),
            CURRENT: async_capture_log_levels()
        }
        self.debug_enabled = False
        self._log_relay_handler = LogRelayHandler(hass, self)
    async def async_initialize(self, config_entry):
        """Initialize controller and connect radio."""
        self.zha_storage = await async_get_registry(self._hass)
        usb_path = config_entry.data.get(CONF_USB_PATH)
        baudrate = self._config.get(CONF_BAUDRATE, DEFAULT_BAUDRATE)
        radio_type = config_entry.data.get(CONF_RADIO_TYPE)
        radio_details = RADIO_TYPES[radio_type][RADIO]()
        radio = radio_details[RADIO]
        self.radio_description = RADIO_TYPES[radio_type][RADIO_DESCRIPTION]
        await radio.connect(usb_path, baudrate)
        if CONF_DATABASE in self._config:
            database = self._config[CONF_DATABASE]
        else:
            database = os.path.join(
                self._hass.config.config_dir, DEFAULT_DATABASE_NAME)
        self.application_controller = radio_details[CONTROLLER](
            radio, database)
        apply_application_controller_patch(self)
        # Register self so the device_* callbacks below receive zigpy events.
        self.application_controller.add_listener(self)
        await self.application_controller.startup(auto_form=True)
        self._hass.data[DATA_ZHA][DATA_ZHA_BRIDGE_ID] = str(
            self.application_controller.ieee)
        init_tasks = []
        # Restore devices already known to zigpy in parallel (is_new_join=False).
        for device in self.application_controller.devices.values():
            init_tasks.append(self.async_device_initialized(device, False))
        await asyncio.gather(*init_tasks)
    def device_joined(self, device):
        """Handle device joined.
        At this point, no information about the device is known other than its
        address
        """
        async_dispatcher_send(
            self._hass,
            ZHA_GW_MSG,
            {
                TYPE: DEVICE_JOINED,
                NWK: device.nwk,
                IEEE: str(device.ieee)
            }
        )
    def raw_device_initialized(self, device):
        """Handle a device initialization without quirks loaded."""
        endpoint_ids = device.endpoints.keys()
        # Endpoint 0 is skipped; read identity from the first other endpoint.
        ept_id = next((ept_id for ept_id in endpoint_ids if ept_id != 0), None)
        manufacturer = 'Unknown'
        model = 'Unknown'
        if ept_id is not None:
            manufacturer = device.endpoints[ept_id].manufacturer
            model = device.endpoints[ept_id].model
        async_dispatcher_send(
            self._hass,
            ZHA_GW_MSG,
            {
                TYPE: RAW_INIT,
                NWK: device.nwk,
                IEEE: str(device.ieee),
                MODEL: model,
                ATTR_MANUFACTURER: manufacturer,
                SIGNATURE: device.get_signature()
            }
        )
    def device_initialized(self, device):
        """Handle device joined and basic information discovered."""
        self._hass.async_create_task(
            self.async_device_initialized(device, True))
    def device_left(self, device):
        """Handle device leaving the network."""
        pass
    def device_removed(self, device):
        """Handle device being removed from the network."""
        zha_device = self._devices.pop(device.ieee, None)
        self._device_registry.pop(device.ieee, None)
        if zha_device is not None:
            device_info = async_get_device_info(self._hass, zha_device)
            self._hass.async_create_task(zha_device.async_unsub_dispatcher())
            # Per-device removal signal, then a gateway-level notification.
            async_dispatcher_send(
                self._hass,
                "{}_{}".format(SIGNAL_REMOVE, str(zha_device.ieee))
            )
            if device_info is not None:
                async_dispatcher_send(
                    self._hass,
                    ZHA_GW_MSG,
                    {
                        TYPE: DEVICE_REMOVED,
                        DEVICE_INFO: device_info
                    }
                )
    def get_device(self, ieee):
        """Return ZHADevice for given ieee."""
        return self._devices.get(ieee)
    def get_entity_reference(self, entity_id):
        """Return entity reference for given entity_id if found."""
        # Linear scan over all registered references; returns None if absent.
        for entity_reference in itertools.chain.from_iterable(
                self.device_registry.values()):
            if entity_id == entity_reference.reference_id:
                return entity_reference
    @property
    def devices(self):
        """Return devices."""
        return self._devices
    @property
    def device_registry(self):
        """Return entities by ieee."""
        return self._device_registry
    def register_entity_reference(
            self, ieee, reference_id, zha_device, cluster_channels,
            device_info):
        """Record the creation of a hass entity associated with ieee."""
        self._device_registry[ieee].append(
            EntityReference(
                reference_id=reference_id,
                zha_device=zha_device,
                cluster_channels=cluster_channels,
                device_info=device_info
            )
        )
    @callback
    def async_enable_debug_mode(self):
        """Enable debug mode for ZHA."""
        self._log_levels[ORIGINAL] = async_capture_log_levels()
        async_set_logger_levels(DEBUG_LEVELS)
        self._log_levels[CURRENT] = async_capture_log_levels()
        for logger_name in ADD_DEVICE_RELAY_LOGGERS:
            logging.getLogger(logger_name).addHandler(self._log_relay_handler)
        self.debug_enabled = True
    @callback
    def async_disable_debug_mode(self):
        """Disable debug mode for ZHA."""
        async_set_logger_levels(self._log_levels[ORIGINAL])
        self._log_levels[CURRENT] = async_capture_log_levels()
        for logger_name in ADD_DEVICE_RELAY_LOGGERS:
            logging.getLogger(logger_name).removeHandler(
                self._log_relay_handler)
        self.debug_enabled = False
    @callback
    def _async_get_or_create_device(self, zigpy_device, is_new_join):
        """Get or create a ZHA device."""
        zha_device = self._devices.get(zigpy_device.ieee)
        if zha_device is None:
            zha_device = ZHADevice(self._hass, zigpy_device, self)
            self._devices[zigpy_device.ieee] = zha_device
        if not is_new_join:
            # Restored device: recover last-seen and power source from storage.
            entry = self.zha_storage.async_get_or_create(zha_device)
            zha_device.async_update_last_seen(entry.last_seen)
            zha_device.set_power_source(entry.power_source)
        return zha_device
    @callback
    def async_device_became_available(
            self, sender, is_reply, profile, cluster, src_ep, dst_ep, tsn,
            command_id, args):
        """Handle tasks when a device becomes available."""
        self.async_update_device(sender)
    @callback
    def async_update_device(self, sender):
        """Update device that has just become available."""
        if sender.ieee in self.devices:
            device = self.devices[sender.ieee]
            # avoid a race condition during new joins
            if device.status is DeviceStatus.INITIALIZED:
                device.update_available(True)
    async def async_update_device_storage(self):
        """Update the devices in the store."""
        for device in self.devices.values():
            self.zha_storage.async_update(device)
        await self.zha_storage.async_save()
    async def async_device_initialized(self, device, is_new_join):
        """Handle device joined and basic information discovered (async)."""
        zha_device = self._async_get_or_create_device(device, is_new_join)
        discovery_infos = []
        for endpoint_id, endpoint in device.endpoints.items():
            async_process_endpoint(
                self._hass, self._config, endpoint_id, endpoint,
                discovery_infos, device, zha_device, is_new_join
            )
            if endpoint_id != 0:
                for cluster in endpoint.in_clusters.values():
                    cluster.bind_only = False
                for cluster in endpoint.out_clusters.values():
                    cluster.bind_only = True
        if is_new_join:
            # configure the device
            await zha_device.async_configure()
            zha_device.update_available(True)
        elif zha_device.power_source is not None\
                and zha_device.power_source == MAINS_POWERED:
            # the device isn't a battery powered device so we should be able
            # to update it now
            _LOGGER.debug(
                "attempting to request fresh state for %s %s",
                zha_device.name,
                "with power source: {}".format(
                    ZDOChannel.POWER_SOURCES.get(zha_device.power_source)
                )
            )
            await zha_device.async_initialize(from_cache=False)
        else:
            await zha_device.async_initialize(from_cache=True)
        for discovery_info in discovery_infos:
            async_dispatch_discovery_info(
                self._hass,
                is_new_join,
                discovery_info
            )
        device_entity = async_create_device_entity(zha_device)
        await self._component.async_add_entities([device_entity])
        if is_new_join:
            device_info = async_get_device_info(self._hass, zha_device)
            async_dispatcher_send(
                self._hass,
                ZHA_GW_MSG,
                {
                    TYPE: DEVICE_FULL_INIT,
                    DEVICE_INFO: device_info
                }
            )
    async def shutdown(self):
        """Stop ZHA Controller Application."""
        _LOGGER.debug("Shutting down ZHA ControllerApplication")
        await self.application_controller.shutdown()
@callback
def async_capture_log_levels():
    """Capture current logger levels for ZHA."""
    monitored = (BELLOWS, ZHA, ZIGPY, ZIGPY_XBEE, ZIGPY_DECONZ)
    return {
        name: logging.getLogger(name).getEffectiveLevel()
        for name in monitored
    }
@callback
def async_set_logger_levels(levels):
    """Set logger levels for ZHA from a mapping of logger name -> level."""
    for name in (BELLOWS, ZHA, ZIGPY, ZIGPY_XBEE, ZIGPY_DECONZ):
        logging.getLogger(name).setLevel(levels[name])
class LogRelayHandler(logging.Handler):
    """Log handler that relays warning-and-above records to the frontend."""
    def __init__(self, hass, gateway):
        """Initialize a new LogErrorHandler."""
        super().__init__()
        self.hass = hass
        self.gateway = gateway
    def emit(self, record):
        """Relay log message via dispatcher."""
        # Only warnings and errors are forwarded.  WARNING == WARN.
        if record.levelno < logging.WARNING:
            return
        if record.exc_info:
            frames = []
        else:
            frames = [f for f, _, _, _ in traceback.extract_stack()]
        entry = LogEntry(record, frames,
                         _figure_out_source(record, frames, self.hass))
        async_dispatcher_send(
            self.hass,
            ZHA_GW_MSG,
            {
                TYPE: LOG_OUTPUT,
                LOG_ENTRY: entry.to_dict()
            }
        )
|
frankk00/django-avatar | refs/heads/master | avatar/util.py | 43 | from django.conf import settings
from django.core.cache import cache
from django.contrib.auth.models import User
from avatar.settings import (AVATAR_DEFAULT_URL, AVATAR_CACHE_TIMEOUT,
AUTO_GENERATE_AVATAR_SIZES, AVATAR_DEFAULT_SIZE)
# Names of functions decorated with cache_result; invalidate_cache uses this
# to rebuild every cache key that may exist for a user.
cached_funcs = set()
def get_cache_key(user_or_username, size, prefix):
    """
    Return a cache key consisting of a username and image size.
    """
    if isinstance(user_or_username, User):
        username = user_or_username.username
    else:
        username = user_or_username
    return '%s_%s_%s' % (prefix, username, size)
def cache_result(func):
    """
    Decorator to cache the result of functions that take a ``user`` and a
    ``size`` value.

    Results are stored for AVATAR_CACHE_TIMEOUT seconds under a key derived
    from the function name, the user and the size.
    """
    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def cached_func(user, size):
        prefix = func.__name__
        cached_funcs.add(prefix)
        key = get_cache_key(user, size, prefix=prefix)
        result = cache.get(key)
        if result is None:
            # The old ``cache.get(key) or ...`` form recomputed (and never
            # effectively cached) falsy results such as '' — fixed by testing
            # for None explicitly.  None results themselves are still
            # recomputed each call.
            result = func(user, size)
            cache.set(key, result, AVATAR_CACHE_TIMEOUT)
        return result
    return cached_func
def invalidate_cache(user, size=None):
    """
    Function to be called when saving or changing an user's avatars.

    Drops every cached result produced by the cache_result-decorated
    functions, for all auto-generated sizes plus the optional extra ``size``.
    """
    sizes = set(AUTO_GENERATE_AVATAR_SIZES)
    if size is not None:
        sizes.add(size)
    for prefix in cached_funcs:
        for s in sizes:
            cache.delete(get_cache_key(user, s, prefix))
def get_default_avatar_url():
    """Return the URL of the site-wide default avatar image.

    Absolute AVATAR_DEFAULT_URL values are returned untouched; relative ones
    are joined to STATIC_URL (or MEDIA_URL) with exactly one slash between.
    """
    base_url = getattr(settings, 'STATIC_URL', None)
    if not base_url:
        base_url = getattr(settings, 'MEDIA_URL', '')
    # Don't use base_url if the default avatar url is already absolute.
    if AVATAR_DEFAULT_URL.startswith(('http://', 'https://')):
        return AVATAR_DEFAULT_URL
    ends = base_url.endswith('/')
    begins = AVATAR_DEFAULT_URL.startswith('/')
    if ends and begins:
        return '%s%s' % (base_url[:-1], AVATAR_DEFAULT_URL)
    if not ends and not begins:
        return '%s/%s' % (base_url, AVATAR_DEFAULT_URL)
    return '%s%s' % (base_url, AVATAR_DEFAULT_URL)
def get_primary_avatar(user, size=AVATAR_DEFAULT_SIZE):
    """Return the user's primary (or newest) avatar, or None.

    ``user`` may be a User instance or a username string.  Ensures a
    thumbnail of the requested size exists before returning.
    """
    if not isinstance(user, User):
        try:
            user = User.objects.get(username=user)
        except User.DoesNotExist:
            return None
    # "-primary" sorts any primary=True avatar first; within each group the
    # newest upload wins — exactly the fallback behavior we want.
    queryset = user.avatar_set.order_by("-primary", "-date_uploaded")
    try:
        avatar = queryset[0]
    except IndexError:
        avatar = None
    if avatar:
        if not avatar.thumbnail_exists(size):
            avatar.create_thumbnail(size)
    return avatar
|
susansalkeld/discsongs | refs/heads/master | discsongs/lib/python2.7/site-packages/pip/_vendor/re-vendor.py | 1240 | import os
import sys
import pip
import glob
import shutil
here = os.path.abspath(os.path.dirname(__file__))
def usage():
    """Print a usage summary and abort with exit status 1."""
    message = "Usage: re-vendor.py [clean|vendor]"
    print(message)
    sys.exit(1)
def clean():
    """Delete everything previously vendored into `here`."""
    for entry in os.listdir(here):
        path = os.path.join(here, entry)
        if os.path.isdir(path):
            shutil.rmtree(path)
    # six is a single file, not a package
    os.unlink(os.path.join(here, 'six.py'))
def vendor():
    """Install vendor.txt requirements into `here`, then drop egg-info dirs."""
    pip.main(['install', '-t', here, '-r', 'vendor.txt'])
    for egg_info in glob.glob('*.egg-info'):
        shutil.rmtree(egg_info)
if __name__ == '__main__':
    # Exactly one sub-command is expected: "clean" or "vendor".
    if len(sys.argv) != 2:
        usage()
    command = sys.argv[1]
    if command == 'clean':
        clean()
    elif command == 'vendor':
        vendor()
    else:
        usage()
|
diogolundberg/dojo-secex | refs/heads/master | app/models/__init__.py | 12133432 | |
gangadhar-kadam/verve_erp | refs/heads/v5.0 | erpnext/stock/report/batch_wise_balance_history/__init__.py | 12133432 | |
yawnosnorous/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/test/crashers/loosing_mro_ref.py | 61 | """
There is a way to put keys of any type in a type's dictionary.
I think this allows various kinds of crashes, but so far I have only
found a convoluted attack of _PyType_Lookup(), which uses the mro of the
type without holding a strong reference to it. Probably works with
super.__getattribute__() too, which uses the same kind of code.
"""
class MyKey(object):
    # Key whose __eq__ mutates X's bases as a side effect of a dict lookup,
    # invalidating the MRO that _PyType_Lookup is iterating.  Deliberately
    # pathological — do not "fix" this file; it is a crash reproducer.
    def __hash__(self):
        # Collide with the attribute lookups performed on X's type dict.
        return hash('mykey')
    def __eq__(self, other):
        # the following line decrefs the previous X.__mro__
        X.__bases__ = (Base2,)
        # trash all tuples of length 3, to make sure that the items of
        # the previous X.__mro__ are really garbage
        z = []
        for i in range(1000):
            z.append((i, None, None))
        return 0
class Base(object):
    # Original base of X; MyKey.__eq__ swaps it out for Base2 mid-lookup.
    mykey = 'from Base'
class Base2(object):
    # Replacement base installed while X.mykey is being resolved.
    mykey = 'from Base2'
# you can't add a non-string key to X.__dict__, but it can be
# there from the beginning :-)
X = type('X', (Base,), {MyKey(): 5})
# Looking up 'mykey' probes X's type dict, which triggers MyKey.__eq__ and
# replaces X's bases while the old MRO is still being walked.
print(X.mykey)
# I get a segfault, or a slightly wrong assertion error in a debug build.
|
mitar/django-dirtyfields | refs/heads/master | src/dirtyfields/dirtyfields.py | 1 | # Adapted from http://stackoverflow.com/questions/110803/dirty-fields-in-django
from django.db.models.signals import post_save
class DirtyFieldsMixin(object):
    """Model mixin tracking which local (non-relational) fields changed
    since the instance was loaded or last saved."""
    def __init__(self, *args, **kwargs):
        super(DirtyFieldsMixin, self).__init__(*args, **kwargs)
        # Re-snapshot after every save so the instance is clean again.
        post_save.connect(reset_state, sender=self.__class__,
            dispatch_uid='%s-DirtyFieldsMixin-sweeper' % self.__class__.__name__)
        reset_state(sender=self.__class__, instance=self)
    def _as_dict(self):
        # Snapshot of local field values; relational fields are skipped.
        return dict([(f.name, getattr(self, f.name)) for f in self._meta.fields if not f.rel])
    def get_dirty_fields(self):
        """Return {field_name: original_value} for fields changed since the snapshot."""
        new_state = self._as_dict()
        # dict.items() works on both Python 2 and 3; the previous
        # iteritems() call raised AttributeError under Python 3.
        return dict([(key, value) for key, value in self._original_state.items() if value != new_state[key]])
    def is_dirty(self):
        # in order to be dirty we need to have been saved at least once, so we
        # check for a primary key and we need our dirty fields to not be empty
        if not self.pk:
            return True
        return {} != self.get_dirty_fields()
def reset_state(sender, instance, **kwargs):
    # Re-capture the baseline snapshot; called on __init__ and by post_save.
    instance._original_state = instance._as_dict()
|
artmusic0/theano-learning.part02 | refs/heads/master | fixed_official_convolutional_v3(pickle_out_best_W&B)/code/SdA.py | 30 | """
This tutorial introduces stacked denoising auto-encoders (SdA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained such
that to minimize the reconstruction error (the error between x and z).
For the denosing autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from dA import dA
# start-snippet-1
class SdA(object):
"""Stacked denoising auto-encoder class (SdA)
A stacked denoising autoencoder model is obtained by stacking several
dAs. The hidden layer of the dA at layer `i` becomes the input of
the dA at layer `i+1`. The first layer dA gets as input the input of
the SdA, and the hidden layer of the last dA represents the output.
Note that after pretraining, the SdA is dealt with as a normal MLP,
the dAs are only used to initialize the weights.
"""
    def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        n_ins=784,
        hidden_layers_sizes=[500, 500],
        n_outs=10,
        corruption_levels=[0.1, 0.1]
    ):
        """ This class is made to support a variable number of layers.

        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random number generator used to draw initial
                    weights

        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`

        :type n_ins: int
        :param n_ins: dimension of the input to the sdA

        :type n_layers_sizes: list of ints
        :param n_layers_sizes: intermediate layers size, must contain
                               at least one value

        :type n_outs: int
        :param n_outs: dimension of the output of the network

        :type corruption_levels: list of float
        :param corruption_levels: amount of corruption to use for each
                                  layer
        """
        # NOTE(review): the list defaults above are shared across calls but
        # are only read, never mutated, so they are safe here.
        self.sigmoid_layers = []
        self.dA_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # allocate symbolic variables for the data
        self.x = T.matrix('x')  # the data is presented as rasterized images
        self.y = T.ivector('y')  # the labels are presented as 1D vector of
                                 # [int] labels
        # The SdA is an MLP, for which all weights of intermediate layers
        # are shared with a different denoising autoencoders
        # We will first construct the SdA as a deep multilayer perceptron,
        # and when constructing each sigmoidal layer we also construct a
        # denoising autoencoder that shares weights with that layer
        # During pretraining we will train these autoencoders (which will
        # lead to chainging the weights of the MLP as well)
        # During finetunining we will finish training the SdA by doing
        # stochastich gradient descent on the MLP
        for i in xrange(self.n_layers):
            # construct the sigmoidal layer
            # the size of the input is either the number of hidden units of
            # the layer below or the input size if we are on the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]
            # the input to this layer is either the activation of the hidden
            # layer below or the input of the SdA if you are on the first
            # layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].output
            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=T.nnet.sigmoid)
            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)
            # its arguably a philosophical question...
            # but we are going to only declare that the parameters of the
            # sigmoid_layers are parameters of the StackedDAA
            # the visible biases in the dA are parameters of those
            # dA, but not the SdA
            self.params.extend(sigmoid_layer.params)
            # Construct a denoising autoencoder that shared weights with this
            # layer (W and the hidden bias are the same shared variables).
            dA_layer = dA(numpy_rng=numpy_rng,
                          theano_rng=theano_rng,
                          input=layer_input,
                          n_visible=input_size,
                          n_hidden=hidden_layers_sizes[i],
                          W=sigmoid_layer.W,
                          bhid=sigmoid_layer.b)
            self.dA_layers.append(dA_layer)
        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
            input=self.sigmoid_layers[-1].output,
            n_in=hidden_layers_sizes[-1],
            n_out=n_outs
        )
        self.params.extend(self.logLayer.params)
        # construct a function that implements one step of finetunining

        # compute the cost for second phase of training,
        # defined as the negative log likelihood
        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
        # compute the gradients with respect to the model parameters
        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
        self.errors = self.logLayer.errors(self.y)
    def pretraining_functions(self, train_set_x, batch_size):
        ''' Generates a list of functions, each of them implementing one
        step in trainnig the dA corresponding to the layer with same index.
        The function will require as input the minibatch index, and to train
        a dA you just need to iterate, calling the corresponding function on
        all minibatch indexes.

        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the dA

        :type batch_size: int
        :param batch_size: size of a [mini]batch

        :type learning_rate: float
        :param learning_rate: learning rate used during training for any of
                              the dA layers
        '''
        # index to a [mini]batch
        index = T.lscalar('index')  # index to a minibatch
        corruption_level = T.scalar('corruption')  # % of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        # begining of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size
        pretrain_fns = []
        # One compiled training function per stacked dA layer.
        for dA in self.dA_layers:
            # get the cost and the updates list
            cost, updates = dA.get_cost_updates(corruption_level,
                                                learning_rate)
            # compile the theano function
            # NOTE(review): theano.Param was renamed theano.In in later
            # Theano releases — confirm against the pinned Theano version.
            fn = theano.function(
                inputs=[
                    index,
                    theano.Param(corruption_level, default=0.2),
                    theano.Param(learning_rate, default=0.1)
                ],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin: batch_end]
                }
            )
            # append `fn` to the list of functions
            pretrain_fns.append(fn)
        return pretrain_fns
def build_finetune_functions(self, datasets, batch_size, learning_rate):
    '''Generates a function `train` that implements one step of
    finetuning, a function `validate` that computes the error on
    a batch from the validation set, and a function `test` that
    computes the error on a batch from the testing set

    :type datasets: list of pairs of theano.tensor.TensorType
    :param datasets: It is a list that contain all the datasets;
                     it has to contain three pairs, `train`,
                     `valid`, `test` in this order, where each pair
                     is formed of two Theano variables, one for the
                     datapoints, the other for the labels

    :type batch_size: int
    :param batch_size: size of a minibatch

    :type learning_rate: float
    :param learning_rate: learning rate used during finetune stage
    '''
    (train_set_x, train_set_y) = datasets[0]
    (valid_set_x, valid_set_y) = datasets[1]
    (test_set_x, test_set_y) = datasets[2]

    # compute number of minibatches for training, validation and testing
    # NOTE: Python 2 integer division — any trailing partial batch is
    # dropped.
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_valid_batches /= batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
    n_test_batches /= batch_size

    index = T.lscalar('index')  # index to a [mini]batch

    # compute the gradients with respect to the model parameters
    gparams = T.grad(self.finetune_cost, self.params)

    # compute list of fine-tuning updates (plain SGD step)
    updates = [
        (param, param - gparam * learning_rate)
        for param, gparam in zip(self.params, gparams)
    ]

    # One SGD step on a training minibatch; returns the finetune cost.
    train_fn = theano.function(
        inputs=[index],
        outputs=self.finetune_cost,
        updates=updates,
        givens={
            self.x: train_set_x[
                index * batch_size: (index + 1) * batch_size
            ],
            self.y: train_set_y[
                index * batch_size: (index + 1) * batch_size
            ]
        },
        name='train'
    )

    # Error rate on one test minibatch (no parameter updates).
    test_score_i = theano.function(
        [index],
        self.errors,
        givens={
            self.x: test_set_x[
                index * batch_size: (index + 1) * batch_size
            ],
            self.y: test_set_y[
                index * batch_size: (index + 1) * batch_size
            ]
        },
        name='test'
    )

    # Error rate on one validation minibatch (no parameter updates).
    valid_score_i = theano.function(
        [index],
        self.errors,
        givens={
            self.x: valid_set_x[
                index * batch_size: (index + 1) * batch_size
            ],
            self.y: valid_set_y[
                index * batch_size: (index + 1) * batch_size
            ]
        },
        name='valid'
    )

    # Create a function that scans the entire validation set
    def valid_score():
        return [valid_score_i(i) for i in xrange(n_valid_batches)]

    # Create a function that scans the entire test set
    def test_score():
        return [test_score_i(i) for i in xrange(n_test_batches)]

    return train_fn, valid_score, test_score
def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
             pretrain_lr=0.001, training_epochs=1000,
             dataset='mnist.pkl.gz', batch_size=1):
    """
    Demonstrates how to train and test a stochastic denoising autoencoder.

    This is demonstrated on MNIST.

    :type finetune_lr: float
    :param finetune_lr: learning rate used in the finetune stage
    (factor for the stochastic gradient)

    :type pretraining_epochs: int
    :param pretraining_epochs: number of epoch to do pretraining

    :type pretrain_lr: float
    :param pretrain_lr: learning rate to be used during pre-training

    :type training_epochs: int
    :param training_epochs: maximal number of iterations to run the optimizer

    :type dataset: string
    :param dataset: path to the pickled dataset
    """

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    # (Python 2 integer division; partial final batch is dropped)
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size

    # numpy random generator
    # start-snippet-3
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'
    # construct the stacked denoising autoencoder class
    sda = SdA(
        numpy_rng=numpy_rng,
        n_ins=28 * 28,
        hidden_layers_sizes=[1000, 1000, 1000],
        n_outs=10
    )
    # end-snippet-3 start-snippet-4
    #########################
    # PRETRAINING THE MODEL #
    #########################
    print '... getting the pretraining functions'
    pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size)

    print '... pre-training the model'
    start_time = timeit.default_timer()
    ## Pre-train layer-wise
    # one corruption level per hidden layer (must match sda.n_layers)
    corruption_levels = [.1, .2, .3]
    for i in xrange(sda.n_layers):
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                         corruption=corruption_levels[i],
                         lr=pretrain_lr))
            print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
            print numpy.mean(c)

    end_time = timeit.default_timer()

    print >> sys.stderr, ('The pretraining code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    # end-snippet-4
    ########################
    # FINETUNING THE MODEL #
    ########################

    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model = sda.build_finetune_functions(
        datasets=datasets,
        batch_size=batch_size,
        learning_rate=finetune_lr
    )

    print '... finetunning the model'
    # early-stopping parameters
    patience = 10 * n_train_batches  # look as this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is
                            # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatche before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = timeit.default_timer()

    done_looping = False
    epoch = 0

    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_fn(minibatch_index)
            # global iteration counter across epochs
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = validate_model()
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if (
                        this_validation_loss < best_validation_loss *
                        improvement_threshold
                    ):
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = test_model()
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print(
        (
            'Optimization complete with best validation score of %f %%, '
            'on iteration %i, '
            'with test performance %f %%'
        )
        % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
    )
    print >> sys.stderr, ('The training code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
# Run the MNIST demonstration with default hyper-parameters when the
# module is executed as a script.
if __name__ == '__main__':
    test_SdA()
|
Pablo126/SSBW | refs/heads/master | Entrega1/lib/python3.5/site-packages/bson/raw_bson.py | 17 | # Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for representing raw BSON documents.
"""
import collections
from bson import _UNPACK_INT, _iterate_elements
from bson.py3compat import iteritems
from bson.codec_options import (
CodecOptions, DEFAULT_CODEC_OPTIONS, _RAW_BSON_DOCUMENT_MARKER)
from bson.errors import InvalidBSON
class RawBSONDocument(collections.Mapping):
    """Representation for a MongoDB document that provides access to the raw
    BSON bytes that compose it.

    Only when a field is accessed or modified within the document does
    RawBSONDocument decode its bytes.  Decoding happens at most once; the
    inflated dict is cached in ``__inflated_doc``.
    """

    # __slots__ avoids a per-instance __dict__; names are mangled to
    # _RawBSONDocument__raw etc. on the instance.
    __slots__ = ('__raw', '__inflated_doc', '__codec_options')
    # Marker consumed elsewhere in bson to recognize raw-document classes.
    _type_marker = _RAW_BSON_DOCUMENT_MARKER

    def __init__(self, bson_bytes, codec_options=DEFAULT_CODEC_OPTIONS):
        """Create a new :class:`RawBSONDocument`.

        :Parameters:
          - `bson_bytes`: the BSON bytes that compose this document
          - `codec_options` (optional): An instance of
            :class:`~bson.codec_options.CodecOptions`.
        """
        self.__raw = bson_bytes
        self.__inflated_doc = None
        # Always decode documents to their lazy representations.
        # The caller's codec options are copied but document_class is
        # forced to RawBSONDocument so nested documents stay lazy too.
        co = codec_options
        self.__codec_options = CodecOptions(
            tz_aware=co.tz_aware,
            document_class=RawBSONDocument,
            uuid_representation=co.uuid_representation,
            unicode_decode_error_handler=co.unicode_decode_error_handler,
            tzinfo=co.tzinfo)

    @property
    def raw(self):
        """The raw BSON bytes composing this document."""
        return self.__raw

    def items(self):
        """Lazily decode and iterate elements in this document."""
        return iteritems(self.__inflated)

    @property
    def __inflated(self):
        # Decode-on-first-access cache for the document's elements.
        if self.__inflated_doc is None:
            # We already validated the object's size when this document was
            # created, so no need to do that again. We still need to check the
            # size of all the elements and compare to the document size.
            object_size = _UNPACK_INT(self.__raw[:4])[0] - 1
            position = 0
            self.__inflated_doc = {}
            for key, value, position in _iterate_elements(
                    self.__raw, 4, object_size, self.__codec_options):
                self.__inflated_doc[key] = value
            # `position` is left at the end of the last element; it must
            # land exactly on the document boundary.
            if position != object_size:
                raise InvalidBSON('bad object or element length')
        return self.__inflated_doc

    def __getitem__(self, item):
        return self.__inflated[item]

    def __iter__(self):
        return iter(self.__inflated)

    def __len__(self):
        return len(self.__inflated)

    def __eq__(self, other):
        # Byte-level equality only; a RawBSONDocument never compares equal
        # to a plain dict even with identical contents.
        if isinstance(other, RawBSONDocument):
            return self.__raw == other.raw
        return NotImplemented

    def __repr__(self):
        return ("RawBSONDocument(%r, codec_options=%r)"
                % (self.raw, self.__codec_options))
|
TechRunner2/i3-gaps-rice | refs/heads/master | .config/Scripts/shortcuts.py | 1 | import csv
# Generate per-application shortcut bindings (qutebrowser, ranger, bash)
# from two tab-separated config files: `folders` (key -> directory) and
# `configs` (key -> editable config file).  Each generated file is the
# corresponding ".base" template plus the expanded shortcut lines.
qute = ""
rang = ""
bash = ""
# Start each output from its hand-maintained base template.
with open(".config/qutebrowser/keys.conf.base") as qb:
    qute += qb.read()
with open(".config/ranger/rc.conf.base") as rg:
    rang += rg.read()
with open(".config/Scripts/bashrc") as bsh:
    bash += bsh.read()
# First we open the list of folder shortcuts and go down each line adding
# each in the required syntax to each of the three configs:
with open(".config/Scripts/folders") as fold:
    for line in csv.reader(fold, dialect="excel-tab"):
        # line[0] is the shortcut key, line[1] the target directory.
        # Adds the qutebrowser downloads commands:
        qute += "set storage download-directory "+line[1]+" ;; hint links download\n\t;"+line[0]+"\n"
        # Adds the ranger go, tab, move and yank commands:
        rang += ("map g"+line[0]+" cd "+line[1]+"\n")
        rang += ("map t"+line[0]+" tab_new "+line[1]+"\n")
        rang += ("map m"+line[0]+" shell mv %s "+line[1]+"\n")
        rang += ("map Y"+line[0]+" shell cp -r "+line[1]+"\n") if False else ("map Y"+line[0]+" shell cp -r %s "+line[1]+"\n")
        # Adds the bash shortcuts:
        bash += ("alias "+line[0]+"=\"cd "+line[1]+" && ls -a\"\n")
# Goes through the config file list and adds "edit in vim" shortcuts to
# both bash and ranger.
with open(".config/Scripts/configs") as conf:
    for line in csv.reader(conf, dialect="excel-tab"):
        bash += ("alias "+line[0]+"=\"vim "+line[1]+"\"\n")
        rang += ("map "+line[0]+" shell vim "+line[1]+"\n")
# Write the fully expanded configuration files into place.
with open(".config/ranger/rc.conf", "w") as outrang:
    outrang.write(rang)
with open(".config/qutebrowser/keys.conf", "w") as outqb:
    outqb.write(qute)
with open(".bashrc", "w") as outbash:
    outbash.write(bash)
|
blackmiaool/rt-thread | refs/heads/master | bsp/simulator/rtconfig.py | 23 | import os
# toolchains options
ARCH = 'sim'

# CROSS_TOOL='msvc' or 'gcc' or 'mingw'
# 'msvc' and 'mingw' are both for windows
# 'gcc' is for linux
CROSS_TOOL = 'msvc'
# Environment variable RTT_CC overrides the hard-coded toolchain choice.
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')

# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path
if CROSS_TOOL == 'gcc' or CROSS_TOOL == 'clang-analyze':
    CPU = 'posix'
    PLATFORM = 'gcc'
    EXEC_PATH = ''
elif CROSS_TOOL == 'mingw':
    CPU = 'win32'
    PLATFORM = 'mingw'
    EXEC_PATH = r'D:\Program Files\CodeBlocks\MinGW\bin'
elif CROSS_TOOL == 'msvc':
    CPU = 'win32'
    PLATFORM = 'cl'
    EXEC_PATH = ''
else:
    print "bad CROSS TOOL!"
    exit(1)

# RTT_EXEC_PATH overrides the per-toolchain default location.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

BUILD = 'debug'
#BUILD = ''

if PLATFORM == 'gcc':
    # toolchains
    PREFIX = ''
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'axf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    DEVICE = ' -ffunction-sections -fdata-sections'
    # NOTE(review): the section flags above are immediately overridden and
    # thus disabled — presumably deliberate for the simulator build.
    DEVICE = ' '
    CFLAGS = DEVICE + ' -I/usr/include -w -D_REENTRANT'
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
    #LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-linux.map -lpthread'
    LFLAGS = DEVICE + ' -Wl,-Map=rtthread-linux.map -pthread -T gcc.ld'

    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -g -O0 -gdwarf-2'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'

    POST_ACTION = ''
elif PLATFORM == 'mingw':
    # toolchains
    PREFIX = ''
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'exe'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    DEVICE = ' -ffunction-sections -fdata-sections'
    # Same deliberate override as the gcc branch above.
    DEVICE = ' '
    CFLAGS = DEVICE
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
    DEFFILE_LFLAGS = DEVICE + ' -Wl,-Map=rtthread-win32.map,--output-def,rtthread.def -T mingw.ld '
    LFLAGS = DEVICE + ' -Wl,-Map=rtthread-win32.map -T mingw.ld '

    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -g -O0 -gdwarf-2'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'

    POST_ACTION = ''
elif PLATFORM == 'cl':
    # toolchains (MSVC uses cl.exe for compile, assemble and link driving)
    PREFIX = ''
    TARGET_EXT = 'exe'
    AS = PREFIX + 'cl'
    CC = PREFIX + 'cl'
    AR = PREFIX + 'cl'
    LINK = PREFIX + 'cl'

    AFLAGS = ''
    CFLAGS = ''
    LFLAGS = ''

    if BUILD == 'debug':
        CFLAGS += ' /MTd'
        LFLAGS += ' /DEBUG'
    else:
        CFLAGS += ' /MT'
        LFLAGS += ''

    CFLAGS += ' /ZI /Od /W 3 /WL '
    LFLAGS += ' /SUBSYSTEM:CONSOLE /MACHINE:X86 '

    CPATH = ''
    LPATH = ''

    POST_ACTION = ''
|
juranki/robotframework-selenium2library | refs/heads/master | test/run_tests.py | 42 | #!/usr/bin/env python
import env
import os
import sys
from subprocess import Popen, call
from tempfile import TemporaryFile
from run_unit_tests import run_unit_tests
# Command-line template for the initial pybot/jybot run.  %(...)s
# placeholders are filled from ARG_VALUES before execution; spaces inside
# values are escaped as "SP" (see --escape).
ROBOT_ARGS = [
    '--doc', 'SeleniumSPacceptanceSPtestsSPwithSP%(browser)s',
    '--outputdir', '%(outdir)s',
    '--variable', 'browser:%(browser)s',
    '--escape', 'space:SP',
    '--report', 'none',
    '--log', 'none',
    #'--suite', 'Acceptance.Keywords.Textfields',
    '--loglevel', 'DEBUG',
    '--pythonpath', '%(pythonpath)s',
]
# Template for the rebot post-processing pass that produces the final
# report from output.xml.
REBOT_ARGS = [
    '--outputdir', '%(outdir)s',
    '--name', '%(browser)sSPAcceptanceSPTests',
    '--escape', 'space:SP',
    '--critical', 'regression',
    '--noncritical', 'inprogress',
]
# Shared substitution values; 'browser' is added at runtime.
ARG_VALUES = {'outdir': env.RESULTS_DIR, 'pythonpath': env.SRC_DIR}
def acceptance_tests(interpreter, browser, args):
    """Run the acceptance suite against `browser` using `interpreter`.

    Starts the helper HTTP server, executes the Robot tests with the
    interpreter-specific runner (pybot/jybot/ipybot), stops the server and
    returns the rebot return code (number of failed critical tests).
    """
    ARG_VALUES['browser'] = browser.replace('*', '')
    start_http_server()
    # Map interpreter name to its Robot Framework runner script.
    runner = {'python': 'pybot', 'jython': 'jybot', 'ipy': 'ipybot'}[interpreter]
    # On Windows the runners are .bat wrappers.
    if os.sep == '\\':
        runner += '.bat'
    execute_tests(runner, args)
    stop_http_server()
    return process_output()
def start_http_server():
    """Launch the test HTTP server in the background.

    Server output is redirected to an anonymous temp file so it does not
    interleave with the test run's console output.  The process is not
    tracked here; `stop_http_server` shuts it down via the same script.
    """
    server_output = TemporaryFile()
    Popen(['python', env.HTTP_SERVER_FILE, 'start'],
          stdout=server_output, stderr=server_output)
def execute_tests(runner, args):
    """Invoke `runner` on the acceptance test directory.

    Extra `args` are appended after the templated ROBOT_ARGS; the Robot
    syslog is directed into the results directory via ROBOT_SYSLOG_FILE.
    """
    if not os.path.exists(env.RESULTS_DIR):
        os.mkdir(env.RESULTS_DIR)
    command = [runner] + [arg % ARG_VALUES for arg in ROBOT_ARGS] + args + [env.ACCEPTANCE_TEST_DIR]
    print ''
    print 'Starting test execution with command:\n' + ' '.join(command)
    syslog = os.path.join(env.RESULTS_DIR, 'syslog.txt')
    # shell=True only on Windows, where the runner is a .bat file.
    call(command, shell=os.sep=='\\', env=dict(os.environ, ROBOT_SYSLOG_FILE=syslog))
def stop_http_server():
    """Ask the helper HTTP server script to shut the server down."""
    call(['python', env.HTTP_SERVER_FILE, 'stop'])
def process_output():
    """Post-process the Robot run and return the failure count.

    On Robot >= 2.7 the output is first passed through statuschecker.py,
    then rebot generates the final report.  rebot's return code equals the
    number of failed critical tests (0 means success).
    """
    print
    if _has_robot_27():
        call(['python', os.path.join(env.RESOURCES_DIR, 'statuschecker.py'),
             os.path.join(env.RESULTS_DIR, 'output.xml')])
    rebot = 'rebot' if os.sep == '/' else 'rebot.bat'
    rebot_cmd = [rebot] + [ arg % ARG_VALUES for arg in REBOT_ARGS ] + \
                [os.path.join(ARG_VALUES['outdir'], 'output.xml') ]
    rc = call(rebot_cmd, env=os.environ)
    if rc == 0:
        print 'All critical tests passed'
    else:
        print '%d critical test%s failed' % (rc, 's' if rc != 1 else '')
    return rc
def _has_robot_27():
try:
from robot.result import ExecutionResult
except:
return False
return True
def _exit(rc):
sys.exit(rc)
def _help():
    """Print usage instructions and return the exit code 255."""
    print 'Usage:  python run_tests.py python|jython browser [options]'
    print
    print 'See README.txt for details.'
    return 255
def _run_unit_tests():
    """Run the unit test suite and return its failure count (0 == pass)."""
    print 'Running unit tests'
    failures = run_unit_tests()
    if failures != 0:
        print '\n%d unit tests failed - not running acceptance tests!' % failures
    else:
        print 'All unit tests passed'
    return failures
# Entry point: run unit tests first; only if they pass run the acceptance
# suite for the requested interpreter/browser.  "unit" as the browser name
# skips the acceptance run entirely.
if __name__ == '__main__':
    if not len(sys.argv) > 2:
        _exit(_help())
    unit_failures = _run_unit_tests()
    if unit_failures:
        _exit(unit_failures)
    interpreter = sys.argv[1]
    browser = sys.argv[2].lower()
    args = sys.argv[3:]
    if browser != 'unit':
        _exit(acceptance_tests(interpreter, browser, args))
|
yangzilong1986/python | refs/heads/master | JiYouMCC/0024/todoList/todoList/views.py | 26 | from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, RequestContext
from todoList.list.models import List, Status
from todoList.list.views import get_first_status, get_last_status, create_new_list, next_step_list,pre_step_list
import datetime
def process(request):
    """Render the per-user todo board and handle state transitions.

    Unauthenticated users are redirected to login.  POST actions:
      - 'detail': create a new list item with the submitted text
      - 'next'/'pre': move the referenced item to the next/previous status
    The page shows one column per Status, each with that user's items.
    """
    # NOTE: is_authenticated is called as a method — this targets an older
    # Django version where it was callable.
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    status_result = Status.objects.all()
    if request.method == "POST":
        if 'detail' in request.POST:
            create_new_list(request.user, request.POST.get('detail'))
        if 'next' in request.POST:
            next_step_list(request.POST.get('next'))
        if 'pre' in request.POST:
            pre_step_list(request.POST.get('pre'))
    # One queryset per status column, in the same order as status_result.
    list_result = []
    for item in status_result:
        list_result.append(List.objects.filter(user=request.user, status=item))
    return render_to_response("process.html",
                              {'user_name': request.user,
                               'status': status_result,
                               'lists': list_result,
                               'laststatus': get_last_status(),
                               'firststatus': get_first_status() }, context_instance=RequestContext(request))
def register(request):
    """Handle user self-registration with Django's UserCreationForm.

    GET renders an empty form; a valid POST creates the user and redirects
    to the login flow, otherwise the form is re-rendered with errors.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect("/redirect_to_login")
    else:
        form = UserCreationForm()
    return render_to_response("registration/register.html",
                              {'form': form, },
                              context_instance=RequestContext(request),)
|
openstack/tacker | refs/heads/master | tacker/db/nfvo/ns_db.py | 2 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from datetime import datetime
from oslo_db.exception import DBDuplicateEntry
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy import schema
from tacker.common import exceptions
from tacker.db.common_services import common_services_db_plugin
from tacker.db import db_base
from tacker.db import model_base
from tacker.db import models_v1
from tacker.db import types
from tacker.extensions import nfvo
from tacker.extensions.nfvo_plugins import network_service
from tacker.plugins.common import constants
LOG = logging.getLogger(__name__)

# Status groupings used when querying/locking NS rows.
_ACTIVE_UPDATE = (constants.ACTIVE, constants.PENDING_UPDATE)
# States from which a delete may be initiated (subject to force checks).
_ACTIVE_UPDATE_ERROR_DEAD = (
    constants.PENDING_CREATE, constants.ACTIVE, constants.PENDING_UPDATE,
    constants.PENDING_DELETE, constants.ERROR, constants.DEAD)
CREATE_STATES = (constants.PENDING_CREATE, constants.DEAD)
###########################################################################
# db tables
class NSD(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
          models_v1.Audit):
    """Represents NSD (network service descriptor) used to create an NS."""

    __tablename__ = 'nsd'
    # Descriptive name; unique per tenant (see constraint below).
    name = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.Text)
    # JSON mapping of the VNFDs referenced by this descriptor.
    vnfds = sa.Column(types.Json, nullable=True)

    # Nsd template source - 'onboarded' (default) or 'inline'
    template_source = sa.Column(sa.String(255), server_default='onboarded')

    # (key, value) pairs needed to spin up an NS from this descriptor.
    attributes = orm.relationship('NSDAttribute',
                                  backref='nsd')

    __table_args__ = (
        schema.UniqueConstraint(
            "tenant_id",
            "name",
            name="uniq_nsd0tenant_id0name"),
    )
class NSDAttribute(model_base.BASE, models_v1.HasId):
    """Represents attributes necessary for creation of ns in (key, value) pair
    """

    __tablename__ = 'nsd_attribute'
    nsd_id = sa.Column(types.Uuid, sa.ForeignKey('nsd.id'),
                       nullable=False)
    key = sa.Column(sa.String(255), nullable=False)
    # Values can be large (templates etc.), hence TEXT rather than VARCHAR.
    value = sa.Column(sa.TEXT(65535), nullable=True)
class NS(model_base.BASE, models_v1.HasId, models_v1.HasTenant,
         models_v1.Audit):
    """Represents network services that deploys services.
    """

    __tablename__ = 'ns'
    nsd_id = sa.Column(types.Uuid, sa.ForeignKey('nsd.id'))
    nsd = orm.relationship('NSD')

    name = sa.Column(sa.String(255), nullable=False)
    description = sa.Column(sa.Text, nullable=True)

    # Stringified dict of VNF details that network service launches
    vnf_ids = sa.Column(sa.TEXT(65535), nullable=True)

    # Stringified dict of VNFFG ids
    vnffg_ids = sa.Column(sa.TEXT(65535), nullable=True)

    # Stringified dict of mgmt ip addresses that network service launches
    mgmt_ip_addresses = sa.Column(sa.TEXT(65535), nullable=True)

    # Lifecycle state (PENDING_CREATE/ACTIVE/ERROR/...).
    status = sa.Column(sa.String(64), nullable=False)
    vim_id = sa.Column(types.Uuid, sa.ForeignKey('vims.id'), nullable=False)
    error_reason = sa.Column(sa.Text, nullable=True)

    __table_args__ = (
        schema.UniqueConstraint(
            "tenant_id",
            "name",
            name="uniq_ns0tenant_id0name"),
    )
class NSPluginDb(network_service.NSPluginBase, db_base.CommonDbMixin):
    """Database access layer for NSD and NS resources.

    Persists descriptors (NSD) and network service instances (NS), drives
    their status transitions, and records audit events through the common
    services event plugin.  Reference implementation; create_ns /
    delete_ns_pre are meant to be overridden by subclasses.
    """

    def __init__(self):
        super(NSPluginDb, self).__init__()
        # Event recorder used to audit every state change.
        self._cos_db_plg = common_services_db_plugin.CommonServicesPluginDb()

    def _get_resource(self, context, model, id):
        """Fetch a row by id, translating NoResultFound to a typed error."""
        try:
            return self._get_by_id(context, model, id)
        except orm_exc.NoResultFound:
            if issubclass(model, NSD):
                raise network_service.NSDNotFound(nsd_id=id)
            if issubclass(model, NS):
                raise network_service.NSNotFound(ns_id=id)
            else:
                raise

    def _get_ns_db(self, context, ns_id, current_statuses):
        """Fetch an NS row (with a row lock) only if in an allowed status."""
        try:
            ns_db = (
                self._model_query(context, NS).
                filter(NS.id == ns_id).
                filter(NS.status.in_(current_statuses)).
                with_lockmode('update').one())
        except orm_exc.NoResultFound:
            raise network_service.NSNotFound(ns_id=ns_id)
        return ns_db

    def _update_ns_db(self, ns_db, new_status):
        """Set the NS row's status in place and return the row."""
        ns_db.update({'status': new_status})
        return ns_db

    def _make_attributes_dict(self, attributes_db):
        """Flatten NSDAttribute rows into a plain {key: value} dict."""
        return dict((attr.key, attr.value) for attr in attributes_db)

    def _make_nsd_dict(self, nsd, fields=None):
        """Serialize an NSD row to an API dict, optionally filtered."""
        res = {
            'attributes': self._make_attributes_dict(nsd['attributes']),
        }
        key_list = ('id', 'tenant_id', 'name', 'description',
                    'created_at', 'updated_at', 'vnfds', 'template_source')
        res.update((key, nsd[key]) for key in key_list)
        return self._fields(res, fields)

    def _make_dev_attrs_dict(self, dev_attrs_db):
        """Flatten device attribute rows into a plain {key: value} dict."""
        return dict((arg.key, arg.value) for arg in dev_attrs_db)

    def _make_ns_dict(self, ns_db, fields=None):
        """Serialize an NS row to an API dict, optionally filtered."""
        LOG.debug('ns_db %s', ns_db)
        res = {}
        key_list = ('id', 'tenant_id', 'nsd_id', 'name', 'description',
                    'vnf_ids', 'vnffg_ids', 'status', 'mgmt_ip_addresses',
                    'error_reason', 'vim_id', 'created_at', 'updated_at')
        res.update((key, ns_db[key]) for key in key_list)
        return self._fields(res, fields)

    def create_nsd(self, context, nsd):
        """Persist a new NSD plus its attribute rows; emit ONBOARDED event.

        `nsd` is {'nsd': <descriptor fields>, 'vnfds': <vnfd mapping>}.
        Raises DuplicateEntity on a (tenant_id, name) collision.
        """
        vnfds = nsd['vnfds']
        nsd = nsd['nsd']
        LOG.debug('nsd %s', nsd)
        tenant_id = self._get_tenant_id_for_create(context, nsd)
        template_source = nsd.get('template_source')

        try:
            with context.session.begin(subtransactions=True):
                nsd_id = uuidutils.generate_uuid()
                nsd_db = NSD(
                    id=nsd_id,
                    tenant_id=tenant_id,
                    name=nsd.get('name'),
                    vnfds=vnfds,
                    description=nsd.get('description'),
                    deleted_at=datetime.min,
                    template_source=template_source)
                context.session.add(nsd_db)
                for (key, value) in nsd.get('attributes', {}).items():
                    attribute_db = NSDAttribute(
                        id=uuidutils.generate_uuid(),
                        nsd_id=nsd_id,
                        key=key,
                        value=value)
                    context.session.add(attribute_db)
        except DBDuplicateEntry as e:
            raise exceptions.DuplicateEntity(
                _type="nsd",
                entry=e.columns)
        LOG.debug('nsd_db %(nsd_db)s %(attributes)s ',
                  {'nsd_db': nsd_db,
                   'attributes': nsd_db.attributes})
        nsd_dict = self._make_nsd_dict(nsd_db)
        LOG.debug('nsd_dict %s', nsd_dict)
        self._cos_db_plg.create_event(
            context, res_id=nsd_dict['id'],
            res_type=constants.RES_TYPE_NSD,
            res_state=constants.RES_EVT_ONBOARDED,
            evt_type=constants.RES_EVT_CREATE,
            tstamp=nsd_dict[constants.RES_EVT_CREATED_FLD])
        return nsd_dict

    def delete_nsd(self, context, nsd_id, soft_delete=True):
        """Delete an NSD (soft by default); refuses if any NS still uses it."""
        with context.session.begin(subtransactions=True):
            nss_db = context.session.query(NS).filter_by(
                nsd_id=nsd_id).first()
            if nss_db is not None and nss_db.deleted_at is None:
                raise nfvo.NSDInUse(nsd_id=nsd_id)

            nsd_db = self._get_resource(context, NSD,
                                        nsd_id)
            if soft_delete:
                # Soft delete: mark the timestamp and record the event.
                nsd_db.update({'deleted_at': timeutils.utcnow()})
                self._cos_db_plg.create_event(
                    context, res_id=nsd_db['id'],
                    res_type=constants.RES_TYPE_NSD,
                    res_state=constants.RES_EVT_NA_STATE,
                    evt_type=constants.RES_EVT_DELETE,
                    tstamp=nsd_db[constants.RES_EVT_DELETED_FLD])
            else:
                # Hard delete: remove attributes first, then the NSD row.
                context.session.query(NSDAttribute).filter_by(
                    nsd_id=nsd_id).delete()
                context.session.delete(nsd_db)

    def get_nsd(self, context, nsd_id, fields=None):
        """Return a single NSD as a dict; raises NSDNotFound if missing."""
        nsd_db = self._get_resource(context, NSD, nsd_id)
        return self._make_nsd_dict(nsd_db)

    def get_nsds(self, context, filters, fields=None):
        """List NSDs; 'template_source=all' means no source filtering."""
        if ('template_source' in filters) and \
                (filters['template_source'][0] == 'all'):
            filters.pop('template_source')
        return self._get_collection(context, NSD,
                                    self._make_nsd_dict,
                                    filters=filters, fields=fields)

    # reference implementation. needs to be overrided by subclass
    def create_ns(self, context, ns):
        """Create an NS row in PENDING_CREATE with a caller-provided id."""
        LOG.debug('ns %s', ns)
        ns = ns['ns']
        tenant_id = self._get_tenant_id_for_create(context, ns)
        nsd_id = ns['nsd_id']
        vim_id = ns['vim_id']
        name = ns.get('name')
        ns_id = ns['ns_id']
        description = None
        if 'description' in ns:
            description = ns.get('description')
        try:
            with context.session.begin(subtransactions=True):
                # Fall back to the descriptor's description when none given.
                if description is None:
                    nsd_db = self._get_resource(context, NSD,
                                                nsd_id)
                    description = nsd_db.description
                ns_db = NS(id=ns_id,
                           tenant_id=tenant_id,
                           name=name,
                           description=description,
                           vnf_ids=None,
                           vnffg_ids=None,
                           status=constants.PENDING_CREATE,
                           mgmt_ip_addresses=None,
                           nsd_id=nsd_id,
                           vim_id=vim_id,
                           error_reason=None,
                           deleted_at=datetime.min)
                context.session.add(ns_db)
        except DBDuplicateEntry as e:
            raise exceptions.DuplicateEntity(
                _type="ns",
                entry=e.columns)
        evt_details = "NS UUID assigned."
        self._cos_db_plg.create_event(
            context, res_id=ns_id,
            res_type=constants.RES_TYPE_NS,
            res_state=constants.PENDING_CREATE,
            evt_type=constants.RES_EVT_CREATE,
            tstamp=ns_db[constants.RES_EVT_CREATED_FLD],
            details=evt_details)
        return self._make_ns_dict(ns_db)

    def create_ns_post(self, context, ns_id, mistral_obj,
                       vnfd_dict, vnffgd_templates, error_reason):
        """Finalize NS creation from the Mistral workflow result.

        Parses `mistral_obj.output` (a stringified dict — presumably the
        Mistral action output; keys like 'vnf_id_<instance>' are expected)
        to collect VNF ids, VNFFG ids and mgmt IPs, then flips the NS to
        ACTIVE or ERROR depending on the workflow state.
        """
        LOG.debug('ns ID %s', ns_id)
        output = ast.literal_eval(mistral_obj.output)
        mgmt_ip_addresses = dict()
        vnf_ids = dict()
        vnffg_ids = dict()
        if len(output) > 0:
            for vnfd_name, vnfd_val in vnfd_dict.items():
                for instance in vnfd_val['instances']:
                    if 'mgmt_ip_address_' + instance in output:
                        mgmt_ip_addresses[instance] = ast.literal_eval(
                            output['mgmt_ip_address_' + instance].strip())
                        vnf_ids[instance] = output['vnf_id_' + instance]
        # Columns are TEXT; dicts are stored in their repr() form.
        vnf_ids = str(vnf_ids)
        mgmt_ip_addresses = str(mgmt_ip_addresses)
        if vnffgd_templates:
            for vnffg_name in vnffgd_templates:
                vnffg_output = 'vnffg_id_%s' % vnffg_name
                vnffg_ids[vnffg_name] = output[vnffg_output]
        vnffg_ids = str(vnffg_ids)
        if not vnf_ids:
            vnf_ids = None
        if not mgmt_ip_addresses:
            mgmt_ip_addresses = None
        if not vnffg_ids:
            vnffg_ids = None
        status = constants.ACTIVE if mistral_obj.state == 'SUCCESS' \
            else constants.ERROR
        with context.session.begin(subtransactions=True):
            ns_db = self._get_resource(context, NS, ns_id)
            ns_db.update({'vnf_ids': vnf_ids})
            ns_db.update({'vnffg_ids': vnffg_ids})
            ns_db.update({'mgmt_ip_addresses': mgmt_ip_addresses})
            ns_db.update({'status': status})
            ns_db.update({'error_reason': error_reason})
            ns_db.update({'updated_at': timeutils.utcnow()})
            ns_dict = self._make_ns_dict(ns_db)
        self._cos_db_plg.create_event(
            context, res_id=ns_dict['id'],
            res_type=constants.RES_TYPE_NS,
            res_state=constants.RES_EVT_NA_STATE,
            evt_type=constants.RES_EVT_UPDATE,
            tstamp=ns_dict[constants.RES_EVT_UPDATED_FLD])
        return ns_dict

    # reference implementation. needs to be overrided by subclass
    def delete_ns_pre(self, context, ns_id, force_delete=False):
        """Move an NS to PENDING_DELETE (unless mid-operation) and audit it."""
        with context.session.begin(subtransactions=True):
            ns_db = self._get_ns_db(
                context, ns_id, _ACTIVE_UPDATE_ERROR_DEAD)
            if not force_delete:
                # Refuse to delete while another lifecycle op is in flight.
                if (ns_db is not None and ns_db.status in
                    [constants.PENDING_DELETE,
                     constants.PENDING_CREATE,
                     constants.PENDING_UPDATE]):
                    raise network_service.NSInUse(ns_id=ns_id)
            ns_db = self._update_ns_db(ns_db, constants.PENDING_DELETE)
        deleted_ns_db = self._make_ns_dict(ns_db)
        self._cos_db_plg.create_event(
            context, res_id=ns_id,
            res_type=constants.RES_TYPE_NS,
            res_state=deleted_ns_db['status'],
            evt_type=constants.RES_EVT_DELETE,
            tstamp=timeutils.utcnow(), details="NS delete initiated")
        return deleted_ns_db

    def delete_ns_post(self, context, ns_id, mistral_obj,
                       error_reason, soft_delete=True, force_delete=False):
        """Finalize NS deletion after the delete workflow completes.

        On workflow ERROR (without force) the NS is marked ERROR; otherwise
        it is soft- or hard-deleted.  An inline NSD that backed the NS is
        deleted along with it.
        """
        ns = self.get_ns(context, ns_id)
        nsd_id = ns.get('nsd_id')
        with context.session.begin(subtransactions=True):
            if force_delete:
                query = (
                    self._model_query(context, NS).
                    filter(NS.id == ns_id))
            else:
                # Only finalize rows that are actually pending delete.
                query = (
                    self._model_query(context, NS).
                    filter(NS.id == ns_id).
                    filter(NS.status == constants.PENDING_DELETE))
            if not force_delete and (mistral_obj
                                     and mistral_obj.state == 'ERROR'):
                query.update({'status': constants.ERROR})
                self._cos_db_plg.create_event(
                    context, res_id=ns_id,
                    res_type=constants.RES_TYPE_NS,
                    res_state=constants.ERROR,
                    evt_type=constants.RES_EVT_DELETE,
                    tstamp=timeutils.utcnow(),
                    details="NS Delete ERROR")
            else:
                if soft_delete:
                    deleted_time_stamp = timeutils.utcnow()
                    query.update({'deleted_at': deleted_time_stamp})
                    self._cos_db_plg.create_event(
                        context, res_id=ns_id,
                        res_type=constants.RES_TYPE_NS,
                        res_state=constants.PENDING_DELETE,
                        evt_type=constants.RES_EVT_DELETE,
                        tstamp=deleted_time_stamp,
                        details="ns Delete Complete")
                else:
                    query.delete()
            # Inline descriptors live only as long as their single NS.
            try:
                template_db = self._get_resource(context, NSD, nsd_id)
                if template_db.get('template_source') == 'inline':
                    self.delete_nsd(context, nsd_id)
            except orm_exc.NoResultFound:
                pass

    def get_ns(self, context, ns_id, fields=None):
        """Return a single NS as a dict; raises NSNotFound if missing."""
        ns_db = self._get_resource(context, NS, ns_id)
        return self._make_ns_dict(ns_db)

    def get_nss(self, context, filters=None, fields=None):
        """List NS instances matching `filters`, serialized as dicts."""
        return self._get_collection(context, NS,
                                    self._make_ns_dict,
                                    filters=filters, fields=fields)
|
dantebarba/docker-media-server | refs/heads/master | plex/Sub-Zero.bundle/Contents/Libraries/Shared/guessit/rules/properties/episode_title.py | 11 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Episode title
"""
from collections import defaultdict
from rebulk import Rebulk, Rule, AppendMatch, RemoveMatch, RenameMatch, POST_PROCESS
from ..common import seps, title_seps
from ..common.formatters import cleanup
from ..properties.title import TitleFromPosition, TitleBaseRule
from ..properties.type import TypeProcessor
def episode_title():
    """Builder for rebulk object.

    :return: Created Rebulk object
    :rtype: Rebulk
    """
    # Property names that may legitimately precede an episode title.
    previous_names = ('episode', 'episode_details', 'episode_count',
                      'season', 'season_count', 'date', 'title', 'year')
    rules = (
        RemoveConflictsWithEpisodeTitle(previous_names),
        EpisodeTitleFromPosition(previous_names),
        AlternativeTitleReplace(previous_names),
        TitleToEpisodeTitle,
        Filepart3EpisodeTitle,
        Filepart2EpisodeTitle,
        RenameEpisodeTitleWhenMovieType,
    )
    return Rebulk().rules(*rules)
class RemoveConflictsWithEpisodeTitle(Rule):
    """
    Remove conflicting matches that might lead to wrong episode_title parsing.
    """
    priority = 64
    consequence = RemoveMatch

    def __init__(self, previous_names):
        super(RemoveConflictsWithEpisodeTitle, self).__init__()
        # Property names that may legitimately appear just before the match.
        self.previous_names = previous_names
        # Property names that may legitimately appear just after the match.
        self.next_names = ('streaming_service', 'screen_size', 'format',
                           'video_codec', 'audio_codec', 'other', 'container')
        # Names removed only when an unparsed gap follows the match.
        self.affected_if_holes_after = ('part', )
        # Candidate property names this rule may remove.
        self.affected_names = ('part', 'year')

    def when(self, matches, context):
        """Collect 'part'/'year' matches that look like title words instead."""
        to_remove = []
        for filepart in matches.markers.named('path'):
            for match in matches.range(filepart.start, filepart.end,
                                       predicate=lambda m: m.name in self.affected_names):
                before = matches.previous(match, index=0,
                                          predicate=lambda m, fp=filepart: not m.private and m.start >= fp.start)
                if not before or before.name not in self.previous_names:
                    continue
                after = matches.next(match, index=0,
                                     predicate=lambda m, fp=filepart: not m.private and m.end <= fp.end)
                if not after or after.name not in self.next_names:
                    continue
                # Group marker (if any) enclosing the candidate match.
                group = matches.markers.at_match(match, predicate=lambda m: m.name == 'group', index=0)

                def has_value_in_same_group(current_match, current_group=group):
                    """Return true if current match has value and belongs to the current group."""
                    return current_match.value.strip(seps) and (
                        current_group == matches.markers.at_match(current_match,
                                                                  predicate=lambda mm: mm.name == 'group', index=0)
                    )

                # Unparsed text adjacent to the match suggests it belongs to a title.
                holes_before = matches.holes(before.end, match.start, predicate=has_value_in_same_group)
                holes_after = matches.holes(match.end, after.start, predicate=has_value_in_same_group)
                if not holes_before and not holes_after:
                    continue
                if match.name in self.affected_if_holes_after and not holes_after:
                    continue
                to_remove.append(match)
                # Remove the parent too so no half-removed composite survives.
                if match.parent:
                    to_remove.append(match.parent)
        return to_remove
class TitleToEpisodeTitle(Rule):
    """
    If multiple different titles are found, convert the ones following an
    episode number to ``episode_title``.
    """
    dependency = TitleFromPosition

    def when(self, matches, context):
        candidates = matches.named('title')
        # Only act when at least two distinct title values were detected.
        distinct_values = {candidate.value for candidate in candidates}
        if len(distinct_values) < 2:
            return
        following_episode = [
            candidate for candidate in candidates
            if matches.previous(candidate, lambda match: match.name == 'episode')
        ]
        if following_episode:
            return following_episode

    def then(self, matches, when_response, context):
        # Re-register each selected match under the episode_title property.
        for retitled in when_response:
            matches.remove(retitled)
            retitled.name = 'episode_title'
            matches.append(retitled)
class EpisodeTitleFromPosition(TitleBaseRule):
    """
    Add episode title match in existing matches
    Must run after TitleFromPosition rule.
    """
    dependency = TitleToEpisodeTitle

    def __init__(self, previous_names):
        # Produce 'episode_title' matches, tagged 'title'.
        super(EpisodeTitleFromPosition, self).__init__('episode_title', ['title'])
        self.previous_names = previous_names

    def hole_filter(self, hole, matches):
        # A hole qualifies if an episode-related property precedes it,
        # or if a crc32 was detected anywhere (anime-style naming).
        episode = matches.previous(hole,
                                   lambda previous: any(name in previous.names
                                                        for name in self.previous_names),
                                   0)
        crc32 = matches.named('crc32')
        return episode or crc32

    def filepart_filter(self, filepart, matches):
        # Filepart where title was found.
        if matches.range(filepart.start, filepart.end, lambda match: match.name == 'title'):
            return True
        return False

    def should_remove(self, match, matches, filepart, hole, context):
        # Never drop episode_details matches while claiming the hole.
        if match.name == 'episode_details':
            return False
        return super(EpisodeTitleFromPosition, self).should_remove(match, matches, filepart, hole, context)

    def when(self, matches, context):
        # Skip entirely when an episode_title already exists.
        if matches.named('episode_title'):
            return
        return super(EpisodeTitleFromPosition, self).when(matches, context)
class AlternativeTitleReplace(Rule):
    """
    If alternateTitle was found and title is next to episode, season or date, replace it with episode_title.
    """
    dependency = EpisodeTitleFromPosition
    consequence = RenameMatch

    def __init__(self, previous_names):
        super(AlternativeTitleReplace, self).__init__()
        self.previous_names = previous_names

    def when(self, matches, context):
        # Only applies when no episode_title was found by earlier rules.
        if matches.named('episode_title'):
            return
        alternative_title = matches.range(predicate=lambda match: match.name == 'alternative_title', index=0)
        if alternative_title:
            # Main title chained immediately before the alternative title.
            main_title = matches.chain_before(alternative_title.start, seps=seps,
                                              predicate=lambda match: 'title' in match.tags, index=0)
            if main_title:
                episode = matches.previous(main_title,
                                           lambda previous: any(name in previous.names
                                                                for name in self.previous_names),
                                           0)
                crc32 = matches.named('crc32')
                if episode or crc32:
                    return alternative_title

    def then(self, matches, when_response, context):
        matches.remove(when_response)
        when_response.name = 'episode_title'
        # Tag so RenameEpisodeTitleWhenMovieType can leave this match alone.
        when_response.tags.append('alternative-replaced')
        matches.append(when_response)
class RenameEpisodeTitleWhenMovieType(Rule):
    """
    Rename episode_title to alternative_title when type is movie.
    """
    priority = POST_PROCESS
    dependency = TypeProcessor
    consequence = RenameMatch

    def when(self, matches, context):
        # Only titles that were not produced by AlternativeTitleReplace count.
        genuine = matches.named('episode_title',
                                lambda m: 'alternative-replaced' not in m.tags)
        if not genuine:
            return
        if matches.named('type', lambda m: m.value == 'episode'):
            return
        return matches.named('episode_title')

    def then(self, matches, when_response, context):
        # Re-register every episode_title match as an alternative_title.
        for renamed in when_response:
            matches.remove(renamed)
            renamed.name = 'alternative_title'
            matches.append(renamed)
class Filepart3EpisodeTitle(Rule):
    """
    If we have at least 3 fileparts structured like this:

    Serie name/SO1/E01-episode_title.mkv
    AAAAAAAAAA/BBB/CCCCCCCCCCCCCCCCCCCC

    If CCCC contains an episode and BBB contains a season number,
    then the series title is to be found in AAAA.
    """
    consequence = AppendMatch('title')

    def when(self, matches, context):
        path_markers = matches.markers.named('path')
        if len(path_markers) < 3:
            return
        filename = path_markers[-1]
        directory = path_markers[-2]
        subdirectory = path_markers[-3]
        if not matches.range(filename.start, filename.end, lambda match: match.name == 'episode', 0):
            return
        if not matches.range(directory.start, directory.end, lambda match: match.name == 'season', 0):
            return
        # The grandparent directory is expected to hold the series title.
        gap = matches.holes(subdirectory.start, subdirectory.end,
                            formatter=cleanup, seps=title_seps,
                            predicate=lambda match: match.value, index=0)
        return gap or None
class Filepart2EpisodeTitle(Rule):
    """
    If we have at least 2 filepart structured like this:

    Serie name SO1/E01-episode_title.mkv
    AAAAAAAAAAAAA/BBBBBBBBBBBBBBBBBBBBB

    If BBBB contains episode and AAA contains a hole followed by seasonNumber
    then title is to be found in AAAA.

    or

    Serie name/SO1E01-episode_title.mkv
    AAAAAAAAAA/BBBBBBBBBBBBBBBBBBBBB

    If BBBB contains season and episode and AAA contains a hole
    then title is to be found in AAAA.
    """
    consequence = AppendMatch('title')

    def when(self, matches, context):
        fileparts = matches.markers.named('path')
        if len(fileparts) < 2:
            return
        filename = fileparts[-1]
        directory = fileparts[-2]
        # Episode must be present in the filename part.
        episode_number = matches.range(filename.start, filename.end, lambda match: match.name == 'episode', 0)
        if episode_number:
            # Season may live either in the parent directory or in the filename.
            season = (matches.range(directory.start, directory.end, lambda match: match.name == 'season', 0) or
                      matches.range(filename.start, filename.end, lambda match: match.name == 'season', 0))
            if season:
                # First unparsed gap in the parent directory is taken as title.
                hole = matches.holes(directory.start, directory.end, formatter=cleanup, seps=title_seps,
                                     predicate=lambda match: match.value, index=0)
                if hole:
                    return hole
|
GoogleCloudPlatform/hellodashboard | refs/heads/master | simplejson/tests/test_dump.py | 38 | from unittest import TestCase
from simplejson.compat import StringIO, long_type, b, binary_type, PY3
import simplejson as json
def as_text_type(s):
    """Coerce ASCII bytes to the native text type on Python 3; pass
    everything else through unchanged."""
    needs_decode = PY3 and isinstance(s, binary_type)
    return s.decode('ascii') if needs_decode else s
class TestDump(TestCase):
    """Tests for simplejson.dump/dumps serialization behaviour."""

    def test_dump(self):
        # dump writes to a file-like object.
        sio = StringIO()
        json.dump({}, sio)
        self.assertEqual(sio.getvalue(), '{}')

    def test_constants(self):
        # JSON constants must round-trip to the identical singletons.
        for c in [None, True, False]:
            self.assertTrue(json.loads(json.dumps(c)) is c)
            self.assertTrue(json.loads(json.dumps([c]))[0] is c)
            self.assertTrue(json.loads(json.dumps({'a': c}))['a'] is c)

    def test_stringify_key(self):
        # Non-string keys are coerced to their JSON string representation.
        items = [(b('bytes'), 'bytes'),
                 (1.0, '1.0'),
                 (10, '10'),
                 (True, 'true'),
                 (False, 'false'),
                 (None, 'null'),
                 (long_type(100), '100')]
        for k, expect in items:
            self.assertEqual(
                json.loads(json.dumps({k: expect})),
                {expect: expect})
            self.assertEqual(
                json.loads(json.dumps({k: expect}, sort_keys=True)),
                {expect: expect})
        # An unserializable key (a module object) raises TypeError...
        self.assertRaises(TypeError, json.dumps, {json: 1})
        # ...unless skipkeys=True, in which case it is silently dropped.
        for v in [{}, {'other': 1}, {b('derp'): 1, 'herp': 2}]:
            for sort_keys in [False, True]:
                v0 = dict(v)
                v0[json] = 1
                v1 = dict((as_text_type(key), val) for (key, val) in v.items())
                self.assertEqual(
                    json.loads(json.dumps(v0, skipkeys=True, sort_keys=sort_keys)),
                    v1)
                self.assertEqual(
                    json.loads(json.dumps({'': v0}, skipkeys=True, sort_keys=sort_keys)),
                    {'': v1})
                self.assertEqual(
                    json.loads(json.dumps([v0], skipkeys=True, sort_keys=sort_keys)),
                    [v1])

    def test_dumps(self):
        self.assertEqual(json.dumps({}), '{}')

    def test_encode_truefalse(self):
        # Boolean keys stringify to "true"/"false" and sort as strings.
        self.assertEqual(json.dumps(
            {True: False, False: True}, sort_keys=True),
            '{"false": true, "true": false}')
        self.assertEqual(
            json.dumps(
                {2: 3.0,
                 4.0: long_type(5),
                 False: 1,
                 long_type(6): True,
                 "7": 0},
                sort_keys=True),
            '{"2": 3.0, "4.0": 5, "6": true, "7": 0, "false": 1}')

    def test_ordered_dict(self):
        # http://bugs.python.org/issue6105 — insertion order is preserved.
        items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
        s = json.dumps(json.OrderedDict(items))
        self.assertEqual(
            s,
            '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')

    def test_indent_unknown_type_acceptance(self):
        """
        A test against the regression mentioned at `github issue 29`_.
        The indent parameter should accept any type which pretends to be
        an instance of int or long when it comes to being multiplied by
        strings, even if it is not actually an int or long, for
        backwards compatibility.
        .. _github issue 29:
        http://github.com/simplejson/simplejson/issue/29
        """
        class AwesomeInt(object):
            """An awesome reimplementation of integers"""

            def __init__(self, *args, **kwargs):
                if len(args) > 0:
                    # [construct from literals, objects, etc.]
                    # ...
                    # Finally, if args[0] is an integer, store it
                    if isinstance(args[0], int):
                        self._int = args[0]

            # [various methods]
            def __mul__(self, other):
                # [various ways to multiply AwesomeInt objects]
                # ... finally, if the right-hand operand is not awesome enough,
                # try to do a normal integer multiplication
                if hasattr(self, '_int'):
                    return self._int * other
                else:
                    raise NotImplementedError("To do non-awesome things with"
                                              " this object, please construct it from an integer!")

        s = json.dumps([0, 1, 2], indent=AwesomeInt(3))
        self.assertEqual(s, '[\n 0,\n 1,\n 2\n]')

    def test_accumulator(self):
        # the C API uses an accumulator that collects after 100,000 appends
        lst = [0] * 100000
        self.assertEqual(json.loads(json.dumps(lst)), lst)
|
grupozeety/CDerpnext | refs/heads/bk_master | erpnext/patches/v6_4/fix_journal_entries_due_to_reconciliation.py | 25 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: repair Journal Entry rows whose base-currency debit/credit
    drifted out of sync with the account-currency amounts after payment
    reconciliation, then delete and repost their GL entries.
    """
    # Rows where base amount != account-currency amount * exchange rate.
    je_rows = frappe.db.sql("""
        select name, parent, reference_type, reference_name, debit, credit
        from `tabJournal Entry Account`
        where docstatus=1 and date(modified) >= '2015-09-17'
        and ((ifnull(debit_in_account_currency, 0)*exchange_rate != ifnull(debit, 0))
            or (ifnull(credit_in_account_currency, 0)*exchange_rate != ifnull(credit, 0)))
        order by parent
    """, as_dict=True)

    journal_entries = []

    for d in je_rows:
        if d.parent not in journal_entries:
            journal_entries.append(d.parent)

        is_advance_entry = None
        if d.reference_type in ("Sales Invoice", "Purchase Invoice") and d.reference_name:
            # The row is an advance allocation if a submitted "* Advance"
            # record links back to this journal entry row.
            is_advance_entry = frappe.db.sql("""select name from `tab{0}`
                where journal_entry=%s and jv_detail_no=%s
                and ifnull(allocated_amount, 0) > 0 and docstatus=1"""
                .format(d.reference_type + " Advance"), (d.parent, d.name))

        if is_advance_entry or not (d.debit or d.credit):
            # Trust the account-currency amounts; recompute base amounts.
            frappe.db.sql("""
                update `tabJournal Entry Account`
                set debit=debit_in_account_currency*exchange_rate,
                    credit=credit_in_account_currency*exchange_rate
                where name=%s""", d.name)
        else:
            # Trust the base amounts; recompute account-currency amounts.
            frappe.db.sql("""
                update `tabJournal Entry Account`
                set debit_in_account_currency=debit/exchange_rate,
                    credit_in_account_currency=credit/exchange_rate
                where name=%s""", d.name)

    for d in journal_entries:
        # Call form of print works on Python 2 and 3 alike; the original
        # `print d` statement is Python-2 only.
        print(d)

        # delete existing gle
        frappe.db.sql("delete from `tabGL Entry` where voucher_type='Journal Entry' and voucher_no=%s", d)

        # repost gl entries
        je = frappe.get_doc("Journal Entry", d)
        je.make_gl_entries()
christianurich/VIBe2UrbanSim | refs/heads/master | 3rdparty/opus/src/urbansim/configurations/development_event_transition_model_configuration_creator.py | 2 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from numpy import array
from opus_core.configuration import Configuration
class DevelopmentEventTransitionModelConfigurationCreator(object):
    """Builds the Configuration dict for the development event transition model."""

    _model_name = 'development_event_transition_model'

    def __init__(self,
                 debuglevel='debuglevel',
                 input_projects='dptm_results',
                 output_events='development_events'):
        self.debuglevel = debuglevel
        self.input_projects = input_projects
        self.output_events = output_events

    def execute(self):
        """Assemble and return the model Configuration."""
        # Names of intermediate objects used to pass data between the
        # prepare_for_run and run steps of this model process.
        types_name = 'all_project_types'
        units_name = 'all_project_units'
        module_path = 'urbansim.models.%s' % self._model_name
        config = {
            'import': {module_path: 'DevelopmentEventTransitionModel'},
            'init': {'name': 'DevelopmentEventTransitionModel'},
            'prepare_for_run': {
                'arguments': {
                    'dev_projects': self.input_projects,
                    'model_configuration': 'model_configuration',
                },
                'name': 'prepare_for_run',
                'output': '(%s, %s)' % (types_name, units_name),
            },
            'run': {
                'arguments': {
                    'debuglevel': self.debuglevel,
                    'projects': self.input_projects,
                    'types': types_name,
                    'units': units_name,
                    'year': 'year',
                },
                'output': self.output_events,
            },
        }
        return Configuration(config)
from opus_core.tests import opus_unittest
class TestDevelopmentEventTransitionModelConfigurationCreator(opus_unittest.OpusTestCase):
    """Verifies the exact Configuration dicts produced by the creator."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_defaults(self):
        # Creator with no arguments must emit the documented defaults.
        creator = DevelopmentEventTransitionModelConfigurationCreator()
        expected = Configuration({
            'import': {
                'urbansim.models.development_event_transition_model': 'DevelopmentEventTransitionModel'
            },
            'init': {'name': 'DevelopmentEventTransitionModel'},
            'prepare_for_run': {
                'arguments': {
                    'dev_projects': 'dptm_results',
                    'model_configuration': 'model_configuration'
                },
                'name': 'prepare_for_run',
                'output': '(all_project_types, all_project_units)'
            },
            'run': {
                'arguments': {
                    'debuglevel': 'debuglevel',
                    'projects': 'dptm_results',
                    'types': 'all_project_types',
                    'units': 'all_project_units',
                    'year': 'year'
                },
                'output': 'development_events'
            }
        })
        result = creator.execute()
        self.assertDictsEqual(result, expected)

    def test_with_arguments(self):
        # Constructor arguments must flow through to the emitted configuration.
        creator = DevelopmentEventTransitionModelConfigurationCreator(
            debuglevel = 9999,
            output_events = 'output_events',
            input_projects = 'input_projects',
        )
        expected = Configuration({
            'import': {
                'urbansim.models.development_event_transition_model': 'DevelopmentEventTransitionModel'
            },
            'init': {'name': 'DevelopmentEventTransitionModel'},
            'prepare_for_run': {
                'arguments': {
                    'dev_projects': 'input_projects',
                    'model_configuration': 'model_configuration'
                },
                'name': 'prepare_for_run',
                'output': '(all_project_types, all_project_units)'
            },
            'run': {
                'arguments': {
                    'debuglevel': 9999,
                    'projects': 'input_projects',
                    'types': 'all_project_types',
                    'units': 'all_project_units',
                    'year': 'year'
                },
                'output': 'output_events',
            }
        })
        result = creator.execute()
        self.assertDictsEqual(result, expected)
# Allow running this module's tests directly from the command line.
if __name__ == '__main__':
    opus_unittest.main()
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/mobile/shared_salt_mynock.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build and return the salt mynock creature template object."""
    creature = Creature()
    creature.template = "object/mobile/shared_salt_mynock.iff"
    creature.attribute_template_id = 9
    creature.stfName("monster_name", "salt_mynock")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return creature
bwsblake/lettercounter | refs/heads/master | django-norel-env/lib/python2.7/site-packages/django/conf/locale/en/__init__.py | 12133432 | |
gohin/django | refs/heads/master | django/conf/locale/ka/__init__.py | 12133432 | |
georgemarshall/django | refs/heads/master | tests/queryset_pickle/__init__.py | 12133432 | |
peterlauri/django | refs/heads/master | django/contrib/sitemaps/management/__init__.py | 12133432 | |
PatrickChrist/scikit-learn | refs/heads/master | sklearn/cluster/setup.py | 263 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for sklearn.cluster.

    Declares the compiled extension modules (_dbscan_inner, _hierarchical,
    _k_means) together with their BLAS/libm link requirements.
    """
    from numpy.distutils.misc_util import Configuration

    cblas_libs, blas_info = get_blas_info()

    libraries = []
    if os.name == 'posix':
        # Link libm explicitly on POSIX platforms.
        cblas_libs.append('m')
        libraries.append('m')

    config = Configuration('cluster', parent_package, top_path)

    config.add_extension('_dbscan_inner',
                         sources=['_dbscan_inner.cpp'],
                         include_dirs=[numpy.get_include()],
                         language="c++")

    config.add_extension('_hierarchical',
                         sources=['_hierarchical.cpp'],
                         language="c++",
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)

    # _k_means needs CBLAS headers and link flags in addition to numpy.
    config.add_extension(
        '_k_means',
        libraries=cblas_libs,
        sources=['_k_means.c'],
        include_dirs=[join('..', 'src', 'cblas'),
                      numpy.get_include(),
                      blas_info.pop('include_dirs', [])],
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info
    )

    return config
# Allow building this subpackage standalone.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
xuxiao19910803/edx | refs/heads/master | common/test/acceptance/tests/lms/test_library.py | 92 | # -*- coding: utf-8 -*-
"""
End-to-end tests for LibraryContent block in LMS
"""
import ddt
import textwrap
from nose.plugins.attrib import attr
from ..helpers import UniqueCourseTest, TestWithSearchIndexMixin
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.library import StudioLibraryContentEditor, StudioLibraryContainerXBlockWrapper
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.library import LibraryContentXBlockWrapper
from ...pages.common.logout import LogoutPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...fixtures.library import LibraryFixture
# Names used both when building the course fixture and when navigating
# the Studio course outline in the tests below.
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
@attr('shard_3')
class LibraryContentTestBase(UniqueCourseTest):
    """ Base class for library content block tests """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"

    STAFF_USERNAME = "STAFF_TESTER"
    STAFF_EMAIL = "staff101@example.com"

    def populate_library_fixture(self, library_fixture):
        """
        To be overwritten by subclassed tests. Used to install a library to
        run tests on.
        """

    def setUp(self):
        """
        Set up library, course and library content XBlock
        """
        super(LibraryContentTestBase, self).setUp()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)

        self.course_outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Build and install the library first; the course's library_content
        # block below references it by key.
        self.library_fixture = LibraryFixture('test_org', self.unique_id, 'Test Library {}'.format(self.unique_id))
        self.populate_library_fixture(self.library_fixture)

        self.library_fixture.install()
        self.library_info = self.library_fixture.library_info
        self.library_key = self.library_fixture.library_key

        # Install a course with library content xblock
        self.course_fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        library_content_metadata = {
            'source_library_id': unicode(self.library_key),
            'mode': 'random',
            'max_count': 1,
            'has_score': False
        }

        self.lib_block = XBlockFixtureDesc('library_content', "Library Content", metadata=library_content_metadata)

        self.course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
                        self.lib_block
                    )
                )
            )
        )

        self.course_fixture.install()

    def _change_library_content_settings(self, count=1, capa_type=None):
        """
        Performs library block refresh in Studio, configuring it to show {count} children
        """
        unit_page = self._go_to_unit_page(True)
        # xblocks[0] is the unit wrapper; [1] is the library_content block.
        library_container_block = StudioLibraryContainerXBlockWrapper.from_xblock_wrapper(unit_page.xblocks[1])
        library_container_block.edit()
        editor = StudioLibraryContentEditor(self.browser, library_container_block.locator)
        editor.count = count
        if capa_type is not None:
            editor.capa_type = capa_type
        editor.save()
        self._go_to_unit_page(change_login=False)
        unit_page.wait_for_page()
        unit_page.publish_action.click()
        unit_page.wait_for_ajax()
        self.assertIn("Published and Live", unit_page.publish_title)

    @property
    def library_xblocks_texts(self):
        """
        Gets texts of all xblocks in library
        """
        return frozenset(child.data for child in self.library_fixture.children)

    def _go_to_unit_page(self, change_login=True):
        """
        Open unit page in Studio
        """
        if change_login:
            # Re-authenticate as staff before entering Studio.
            LogoutPage(self.browser).visit()
            self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)

        self.course_outline.visit()
        subsection = self.course_outline.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        return subsection.expand_subsection().unit(UNIT_NAME).go_to()

    def _goto_library_block_page(self, block_id=None):
        """
        Open library page in LMS
        """
        self.courseware_page.visit()

        # Dismiss the "You were most recently in ..." resume banner if shown.
        paragraphs = self.courseware_page.q(css='.course-content p')
        if paragraphs and "You were most recently in" in paragraphs.text[0]:
            paragraphs[0].find_element_by_tag_name('a').click()
        block_id = block_id if block_id is not None else self.lib_block.locator
        #pylint: disable=attribute-defined-outside-init
        self.library_content_page = LibraryContentXBlockWrapper(self.browser, block_id)
        self.library_content_page.wait_for_page()

    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.
        """
        AutoAuthPage(self.browser, username=username, email=email,
                     course_id=self.course_id, staff=staff).visit()
@ddt.ddt
@attr('shard_3')
class LibraryContentTest(LibraryContentTestBase):
    """
    Test courseware.
    """

    def populate_library_fixture(self, library_fixture):
        """
        Populates library fixture with XBlock Fixtures
        """
        library_fixture.add_children(
            XBlockFixtureDesc("html", "Html1", data='html1'),
            XBlockFixtureDesc("html", "Html2", data='html2'),
            XBlockFixtureDesc("html", "Html3", data='html3'),
        )

    @ddt.data(1, 2, 3)
    def test_shows_random_xblocks_from_configured(self, count):
        """
        Scenario: Ensures that library content shows {count} random xblocks from library in LMS
        Given I have a library, a course and a LibraryContent block in that course
        When I go to studio unit page for library content xblock as staff
        And I set library content xblock to display {count} random children
        And I refresh library content xblock and publish unit
        When I go to LMS courseware page for library content xblock as student
        Then I can see {count} random xblocks from the library
        """
        self._change_library_content_settings(count=count)
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self._goto_library_block_page()
        children_contents = self.library_content_page.children_contents
        self.assertEqual(len(children_contents), count)
        # Shown blocks must be a subset of the library's blocks.
        self.assertLessEqual(children_contents, self.library_xblocks_texts)

    def test_shows_all_if_max_set_to_greater_value(self):
        """
        Scenario: Ensures that library content shows all library xblocks in LMS
        Given I have a library, a course and a LibraryContent block in that course
        When I go to studio unit page for library content xblock as staff
        And I set library content xblock to display more children than library have
        And I refresh library content xblock and publish unit
        When I go to LMS courseware page for library content xblock as student
        Then I can see all xblocks from the library
        """
        self._change_library_content_settings(count=10)
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self._goto_library_block_page()
        children_contents = self.library_content_page.children_contents
        self.assertEqual(len(children_contents), 3)
        self.assertEqual(children_contents, self.library_xblocks_texts)
@ddt.ddt
@attr('shard_3')
class StudioLibraryContainerCapaFilterTest(LibraryContentTestBase, TestWithSearchIndexMixin):
    """
    Test Library Content block in LMS
    """

    def setUp(self):
        """ SetUp method """
        # The search index must exist before the base setUp installs fixtures.
        self._create_search_index()
        super(StudioLibraryContainerCapaFilterTest, self).setUp()

    def tearDown(self):
        self._cleanup_index_file()
        super(StudioLibraryContainerCapaFilterTest, self).tearDown()

    def _get_problem_choice_group_text(self, name, items):
        """ Generates Choice Group CAPA problem XML """
        items_text = "\n".join([
            "<choice correct='{correct}'>{item}</choice>".format(correct=correct, item=item)
            for item, correct in items
        ])
        return textwrap.dedent("""
            <problem>
            <p>{name}</p>
            <multiplechoiceresponse>
            <choicegroup label="{name}" type="MultipleChoice">{items}</choicegroup>
            </multiplechoiceresponse>
            </problem>""").format(name=name, items=items_text)

    def _get_problem_select_text(self, name, items, correct):
        """ Generates Select Option CAPA problem XML """
        items_text = ",".join(["'{0}'".format(item) for item in items])
        return textwrap.dedent("""
            <problem>
            <p>{name}</p>
            <optionresponse>
            <optioninput label="{name}" options="({options})" correct="{correct}"></optioninput>
            </optionresponse>
            </problem>""").format(name=name, options=items_text, correct=correct)

    def populate_library_fixture(self, library_fixture):
        """
        Populates library fixture with XBlock Fixtures
        """
        # Two "choice group" problems and two "select option" problems, so
        # each capa_type filter value has exactly two matching blocks.
        items = (
            XBlockFixtureDesc(
                "problem", "Problem Choice Group 1",
                data=self._get_problem_choice_group_text("Problem Choice Group 1 Text", [("1", False), ('2', True)])
            ),
            XBlockFixtureDesc(
                "problem", "Problem Choice Group 2",
                data=self._get_problem_choice_group_text("Problem Choice Group 2 Text", [("Q", True), ('W', False)])
            ),
            XBlockFixtureDesc(
                "problem", "Problem Select 1",
                data=self._get_problem_select_text("Problem Select 1 Text", ["Option 1", "Option 2"], "Option 1")
            ),
            XBlockFixtureDesc(
                "problem", "Problem Select 2",
                data=self._get_problem_select_text("Problem Select 2 Text", ["Option 3", "Option 4"], "Option 4")
            ),
        )
        library_fixture.add_children(*items)

    @property
    def _problem_headers(self):
        """ Expected XBLock headers according to populate_library_fixture """
        return frozenset(child.display_name.upper() for child in self.library_fixture.children)

    def _set_library_content_settings(self, count=1, capa_type="Any Type"):
        """
        Sets library content XBlock parameters, saves, publishes unit, goes to LMS unit page and
        gets children XBlock headers to assert against them
        """
        self._change_library_content_settings(count=count, capa_type=capa_type)
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self._goto_library_block_page()
        return self.library_content_page.children_headers

    def test_problem_type_selector(self):
        """
        Scenario: Ensure setting "Any Type" for Problem Type does not filter out Problems
        Given I have a library with two "Select Option" and two "Choice Group" problems, and a course containing
        LibraryContent XBlock configured to draw XBlocks from that library
        When I set library content xblock Problem Type to "Any Type" and Count to 3 and publish unit
        When I go to LMS courseware page for library content xblock as student
        Then I can see 3 xblocks from the library of any type
        When I set library content xblock Problem Type to "Choice Group" and Count to 1 and publish unit
        When I go to LMS courseware page for library content xblock as student
        Then I can see 1 xblock from the library of "Choice Group" type
        When I set library content xblock Problem Type to "Select Option" and Count to 2 and publish unit
        When I go to LMS courseware page for library content xblock as student
        Then I can see 2 xblock from the library of "Select Option" type
        When I set library content xblock Problem Type to "Matlab" and Count to 2 and publish unit
        When I go to LMS courseware page for library content xblock as student
        Then I can see 0 xblocks from the library
        """
        children_headers = self._set_library_content_settings(count=3, capa_type="Any Type")
        self.assertEqual(len(children_headers), 3)
        self.assertLessEqual(children_headers, self._problem_headers)

        # Choice group test
        children_headers = self._set_library_content_settings(count=1, capa_type="Multiple Choice")
        self.assertEqual(len(children_headers), 1)
        self.assertLessEqual(
            children_headers,
            set([header.upper() for header in ["Problem Choice Group 1", "Problem Choice Group 2"]])
        )

        # Dropdown (select option) test
        children_headers = self._set_library_content_settings(count=2, capa_type="Dropdown")
        self.assertEqual(len(children_headers), 2)
        self.assertEqual(
            children_headers,
            set([header.upper() for header in ["Problem Select 1", "Problem Select 2"]])
        )

        # Missing problem type test
        children_headers = self._set_library_content_settings(count=2, capa_type="Custom Evaluated Script")
        self.assertEqual(children_headers, set())
|
40223211/cadpbtest-0420 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/_random.py | 518 | from browser import window
def _randint(a, b):
    """Return a random integer in the inclusive range [a, b]."""
    span = b - a + 1
    return int(a + window.Math.random() * span)
def _urandom(n):
    """urandom(n) -> str

    Return n random bytes suitable for cryptographic use."""
    # NOTE(review): this delegates to window.Math.random via _randint, which
    # is NOT a cryptographically strong source — confirm before relying on it.
    return bytes([_randint(0, 255) for _ in range(n)])
class Random:
    """Random number generator base class used by bound module functions.

    Used to instantiate instances of Random to get generators that don't
    share state.

    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods: random(), seed(), getstate(), and setstate().
    Optionally, implement a getrandbits() method so that randrange()
    can cover arbitrarily large ranges.
    """
    #random
    #seed
    #getstate
    #setstate
    VERSION = 3     # used by getstate/setstate

    def __init__(self, x=None):
        """Initialize an instance.

        Optional argument x controls seeding, as for Random.seed().
        """
        self._state=x

    def seed(self, a=None, version=2):
        """Initialize internal state from hashable object.

        None or no argument seeds from current time or from an operating
        system specific randomness source if available.

        For version 2 (the default), all of the bits are used if *a* is a str,
        bytes, or bytearray. For version 1, the hash() of *a* is used instead.

        If *a* is an int, all bits are used.
        """
        # NOTE(review): the stored state is never consumed by random(), which
        # delegates to window.Math.random — seeding does not make the
        # sequence reproducible in this browser implementation.
        self._state=a
        self.gauss_next = None

    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self._state

    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        self._state=state

    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # Backed by the JavaScript engine's PRNG, not by self._state.
        return window.Math.random()

    def getrandbits(self, k):
        """getrandbits(k) -> x. Generates a long int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        numbytes = (k + 7) // 8                       # bits / 8 and rounded up
        x = int.from_bytes(_urandom(numbytes), 'big')
        return x >> (numbytes * 8 - k)                # trim excess bits
|
cycotech/WAR-app | refs/heads/master | env/lib/python3.5/site-packages/setuptools/py27compat.py | 189 | """
Compatibility Support for Python 2.7 and earlier
"""
import platform
from setuptools.extern import six
def get_all_headers(message, key):
    """Return every header value in *message* whose name matches *key*."""
    matching = message.get_all(key)
    return matching
# On Python 2, email.message.Message has no get_all(); fall back to the
# legacy getheaders() API, which has the same contract.
if six.PY2:
    def get_all_headers(message, key):
        # Same signature and return value as the Python 3 version above.
        return message.getheaders(key)
# True only for Python 2 on Linux, the combination affected by the
# rmtree unicode-path bug referenced below.
linux_py2_ascii = (
    platform.system() == 'Linux' and
    six.PY2
)
# On affected platforms coerce the path to a byte string (py2 str) before
# handing it to shutil.rmtree; elsewhere pass it through unchanged.
rmtree_safe = str if linux_py2_ascii else lambda x: x
"""Workaround for http://bugs.python.org/issue24672"""
|
Anonymous-X6/django | refs/heads/master | tests/template_tests/templatetags/__init__.py | 12133432 | |
cydenix/OpenGLCffi | refs/heads/master | OpenGLCffi/GL/EXT/GREMEDY/__init__.py | 12133432 | |
entomb/CouchPotatoServer | refs/heads/master | libs/migrate/versioning/templates/__init__.py | 12133432 | |
annacorobco/django-tastypie | refs/heads/master | tests/alphanumeric/api/__init__.py | 12133432 | |
nicholasbs/zulip | refs/heads/master | confirmation/management/commands/__init__.py | 12133432 | |
adafruit/Adafruit_Python_GPIO | refs/heads/master | Adafruit_GPIO/PCF8574.py | 1 | '''
Adafruit compatible using BaseGPIO class to represent a PCF8574/A IO expander
Copyright (C) 2015 Sylvan Butler
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.I2C as I2C
# Re-export the BaseGPIO direction/level constants so users of this module
# can reference them without importing Adafruit_GPIO directly.
IN = GPIO.IN
OUT = GPIO.OUT
HIGH = GPIO.HIGH
LOW = GPIO.LOW
class PCF8574(GPIO.BaseGPIO):
    """Class to represent a PCF8574 or PCF8574A GPIO extender. Compatible
    with the Adafruit_GPIO BaseGPIO class so it can be used as a custom GPIO
    class for interacting with device.
    Direction and level are folded into a single register write (see
    _write_pins): input pins are simply driven high, which matches the
    PCF8574's quasi-bidirectional pin design -- TODO confirm against the
    datasheet for the exact part in use.
    """
    # The expander exposes exactly 8 I/O pins (P0..P7).
    NUM_GPIO = 8
    def __init__(self, address=0x27, busnum=None, i2c=None, **kwargs):
        address = int(address)
        # The PCF8574 answers on 0x20-0x27 and the PCF8574A on 0x38-0x3F;
        # any other address leaves an error message in __name__ instead.
        self.__name__ = \
            "PCF8574" if address in range(0x20, 0x28) else \
            "PCF8574A" if address in range(0x38, 0x40) else \
            "Bad address for PCF8574(A): 0x%02X not in range [0x20..0x27, 0x38..0x3F]" % address
        # Valid device names both start with 'P'; the error message does not.
        if self.__name__[0] != 'P':
            raise ValueError(self.__name__)
        # Create I2C device.
        i2c = i2c or I2C
        busnum = busnum or i2c.get_default_bus()
        self._device = i2c.get_i2c_device(address, busnum, **kwargs)
        # Buffer register values so they can be changed without reading.
        self.iodir = 0xFF # Default direction to all inputs is in
        self.gpio = 0x00
        # Push the buffered defaults to the device immediately.
        self._write_pins()
    def _write_pins(self):
        # Write levels with every input-direction bit forced high so the
        # pins configured as inputs can be read back.
        self._device.writeRaw8(self.gpio | self.iodir)
    def _read_pins(self):
        # Mask the raw read down to input pins only.
        return self._device.readRaw8() & self.iodir
    def setup(self, pin, mode):
        # Single-pin convenience wrapper around setup_pins().
        self.setup_pins({pin: mode})
    def setup_pins(self, pins):
        # _validate_pin raises on a bad pin number; the mode validity check
        # is folded into the same comprehension. _bit2 appears to be
        # inherited from GPIO.BaseGPIO -- TODO confirm.
        if False in [y for x,y in [(self._validate_pin(pin),mode in (IN,OUT)) for pin,mode in pins.items()]]:
            raise ValueError('Invalid MODE, IN or OUT')
        for pin,mode in pins.items():
            self.iodir = self._bit2(self.iodir, pin, mode)
        self._write_pins()
    def output(self, pin, value):
        # Single-pin convenience wrapper around output_pins().
        self.output_pins({pin: value})
    def output_pins(self, pins):
        # Validate every pin before mutating the buffered register.
        [self._validate_pin(pin) for pin in pins.keys()]
        for pin,value in pins.items():
            self.gpio = self._bit2(self.gpio, pin, bool(value))
        self._write_pins()
    def input(self, pin):
        # Single-pin convenience wrapper around input_pins().
        return self.input_pins([pin])[0]
    def input_pins(self, pins):
        [self._validate_pin(pin) for pin in pins]
        # One bus read serves every requested pin.
        inp = self._read_pins()
        return [bool(inp & (1<<pin)) for pin in pins]
|
nkgilley/home-assistant | refs/heads/dev | homeassistant/components/influxdb/sensor.py | 2 | """InfluxDB component which allows you to get data from an Influx database."""
import logging
from typing import Dict
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA as SENSOR_PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_VERSION,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_STOP,
STATE_UNKNOWN,
)
from homeassistant.exceptions import PlatformNotReady, TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from . import create_influx_url, get_influx_connection, validate_version_specific_config
from .const import (
API_VERSION_2,
COMPONENT_CONFIG_SCHEMA_CONNECTION,
CONF_BUCKET,
CONF_DB_NAME,
CONF_FIELD,
CONF_GROUP_FUNCTION,
CONF_IMPORTS,
CONF_LANGUAGE,
CONF_MEASUREMENT_NAME,
CONF_QUERIES,
CONF_QUERIES_FLUX,
CONF_QUERY,
CONF_RANGE_START,
CONF_RANGE_STOP,
CONF_WHERE,
DEFAULT_API_VERSION,
DEFAULT_FIELD,
DEFAULT_FUNCTION_FLUX,
DEFAULT_GROUP_FUNCTION,
DEFAULT_RANGE_START,
DEFAULT_RANGE_STOP,
INFLUX_CONF_VALUE,
INFLUX_CONF_VALUE_V2,
LANGUAGE_FLUX,
LANGUAGE_INFLUXQL,
MIN_TIME_BETWEEN_UPDATES,
QUERY_MULTIPLE_RESULTS_MESSAGE,
QUERY_NO_RESULTS_MESSAGE,
RENDERING_QUERY_ERROR_MESSAGE,
RENDERING_QUERY_MESSAGE,
RENDERING_WHERE_ERROR_MESSAGE,
RENDERING_WHERE_MESSAGE,
RUNNING_QUERY_MESSAGE,
)
_LOGGER = logging.getLogger(__name__)
def _merge_connection_config_into_query(conf, query):
    """Copy connection-level settings into *query* without overriding it.

    Keys already present on the query win; the two query-list keys are
    never copied down.
    """
    for key, value in conf.items():
        if key in (CONF_QUERIES, CONF_QUERIES_FLUX):
            continue
        query.setdefault(key, value)
def validate_query_format_for_version(conf: Dict) -> Dict:
    """Ensure queries are provided in the correct format for the API version.

    Selects the query list, language tag, and obsolete connection key based
    on whether the config targets the v2 (Flux) or v1 (InfluxQL) API, then
    merges connection settings into every query.
    """
    is_flux = conf[CONF_API_VERSION] == API_VERSION_2
    queries_key = CONF_QUERIES_FLUX if is_flux else CONF_QUERIES
    language = LANGUAGE_FLUX if is_flux else LANGUAGE_INFLUXQL
    stale_key = CONF_BUCKET if is_flux else CONF_DB_NAME
    required_version = API_VERSION_2 if is_flux else DEFAULT_API_VERSION
    if queries_key not in conf:
        raise vol.Invalid(
            f"{queries_key} is required when {CONF_API_VERSION} is {required_version}"
        )
    for query in conf[queries_key]:
        _merge_connection_config_into_query(conf, query)
        query[CONF_LANGUAGE] = language
    # The connection-level default was already merged into each query above.
    del conf[stale_key]
    return conf
# Options shared by both query languages: every sensor needs a name and may
# post-process the raw value through a template.
_QUERY_SENSOR_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
    }
)
# Language-specific extensions: InfluxQL queries are assembled from
# measurement/field/where parts, while Flux queries are supplied verbatim.
_QUERY_SCHEMA = {
    LANGUAGE_INFLUXQL: _QUERY_SENSOR_SCHEMA.extend(
        {
            vol.Optional(CONF_DB_NAME): cv.string,
            vol.Required(CONF_MEASUREMENT_NAME): cv.string,
            vol.Optional(
                CONF_GROUP_FUNCTION, default=DEFAULT_GROUP_FUNCTION
            ): cv.string,
            vol.Optional(CONF_FIELD, default=DEFAULT_FIELD): cv.string,
            vol.Required(CONF_WHERE): cv.template,
        }
    ),
    LANGUAGE_FLUX: _QUERY_SENSOR_SCHEMA.extend(
        {
            vol.Optional(CONF_BUCKET): cv.string,
            vol.Optional(CONF_RANGE_START, default=DEFAULT_RANGE_START): cv.string,
            vol.Optional(CONF_RANGE_STOP, default=DEFAULT_RANGE_STOP): cv.string,
            vol.Required(CONF_QUERY): cv.template,
            vol.Optional(CONF_IMPORTS): vol.All(cv.ensure_list, [cv.string]),
            vol.Optional(CONF_GROUP_FUNCTION): cv.string,
        }
    ),
}
# vol.Exclusive ensures at most one of the two query lists is configured;
# the chained validators then check version consistency and finalize URLs.
PLATFORM_SCHEMA = vol.All(
    SENSOR_PLATFORM_SCHEMA.extend(COMPONENT_CONFIG_SCHEMA_CONNECTION).extend(
        {
            vol.Exclusive(CONF_QUERIES, "queries"): [_QUERY_SCHEMA[LANGUAGE_INFLUXQL]],
            vol.Exclusive(CONF_QUERIES_FLUX, "queries"): [_QUERY_SCHEMA[LANGUAGE_FLUX]],
        }
    ),
    validate_version_specific_config,
    validate_query_format_for_version,
    create_influx_url,
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the InfluxDB sensor platform.

    Creates one InfluxSensor per configured query and registers a shutdown
    hook that closes the Influx connection when Home Assistant stops.
    """
    try:
        influx = get_influx_connection(config, test_read=True)
    except ConnectionError as exc:
        _LOGGER.error(exc)
        # Chain the original error so the platform-retry log shows the cause.
        raise PlatformNotReady() from exc
    # Schema validation guarantees exactly one of the two query keys remains.
    queries = config[CONF_QUERIES_FLUX if CONF_QUERIES_FLUX in config else CONF_QUERIES]
    entities = [InfluxSensor(hass, influx, query) for query in queries]
    add_entities(entities, update_before_add=True)
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lambda _: influx.close())
class InfluxSensor(Entity):
    """Implementation of a Influxdb sensor."""
    def __init__(self, hass, influx, query):
        """Initialize the sensor."""
        self._name = query.get(CONF_NAME)
        self._unit_of_measurement = query.get(CONF_UNIT_OF_MEASUREMENT)
        value_template = query.get(CONF_VALUE_TEMPLATE)
        if value_template is not None:
            self._value_template = value_template
            self._value_template.hass = hass
        else:
            self._value_template = None
        self._state = None
        self._hass = hass
        # Pick the data handler matching the language tag that
        # validate_query_format_for_version stamped onto the query config;
        # the data object does the actual fetching and throttling.
        if query[CONF_LANGUAGE] == LANGUAGE_FLUX:
            query_clause = query.get(CONF_QUERY)
            query_clause.hass = hass
            self.data = InfluxFluxSensorData(
                influx,
                query.get(CONF_BUCKET),
                query.get(CONF_RANGE_START),
                query.get(CONF_RANGE_STOP),
                query_clause,
                query.get(CONF_IMPORTS),
                query.get(CONF_GROUP_FUNCTION),
            )
        else:
            where_clause = query.get(CONF_WHERE)
            where_clause.hass = hass
            self.data = InfluxQLSensorData(
                influx,
                query.get(CONF_DB_NAME),
                query.get(CONF_GROUP_FUNCTION),
                query.get(CONF_FIELD),
                query.get(CONF_MEASUREMENT_NAME),
                where_clause,
            )
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement
    @property
    def should_poll(self):
        """Return the polling state."""
        return True
    def update(self):
        """Get the latest data from Influxdb and updates the states."""
        self.data.update()
        value = self.data.value
        # Normalize a failed or empty query to "unknown" before templating so
        # the template always receives a renderable string.
        if value is None:
            value = STATE_UNKNOWN
        if self._value_template is not None:
            value = self._value_template.render_with_possible_json_value(
                str(value), STATE_UNKNOWN
            )
        self._state = value
class InfluxFluxSensorData:
    """Class for handling the data retrieval from Influx with Flux query."""
    def __init__(self, influx, bucket, range_start, range_stop, query, imports, group):
        """Initialize the data object."""
        self.influx = influx
        self.bucket = bucket
        self.range_start = range_start
        self.range_stop = range_stop
        self.query = query
        self.imports = imports
        self.group = group
        # Latest value read from Influx; None until the first successful update.
        self.value = None
        self.full_query = None
        # Static pieces of the Flux pipeline; the rendered query template is
        # spliced in between prefix and postfix on every update.
        self.query_prefix = f'from(bucket:"{bucket}") |> range(start: {range_start}, stop: {range_stop}) |>'
        if imports is not None:
            for i in imports:
                # Flux import statements must precede the pipeline.
                self.query_prefix = f'import "{i}" {self.query_prefix}'
        if group is None:
            self.query_postfix = DEFAULT_FUNCTION_FLUX
        else:
            self.query_postfix = f'|> {group}(column: "{INFLUX_CONF_VALUE_V2}")'
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data by querying influx."""
        _LOGGER.debug(RENDERING_QUERY_MESSAGE, self.query)
        try:
            rendered_query = self.query.render()
        except TemplateError as ex:
            # Keep the previous value when the template itself is broken.
            _LOGGER.error(RENDERING_QUERY_ERROR_MESSAGE, ex)
            return
        self.full_query = f"{self.query_prefix} {rendered_query} {self.query_postfix}"
        _LOGGER.debug(RUNNING_QUERY_MESSAGE, self.full_query)
        try:
            tables = self.influx.query(self.full_query)
        except (ConnectionError, ValueError) as exc:
            _LOGGER.error(exc)
            self.value = None
            return
        if not tables:
            _LOGGER.warning(QUERY_NO_RESULTS_MESSAGE, self.full_query)
            self.value = None
        else:
            # Only the first record of the first table is used; warn when the
            # query matched more than that.
            if len(tables) > 1 or len(tables[0].records) > 1:
                _LOGGER.warning(QUERY_MULTIPLE_RESULTS_MESSAGE, self.full_query)
            self.value = tables[0].records[0].values[INFLUX_CONF_VALUE_V2]
class InfluxQLSensorData:
    """Class for handling the data retrieval with v1 API."""
    def __init__(self, influx, db_name, group, field, measurement, where):
        """Initialize the data object."""
        self.influx = influx
        self.db_name = db_name
        self.group = group
        self.field = field
        self.measurement = measurement
        self.where = where
        # Latest value read from Influx; None until the first successful update.
        self.value = None
        self.query = None
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data by rendering and running the InfluxQL query."""
        _LOGGER.debug(RENDERING_WHERE_MESSAGE, self.where)
        try:
            where_clause = self.where.render()
        except TemplateError as ex:
            # Keep the previous value when the template itself is broken.
            _LOGGER.error(RENDERING_WHERE_ERROR_MESSAGE, ex)
            return
        # NOTE(review): the where clause is interpolated straight into
        # InfluxQL; it comes from operator-supplied configuration templates,
        # not untrusted input -- confirm if this is ever exposed further.
        self.query = f"select {self.group}({self.field}) as {INFLUX_CONF_VALUE} from {self.measurement} where {where_clause}"
        _LOGGER.debug(RUNNING_QUERY_MESSAGE, self.query)
        try:
            points = self.influx.query(self.query, self.db_name)
        except (ConnectionError, ValueError) as exc:
            _LOGGER.error(exc)
            self.value = None
            return
        if not points:
            _LOGGER.warning(QUERY_NO_RESULTS_MESSAGE, self.query)
            self.value = None
        else:
            # Only the first returned point is used; warn when more matched.
            if len(points) > 1:
                _LOGGER.warning(QUERY_MULTIPLE_RESULTS_MESSAGE, self.query)
            self.value = points[0].get(INFLUX_CONF_VALUE)
|
mesonbuild/meson | refs/heads/master | mesonbuild/__init__.py | 12133432 | |
fengxuangit/blog-api-view | refs/heads/master | src/accounts/migrations/__init__.py | 12133432 | |
sdcooke/django | refs/heads/master | tests/unmanaged_models/__init__.py | 12133432 | |
hyperized/ansible | refs/heads/devel | lib/ansible/modules/cloud/centurylink/__init__.py | 12133432 | |
iAMr00t/opencog | refs/heads/master | opencog/python/pln_old/rules/inheritance_rules.py | 31 | from opencog.atomspace import types, TruthValue, get_type_name
import formulas
from pln.rule import Rule
'''
Some Rules evaluate various kinds of logical links based explicitly on
set membership. A set = a ConceptNode. Other Rules calculate them
heuristically, based on set probabilities and logical links.
'''
# Todo: try to separate these rules further into several files by
# category. The rules in this file were under the header 'inheritance
# rules' in rules.py, but may need to be further classified.
# Debug/verbosity flag; nothing in this chunk reads it -- TODO confirm
# whether other modules import it.
__VERBOSE__ = False
# Link types evaluated via boolean set operations on their members.
BOOLEAN_LINKS = [types.AndLink,
                 types.OrLink,
                 types.NotLink]
# First-order link types relating concept nodes directly.
FIRST_ORDER_LINKS = [types.InheritanceLink,
                     types.SubsetLink,
                     types.IntensionalInheritanceLink,
                     types.SimilarityLink,
                     types.ExtensionalSimilarityLink,
                     types.IntensionalSimilarityLink]
# Higher-order link types relating predicates/propositions.
HIGHER_ORDER_LINKS = [types.ImplicationLink,
                      types.EquivalenceLink]
class InversionRule(Rule):
    """Reverse the direction of a binary link: A->B entails B->A."""
    def __init__(self, chainer, link_type):
        source = chainer.new_variable()
        target = chainer.new_variable()
        rule_name = "InversionRule<%s>" % (get_type_name(link_type),)
        # Conclusion: the inverted link.
        produced = [chainer.link(link_type, [target, source])]
        # Premises: the forward link plus both concept nodes.
        required = [chainer.link(link_type, [source, target]), source, target]
        Rule.__init__(self,
                      name=rule_name,
                      outputs=produced,
                      inputs=required,
                      formula=formulas.inversionFormula)
class DeductionRule(Rule):
    """Deduction: from A->B and B->C, conclude A->C."""
    def __init__(self, chainer, link_type):
        node_a = chainer.new_variable()
        node_b = chainer.new_variable()
        node_c = chainer.new_variable()
        rule_name = "DeductionRule<%s>" % (get_type_name(link_type),)
        produced = [chainer.link(link_type, [node_a, node_c])]
        required = [chainer.link(link_type, [node_a, node_b]),
                    chainer.link(link_type, [node_b, node_c]),
                    node_b,
                    node_c]
        Rule.__init__(self,
                      name=rule_name,
                      formula=formulas.deductionIndependenceBasedFormula,
                      outputs=produced,
                      inputs=required)
# Todo: It doesn't have the right formula
class DeductionGeometryRule(Rule):
    """Deduction via concept geometry: A->B and B->C entail A->C."""
    def __init__(self, chainer, link_type):
        node_a = chainer.new_variable()
        node_b = chainer.new_variable()
        node_c = chainer.new_variable()
        rule_name = "DeductionGeometryRule<%s>" % (get_type_name(link_type),)
        produced = [chainer.link(link_type, [node_a, node_c])]
        # Unlike DeductionRule, the bare node premises are not required.
        required = [chainer.link(link_type, [node_a, node_b]),
                    chainer.link(link_type, [node_b, node_c])]
        Rule.__init__(self,
                      name=rule_name,
                      formula=formulas.deductionGeometryFormula,
                      outputs=produced,
                      inputs=required)
# TODO add macro-rules for Abduction and Induction based on Deduction
# and Inversion
'''
deduction
S is M, M is L, then S is L
induction
M is S, M is L, then S is L
invert same same
abduction
S is M, L is M, then S is L
invert
'''
class InductionRule(Rule):
    """Induction: from M->S and M->L, conclude S->L."""
    def __init__(self, chainer, link_type):
        small = chainer.new_variable()
        middle = chainer.new_variable()
        large = chainer.new_variable()
        rule_name = "InductionRule<%s>" % (get_type_name(link_type),)
        produced = [chainer.link(link_type, [small, large])]
        required = [chainer.link(link_type, [middle, small]),
                    chainer.link(link_type, [middle, large]),
                    small, middle, large]
        Rule.__init__(self,
                      name=rule_name,
                      outputs=produced,
                      inputs=required,
                      formula=formulas.inductionFormula)
class AbductionRule(Rule):
    """Abduction: from S->M and L->M, conclude S->L."""
    def __init__(self, chainer, link_type):
        small = chainer.new_variable()
        middle = chainer.new_variable()
        large = chainer.new_variable()
        rule_name = "AbductionRule<%s>" % (get_type_name(link_type),)
        produced = [chainer.link(link_type, [small, large])]
        required = [chainer.link(link_type, [small, middle]),
                    chainer.link(link_type, [large, middle]),
                    small, middle, large]
        Rule.__init__(self,
                      name=rule_name,
                      outputs=produced,
                      inputs=required,
                      formula=formulas.abductionFormula)
class TransitiveSimilarityRule(Rule):
    """Transitivity of similarity: (A~B) and (B~C) entail (A~C)."""
    def __init__(self, chainer, link_type):
        node_a = chainer.new_variable()
        node_b = chainer.new_variable()
        node_c = chainer.new_variable()
        rule_name = "TransitiveSimilarityRule<%s>" % (get_type_name(link_type),)
        produced = [chainer.link(link_type, [node_a, node_c])]
        required = [chainer.link(link_type, [node_a, node_b]),
                    chainer.link(link_type, [node_b, node_c]),
                    node_a, node_b, node_c]
        Rule.__init__(self,
                      name=rule_name,
                      formula=formulas.transitiveSimilarityFormula,
                      outputs=produced,
                      inputs=required)
class PreciseModusPonensRule(Rule):
    """Precise modus ponens: from A->B, NOT(A)->B and sA, estimate sB."""
    def __init__(self, chainer, link_type):
        antecedent = chainer.new_variable()
        consequent = chainer.new_variable()
        negated_antecedent = chainer.link(types.NotLink, [antecedent])
        rule_name = "PreciseModusPonensRule<%s>" % (get_type_name(link_type),)
        required = [chainer.link(link_type, [antecedent, consequent]),
                    chainer.link(link_type, [negated_antecedent, consequent]),
                    antecedent]
        Rule.__init__(self,
                      name=rule_name,
                      outputs=[consequent],
                      inputs=required,
                      formula=formulas.preciseModusPonensFormula)
class ModusPonensRule(Rule):
    """Modus ponens: given P(A->B) and sA, estimate sB."""
    def __init__(self, chainer, link_type):
        antecedent = chainer.new_variable()
        consequent = chainer.new_variable()
        rule_name = "ModusPonensRule<%s>" % (get_type_name(link_type),)
        required = [chainer.link(link_type, [antecedent, consequent]),
                    antecedent]
        Rule.__init__(self,
                      name=rule_name,
                      outputs=[consequent],
                      inputs=required,
                      formula=formulas.modusPonensFormula)
class SymmetricModusPonensRule(Rule):
    """Symmetric modus ponens: given (Similarity A B) and sA, estimate sB."""
    def __init__(self, chainer, link_type):
        antecedent = chainer.new_variable()
        consequent = chainer.new_variable()
        rule_name = "SymmetricModusPonensRule<%s>" % (get_type_name(link_type),)
        required = [chainer.link(link_type, [antecedent, consequent]),
                    antecedent]
        Rule.__init__(self,
                      name=rule_name,
                      outputs=[consequent],
                      inputs=required,
                      formula=formulas.symmetricModusPonensFormula)
class TermProbabilityRule(Rule):
    """Estimate term B's probability from the links A->B and B->A plus sA."""
    def __init__(self, chainer, link_type):
        term_a = chainer.new_variable()
        term_b = chainer.new_variable()
        forward = chainer.link(link_type, [term_a, term_b])
        backward = chainer.link(link_type, [term_b, term_a])
        rule_name = "TermProbabilityRule<%s>" % (get_type_name(link_type),)
        Rule.__init__(self,
                      name=rule_name,
                      outputs=[term_b],
                      inputs=[forward, backward, term_a],
                      formula=formulas.termProbabilityFormula)
class InheritanceRule(Rule):
    """Build a (mixed) InheritanceLink from a SubsetLink (extensional part)
    and an IntensionalInheritanceLink, per the definition of mixed
    inheritance."""
    def __init__(self, chainer):
        child = chainer.new_variable()
        parent = chainer.new_variable()
        produced = [chainer.link(types.InheritanceLink, [child, parent])]
        required = [chainer.link(types.SubsetLink, [child, parent]),
                    chainer.link(types.IntensionalInheritanceLink,
                                 [child, parent])]
        Rule.__init__(self,
                      outputs=produced,
                      inputs=required,
                      formula=formulas.inheritanceFormula)
class SimilarityRule(Rule):
    """SimilarityLink A B = |A and B| / |A or B|."""
    def __init__(self, chainer):
        left = chainer.new_variable()
        right = chainer.new_variable()
        produced = [chainer.link(types.SimilarityLink, [left, right])]
        required = [chainer.link(types.AndLink, [left, right]),
                    chainer.link(types.OrLink, [left, right])]
        Rule.__init__(self,
                      outputs=produced,
                      inputs=required,
                      formula=formulas.extensionalSimilarityFormula)
class SubsetRule1(Rule):
    """SubsetLink A B = |A and B| / |A| = P(B|A).

    NOTE: functionally identical to AndToSubsetRule1 below; only the rule
    name string differs.
    """
    def __init__(self, chainer, link_type):
        subset = chainer.new_variable()
        superset = chainer.new_variable()
        rule_name = "SubsetRule<%s>" % (get_type_name(link_type),)
        produced = [chainer.link(link_type, [subset, superset])]
        required = [chainer.link(types.AndLink, [subset, superset]),
                    subset]
        Rule.__init__(self,
                      name=rule_name,
                      outputs=produced,
                      inputs=required,
                      formula=formulas.subsetFormula)
class AndToSubsetRule1(Rule):
    """SubsetLink A B = |A and B| / |A| = P(B|A).

    NOTE: functionally identical to SubsetRule1 above; only the rule name
    string differs.
    """
    def __init__(self, chainer, link_type):
        subset = chainer.new_variable()
        superset = chainer.new_variable()
        rule_name = "AndToSubsetRule1<%s>" % (get_type_name(link_type),)
        produced = [chainer.link(link_type, [subset, superset])]
        required = [chainer.link(types.AndLink, [subset, superset]),
                    subset]
        Rule.__init__(self,
                      name=rule_name,
                      outputs=produced,
                      inputs=required,
                      formula=formulas.subsetFormula)
class AndToSubsetRuleN(Rule):
    """SubsetLink And(A B ...) D = |And(A B ... D)| / |And(A B ...)|.

    N is the total number of variables; the last one becomes the subset
    target and the first N-1 form the conjunction.
    """
    def __init__(self, chainer, link_type, N):
        variables = chainer.make_n_variables(N)
        conjunction = chainer.link(types.AndLink, variables[:-1])
        target = variables[-1]
        rule_name = "AndToSubsetRuleN<%s,%s>" % (get_type_name(link_type), N)
        produced = [chainer.link(link_type, [conjunction, target])]
        required = [chainer.link(types.AndLink, variables),
                    conjunction]
        Rule.__init__(self,
                      name=rule_name,
                      outputs=produced,
                      inputs=required,
                      formula=formulas.subsetFormula)
class AndAs1stArgInsideLinkRule(Rule):
    """From (link A C) and (link B C), produce (link (And A B) C).

    Created to create AndLinks inside InheritanceLinks (original use case:
    context rules); could be useful for other link types as well.
    @see: https://github.com/opencog/opencog/pull/904
    """
    def __init__(self, chainer, link_type):
        first = chainer.new_variable()
        second = chainer.new_variable()
        common = chainer.new_variable()
        conjunction = chainer.link(types.AndLink, [first, second])
        rule_name = "AndAs1stArgInsideLinkRule<%s>" % (get_type_name(link_type))
        required = [common, chainer.link(link_type, [first, common]),
                    chainer.link(link_type, [second, common]), first, second]
        produced = [chainer.link(link_type, [conjunction, common]),
                    conjunction]
        Rule.__init__(self,
                      name=rule_name,
                      inputs=required,
                      outputs=produced,
                      formula=formulas.andAs1stArgInsideLinkFormula)
class AndAs2ndArgInsideLinkRule(Rule):
    """From (link A B) and (link A C), produce (link A (And B C)).

    Mirror image of AndAs1stArgInsideLinkRule: builds the AndLink as the
    target of the link instead of the source.
    """
    def __init__(self, chainer, link_type):
        A = chainer.new_variable()
        B = chainer.new_variable()
        C = chainer.new_variable()
        AndBC = chainer.link(types.AndLink, [B, C])
        Rule.__init__(self,
                      name="AndAs2ndArgInsideLinkRule<%s>"
                           %(get_type_name(link_type)),
                      # BUG FIX: previously hard-coded types.InheritanceLink
                      # even though the rule is parameterized (and named) by
                      # link_type; now uses the requested link type, matching
                      # AndAs1stArgInsideLinkRule above.
                      inputs=[chainer.link(link_type, [A, B]),
                              chainer.link(link_type, [A, C]),
                              A, B, C],
                      outputs=[chainer.link(link_type, [A, AndBC]),
                               AndBC],
                      formula=formulas.andAs2ndArgInsideLinkFormula)
|
cntnboys/410Lab6 | refs/heads/master | build/django/tests/utils_tests/test_os_utils.py | 52 | import os
import unittest
from django.utils._os import safe_join
class SafeJoinTests(unittest.TestCase):
    """Tests for django.utils._os.safe_join path handling."""
    def test_base_path_ends_with_sep(self):
        """A trailing separator on the base path must not produce doubles."""
        joined = safe_join("/abc/", "abc")
        _, path = os.path.splitdrive(joined)
        expected = "{0}abc{0}abc".format(os.path.sep)
        self.assertEqual(path, expected)
    def test_root_path(self):
        """Joining against the filesystem root works for a name and for ''."""
        _, path = os.path.splitdrive(safe_join("/", "path"))
        self.assertEqual(path, "{0}path".format(os.path.sep))
        _, path = os.path.splitdrive(safe_join("/", ""))
        self.assertEqual(path, os.path.sep)
|
drawks/ansible | refs/heads/devel | lib/ansible/plugins/terminal/enos.py | 101 | # (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains terminal Plugin methods for ENOS Config Module
# Lenovo Networking
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
    # Prompt patterns that indicate the device is ready for the next command.
    terminal_stdout_re = [
        re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
        re.compile(br">[\r\n]?")
    ]
    # Output fragments that signal a failed command.
    terminal_stderr_re = [
        re.compile(br"% ?Error"),
        re.compile(br"% ?Bad secret"),
        re.compile(br"invalid input", re.I),
        re.compile(br"(?:incomplete|ambiguous) command", re.I),
        re.compile(br"connection timed out", re.I),
        re.compile(br"[^\r\n]+ not found"),
        # NOTE(review): "'[^']'" matches exactly one non-quote character
        # between quotes; presumably "'[^']+'" was intended -- confirm
        # against real ENOS error output before changing.
        re.compile(br"'[^']' +returned error code: ?\d+"),
    ]
    def on_open_shell(self):
        # Disable paging so multi-page command output arrives in one read.
        try:
            for cmd in (b'\n', b'terminal-length 0\n'):
                self._exec_cli_command(cmd)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')
    def on_become(self, passwd=None):
        # Already privileged when the prompt ends with '#'.
        if self._get_prompt().endswith(b'#'):
            return
        cmd = {u'command': u'enable'}
        if passwd:
            # Note: python-3.5 cannot combine u"" and r"" together. Thus make
            # an r string and use to_text to ensure it's text
            # on both py2 and py3.
            cmd[u'prompt'] = to_text(r"[\r\n]?password: $",
                                     errors='surrogate_or_strict')
            cmd[u'answer'] = passwd
        try:
            self._exec_cli_command(to_bytes(json.dumps(cmd),
                                            errors='surrogate_or_strict'))
        except AnsibleConnectionFailure:
            msg = 'unable to elevate privilege to enable mode'
            raise AnsibleConnectionFailure(msg)
    def on_unbecome(self):
        prompt = self._get_prompt()
        if prompt is None:
            # if prompt is None most likely the terminal is hung up at a prompt
            return
        # Leave configuration mode first if needed, then drop privileges.
        if b'(config' in prompt:
            self._exec_cli_command(b'end')
            self._exec_cli_command(b'disable')
        elif prompt.endswith(b'#'):
            self._exec_cli_command(b'disable')
|
chhao91/pysal | refs/heads/master | pysal/test_NameSpace.py | 15 | import os
import unittest
import pysal
class TestNameSpace(unittest.TestCase):
    """
    This test makes sure we don't remove anything from the pysal NameSpace that
    1.0 users might expect to be there. 1.0 Namespace was taken from the 1.1
    Code sprint wave, with special names removes (__all__, etc)
    """
    def test_contents(self):
        # Frozen snapshot of the public names exported by pysal 1.0.
        namespace_v1_0 = ['Box_Plot', 'DistanceBand', 'Equal_Interval',
                          'Fisher_Jenks', 'Geary', 'Jenks_Caspall',
                          'Jenks_Caspall_Forced', 'Jenks_Caspall_Sampled',
                          'Join_Counts', 'K_classifiers', 'Kernel',
                          'LISA_Markov', 'Markov', 'Max_P_Classifier',
                          'Maximum_Breaks', 'Maxp', 'Maxp_LISA', 'Moran',
                          'Moran_BV', 'Moran_BV_matrix', 'Moran_Local',
                          'Natural_Breaks', 'Percentiles', 'Quantiles',
                          'SpatialTau', 'Spatial_Markov', 'Std_Mean', 'Theil',
                          'TheilD', 'TheilDSim', 'Theta', 'User_Defined', 'W', 'adaptive_kernelW',
                          'adaptive_kernelW_from_shapefile', 'bin', 'bin1d',
                          'binC', 'buildContiguity', 'cg', 'comb', 'common',
                          'core', 'directional', 'ergodic', 'esda', 'full',
                          'gadf', 'higher_order', 'inequality', 'kernelW',
                          'kernelW_from_shapefile', 'knnW', 'knnW_from_array',
                          'knnW_from_shapefile', 'lag_spatial', 'lat2W',
                          'min_threshold_dist_from_shapefile', 'open',
                          'order', 'quantile', 'queen_from_shapefile',
                          'block_weights', 'region', 'remap_ids',
                          'rook_from_shapefile', 'shimbel', 'spatial_dynamics',
                          'threshold_binaryW_from_array', 'threshold_binaryW_from_shapefile',
                          'threshold_continuousW_from_array', 'threshold_continuousW_from_shapefile',
                          'version', 'w_difference', 'w_intersection', 'w_subset',
                          'w_symmetric_difference', 'w_union', 'weights']
        current_namespace = dir(pysal)
        # Every 1.0 name must still be present in the current namespace.
        for item in namespace_v1_0:
            self.assertTrue(item in current_namespace)
        # Newly-added names are merely reported, not treated as failures.
        # NOTE(review): Python 2 print statement -- this file predates py3.
        for item in current_namespace:
            if item not in namespace_v1_0 and not item.startswith('__'):
                print item, "added to name space"
# Collect the tests above into a suite so external runners can reuse it.
suite = unittest.TestLoader().loadTestsFromTestCase(TestNameSpace)
if __name__ == '__main__':
    unittest.main()
# NOTE(review): the two lines below are at module level, so importing this
# module runs the suite as a side effect; when executed as a script,
# unittest.main() exits first and they never run. Confirm this is intended.
runner = unittest.TextTestRunner()
runner.run(suite)
|
EntityFXCode/arsenalsuite | refs/heads/master | cpp/lib/PyQt4/pyuic/uic/Loader/__init__.py | 32 | #############################################################################
##
## Copyright (c) 2012 Riverbank Computing Limited <info@riverbankcomputing.com>
##
## This file is part of PyQt.
##
## This file may be used under the terms of the GNU General Public
## License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file. Alternatively you may (at
## your option) use any later version of the GNU General Public
## License if such license has been publicly approved by Riverbank
## Computing Limited (or its successors, if any) and the KDE Free Qt
## Foundation. In addition, as a special exception, Riverbank gives you
## certain additional rights. These rights are described in the Riverbank
## GPL Exception version 1.1, which can be found in the file
## GPL_EXCEPTION.txt in this package.
##
## If you are unsure which license is appropriate for your use, please
## contact the sales department at sales@riverbankcomputing.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
|
philanthropy-u/edx-platform | refs/heads/master | common/djangoapps/track/management/commands/tracked_dummy_command.py | 16 | """
Command used for testing TrackedCommands
"""
import json
from eventtracking import tracker as eventtracker
from track.management.tracked_command import TrackedCommand
class Command(TrackedCommand):
    """A locally-defined command, for testing, that returns the current context as a JSON string."""
    def add_arguments(self, parser):
        """Register the positional argument and the two optional keys."""
        parser.add_argument('dummy_arg')
        for option in ('--key1', '--key2'):
            parser.add_argument(option)
    def handle(self, *args, **options):
        """Serialize the resolved tracking context so tests can inspect it."""
        context = eventtracker.get_tracker().resolve_context()
        return json.dumps(context)
|
darcamo/python-crash-course | refs/heads/gh-pages | node_modules/node-gyp/gyp/pylib/gyp/generator/android.py | 193 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
# Substitutions for gyp expansion variables; mirrors make.py but points the
# output locations at Android build system variables.
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
    # Boolean to declare that this target does not want its name mangled.
    'android_unmangled_name',
    # Map of android build system variables to set.
    'aosp_build_settings',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# Literal makefile text appended after all sub-makefiles are included.
ALL_MODULES_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
# Banner written at the top of every generated .mk file.
header = """\
# This file is generated by gyp; do not edit.
"""
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
    'static_library': 'STATIC_LIBRARIES',
    'shared_library': 'SHARED_LIBRARIES',
    'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
  """Return True when |ext| maps to a C++ compilable source extension."""
  language = make.COMPILABLE_EXTENSIONS.get(ext)
  return language == 'cxx'
def Sourceify(path):
  """Return |path| unchanged.

  Other make-based backends rewrite paths against options.generator_output
  here; the Android backend does not support that option, so this is a no-op.
  """
  return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
# Populated incrementally as targets are processed.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
  """AndroidMkWriter packages up the writing of one target-specific Android.mk.
  Its only real entry point is Write(), and is mostly used for namespacing.
  """
  def __init__(self, android_top_dir):
    # Path of the Android source tree root; used by NormalizeIncludePaths()
    # to relativize absolute include paths.
    self.android_top_dir = android_top_dir
  def Write(self, qualified_target, relative_target, base_path, output_filename,
            spec, configs, part_of_all, write_alias_target, sdk_version):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      relative_target: qualified target name relative to the root
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
      write_alias_target: flag indicating whether to create short aliases for
                          this target
      sdk_version: what to emit for LOCAL_SDK_VERSION in output
    Returns:
      The Android module name chosen for this target.
    """
    gyp.common.EnsureDirExists(output_filename)
    self.fp = open(output_filename, 'w')
    self.fp.write(header)
    # Cache per-target state on self; the Write* helpers below read it.
    self.qualified_target = qualified_target
    self.relative_target = relative_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']
    deps, link_deps = self.ComputeDeps(spec)
    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []
    self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
    self.android_module = self.ComputeAndroidModule(spec)
    (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
    self.output = self.output_binary = self.ComputeOutput(spec)
    # Standard header.
    self.WriteLn('include $(CLEAR_VARS)\n')
    # Module class and name.
    self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
    self.WriteLn('LOCAL_MODULE := ' + self.android_module)
    # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
    # The library module classes fail if the stem is set. ComputeOutputParts
    # makes sure that stem == modulename in these cases.
    if self.android_stem != self.android_module:
      self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
    self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
    if self.toolset == 'host':
      self.WriteLn('LOCAL_IS_HOST_MODULE := true')
      self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
    elif sdk_version > 0:
      self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
                   '$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
      self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version)
    # Grab output directories; needed for Actions and Rules.
    if self.toolset == 'host':
      self.WriteLn('gyp_intermediate_dir := '
                   '$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
    else:
      self.WriteLn('gyp_intermediate_dir := '
                   '$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
    self.WriteLn('gyp_shared_intermediate_dir := '
                 '$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
    self.WriteLn()
    # List files this target depends on so that actions/rules/copies/sources
    # can depend on the list.
    # TODO: doesn't pull in things through transitive link deps; needed?
    target_dependencies = [x[1] for x in deps if x[0] == 'path']
    self.WriteLn('# Make sure our deps are built first.')
    self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                   local_pathify=True)
    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs)
    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs)
    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs)
    # GYP generated outputs.
    self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
    # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
    # on both our dependency targets and our generated files.
    self.WriteLn('# Make sure our deps and generated files are built first.')
    self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
                 '$(GYP_GENERATED_OUTPUTS)')
    self.WriteLn()
    # Sources.
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)
    self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
                     write_alias_target)
    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)
    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)
    self.fp.close()
    return self.android_module
  def WriteActions(self, actions, extra_sources, extra_outputs):
    """Write Makefile code for any 'actions' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    """
    for action in actions:
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']
      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Action for target "%s" writes output to local path '
                 '"%s".' % (self.target, out))
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      # Prepare the actual command.
      command = gyp.common.EncodePOSIXShellList(action['action'])
      if 'message' in action:
        quiet_cmd = 'Gyp action: %s ($@)' % action['message']
      else:
        quiet_cmd = 'Gyp action: %s ($@)' % name
      if len(dirs) > 0:
        # NOTE(review): 'dirs' is a set, so the mkdir argument order is
        # unspecified; harmless for 'mkdir -p' but the emitted makefile text
        # is not byte-stable across runs — confirm if reproducibility matters.
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command
      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir. This replaces the gyp_*
      # variables for the action rule with an absolute version so that the
      # output goes in the right place.
      # Only write the gyp_* rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(abspath $(gyp_intermediate_dir))' % main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(abspath $(gyp_shared_intermediate_dir))' % main_output)
      # Android's envsetup.sh adds a number of directories to the path including
      # the built host binary directory. This causes actions/rules invoked by
      # gyp to sometimes use these instead of system versions, e.g. bison.
      # The built host binaries may not be suitable, and can cause errors.
      # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
      # set by envsetup.
      self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
                   % main_output)
      # Don't allow spaces in input/output filenames, but make an exception for
      # filenames which start with '$(' since it's okay for there to be spaces
      # inside of make function/macro invocations.
      for input in inputs:
        if not input.startswith('$(') and ' ' in input:
          raise gyp.common.GypError(
              'Action input filename "%s" in target %s contains a space' %
              (input, self.target))
      for output in outputs:
        if not output.startswith('$(') and ' ' in output:
          raise gyp.common.GypError(
              'Action output filename "%s" in target %s contains a space' %
              (output, self.target))
      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, ' '.join(map(self.LocalPathify, inputs))))
      self.WriteLn('\t@echo "%s"' % quiet_cmd)
      self.WriteLn('\t$(hide)%s\n' % command)
      for output in outputs[1:]:
        # Make each output depend on the main output, with an empty command
        # to force make to notice that the mtime has changed.
        self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
      extra_outputs += outputs
      self.WriteLn()
    self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs):
    """Write Makefile code for any 'rules' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    """
    if len(rules) == 0:
      return
    for rule in rules:
      if len(rule.get('rule_sources', [])) == 0:
        continue
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      rule['rule_name']))
      self.WriteLn('\n### Generated for rule "%s":' % name)
      self.WriteLn('# "%s":' % rule)
      inputs = rule.get('inputs')
      # Each rule source gets its own expansion of the rule's outputs/action.
      for rule_source in rule.get('rule_sources', []):
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)
        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]
        # Collect the output dirs we'll need to create.
        dirs = set()
        for out in outputs:
          if not out.startswith('$'):
            print ('WARNING: Rule for target %s writes output to local path %s'
                   % (self.target, out))
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        extra_outputs += outputs
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources.extend(outputs)
        components = []
        for component in rule['action']:
          component = self.ExpandInputRoot(component, rule_source_root,
                                           rule_source_dirname)
          if '$(RULE_SOURCES)' in component:
            component = component.replace('$(RULE_SOURCES)',
                                          rule_source)
          components.append(component)
        command = gyp.common.EncodePOSIXShellList(components)
        cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
        command = cd_action + command
        if dirs:
          # NOTE(review): set iteration order is unspecified, so the mkdir
          # argument order may vary between runs — confirm acceptable.
          command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
        # We set up a rule to build the first output, and then set up
        # a rule for each additional output to depend on the first.
        # NOTE(review): map() result is indexed below; fine on Python 2,
        # would need list(map(...)) under Python 3.
        outputs = map(self.LocalPathify, outputs)
        main_output = outputs[0]
        self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
        self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
        self.WriteLn('%s: gyp_intermediate_dir := '
                     '$(abspath $(gyp_intermediate_dir))' % main_output)
        self.WriteLn('%s: gyp_shared_intermediate_dir := '
                     '$(abspath $(gyp_shared_intermediate_dir))' % main_output)
        # See explanation in WriteActions.
        self.WriteLn('%s: export PATH := '
                     '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)
        main_output_deps = self.LocalPathify(rule_source)
        if inputs:
          main_output_deps += ' '
          main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                     (main_output, main_output_deps))
        self.WriteLn('\t%s\n' % command)
        for output in outputs[1:]:
          # Make each output depend on the main output, with an empty command
          # to force make to notice that the mtime has changed.
          self.WriteLn('%s: %s ;' % (output, main_output))
      self.WriteLn()
    self.WriteLn()
  def WriteCopies(self, copies, extra_outputs):
    """Write Makefile code for any 'copies' from the gyp input.

    extra_outputs: a list that will be filled in with any outputs of this action
                   (used to make other pieces dependent on this action)
    """
    self.WriteLn('### Generated for copy rule.')
    variable = make.StringToMakefileVariable(self.relative_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # The Android build system does not allow generation of files into the
        # source tree. The destination should start with a variable, which will
        # typically be $(gyp_intermediate_dir) or
        # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
        # because some of the gyp tests depend on this.
        if not copy['destination'].startswith('$'):
          print ('WARNING: Copy rule for target %s writes output to '
                 'local path %s' % (self.target, copy['destination']))
        # LocalPathify() calls normpath, stripping trailing slashes.
        path = Sourceify(self.LocalPathify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                          filename)))
        # Copy via $(ACP) (Android's cp), preserving permissions/timestamps.
        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                     (output, path))
        self.WriteLn('\t@echo Copying: $@')
        self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
        self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
        self.WriteLn()
        outputs.append(output)
    # Publish all copy outputs under one make variable for dependents.
    self.WriteLn('%s = %s' % (variable,
                              ' '.join(map(make.QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()
  def WriteSourceFlags(self, spec, configs):
    """Write out the flags and include paths used to compile source files for
    the current target.

    Args:
      spec, configs: input from gyp.
    """
    # Emit per-configuration variables; the GYP_CONFIGURATION make variable
    # selects among them at build time.
    for configname, config in sorted(configs.iteritems()):
      extracted_includes = []
      self.WriteLn('\n# Flags passed to both C and C++ files.')
      cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
          config.get('cflags', []) + config.get('cflags_c', []))
      extracted_includes.extend(includes_from_cflags)
      self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)
      self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
                     prefix='-D', quoter=make.EscapeCppDefine)
      self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
      includes = list(config.get('include_dirs', []))
      includes.extend(extracted_includes)
      includes = map(Sourceify, map(self.LocalPathify, includes))
      includes = self.NormalizeIncludePaths(includes)
      self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)
      self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
      self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)
    self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
                 '$(MY_DEFS_$(GYP_CONFIGURATION))')
    # Undefine ANDROID for host modules
    # TODO: the source code should not use macro ANDROID to tell if it's host
    # or target module.
    if self.toolset == 'host':
      self.WriteLn('# Undefine ANDROID for host modules')
      self.WriteLn('LOCAL_CFLAGS += -UANDROID')
    self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
                 '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
    self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
    # Android uses separate flags for assembly file invocations, but gyp expects
    # the same CFLAGS to be applied:
    self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')
  def WriteSources(self, spec, configs, extra_sources):
    """Write Makefile code for any 'sources' from the gyp input.
    These are source files necessary to build the current target.
    We need to handle shared_intermediate directory source files as
    a special case by copying them to the intermediate directory and
    treating them as generated sources. Otherwise the Android build
    rules won't pick them up.

    Args:
      spec, configs: input from gyp.
      extra_sources: Sources generated from Actions or Rules.
    """
    sources = filter(make.Compilable, spec.get('sources', []))
    generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
    extra_sources = filter(make.Compilable, extra_sources)
    # Determine and output the C++ extension used by these sources.
    # We simply find the first C++ file and use that extension.
    all_sources = sources + extra_sources
    local_cpp_extension = '.cpp'
    for source in all_sources:
      (root, ext) = os.path.splitext(source)
      if IsCPPExtension(ext):
        local_cpp_extension = ext
        break
    if local_cpp_extension != '.cpp':
      self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
    # We need to move any non-generated sources that are coming from the
    # shared intermediate directory out of LOCAL_SRC_FILES and put them
    # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
    # that don't match our local_cpp_extension, since Android will only
    # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
    local_files = []
    for source in sources:
      (root, ext) = os.path.splitext(source)
      if '$(gyp_shared_intermediate_dir)' in source:
        extra_sources.append(source)
      elif '$(gyp_intermediate_dir)' in source:
        extra_sources.append(source)
      elif IsCPPExtension(ext) and ext != local_cpp_extension:
        extra_sources.append(source)
      else:
        local_files.append(os.path.normpath(os.path.join(self.path, source)))
    # For any generated source, if it is coming from the shared intermediate
    # directory then we add a Make rule to copy them to the local intermediate
    # directory first. This is because the Android LOCAL_GENERATED_SOURCES
    # must be in the local module intermediate directory for the compile rules
    # to work properly. If the file has the wrong C++ extension, then we add
    # a rule to copy that to intermediates and use the new version.
    final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
    # directory as header search path, for GCC searches headers in the
    # directory that contains the source file by default.
    origin_src_dirs = []
    for source in extra_sources:
      local_file = source
      if not '$(gyp_intermediate_dir)/' in local_file:
        basename = os.path.basename(local_file)
        local_file = '$(gyp_intermediate_dir)/' + basename
      (root, ext) = os.path.splitext(local_file)
      if IsCPPExtension(ext) and ext != local_cpp_extension:
        local_file = root + local_cpp_extension
      if local_file != source:
        # Emit the copy rule that moves the file into the local intermediates.
        self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
        self.WriteLn('\tmkdir -p $(@D); cp $< $@')
        origin_src_dirs.append(os.path.dirname(source))
      final_generated_sources.append(local_file)
    # We add back in all of the non-compilable stuff to make sure that the
    # make rules have dependencies on them.
    final_generated_sources.extend(generated_not_sources)
    self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
    origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
    origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
    self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
    self.WriteList(local_files, 'LOCAL_SRC_FILES')
    # Write out the flags used to compile the source; this must be done last
    # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
    self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
else:
suffix = '_gyp'
if self.path:
middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
else:
middle = make.StringToMakefileVariable(self.target)
return ''.join([prefix, middle, suffix])
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
if self.type == 'executable':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = ('$(call intermediates-dir-for,%s,%s,true,,'
'$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
self.android_module))
else:
path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
% (self.android_class, self.android_module))
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
def FilterLibraries(self, libraries):
"""Filter the 'libraries' key to separate things that shouldn't be ldflags.
Library entries that look like filenames should be converted to android
module names instead of being passed to the linker as flags.
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
"""
static_lib_modules = []
dynamic_lib_modules = []
ldflags = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
if lib.startswith('-l'):
ldflags.append(lib)
return (static_lib_modules, dynamic_lib_modules, ldflags)
  def ComputeDeps(self, spec):
    """Compute the dependencies of a gyp spec.

    Returns a tuple (deps, link_deps), where each is a list of
    filenames that will need to be put in front of make for either
    building (deps) or linking (link_deps).
    """
    deps = []
    link_deps = []
    if 'dependencies' in spec:
      # target_outputs / target_link_deps are the module-level maps filled in
      # by Write() for targets emitted before this one.
      deps.extend([target_outputs[dep] for dep in spec['dependencies']
                   if target_outputs[dep]])
      for dep in spec['dependencies']:
        if dep in target_link_deps:
          link_deps.append(target_link_deps[dep])
      deps.extend(link_deps)
    return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
  def WriteTargetFlags(self, spec, configs, link_deps):
    """Write Makefile code to specify the link flags and library dependencies.

    spec, configs: input from gyp.
    link_deps: link dependency list; see ComputeDeps()
    """
    # Libraries (i.e. -lfoo)
    # These must be included even for static libraries as some of them provide
    # implicit include paths through the build system.
    libraries = gyp.common.uniquer(spec.get('libraries', []))
    static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)
    if self.type != 'static_library':
      for configname, config in sorted(configs.iteritems()):
        ldflags = list(config.get('ldflags', []))
        self.WriteLn('')
        self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
      self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
      self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
                   '$(LOCAL_GYP_LIBS)')
    # Link dependencies (i.e. other gyp targets this target depends on)
    # These need not be included for static libraries as within the gyp build
    # we do not use the implicit include path mechanism.
    if self.type != 'static_library':
      static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
      shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
    else:
      static_link_deps = []
      shared_link_deps = []
    # Only write the lists if they are non-empty.
    if static_libs or static_link_deps:
      self.WriteLn('')
      self.WriteList(static_libs + static_link_deps,
                     'LOCAL_STATIC_LIBRARIES')
      self.WriteLn('# Enable grouping to fix circular references')
      self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
    if dynamic_libs or shared_link_deps:
      self.WriteLn('')
      self.WriteList(dynamic_libs + shared_link_deps,
                     'LOCAL_SHARED_LIBRARIES')
  def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
                  write_alias_target):
    """Write Makefile code to produce the final target of the gyp spec.

    spec, configs: input from gyp.
    deps, link_deps: dependency lists; see ComputeDeps()
    part_of_all: flag indicating this target is part of 'all'
    write_alias_target: flag indicating whether to create short aliases for this
                        target
    """
    self.WriteLn('### Rules for final target.')
    if self.type != 'none':
      self.WriteTargetFlags(spec, configs, link_deps)
    settings = spec.get('aosp_build_settings', {})
    if settings:
      self.WriteLn('### Set directly by aosp_build_settings.')
      for k, v in settings.iteritems():
        if isinstance(v, list):
          self.WriteList(v, k)
        else:
          self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
      self.WriteLn('')
    # Add to the set of targets which represent the gyp 'all' target. We use the
    # name 'gyp_all_modules' as the Android build system doesn't allow the use
    # of the Make target 'all' and because 'all_modules' is the equivalent of
    # the Make target 'all' on Android.
    if part_of_all and write_alias_target:
      self.WriteLn('# Add target alias to "gyp_all_modules" target.')
      self.WriteLn('.PHONY: gyp_all_modules')
      self.WriteLn('gyp_all_modules: %s' % self.android_module)
      self.WriteLn('')
    # Add an alias from the gyp target name to the Android module name. This
    # simplifies manual builds of the target, and is required by the test
    # framework.
    if self.target != self.android_module and write_alias_target:
      self.WriteLn('# Alias gyp target name.')
      self.WriteLn('.PHONY: %s' % self.target)
      self.WriteLn('%s: %s' % (self.target, self.android_module))
      self.WriteLn('')
    # Add the command to trigger build of the target type depending
    # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
    # NOTE: This has to come last!
    modifier = ''
    if self.toolset == 'host':
      modifier = 'HOST_'
    if self.type == 'static_library':
      self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
    elif self.type == 'shared_library':
      self.WriteLn('LOCAL_PRELINK_MODULE := false')
      self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
    elif self.type == 'executable':
      # Executables are for build and test purposes only, so they're installed
      # to a directory that doesn't get included in the system image.
      self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
      self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
    else:
      # 'none' (and any other) targets become uninstallable stamp files that
      # hook into base_rules.mk so their dependencies are still honored.
      self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
      if self.toolset == 'target':
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
      else:
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
      self.WriteLn()
      self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
      self.WriteLn()
      self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
      self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) touch $@')
      self.WriteLn()
      self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')
  def WriteList(self, value_list, variable=None, prefix='',
                quoter=make.QuoteIfNecessary, local_pathify=False):
    """Write a variable definition that is a list of values.

    E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
      foo = blaha blahb
    but in a pretty-printed style.
    """
    values = ''
    if value_list:
      value_list = [quoter(prefix + l) for l in value_list]
      if local_pathify:
        value_list = [self.LocalPathify(l) for l in value_list]
      values = ' \\\n\t' + ' \\\n\t'.join(value_list)
    # An empty list still emits 'variable :=' so the variable is defined.
    self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
  def LocalPathify(self, path):
    """Convert a subdirectory-relative path into a normalized path which starts
    with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
    Absolute paths, or paths that contain variables, are just normalized."""
    if '$(' in path or os.path.isabs(path):
      # path is not a file in the project tree in this case, but calling
      # normpath is still important for trimming trailing slashes.
      return os.path.normpath(path)
    local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
    local_path = os.path.normpath(local_path)
    # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
    # - i.e. that the resulting path is still inside the project tree. The
    # path may legitimately have ended up containing just $(LOCAL_PATH), though,
    # so we don't look for a slash.
    assert local_path.startswith('$(LOCAL_PATH)'), (
           'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
    return local_path
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return os.path.normpath(path)
def PerformBuild(data, configurations, params):
  """Build the generated GypAndroid.mk with the Android build system.

  Runs 'make gyp_all_modules' in $ANDROID_BUILD_TOP with ONE_SHOT_MAKEFILE
  pointing at the generated root makefile. Raises KeyError if
  ANDROID_BUILD_TOP is not set in the environment.
  """
  # The android backend only supports the default configuration.
  options = params['options']
  makefile = os.path.abspath(os.path.join(options.toplevel_dir,
                                          'GypAndroid.mk'))
  env = dict(os.environ)
  # ONE_SHOT_MAKEFILE tells the Android build to include only our makefile.
  env['ONE_SHOT_MAKEFILE'] = makefile
  arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
  print 'Building: %s' % arguments
  subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
  """Write one Android .mk fragment per gyp target plus a root makefile.

  The root makefile (GypAndroid<suffix>.mk) is written at the top of the
  source tree and includes every per-target fragment. Aborts (via assert)
  unless $ANDROID_BUILD_TOP is set; returns early, leaving the root
  makefile incomplete, on a duplicate Android module name.
  """
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  write_alias_targets = generator_flags.get('write_alias_targets', True)
  sdk_version = generator_flags.get('aosp_sdk_version', 0)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  # NOTE(review): 'toolsets' (and 'srcdir' below) are assigned but never
  # referenced again in this function -- confirm they are dead before removing.
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'GypAndroid' + options.suffix + '.mk'
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  gyp.common.EnsureDirExists(makefile_path)
  # NOTE(review): the file is closed explicitly at the bottom; an early
  # 'return' on duplicate module names leaks the handle until GC.
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(header)

  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  android_modules = {}
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    relative_build_file = gyp.common.RelativePath(build_file,
                                                  options.toplevel_dir)
    build_files.add(relative_build_file)
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    part_of_all = qualified_target in needed_targets
    if limit_to_target_all and not part_of_all:
      continue

    relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
                                                 toolset)
    writer = AndroidMkWriter(android_top_dir)
    android_module = writer.Write(qualified_target, relative_target, base_path,
                                  output_file, spec, configs,
                                  part_of_all=part_of_all,
                                  write_alias_target=write_alias_targets,
                                  sdk_version=sdk_version)
    # Android module names are a global namespace; bail out on a collision
    # rather than emit a broken build.
    if android_module in android_modules:
      print ('ERROR: Android module names must be unique. The following '
             'targets both generate Android module name %s.\n %s\n %s' %
             (android_module, android_modules[android_module],
              qualified_target))
      return
    android_modules[android_module] = qualified_target

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)
  root_makefile.write('GYP_VAR_PREFIX ?=\n')
  root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n')
  root_makefile.write('GYP_HOST_MULTILIB ?=\n')

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
  root_makefile.write('\n')

  if write_alias_targets:
    root_makefile.write(ALL_MODULES_FOOTER)

  root_makefile.close()
|
manasapte/pants | refs/heads/master | tests/python/pants_test/ivy/test_bootstrapper.py | 18 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from pants.ivy.bootstrapper import Bootstrapper
from pants.ivy.ivy_subsystem import IvySubsystem
from pants_test.subsystem.subsystem_util import subsystem_instance
class BootstrapperTest(unittest.TestCase):
  """Tests for the Ivy Bootstrapper singleton and bootstrap jar download."""

  def test_simple(self):
    """Bootstrapping yields a usable Ivy and leaves the bootstrap jar on disk."""
    with subsystem_instance(IvySubsystem) as subsystem:
      ivy = Bootstrapper(ivy_subsystem=subsystem).ivy()
      self.assertIsNotNone(ivy.ivy_cache_dir)
      self.assertIsNone(ivy.ivy_settings)
      jar_path = os.path.join(subsystem.get_options().pants_bootstrapdir,
                              'tools', 'jvm', 'ivy', 'bootstrap.jar')
      self.assertTrue(os.path.exists(jar_path))

  def test_reset(self):
    """reset_instance() forces a fresh singleton on the next instance() call."""
    with subsystem_instance(IvySubsystem):
      first = Bootstrapper.instance()
      Bootstrapper.reset_instance()
      second = Bootstrapper.instance()
      self.assertIsNot(first, second)

  def test_default_ivy(self):
    """default_ivy() returns an Ivy with a cache dir and no custom settings."""
    with subsystem_instance(IvySubsystem):
      ivy = Bootstrapper.default_ivy()
      self.assertIsNotNone(ivy.ivy_cache_dir)
      self.assertIsNone(ivy.ivy_settings)
|
jzoldak/edx-platform | refs/heads/master | pavelib/paver_tests/test_prereqs.py | 9 | """
Tests covering the Open edX Paver prequisites installation workflow
"""
import os
import unittest
from mock import call, patch
from paver.easy import BuildFailure
from pavelib.prereqs import no_prereq_install, node_prereqs_installation
from pavelib.paver_tests.utils import (
PaverTestCase, unexpected_fail_on_npm_install, fail_on_npm_install
)
class TestPaverPrereqInstall(unittest.TestCase):
    """
    Test the status of the NO_PREREQ_INSTALL variable, its presence and how
    paver handles it.
    """

    def check_val(self, set_val, expected_val):
        """
        Verify that setting the variable to a certain value returns
        the expected boolean for it.

        As environment variables are only stored as strings, we have to cast
        whatever it's set at to a boolean that does not violate expectations.
        """
        saved_environ = dict(os.environ)
        os.environ['NO_PREREQ_INSTALL'] = set_val
        message = 'NO_PREREQ_INSTALL is set to {}, but we read it as {}'.format(
            set_val, expected_val)
        self.assertEqual(no_prereq_install(), expected_val, message)
        # Reset Environment back to original state
        os.environ.clear()
        os.environ.update(saved_environ)

    def test_no_prereq_install_true_lowercase(self):
        """
        Lowercase 'true' is read as True.
        """
        self.check_val('true', True)

    def test_no_prereq_install_false_lowercase(self):
        """
        Lowercase 'false' is read as False.
        """
        self.check_val('false', False)

    def test_no_prereq_install_true(self):
        """
        Capitalized 'True' is read as True.
        """
        self.check_val('True', True)

    def test_no_prereq_install_false(self):
        """
        Capitalized 'False' is read as False.
        """
        self.check_val('False', False)

    def test_no_prereq_install_0(self):
        """
        The string '0' is read as False.
        """
        self.check_val('0', False)

    def test_no_prereq_install_1(self):
        """
        The string '1' is read as True.
        """
        self.check_val('1', True)
class TestPaverNodeInstall(PaverTestCase):
    """
    Tests for the node/npm prerequisite installation logic.
    """
    def setUp(self):
        super(TestPaverNodeInstall, self).setUp()
        # Make sure the prereq-install code path is actually exercised.
        os.environ['NO_PREREQ_INSTALL'] = 'false'
        sh_patcher = patch('pavelib.prereqs.sh', return_value=True)
        self._mock_paver_sh = sh_patcher.start()
        self.addCleanup(sh_patcher.stop)

    def test_npm_install_with_subprocess_error(self):
        """
        An exit with subprocess exit 1 is what paver receives when there is
        an npm install error ("cb() never called!"). Test that we can handle
        this kind of failure. For more info see TE-1767.
        """
        self._mock_paver_sh.side_effect = fail_on_npm_install
        with self.assertRaises(BuildFailure):
            node_prereqs_installation()
        # The known failure is retried, so npm install runs exactly twice.
        recorded = self._mock_paver_sh.mock_calls
        self.assertEqual(recorded.count(call('npm install')), 2)

    def test_npm_install_called_once_when_successful(self):
        """
        A clean run should invoke npm install exactly once.
        """
        node_prereqs_installation()
        recorded = self._mock_paver_sh.mock_calls
        self.assertEqual(recorded.count(call('npm install')), 1)

    def test_npm_install_with_unexpected_subprocess_error(self):
        """
        Any other subprocess error should fail the build without retrying
        npm install.
        """
        self._mock_paver_sh.side_effect = unexpected_fail_on_npm_install
        with self.assertRaises(BuildFailure):
            node_prereqs_installation()
        recorded = self._mock_paver_sh.mock_calls
        self.assertEqual(recorded.count(call('npm install')), 1)
|
hyperized/ansible | refs/heads/devel | lib/ansible/modules/storage/vexata/vexata_eg.py | 25 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vexata_eg
version_added: 2.9
short_description: Manage export groups on Vexata VX100 storage arrays
description:
- Create or delete export groups on a Vexata VX100 array.
- An export group is a tuple of a volume group, initiator group and port
group that allows a set of volumes to be exposed to one or more hosts
through specific array ports.
author:
- Sandeep Kasargod (@vexata)
options:
name:
description:
- Export group name.
required: true
type: str
state:
description:
- Creates export group when present or delete when absent.
default: present
choices: [ present, absent ]
type: str
vg:
description:
- Volume group name.
type: str
ig:
description:
- Initiator group name.
type: str
pg:
description:
- Port group name.
type: str
extends_documentation_fragment:
- vexata.vx100
'''
EXAMPLES = r'''
- name: Create export group named db_export.
vexata_eg:
name: db_export
vg: dbvols
ig: dbhosts
pg: pg1
state: present
array: vx100_ultra.test.com
user: admin
password: secret
- name: Delete export group named db_export
vexata_eg:
name: db_export
state: absent
array: vx100_ultra.test.com
user: admin
password: secret
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vexata import (
argument_spec, get_array, required_together)
def get_eg(module, array):
    """Retrieve a named eg if it exists, None if absent.

    :param module: AnsibleModule; supplies params['name'] and error reporting.
    :param array: Vexata array client exposing list_egs().
    :returns: the matching export group dict, or None when absent.
    """
    name = module.params['name']
    try:
        egs = array.list_egs()
        # A list comprehension, not filter(): on Python 3 filter() returns an
        # iterator, so len() on it raises TypeError and the broad except below
        # would report a bogus retrieval error.
        eg = [eg for eg in egs if eg['name'] == name]
        if len(eg) == 1:
            return eg[0]
        else:
            return None
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve export groups.')
def get_vg_id(module, array):
    """Retrieve a named vg's id if it exists, error if absent.

    :param module: AnsibleModule; supplies params['vg'] and error reporting.
    :param array: Vexata array client exposing list_vgs().
    :returns: the volume group id; fails the module when not found.
    """
    name = module.params['vg']
    try:
        vgs = array.list_vgs()
        # A list comprehension, not filter(): on Python 3 filter() returns an
        # iterator, so len() on it raises TypeError and the broad except below
        # would report a bogus retrieval error.
        vg = [vg for vg in vgs if vg['name'] == name]
        if len(vg) == 1:
            return vg[0]['id']
        else:
            module.fail_json(msg='Volume group {0} was not found.'.format(name))
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve volume groups.')
def get_ig_id(module, array):
    """Retrieve a named ig's id if it exists, error if absent.

    :param module: AnsibleModule; supplies params['ig'] and error reporting.
    :param array: Vexata array client exposing list_igs().
    :returns: the initiator group id; fails the module when not found.
    """
    name = module.params['ig']
    try:
        igs = array.list_igs()
        # A list comprehension, not filter(): on Python 3 filter() returns an
        # iterator, so len() on it raises TypeError and the broad except below
        # would report a bogus retrieval error.
        ig = [ig for ig in igs if ig['name'] == name]
        if len(ig) == 1:
            return ig[0]['id']
        else:
            module.fail_json(msg='Initiator group {0} was not found.'.format(name))
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve initiator groups.')
def get_pg_id(module, array):
    """Retrieve a named pg's id if it exists, error if absent.

    :param module: AnsibleModule; supplies params['pg'] and error reporting.
    :param array: Vexata array client exposing list_pgs().
    :returns: the port group id; fails the module when not found.
    """
    name = module.params['pg']
    try:
        pgs = array.list_pgs()
        # A list comprehension, not filter(): on Python 3 filter() returns an
        # iterator, so len() on it raises TypeError and the broad except below
        # would report a bogus retrieval error.
        pg = [pg for pg in pgs if pg['name'] == name]
        if len(pg) == 1:
            return pg[0]['id']
        else:
            module.fail_json(msg='Port group {0} was not found.'.format(name))
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve port groups.')
def create_eg(module, array):
    """Create a new export group from the named vg/ig/pg triple."""
    eg_name = module.params['name']
    # Resolve all three component groups up front; each helper fails the
    # module itself when its group is missing.
    group_ids = (get_vg_id(module, array),
                 get_ig_id(module, array),
                 get_pg_id(module, array))
    if module.check_mode:
        # Check mode: report no change without touching the array.
        module.exit_json(changed=False)
    changed = False
    try:
        eg = array.create_eg(eg_name, 'Ansible export group', group_ids)
        if not eg:
            raise Exception
        module.log(msg='Created export group {0}'.format(eg_name))
        changed = True
    except Exception:
        module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
    module.exit_json(changed=changed)
def delete_eg(module, array, eg):
    """Delete an existing export group.

    :param module: AnsibleModule for check mode and result reporting.
    :param array: Vexata array client exposing delete_eg().
    :param eg: export group dict as returned by get_eg().
    """
    eg_name = eg['name']
    if module.check_mode:
        # Check mode: report no change without touching the array.
        module.exit_json(changed=False)
    changed = False
    try:
        if not array.delete_eg(eg['id']):
            raise Exception
        module.log(msg='Export group {0} deleted.'.format(eg_name))
        changed = True
    except Exception:
        module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
    module.exit_json(changed=changed)
def main():
    """Module entry point: ensure the named export group is present/absent."""
    arg_spec = argument_spec()
    # Module-specific options layered on top of the common Vexata spec.
    arg_spec.update(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        vg=dict(type='str'),
        ig=dict(type='str'),
        pg=dict(type='str'),
    )
    module = AnsibleModule(arg_spec,
                           supports_check_mode=True,
                           required_together=required_together())

    state = module.params['state']
    array = get_array(module)
    eg = get_eg(module, array)
    if state == 'present' and not eg:
        create_eg(module, array)
    elif state == 'absent' and eg:
        delete_eg(module, array, eg)
    else:
        # Already in the desired state; nothing to do.
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.