| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars; may be null) |
|---|---|---|---|---|
oskar456/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/vrak.py
|
61
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveNewIE
from ..utils import (
int_or_none,
parse_age_limit,
smuggle_url,
unescapeHTML,
)
class VrakIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?vrak\.tv/videos\?.*?\btarget=(?P<id>[\d.]+)'
_TEST = {
'url': 'http://www.vrak.tv/videos?target=1.2306782&filtre=emission&id=1.1806721',
'info_dict': {
'id': '5345661243001',
'ext': 'mp4',
'title': 'Obésité, film de hockey et Roseline Filion',
'timestamp': 1488492126,
'upload_date': '20170302',
'uploader_id': '2890187628001',
'creator': 'VRAK.TV',
'age_limit': 8,
'series': 'ALT (Actualité Légèrement Tordue)',
'episode': 'Obésité, film de hockey et Roseline Filion',
'tags': list,
},
'params': {
'skip_download': True,
},
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/2890187628001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h\d\b[^>]+\bclass=["\']videoTitle["\'][^>]*>([^<]+)',
webpage, 'title', default=None) or self._og_search_title(webpage)
content = self._parse_json(
self._search_regex(
r'data-player-options-content=(["\'])(?P<content>{.+?})\1',
webpage, 'content', default='{}', group='content'),
video_id, transform_source=unescapeHTML)
ref_id = content.get('refId') or self._search_regex(
r'refId":"([^&]+)"', webpage, 'ref id')
brightcove_id = self._search_regex(
r'''(?x)
java\.lang\.String\s+value\s*=\s*["']brightcove\.article\.\d+\.%s
[^>]*
java\.lang\.String\s+value\s*=\s*["'](\d+)
''' % re.escape(ref_id), webpage, 'brightcove id')
return {
'_type': 'url_transparent',
'ie_key': BrightcoveNewIE.ie_key(),
'url': smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
{'geo_countries': ['CA']}),
'id': brightcove_id,
'description': content.get('description'),
'creator': content.get('brand'),
'age_limit': parse_age_limit(content.get('rating')),
'series': content.get('showName') or content.get(
'episodeName'), # this is intentional
'season_number': int_or_none(content.get('seasonNumber')),
'episode': title,
'episode_number': int_or_none(content.get('episodeNumber')),
'tags': content.get('tags', []),
}
|
ingadhoc/odoo
|
refs/heads/8.0
|
addons/l10n_do/__openerp__.py
|
309
|
# -*- coding: utf-8 -*-
# #############################################################################
#
# First author: Jose Ernesto Mendez <tecnologia@obsdr.com> (Open Business Solutions SRL.)
# Copyright (c) 2012 -TODAY Open Business Solutions, SRL. (http://obsdr.com). All rights reserved.
#
# This is a fork to upgrade to odoo 8.0
# by Marcos Organizador de Negocios - Eneldo Serrata - www.marcos.org.do
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company like Marcos Organizador de Negocios.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
{
'name': 'Dominican Republic - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Dominican Republic.
==============================================================================
* Chart of Accounts.
* The Tax Code Chart for Dominican Republic
* The main taxes used in Dominican Republic
* Fiscal position for local """,
'author': 'Eneldo Serrata - Marcos Organizador de Negocios, SRL.',
'website': 'http://marcos.do',
'depends': ['account', 'base_iban'],
'data': [
# basic accounting data
'data/ir_sequence_type.xml',
'data/ir_sequence.xml',
'data/account_journal.xml',
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account_chart_template.xml',
'data/account.tax.template.csv',
'data/l10n_do_base_data.xml',
# Adds fiscal position
'data/account.fiscal.position.template.csv',
'data/account.fiscal.position.tax.template.csv',
# configuration wizard, views, reports...
'data/l10n_do_wizard.xml'
],
'test': [],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
The-Compiler/pytest
|
refs/heads/master
|
testing/io/test_wcwidth.py
|
15
|
import pytest
from _pytest._io.wcwidth import wcswidth
from _pytest._io.wcwidth import wcwidth
@pytest.mark.parametrize(
("c", "expected"),
[
("\0", 0),
("\n", -1),
("a", 1),
("1", 1),
("א", 1),
("\u200B", 0),
("\u1ABE", 0),
("\u0591", 0),
("🉐", 2),
("$", 2),
],
)
def test_wcwidth(c: str, expected: int) -> None:
assert wcwidth(c) == expected
@pytest.mark.parametrize(
("s", "expected"),
[
("", 0),
("hello, world!", 13),
("hello, world!\n", -1),
("0123456789", 10),
("שלום, עולם!", 11),
("שְבֻעָיים", 6),
("🉐🉐🉐", 6),
],
)
def test_wcswidth(s: str, expected: int) -> None:
assert wcswidth(s) == expected
|
wdwvt1/qiime
|
refs/heads/master
|
qiime/split_libraries.py
|
12
|
#!/usr/bin/env python
# file split_libraries.py
"""Performs preprocessing steps for barcoded library analysis, e.g. 454.
Specifically, does the quality-filtering step (using several criteria) and
renames each read with the appropriate library id.
This module reads the linker+primer sequence from the input mapping file, and
associates these with the barcodes from the mapping file. If a barcode is
read that does not correspond to any in the mapping file, this module checks
against all possible primers from the mapping file. A rare situation could
arise if a barcode does not match any from the mapping file (either because
of sequencing errors or because the mapping file is incomplete) and variations
of the same primer are used for sequencing (e.g., a truncated form of the same
primer), where it is impossible to distinguish what primer was actually used
to amplify a given sequence. In these cases, portions of the given sequence
are sliced out in ascending order of primer sizes and compared to all possible
primers from the mapping file. The first matching primer is considered a hit
for the purposes of determining where a primer ends and the actual
sequence data begins. Because of this, one should be careful about using
truncated forms of the same primer with this module. The -c option can be
used to disable attempts at barcode correction, and sequences that do not
have a barcode that matches the mapping file will not be recorded.
"""
__author__ = "Rob Knight and Micah Hamady"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Rob Knight", "Micah Hamady", "Greg Caporaso", "Kyle Bittinger",
"Jesse Stombaugh", "William Walters", "Jens Reeder",
"Emily TerAvest", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Walters"
__email__ = "rob@spot.colorado.edu, william.a.walters@colorado.edu"
import re
from gzip import GzipFile
from os import mkdir, stat
from collections import defaultdict
from string import upper
from numpy import array, mean, arange, histogram
from numpy import __version__ as numpy_version
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from skbio.parse.sequences import parse_fasta
from cogent import DNA as DNA_cogent, LoadSeqs
from cogent.align.align import make_dna_scoring_dict, local_pairwise
from skbio.util import remove_files
from skbio.sequence import DNASequence
from qiime.check_id_map import process_id_map
from qiime.barcode import correct_barcode
from qiime.hamming import decode_barcode_8
from qiime.golay import decode as decode_golay_12
from qiime.format import format_histograms
from qiime.parse import QiimeParseError, parse_qual_scores
from qiime.util import create_dir, median_absolute_deviation
# Including new=True in the histogram() call is necessary to
# get the correct result in versions prior to NumPy 1.2.0,
# but the new keyword will be removed in NumPy 1.4. In NumPy 1.2.0
# or later, new=True raises a Warning regarding
# deprecation of new. One solution to this is to check the
# numpy version, and if it's less than 1.2.0, overwrite histogram
# with new=True as the default. This avoids a deprecation warning message
# in versions 1.2.0 through 1.3.*, and a try/except to handle errors from
# versions 1.4.0 or later.
numpy_version = re.split("[^\d]", numpy_version)
numpy_version = tuple([int(i) for i in numpy_version if i.isdigit()])
if numpy_version < (1, 3, 0):
numpy_histogram = histogram
def histogram(a, bins=10, range=None, normed=False, weights=None):
return numpy_histogram(a, bins=bins, range=range,
normed=normed, weights=weights, new=True)
# Supported barcode types - need to adapt these functions to ignore list
# of valid barcodes that the generic decoder requires
BARCODE_TYPES = {
"golay_12": (12, lambda bc, bcodes: decode_golay_12(bc)),
"hamming_8": (8, lambda bc, bcodes: decode_barcode_8(bc)),
# The decode function for variable length barcode does nothing -
# it's just provided to comply with the interface of the other
# barcode types. The returned barcode is always the same as the
# one passed in, and the number of mismatches is always 0. The
# length is None, corresponding to variable length.
"variable_length": (None, lambda bc, bcodes: (bc, 0))}
def get_infile(filename):
"""Returns filehandle, allowing gzip input."""
if filename.endswith(".gz"):
fin = GzipFile(filename, "rb")
else:
fin = open(filename, "U")
return fin
def count_mismatches(seq1, seq2, max_mm):
"""Counts mismatches, primer should be <= length of the seq.
"""
mm = 0
for i in range(min(len(seq2), len(seq1))):
if seq1[i] != seq2[i]:
mm += 1
if mm > max_mm:
return mm
return mm
def ok_mm_primer(primer_seq, all_primer_seqs, primer_mm):
"""Check if primer_seq matches any primer within max mismatches.
TODO: if we start using highly degenerate primers, should refactor using
faster algorithm.
"""
for curr_pat in all_primer_seqs:
if count_mismatches(primer_seq, curr_pat, primer_mm) <= primer_mm:
return True
return False
def MatchScorerAmbigs(match, mismatch, matches=None):
""" Alternative scorer factory for sw_align which allows match to ambiguous chars
It allows for matching to ambiguous characters which is useful for
primer/sequence matching. Not sure what should happen with gaps, but they
shouldn't be passed to this function anyway. Currently a gap will only match
a gap.
match and mismatch should both be numbers. Typically, match should be
positive and mismatch should be negative.
Resulting function has signature f(x,y) -> number.
Code originally from Greg Caporaso
"""
matches = matches or \
{'A': {'A': None}, 'G': {'G': None}, 'C': {'C': None},
'T': {'T': None}, '-': {'-': None}}
for ambig, chars in DNASequence.iupac_degeneracies().iteritems():
try:
matches[ambig].update({}.fromkeys(chars))
except KeyError:
matches[ambig] = {}.fromkeys(chars)
for char in chars:
try:
matches[char].update({ambig: None})
except KeyError:
matches[char] = {ambig: None}
def scorer(x, y):
# need a better way to disallow unknown characters (could
# try/except for a KeyError on the next step, but that would only
# test one of the characters)
if x not in matches or y not in matches:
raise ValueError("Unknown character: %s or %s" % (x, y))
if y in matches[x]:
return match
else:
return mismatch
return scorer
# The scoring function which can be passed to
# cogent.alignment.algorithms.sw_align
equality_scorer_ambigs = MatchScorerAmbigs(1, -1)
expanded_equality_scorer_ambigs = MatchScorerAmbigs(1, -1,
matches=
{'A': {'A': None, 'G': None},
'G':
{'G': None,
'A': None,
'T': None,
'C': None,
},
'C':
{'C': None,
'G': None},
'T':
{'T': None,
'G': None},
'-': {'-': None}})
def pair_hmm_align_unaligned_seqs(seqs, moltype=DNA_cogent, params={}):
"""
Checks parameters for pairwise alignment, returns alignment.
Code from Greg Caporaso.
"""
seqs = LoadSeqs(data=seqs, moltype=moltype, aligned=False)
try:
s1, s2 = seqs.values()
except ValueError:
raise ValueError(
"Pairwise aligning of seqs requires exactly two seqs.")
try:
gap_open = params['gap_open']
except KeyError:
gap_open = 5
try:
gap_extend = params['gap_extend']
except KeyError:
gap_extend = 2
try:
score_matrix = params['score_matrix']
except KeyError:
score_matrix = make_dna_scoring_dict(
match=1, transition=-1, transversion=-1)
return local_pairwise(s1, s2, score_matrix, gap_open, gap_extend)
def local_align_primer_seq(primer, sequence, sw_scorer=equality_scorer_ambigs):
"""Perform local alignment of primer and sequence
primer: Input primer sequence
sequence: target sequence to test primer against
Returns the number of mismatches,
and the start position in sequence of the hit.
Modified from code written by Greg Caporaso.
"""
query_primer = primer
query_sequence = str(sequence)
# Get alignment object from primer, target sequence
alignment = pair_hmm_align_unaligned_seqs([query_primer, query_sequence])
# Extract sequence of primer, target site, may have gaps if insertions
# or deletions have occurred.
primer_hit = str(alignment.Seqs[0])
target_hit = str(alignment.Seqs[1])
# Count insertions and deletions
insertions = primer_hit.count('-')
deletions = target_hit.count('-')
mismatches = 0
for i in range(len(target_hit)):
# using the scoring function to check for
# matches, but might want to just access the dict
if sw_scorer(target_hit[i], primer_hit[i]) == -1 and \
target_hit[i] != '-' and primer_hit[i] != '-':
mismatches += 1
try:
hit_start = query_sequence.index(target_hit.replace('-', ''))
except ValueError:
raise ValueError(
'substring not found, query string %s, target_hit %s' %
(query_sequence, target_hit))
# sum total mismatches
mismatch_count = insertions + deletions + mismatches
return mismatch_count, hit_start
def expand_degeneracies(raw_primers):
"""Returns all non-degenerate versions of a given primer sequence.
Order is not guaranteed!
"""
expanded_primers = []
for raw_primer in raw_primers:
primer_seq = DNASequence(raw_primer.strip())
for expanded_primer in primer_seq.nondegenerates():
expanded_primers.append(str(expanded_primer))
return expanded_primers
def check_map(infile, disable_primer_check, barcode_type="golay_12",
added_demultiplex_field=None, has_barcodes=True):
"""Check mapping file and extract list of valid barcodes, primers """
if barcode_type == "variable_length":
var_len_barcodes = True
else:
var_len_barcodes = False
if barcode_type == "0":
has_barcodes = False
# hds, id_map, dsp, run_description, errors, warnings
hds, mapping_data, run_description, errors, warnings = \
process_id_map(infile, has_barcodes=has_barcodes,
disable_primer_check=disable_primer_check,
added_demultiplex_field=added_demultiplex_field,
variable_len_barcodes=var_len_barcodes)
if errors:
raise ValueError('Errors were found with mapping file, ' +
'please run validate_mapping_file.py to ' +
'identify problems.')
id_map = {}
for curr_data in mapping_data:
id_map[curr_data[0]] = {}
for header in range(len(hds)):
for curr_data in mapping_data:
id_map[curr_data[0]][hds[header]] = curr_data[header]
barcode_to_sample_id = {}
primer_seqs_lens = {}
all_primers = {}
for sample_id, sample in id_map.items():
if added_demultiplex_field:
barcode_to_sample_id[sample['BarcodeSequence'].upper() + "," +
sample[added_demultiplex_field]] = sample_id
else:
barcode_to_sample_id[sample['BarcodeSequence'].upper()] = sample_id
if not disable_primer_check:
raw_primers = sample['LinkerPrimerSequence'].upper().split(',')
if len(raw_primers[0].strip()) == 0:
raise ValueError('No primers detected, please use the ' +
'-p parameter to disable primer detection.')
expanded_primers = expand_degeneracies(raw_primers)
curr_bc_primers = {}
for primer in expanded_primers:
curr_bc_primers[primer] = len(primer)
all_primers[primer] = len(primer)
primer_seqs_lens[sample['BarcodeSequence']] = curr_bc_primers
return hds, id_map, barcode_to_sample_id, warnings, errors, \
primer_seqs_lens, all_primers
def fasta_ids(fasta_files, verbose=False):
""" Returns list of ids in FASTA files """
all_ids = set([])
for fasta_in in fasta_files:
for label, seq in parse_fasta(fasta_in):
rid = label.split()[0]
if rid in all_ids:
raise ValueError(
"Duplicate ID found in FASTA/qual file: %s" %
label)
all_ids.add(rid)
return all_ids
def count_ambig(curr_seq, valid_chars='ATCG'):
"""Counts non-standard characters in seq"""
up_seq = curr_seq.upper()
total = 0
for vchar in valid_chars:
total += up_seq.count(vchar)
return len(curr_seq) - total
def split_seq(curr_seq, barcode_len, primer_seq_len):
"""Split sequence into parts barcode, primer and remainder"""
curr_barcode = curr_seq[0:barcode_len]
rest_of_seq = curr_seq[barcode_len:]
primer_seq = rest_of_seq[0:primer_seq_len]
rest_of_seq = rest_of_seq[primer_seq_len:]
return curr_barcode, primer_seq, rest_of_seq
def get_barcode(curr_seq, barcode_len):
""" Split sequence into barcode and remaining sequence
The linker and primer remain in the returned sequence, as one must first
read the barcode to find the associated primer from the mapping file"""
raw_barcode = curr_seq[0:barcode_len]
raw_seq = curr_seq[barcode_len:]
return raw_barcode, raw_seq
def primer_exceeds_mismatches(primer_seq, all_primer_seqs, max_primer_mm):
"""Returns True if primer exceeds allowed mismatches"""
if primer_seq not in all_primer_seqs:
if not ok_mm_primer(primer_seq, all_primer_seqs, max_primer_mm):
return True
return False
def seq_exceeds_homopolymers(curr_seq, max_len=6):
"""Returns False if primer contains any homopolymer > allowed length"""
for base in 'ATGC':
curr = base * (max_len + 1)
if curr in curr_seq:
return True
return False
def check_barcode(curr_barcode, barcode_type, valid_map,
attempt_correction=True, added_demultiplex_field=None,
curr_id=None):
"""Return whether barcode is valid, and attempt correction."""
corrected_bc = False
if added_demultiplex_field:
added_demultiplex_lens =\
set([len(bc.split(',')[1]) for bc in valid_map])
# using set() will put in order of smallest to largest and removes
# redundant lengths, converting to list to sort from largest to
# smallest
added_demultiplex_lens =\
[length for length in added_demultiplex_lens][::-1]
# Handle specific case of run_prefix
# Need to slice out size(s) of label that matches run prefix size(s)
if added_demultiplex_field.upper() == "RUN_PREFIX":
added_demultiplex =\
[curr_id.split()[0][0:added_demultiplex_len] for
added_demultiplex_len in added_demultiplex_lens]
else:
for label_item in curr_id.split():
if label_item.startswith(added_demultiplex_field):
added_demultiplex = [label_item.split('=')[1]]
all_bcs = [bc.split(',')[0] for bc in valid_map]
all_added_demultiplex = [bc.split(',')[1] for bc in valid_map]
for curr_added_demultiplex in added_demultiplex:
bc_and_demultiplex = curr_barcode + "," + curr_added_demultiplex
if bc_and_demultiplex in valid_map:
return False, bc_and_demultiplex, corrected_bc
elif attempt_correction == False:
return True, curr_barcode, corrected_bc
else:
if curr_barcode in valid_map:
return False, curr_barcode, corrected_bc
elif attempt_correction == False:
return True, curr_barcode, corrected_bc
if barcode_type in BARCODE_TYPES:
expect_len, curr_bc_fun = BARCODE_TYPES[barcode_type]
barcode, num_errors = curr_bc_fun(curr_barcode, valid_map)
corrected_bc = True
if added_demultiplex_field:
for curr_added_demultiplex in added_demultiplex:
bc_and_demultiplex = barcode + "," + curr_added_demultiplex
if bc_and_demultiplex in valid_map:
return num_errors, bc_and_demultiplex, corrected_bc
else:
return num_errors, barcode, corrected_bc
else:
try:
expect_len, curr_bc_fun = int(barcode_type), correct_barcode
barcode, num_errors = curr_bc_fun(curr_barcode, valid_map)
corrected_bc = True
if added_demultiplex_field:
for curr_added_demultiplex in added_demultiplex:
bc_and_demultiplex = barcode + "," + curr_added_demultiplex
if bc_and_demultiplex in valid_map:
return num_errors, bc_and_demultiplex, corrected_bc
except ValueError:
raise ValueError("Unsupported barcode type: %s" % barcode_type)
return num_errors, barcode, corrected_bc
def make_histograms(raw_lengths, pre_lengths, post_lengths, binwidth=10):
"""Makes histogram data for pre and post lengths"""
if post_lengths:
min_len = min([min(post_lengths), min(raw_lengths)])
else:
min_len = min(raw_lengths)
max_len = max(raw_lengths)
floor = (min_len / binwidth) * binwidth
ceil = ((max_len / binwidth) + 2) * binwidth
bins = arange(floor, ceil, binwidth)
raw_hist = histogram(raw_lengths, bins)[0]
pre_hist = histogram(pre_lengths, bins)[0]
post_hist, bin_edges = histogram(post_lengths, bins)
return raw_hist, pre_hist, post_hist, bin_edges
class SeqQualBad(object):
"""Checks if a seq and qual score are bad, saving ids that are bad."""
def __init__(self, name, f):
"""New SeqQualBad keeps track of failed ids."""
self.FailedIds = []
self.Name = name
self.F = f
def __call__(self, id_, seq, qual):
"""SeqQualBad called on id, seq and qual returns bool.
Note: saves failed ids in self.FailedIds."""
result = self.F(id_, seq, qual)
if result:
self.FailedIds.append(id_)
return result
def __str__(self):
"""SeqQualBad str returns tab-delimited output of counts."""
return "%s\t%s" % (self.Name, len(self.FailedIds))
def qual_missing(id_, seq, qual):
"""returns True if qual is None"""
return qual is None
QualMissing = SeqQualBad('Missing Qual Score', qual_missing)
def get_seq_lengths(seq_lengths, bc_counts):
"""Convenience wrapper for getting lengths of good and bad seqs"""
all_seq_lengths = seq_lengths.values()
all_seq_ids = set(seq_lengths.keys())
bad_seq_ids = set(bc_counts[None]).union(set(bc_counts['#FAILED']))
good_seq_ids = all_seq_ids - bad_seq_ids
good_seq_lengths = map(seq_lengths.__getitem__, good_seq_ids)
return all_seq_lengths, good_seq_lengths
def check_window_qual_scores(qual_scores, window=50, min_average=25):
"""Check that all windows have ave qual score > threshold."""
# Code from Jens Reeder, added 1-13-2010
l = len(qual_scores)
window = min(window, l)
if (window == 0):
return True
# initialize with sum of first window
window_score = sum(qual_scores[:window])
idx = 0
while (window_score / float(window) >= min_average
and idx < l - window):
#'Move' window
window_score += qual_scores[idx + window] - qual_scores[idx]
idx += 1
if (idx == l - window):
# we processed all qual_scores, must be good
# Return index for truncation purposes
return True, idx
else:
return False, idx
def check_seqs(fasta_out, fasta_files, starting_ix, valid_map, qual_mappings,
filters, barcode_len, keep_primer, keep_barcode, barcode_type,
max_bc_errors, retain_unassigned_reads, attempt_bc_correction,
primer_seqs_lens, all_primers, max_primer_mm, disable_primer_check,
reverse_primers, rev_primers, qual_out, qual_score_window=0,
discard_bad_windows=False, min_qual_score=25, min_seq_len=200,
median_length_filtering=None, added_demultiplex_field=None,
reverse_primer_mismatches=0, truncate_ambi_bases=False):
"""Checks fasta-format sequences and qual files for validity."""
seq_lengths = {}
# Record complete barcode + primer + sequence lengths
raw_seq_lengths = {}
# Record sequence lengths after all optional removal of components
final_seq_lengths = {}
bc_counts = defaultdict(list)
curr_ix = starting_ix
corr_ct = 0 # count of corrected barcodes
# get the list of barcode lengths in reverse order
barcode_length_order =\
sorted(set([len(bc.split(',')[0]) for bc in valid_map]))
barcode_length_order = barcode_length_order[::-1]
primer_mismatch_count = 0
all_primers_lens = sorted(set(all_primers.values()))
reverse_primer_not_found = 0
sliding_window_failed = 0
trunc_ambi_base_counts = 0
below_seq_min_after_trunc = 0
below_seq_min_after_ambi_trunc = 0
for fasta_in in fasta_files:
for curr_id, curr_seq in parse_fasta(fasta_in):
curr_rid = curr_id.split()[0]
curr_seq = upper(curr_seq)
curr_len = len(curr_seq)
curr_qual = qual_mappings.get(curr_rid, None)
# if qual_out:
# curr_qual_out_score = \
# "%2.2f" % float(float(sum(curr_qual))/float(len(curr_qual)))
seq_lengths[curr_rid] = curr_len
failed = False
for f in filters:
failed = failed or f(curr_rid, curr_seq, curr_qual)
if failed: # if we failed any of the checks, bail out here
bc_counts['#FAILED'].append(curr_rid)
continue
if barcode_type == 'variable_length':
# Reset the raw_barcode, raw_seq, and barcode_len -- if
# we don't match a barcode from the mapping file, we want
# these values to be None
raw_barcode, raw_seq, barcode_len = (None, None, None)
curr_valid_map =\
[curr_bc.split(',')[0] for curr_bc in valid_map]
# Iterate through the barcode length from longest to shortest
for l in barcode_length_order:
# extract the current length barcode from the sequence
bc, seq = get_barcode(curr_seq, l)
# check if the sliced sequence corresponds to a valid
# barcode, and if so set raw_barcode, raw_seq, and
# barcode_len for use in the next steps
if bc in curr_valid_map:
raw_barcode, raw_seq = bc, seq
barcode_len = len(raw_barcode)
break
# if we haven't found a valid barcode, log this sequence as
# failing to match a barcode, and move on to the next sequence
if not raw_barcode:
bc_counts['#FAILED'].append(curr_rid)
continue
else:
# Get the current barcode to look up the associated primer(s)
raw_barcode, raw_seq = get_barcode(curr_seq, barcode_len)
if not disable_primer_check:
try:
current_primers = primer_seqs_lens[raw_barcode]
# In this case, all values will be the same, i.e. the length
# of the given primer, or degenerate variations thereof.
primer_len = current_primers.values()[0]
if primer_exceeds_mismatches(raw_seq[:primer_len],
current_primers, max_primer_mm):
bc_counts['#FAILED'].append(curr_rid)
primer_mismatch_count += 1
continue
except KeyError:
# If the barcode read does not match any of those in the
# mapping file, the situation becomes more complicated. We do
# not know the length of the sequence to slice out to compare to
# our primer sets, so, in ascending order of all the given
# primer lengths, a sequence will be sliced out and compared
# to the primer set.
current_primers = all_primers
found_match = False
for seq_slice_len in all_primers_lens:
if not(
primer_exceeds_mismatches(raw_seq[:seq_slice_len],
current_primers, max_primer_mm)):
primer_len = seq_slice_len
found_match = True
break
if not found_match:
bc_counts['#FAILED'].append(curr_rid)
primer_mismatch_count += 1
continue
except IndexError:
# Try to raise meaningful error if problem reading primers
raise IndexError('Error reading primer sequences. If ' +
'primers were purposefully not included in the mapping ' +
'file, disable usage with the -p option.')
else:
# Set primer length to zero if primers are disabled.
primer_len = 0
# split seqs
cbc, cpr, cres = split_seq(curr_seq, barcode_len,
primer_len)
total_bc_primer_len = len(cbc) + len(cpr)
# get current barcode
try:
bc_diffs, curr_bc, corrected_bc = \
check_barcode(cbc, barcode_type, valid_map.keys(),
attempt_bc_correction, added_demultiplex_field, curr_id)
if bc_diffs > max_bc_errors:
raise ValueError("Too many errors in barcode")
corr_ct += bool(corrected_bc)
except Exception as e:
bc_counts[None].append(curr_rid)
continue
curr_samp_id = valid_map.get(curr_bc, 'Unassigned')
new_id = "%s_%d" % (curr_samp_id, curr_ix)
# check if writing out primer
write_seq = cres
if reverse_primers == "truncate_only":
try:
rev_primer = rev_primers[curr_bc]
mm_tested = {}
for curr_rev_primer in rev_primer:
# Try to find lowest count of mismatches for all
# reverse primers
rev_primer_mm, rev_primer_index = \
local_align_primer_seq(curr_rev_primer, cres)
mm_tested[rev_primer_mm] = rev_primer_index
rev_primer_mm = min(mm_tested.keys())
rev_primer_index = mm_tested[rev_primer_mm]
if rev_primer_mm <= reverse_primer_mismatches:
write_seq = write_seq[0:rev_primer_index]
if qual_out:
curr_qual = curr_qual[0:barcode_len +
primer_len + rev_primer_index]
else:
reverse_primer_not_found += 1
except KeyError:
pass
elif reverse_primers == "truncate_remove":
try:
rev_primer = rev_primers[curr_bc]
mm_tested = {}
for curr_rev_primer in rev_primer:
# Try to find lowest count of mismatches for all
# reverse primers
rev_primer_mm, rev_primer_index = \
local_align_primer_seq(curr_rev_primer, cres)
mm_tested[rev_primer_mm] = rev_primer_index
rev_primer_mm = min(mm_tested.keys())
rev_primer_index = mm_tested[rev_primer_mm]
if rev_primer_mm <= reverse_primer_mismatches:
write_seq = write_seq[0:rev_primer_index]
if qual_out:
curr_qual = curr_qual[0:barcode_len +
primer_len + rev_primer_index]
else:
reverse_primer_not_found += 1
write_seq = False
except KeyError:
bc_counts['#FAILED'].append(curr_rid)
continue
# Check for quality score windows, truncate or remove sequence
# if a poor window is found. Previously tested the whole sequence; now
# testing only the sequence remaining after barcode/primer removal.
if qual_score_window:
passed_window_check, window_index =\
check_window_qual_scores(curr_qual, qual_score_window,
min_qual_score)
# Throw out entire sequence if discard option True
if discard_bad_windows and not passed_window_check:
sliding_window_failed += 1
write_seq = False
# Otherwise truncate to index of bad window
elif not discard_bad_windows and not passed_window_check:
sliding_window_failed += 1
if write_seq:
write_seq = write_seq[0:window_index]
if qual_out:
curr_qual = curr_qual[0:barcode_len +
primer_len + window_index]
#Check for sequences that are too short after truncation
if len(write_seq) + total_bc_primer_len < min_seq_len:
write_seq = False
below_seq_min_after_trunc += 1
if truncate_ambi_bases and write_seq:
write_seq_ambi_ix = True
# Skip if no "N" characters detected.
try:
ambi_ix = write_seq.index("N")
write_seq = write_seq[0:ambi_ix]
except ValueError:
write_seq_ambi_ix = False
pass
if write_seq_ambi_ix:
# Discard if too short after truncation
if len(write_seq) + total_bc_primer_len < min_seq_len:
write_seq = False
below_seq_min_after_ambi_trunc += 1
else:
trunc_ambi_base_counts += 1
if qual_out:
curr_qual = curr_qual[0:barcode_len +
primer_len + ambi_ix]
# Slice out regions of quality scores that correspond to the
# written sequence, i.e., remove the barcodes/primers and reverse
# primers if option is enabled.
if qual_out:
qual_barcode, qual_primer, qual_scores_out = \
split_seq(curr_qual, barcode_len, primer_len)
# Convert to strings instead of numpy arrays, strip off
# brackets
qual_barcode = format_qual_output(qual_barcode)
qual_primer = format_qual_output(qual_primer)
qual_scores_out = format_qual_output(qual_scores_out)
if not write_seq:
bc_counts['#FAILED'].append(curr_rid)
continue
if keep_primer:
write_seq = cpr + write_seq
if qual_out:
qual_scores_out = qual_primer + qual_scores_out
if keep_barcode:
write_seq = cbc + write_seq
if qual_out:
qual_scores_out = qual_barcode + qual_scores_out
# Record number of seqs associated with particular barcode.
bc_counts[curr_bc].append(curr_rid)
if retain_unassigned_reads and curr_samp_id == "Unassigned":
fasta_out.write(
">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\n%s\n" %
(new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))
if qual_out:
qual_out.write(
">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\n%s" %
(new_id, curr_rid, cbc, curr_bc, int(bc_diffs),
qual_scores_out))
elif not retain_unassigned_reads and curr_samp_id == "Unassigned":
bc_counts['#FAILED'].append(curr_rid)
else:
fasta_out.write(
">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\n%s\n" %
(new_id, curr_rid, cbc, curr_bc, int(bc_diffs), write_seq))
if qual_out:
qual_out.write(
">%s %s orig_bc=%s new_bc=%s bc_diffs=%s\n%s" %
(new_id, curr_rid, cbc, curr_bc, int(bc_diffs),
qual_scores_out))
curr_len = len(write_seq)
#seq_lengths[curr_rid] = curr_len
curr_ix += 1
# Record the raw and written seq length of everything passing
# filters
raw_seq_lengths[curr_rid] = len(curr_seq)
final_seq_lengths[curr_id] = curr_len
if median_length_filtering:
# Read original fasta file output to get sequence lengths
fasta_out.close()
fasta_out = open(fasta_out.name, "U")
# Record sequence lengths for median/mad calculation
sequence_lens = []
for label, seq in parse_fasta(fasta_out):
sequence_lens.append(len(seq))
'''# Create a temporary file to copy the contents of the fasta file, will
# need to delete once operations complete.
fasta_temp = open(fasta_out.name + "_tmp.fasta", "w")
sequence_lens = []
for label, seq in parse_fasta(fasta_lens):
sequence_lens.append(len(seq))
fasta_temp.write(">%s\n%s\n" % (label, seq))
fasta_temp.close()
fasta_temp = open(fasta_out.name + "_tmp.fasta", "U")
fasta_lens.close()
# Overwrite seqs.fna with length filtered data
fasta_out = open(fasta_out.name, "w")'''
med_abs_dev, med_length = median_absolute_deviation(sequence_lens)
min_corrected_len = med_length - med_abs_dev *\
float(median_length_filtering)
max_corrected_len = med_length + med_abs_dev *\
float(median_length_filtering)
seqs_discarded_median = 0
fasta_out.seek(0)
final_written_lens = []
# Create final seqs.fna
final_fasta_out = open(fasta_out.name.replace('.tmp', ''), "w")
for label, seq in parse_fasta(fasta_out):
curr_len = len(seq)
if curr_len < min_corrected_len or curr_len > max_corrected_len:
seqs_discarded_median += 1
else:
final_fasta_out.write(">%s\n%s\n" % (label, seq))
final_written_lens.append(len(seq))
final_fasta_out.close()
fasta_out.close()
remove_files([fasta_out.name])
else:
min_corrected_len = 0
max_corrected_len = 0
seqs_discarded_median = 0
final_written_lens = 0
# Copy tmp seqs file to final seqs.fna file
fasta_out.close()
fasta_out = open(fasta_out.name, "U")
# Create final seqs.fna
final_fasta_out = open(fasta_out.name.replace('.tmp', ''), "w")
for label, seq in parse_fasta(fasta_out):
final_fasta_out.write(">%s\n%s\n" % (label, seq))
final_fasta_out.close()
fasta_out.close()
remove_files([fasta_out.name])
median_results = (median_length_filtering, min_corrected_len,
max_corrected_len, seqs_discarded_median, final_written_lens)
raw_seq_lengths = raw_seq_lengths.values()
final_seq_lengths = final_seq_lengths.values()
log_out = format_log(bc_counts, corr_ct, valid_map, seq_lengths, filters,
retain_unassigned_reads, attempt_bc_correction, primer_mismatch_count,
max_primer_mm, reverse_primers, reverse_primer_not_found,
sliding_window_failed, below_seq_min_after_trunc, qual_score_window,
discard_bad_windows, min_seq_len, raw_seq_lengths,
final_seq_lengths, median_results, truncate_ambi_bases,
below_seq_min_after_ambi_trunc, )
#all_seq_lengths, good_seq_lengths = get_seq_lengths(seq_lengths, bc_counts)
return log_out, seq_lengths.values(), raw_seq_lengths, final_seq_lengths
def format_qual_output(qual_array):
""" Converts to string from numpy arrays, removes brackets """
# Size of lines needed for proper quality score file format
qual_line_size = 60
qual_scores = ""
for slice in range(0, len(qual_array), qual_line_size):
current_segment = qual_array[slice:slice + qual_line_size]
current_segment =\
" ".join(str(score) for score in current_segment) + "\n"
qual_scores += current_segment
'''qual_array = str(qual_array)
qual_array = qual_array.replace('[','')
qual_array = qual_array.replace(']','') '''
return qual_scores
def format_log(bc_counts, corr_ct, valid_map, seq_lengths, filters,
retain_unassigned_reads, attempt_bc_correction,
primer_mismatch_count, max_primer_mm,
reverse_primers, reverse_primer_not_found, sliding_window_failed,
below_seq_min_after_trunc, qual_score_window,
discard_bad_windows, min_seq_len,
raw_seq_lengths, final_seq_lengths, median_results=(None),
truncate_ambi_bases=False, below_seq_min_after_ambi_trunc=0,
):
"""Makes log lines"""
log_out = []
all_seq_lengths, good_seq_lengths = get_seq_lengths(seq_lengths, bc_counts)
log_out.append("Number raw input seqs\t%d\n" % len(seq_lengths))
# append log data for median absolute deviation sequence length filtering
# if was performed.
if median_results[0]:
if (not median_results[1] or not median_results[2] or
not median_results[3]):
log_out.append("No sequences written, so no median length data " +
"available.")
actual_median_results = False
else:
log_out.append("Specified allowed number of median absolute " +
"deviations for sequence retention: %3.2f" % (float(median_results[0])))
log_out.append("Sequences with lengths outside bounds of " +
"%d and %d: %d\n" %
(int(median_results[1]), int(median_results[2]),
int(median_results[3])))
actual_median_results = True
for f in filters:
log_out.append(str(f))
log_out.append('Num mismatches in primer exceeds limit of %s: %d\n' %
(max_primer_mm, primer_mismatch_count))
if reverse_primers == "truncate_only":
log_out.append('Number of sequences with identifiable barcode ' +
'but without identifiable reverse primer: ' +
'%d\n' % reverse_primer_not_found)
log_out.append('-z truncate_only option enabled; sequences ' +
'without a discernible reverse primer as well as sequences with a ' +
'valid barcode not found in the mapping file may still be written.\n')
if reverse_primers == "truncate_remove":
log_out.append('Number of sequences with identifiable barcode ' +
'but without identifiable reverse primer: ' +
'%d\n' % reverse_primer_not_found)
log_out.append('-z truncate_remove option enabled; sequences ' +
'without a discernible reverse primer as well as sequences with a ' +
'valid barcode not found in the mapping file will not be written.\n')
if qual_score_window:
log_out.append('Size of quality score window, in base pairs: %d' %
qual_score_window)
log_out.append('Number of sequences where a low quality score ' +
'window was detected: %d' % sliding_window_failed)
if discard_bad_windows:
log_out.append('Sequences with a low quality score were not ' +
'written, -g option enabled.\n')
else:
log_out.append('Sequences with low quality score window were ' +
'truncated to the first base of the window.')
log_out.append('Sequences discarded after truncation due to ' +
'sequence length below the minimum %d: %d\n' %
(min_seq_len, below_seq_min_after_trunc))
if truncate_ambi_bases:
log_out.append('Truncation at first ambiguous "N" character ' +
'enabled.\nSequences discarded after truncation due to sequence ' +
'length below the minimum %d: %d\n' %
(min_seq_len, below_seq_min_after_ambi_trunc))
log_out.append("Sequence length details for all sequences passing " +
"quality filters:")
if raw_seq_lengths:
log_out.append("Raw len min/max/avg\t%.1f/%.1f/%.1f" %
(min(raw_seq_lengths), max(raw_seq_lengths), mean(raw_seq_lengths)))
else:
log_out.append("No sequences passed quality filters for writing.")
if median_results[0] and actual_median_results:
log_out.append("Wrote len min/max/avg\t%.1f/%.1f/%.1f" %
(min(median_results[4]), max(median_results[4]),
mean(median_results[4])))
else:
if final_seq_lengths:
log_out.append("Wrote len min/max/avg\t%.1f/%.1f/%.1f" %
(min(final_seq_lengths), max(final_seq_lengths),
mean(final_seq_lengths)))
# figure out which barcodes we got that didn't come from valid samples
valid_bc_nomap = set(bc_counts) - set(valid_map) - set([None, '#FAILED'])
valid_bc_nomap_counts = [(len(bc_counts[b]), b) for b in valid_bc_nomap]
log_out.append("\nBarcodes corrected/not\t%d/%d" %
(corr_ct, len(bc_counts[None])))
if attempt_bc_correction:
log_out.append("Uncorrected barcodes will not be written to the " +
"output fasta file.\nCorrected barcodes will be written with " +
"the appropriate barcode category.\nCorrected but unassigned " +
"sequences will not be written unless --retain_unassigned_reads " +
"is enabled.\n")
else:
log_out.append("Barcode correction has been disabled via the -c " +
"option.\n")
log_out.append("Total valid barcodes that are not in mapping file\t%d" %
len(valid_bc_nomap_counts))
if valid_bc_nomap:
log_out.append("Barcodes not in mapping file\tCount")
for count, bc in reversed(sorted(valid_bc_nomap_counts)):
log_out.append("%s\t%d" % (bc, count))
if retain_unassigned_reads:
log_out.append("Sequences associated with valid barcodes that are not" +
" in the mapping file will be written. --retain_unassigned_reads " +
"option enabled.")
elif not attempt_bc_correction:
log_out.append("Barcode correction has been disabled (-c option), " +
"no unassigned or invalid barcode sequences will be recorded.")
else:
log_out.append("Sequences associated with valid barcodes that are " +
"not in the mapping file will not be written.")
log_out.append("\nBarcodes in mapping file")
sample_cts = [(len(bc_counts[bc]), bc, sample_id) for bc, sample_id
in valid_map.items()]
if sample_cts:
filtered_sample_cts = [s[0] for s in sample_cts if s[0]]
if filtered_sample_cts:
log_out.append("Num Samples\t%d" % len(filtered_sample_cts))
log_out.append("Sample ct min/max/mean: %d / %d / %.2f" % (
min(filtered_sample_cts), max(filtered_sample_cts),
mean(filtered_sample_cts)))
log_out.append("Sample\tSequence Count\tBarcode")
for count, bc, sample_id in reversed(sorted(sample_cts)):
log_out.append("%s\t%d\t%s" % (sample_id, count, bc))
if median_results[0]:
corrected_written_seqs = len(good_seq_lengths) - median_results[3]
else:
corrected_written_seqs = len(good_seq_lengths)
log_out.append("\nTotal number seqs written\t%d" % corrected_written_seqs)
return log_out
def get_reverse_primers(id_map):
""" Return a dictionary with barcodes and rev-complement of rev primers """
rev_primers = {}
for n in id_map.items():
# Generate a dictionary with Barcode:reverse primer
# Convert to reverse complement of the primer so it's in the
# proper orientation with the input fasta sequences
rev_primers[n[1]['BarcodeSequence']] =\
[str(DNASequence(curr_rev_primer).rc()) for curr_rev_primer in
(n[1]['ReversePrimer']).split(',')]
return rev_primers
def preprocess(fasta_files, qual_files, mapping_file,
barcode_type="golay_12",
min_seq_len=200, max_seq_len=1000, min_qual_score=25, starting_ix=1,
keep_primer=False, max_ambig=0, max_primer_mm=0, trim_seq_len=False,
dir_prefix='.', max_bc_errors=2, max_homopolymer=4,
retain_unassigned_reads=False, keep_barcode=False,
attempt_bc_correction=True, qual_score_window=0,
disable_primer_check=False, reverse_primers='disable',
reverse_primer_mismatches=0,
record_qual_scores=False, discard_bad_windows=False,
median_length_filtering=None, added_demultiplex_field=None,
truncate_ambi_bases=False):
"""
Preprocess barcoded libraries, e.g. from 454.
Parameters:
fasta_files: list of raw 454 fasta files, fasta format.
qual_files: list of raw 454 qual file(s)
mapping_file: mapping file with BarcodeSequence column containing valid
barcodes used in the 454 run
barcode_type: type of barcode, e.g. golay_12. Should appear in list of
known barcode types.
min_seq_len: minimum sequence length to allow.
max_seq_len: maximum sequence length to allow.
min_qual_score: minimum average qual score considered acceptable.
starting_ix: integer to start sample sequence numbering at.
keep_primer: when True, will keep primer sequence, otherwise will strip it
keep_barcode: when True, will keep barcode sequence, otherwise will strip it
max_ambig: maximum number of ambiguous bases to allow in the read.
max_primer_mm: maximum number of primer mismatches to allow.
trim_seq_len: if True, calculates lengths after trimming.
dir_prefix: prefix of directories to write files into.
max_bc_errors: maximum number of barcode errors to allow in output seqs
max_homopolymer: maximum number of a nucleotide that can be
repeated in a given sequence.
retain_unassigned_reads: If True (False default), will write seqs to the
output .fna file that have a valid barcode (by Golay or Hamming standard)
but are not included in the input mapping file, as "Unassigned."
attempt_bc_correction: (default True) will attempt to find nearest valid
barcode. Can be disabled to improve performance.
disable_primer_check: (default False) Disables testing for primers in the
input mapping file and primer testing in the input sequence files.
reverse_primers: (default 'disable') Enables removal of reverse primers and
any subsequent sequence data from output reads. Reverse primers have to
be in 5'->3' format and in correct IUPAC codes in a column "ReversePrimer"
in the input mapping file. Run validate_mapping_file.py to test the primers in this
column for valid formatting. The primers read from this column will be
reverse complemented and associated with the given barcode in the
mapping file. If set to 'truncate_only', sequences where primers are found
will be truncated, sequences where the primer is not found will be written
unchanged. If set to 'truncate_remove', sequences where primers are found
will be truncated, sequences where the primer is not found will not be
written and counted in the log file as failing for this reason. The
mismatches allowed for a reverse primer match are the same as specified
for the forward primer mismatches with the -M parameter (default 0).
reverse_primer_mismatches: Number of reverse primer mismatches allowed.
reverse_primers must be enabled for this to do anything.
record_qual_scores: (default False) Will record quality scores for all
sequences that are written to the output seqs.fna file in a separate
file (seqs_filtered.qual) containing the same sequence IDs and
quality scores for all bases found in the seqs.fna file.
discard_bad_windows: (default False) If True, will completely discard
sequences that have a low quality window. If False, sequences will be
truncated to the first base of the bad window.
median_length_filtering: (default None) If a value is specified, will
disable all min and max length filtering; instead, the median length of
all output sequences is calculated and sequences are filtered out
based upon whether or not they fall within the number of median absolute
deviations given by this parameter.
added_demultiplex_field: (default None) If enabled, will attempt to
demultiplex by both a barcode and data that can be parsed from the fasta
label/comment. If 'run_prefix' is specified will pull the character string
immediately following the '>' at the beginning of the fasta label. Any
other field specified will be pulled from the comment. Example: if 'plate'
is specified, the string following 'plate=' will be used. The mapping file
must contain a column with a header matching the name specified by the -j
option, and every combination of barcode + added demultiplex option must
be unique.
truncate_ambi_bases: (default False) If enabled, will truncate the
sequence at the first "N" character.
Result:
in dir_prefix, writes the following files:
id_map.xls: 2-column tab-delimited text format orig_id:new_id
error_map.xls: 2-column tab-delimited text format orig_id:fail_reasons
seqs.fasta: sequences with new ids lib_index in fasta format
lengths.xls: histograms of unfiltered and filtered lengths, resolution 10 bp
"""
if max_seq_len < 10:
raise ValueError("Max sequence must be >= 10")
if min_seq_len >= max_seq_len:
raise ValueError("Min len cannot be >= max len")
if min_qual_score < 0:
raise ValueError("Min qual score must be > 0")
if starting_ix < 1:
raise ValueError("Starting index must be > 0.")
if max_ambig < 0:
raise ValueError("Max ambig chars must be >= 0.")
if max_primer_mm < 0:
raise ValueError("Max primer mismatches must be >= 0.")
if reverse_primers not in ['disable', 'truncate_only', 'truncate_remove']:
raise ValueError("reverse_primers parameter must be 'disable', " +
"truncate_only, or truncate_remove.")
create_dir(dir_prefix, fail_on_exist=False)
# try:
# stat(dir_prefix)
# except OSError:
# mkdir(dir_prefix)
"""# Generate primer sequence patterns - changing to mapping file primers.
all_primer_seqs, primer_seq_len = \
get_primer_seqs(primer_seq_pats.split(',')) """
# Check mapping file and get barcode mapping
map_file = open(mapping_file, 'U')
headers, id_map, valid_map, warnings, errors, \
primer_seqs_lens, all_primers = check_map(
map_file, disable_primer_check,
barcode_type, added_demultiplex_field)
if reverse_primers != 'disable':
if 'ReversePrimer' not in headers:
raise ValueError('To enable reverse primer check, there must ' +
'be a "ReversePrimer" column in the mapping file with a reverse ' +
'primer in each cell.')
rev_primers = get_reverse_primers(id_map)
else:
rev_primers = False
# *** Generate dictionary of {barcode: DNA(ReversePrimer).rc()}
# First check for ReversePrimer in headers, raise error if not found
# Implement local alignment for primer after barcode is determined.
# Add option to flag seq with error for rev_primer not found
# Check primer hit index, truncate sequence
# unit tests.
map_file.close()
if errors:
raise ValueError("Invalid mapping file. " +
"Validate with validate_mapping_file.py first: %s" % "\n".join(errors))
# Find actual length of barcodes in the mapping file, also check for
# variable lengths, in case of added_demultiplex, split on comma.
barcode_length_check =\
list(set([len(bc.split(',')[0]) for bc in valid_map]))
# Check barcode type
if barcode_type not in BARCODE_TYPES:
try:
barcode_len, barcode_fun = int(barcode_type), correct_barcode
except ValueError:
raise ValueError("Unsupported barcode type: %s" % barcode_type)
else:
barcode_len, barcode_fun = BARCODE_TYPES[barcode_type]
# As people often do not specify a barcode type that matches the lengths
# of the barcodes used, a check on the actual barcode lengths needs to
# be done, and an exception raised if they are variable length and not
# specified as so.
if barcode_type != "variable_length":
# Raise error if variable length barcodes are present but not
# specified
if len(barcode_length_check) != 1:
raise ValueError('Mapping file has variable length ' +
'barcodes. If this is intended, specify variable lengths ' +
'with the -b variable_length option.')
# Raise error if the specified barcode length doesn't match what
# is present in the mapping file.
if barcode_len != barcode_length_check[0]:
raise ValueError('Barcode length detected in the mapping file, ' +
' %d does not match specified barcode length, %d. ' %
(barcode_length_check[0], barcode_len) + 'To specify a barcode ' +
'length use -b golay_12 or -b hamming_8 for 12 and 8 base pair ' +
'golay or hamming codes respectively, or -b # where # is the ' +
'length of the barcode used. E.g. -b 4 for 4 base pair barcodes.')
fasta_files = map(get_infile, fasta_files)
qual_files = map(get_infile, qual_files)
# Check fasta files valid format, no duplicate ids
# and ids match between fasta and qual files
all_fasta_ids = fasta_ids(fasta_files)
all_qual_ids = fasta_ids(qual_files)
if qual_files and (len(all_fasta_ids) != len(all_qual_ids)):
f_ids = all_fasta_ids.difference(all_qual_ids)
q_ids = all_qual_ids.difference(all_fasta_ids)
raise ValueError(
"Found %d ids in fasta file not in qual file, %d ids in qual file not in fasta" %
(len(f_ids), len(q_ids)))
for f in fasta_files:
f.seek(0)
if qual_files:
for q in qual_files:
q.seek(0)
# Load quality scores
qual_mappings = parse_qual_scores(qual_files)
for q in qual_files:
q.close()
else:
qual_mappings = {}
# make filters
filters = []
# seq len filter depends on whether we're including the barcode, if
# median_length_filtering turned on, no length filtering.
if not median_length_filtering:
if trim_seq_len:
# This processing occurs before primer testing, will use largest
# primer length to calculate lengths. the dict all_primers has
# keys of each primer with the length of said primer as the value
if disable_primer_check:
primer_seq_len = 0
else:
primer_seq_len = max(all_primers.values())
if barcode_type == "variable_length":
barcode_len = max(barcode_length_check)
trim = barcode_len + primer_seq_len
filters.append(SeqQualBad(
'Length outside bounds of %s and %s' % (
min_seq_len,
max_seq_len),
lambda id_, seq, qual:
not (min_seq_len <= len(seq) - trim <= max_seq_len)))
else:
filters.append(SeqQualBad(
'Length outside bounds of %s and %s' % (
min_seq_len,
max_seq_len),
lambda id_, seq, qual: not (min_seq_len <= len(seq) <= max_seq_len)))
if not truncate_ambi_bases:
filters.append(SeqQualBad(
'Num ambiguous bases exceeds limit of %s' % max_ambig,
lambda id_, seq, qual: count_ambig(seq) > max_ambig))
if qual_mappings:
filters.append(QualMissing)
filters.append(SeqQualBad(
'Mean qual score below minimum of %s' % min_qual_score,
lambda id_, seq, qual: mean(qual) < min_qual_score))
"""if qual_score_window:
filters.append(SeqQualBad('Mean window qual score below '+\
'minimum of %s' % min_qual_score,
lambda id_, seq, qual: \
not check_window_qual_scores(qual, qual_score_window, \
min_qual_score))) """
# Changed this to check the entire sequence after the barcode - could cause
# issues if barcode-linker-primer have long homopolymers though.
filters.append(SeqQualBad(
'Max homopolymer run exceeds limit of %s' % max_homopolymer,
lambda id_, seq, qual: seq_exceeds_homopolymers(
seq[barcode_len:], max_homopolymer)))
# Check seqs and write out
fasta_out = open(dir_prefix + '/' + 'seqs.fna.tmp', 'w+')
if record_qual_scores:
qual_out = open(dir_prefix + '/' + 'seqs_filtered.qual', 'w+')
else:
qual_out = False
'''log_stats, pre_lens, post_lens = check_seqs(fasta_out, fasta_files,
starting_ix, valid_map, qual_mappings, filters, barcode_len,
primer_seq_len, keep_primer, keep_barcode, barcode_type, max_bc_errors,
retain_unassigned_reads) '''
log_stats, raw_lens, pre_lens, post_lens = check_seqs(fasta_out,
fasta_files, starting_ix, valid_map, qual_mappings, filters,
barcode_len, keep_primer, keep_barcode, barcode_type, max_bc_errors,
retain_unassigned_reads, attempt_bc_correction,
primer_seqs_lens, all_primers, max_primer_mm, disable_primer_check,
reverse_primers, rev_primers, qual_out, qual_score_window,
discard_bad_windows, min_qual_score, min_seq_len,
median_length_filtering, added_demultiplex_field,
reverse_primer_mismatches, truncate_ambi_bases)
# Write log file
log_file = open(dir_prefix + '/' + "split_library_log.txt", 'w+')
log_file.write('\n'.join(log_stats))
log_file.close()
# Write sequence distros here
histogram_file = open(dir_prefix + '/' + 'histograms.txt', 'w+')
histogram_file.write(format_histograms
(*make_histograms(raw_lens, pre_lens, post_lens)))
histogram_file.close()
|
welex91/ansible-modules-core
|
refs/heads/devel
|
cloud/rackspace/rax_queue.py
|
157
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module; it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_queue
short_description: create / delete a queue in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud queue.
version_added: "1.5"
options:
name:
description:
- Name to give the queue
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Queue
gather_facts: False
hosts: local
connection: local
tasks:
- name: Queue create request
local_action:
module: rax_queue
credentials: ~/.raxpub
name: my-queue
region: DFW
state: present
register: my_queue
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_queue(module, state, name):
for arg in (state, name):
if not arg:
module.fail_json(msg='%s is required for rax_queue' % arg)
changed = False
queues = []
instance = {}
cq = pyrax.queues
if not cq:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
for queue in cq.list():
if name != queue.name:
continue
queues.append(queue)
if len(queues) > 1:
module.fail_json(msg='Multiple Queues were matched by name')
if state == 'present':
if not queues:
try:
queue = cq.create(name)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
queue = queues[0]
instance = dict(name=queue.name)
result = dict(changed=changed, queue=instance)
module.exit_json(**result)
elif state == 'absent':
if queues:
queue = queues[0]
try:
queue.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, queue=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
state = module.params.get('state')
setup_rax_module(module, pyrax)
cloud_queue(module, state, name)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
|
erdinc-me/GmailURLParser
|
refs/heads/master
|
sub/extractors.py
|
1
|
# -*- coding: utf-8 -*-
__project_name__ = 'GmailURLParser'
__author__ = 'Erdinç Yılmaz'
__date__ = '10/22/14'
# import lxml.html
from lxml.html import fromstring
import re
# from bs4 import BeautifulSoup
# #--------------------------------------------------------------------------
# def extract_links_bs4(messageHTML):
#
# # soup = BeautifulSoup(messageHTML, "html.parser")
# # soup = BeautifulSoup(messageHTML, "html5lib")
# soup = BeautifulSoup(messageHTML, "lxml")
#
# links = []
# for a in soup.findAll("a"):
# link = a.get("href")
# if link:
# links.append(link)
#
# return links
#---------------------------------------------------------------------
def extract_links_lxml(messageHTML):
links = []
dom = fromstring(messageHTML)
for link in dom.xpath('//a/@href'): # select the url in href for all a tags(links)
links.append(link)
return links
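# Example (sketch): for a full document such as
#   '<html><body><a href="http://example.com/a">x</a></body></html>'
# extract_links_lxml() returns ['http://example.com/a'].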
#---------------------------------------------------------------------
def extract_regex(messageTxt):
links = []
# match_urls = re.compile(r"""((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|(([^\s()<>]+|(([^\s()<>]+)))*))+(?:(([^\s()<>]+|(([^\s()<>]+)))*)|[^\s`!()[]{};:'".,<>?«»“”‘’]))""", re.DOTALL)
# re_match_urls = re.compile(r"""((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|(([^\s()<>]+|(([^\s()<>]+)))*))+(?:(([^\s()<>]+|(([^\s()<>]+)))*)|[^\s`!()[]{};:'".,<>?«»“”‘’]))""", re.DOTALL)
# And, e.g., wrapping links in <a> tags could be done like this: re_match_urls.sub(lambda x: '<a href="%(url)s">%(url)s</a>' % dict(url=str(x.group())), string_to_match)
GRUBER_URLINTEXT_PAT = re.compile(ur'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?\xab\xbb\u201c\u201d\u2018\u2019]))')
grup = GRUBER_URLINTEXT_PAT.findall(messageTxt)
for link in grup:
links.append(link[0])
return links
# for line in messageTxt.split(" "):
# print [mgroups[0] for mgroups in GRUBER_URLINTEXT_PAT.findall(line)]
|
mpasternak/dojango-datable
|
refs/heads/master
|
test_project/test_app/tests.py
|
1
|
#
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.test.client import Client
from datable.testutil import TestPageWithDatableMixin
class TestBooks(TestPageWithDatableMixin, TestCase):
tableName = 'first_table'
urlName = 'books_demo'
def setUp(self):
super(TestBooks, self).setUp()
self.client = Client()
params = [
dict(),
dict(sort='title'),
dict(sort='-title'),
dict(__format__='widget,book_type'),
dict(__format__='widget,book_type', f='sci'),
dict(__format__='widget,authors'),
dict(__format__='widget,authors', f='brown'),
dict(periodic='true'),
dict(approved='true'),
]
class TestAuthors(TestBooks):
tableName = 'first_table'
urlName = 'authors_demo'
params = [
dict(),
dict(start=0, count=25, author='asf'),
dict(sort='last'),
dict(sort='-last'),
dict(sort='first'),
dict(sort='-first'),
dict(sort='authors'),
dict(sort='authors'),
]
class TestInitialValues(TestCase):
def test_initialValues(self):
client = Client()
res = client.get(reverse('authors_demo'))
self.assertContains(res, "first_tableGridFilter['author'] = 'joh';")
|
ivanyu/rosalind
|
refs/heads/master
|
algorithmic_heights/sc/sc.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def main(argv):
from sc_logic import check_semi_connectedness
graphs = []
if len(argv) < 2:
print('k = 2')
k = 2
print('Graph 1:')
print('n = 3')
n = 3
print('m = 2')
m = 2
g = [[0 for i in range(n)] for _ in range(n)]
print('3 2')
g[2][1] = 1
print('2 1')
g[1][0] = 1
graphs.append(g)
print('Graph 2:')
print('n = 3')
n = 3
print('m = 2')
m = 2
g = [[0 for i in range(n)] for _ in range(n)]
print('3 2')
g[2][1] = 1
print('1 2')
g[0][1] = 1
graphs.append(g)
else:
with open(argv[1]) as f:
k = int(f.readline().strip())
for _ in range(k):
f.readline()
line = f.readline()
n, m = [int(x.strip()) for x in line.strip().split()]
g = [[0 for _ in range(n)] for _ in range(n)]
for edge in range(m):
line = f.readline()
i, j = [int(x.strip()) for x in line.strip().split()]
g[i - 1][j - 1] = 1
graphs.append(g)
for g in graphs:
r = check_semi_connectedness(g)
print('1' if r else -1, end=' ')
if __name__ == "__main__":
import sys
main(sys.argv)
|
dati91/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/fetch/api/resources/method.py
|
161
|
def main(request, response):
headers = []
if "cors" in request.GET:
headers.append(("Access-Control-Allow-Origin", "*"))
headers.append(("Access-Control-Allow-Credentials", "true"))
headers.append(("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"))
headers.append(("Access-Control-Allow-Headers", "x-test, x-foo"))
headers.append(("Access-Control-Expose-Headers", "x-request-method"))
headers.append(("x-request-method", request.method))
return headers, request.body
|
kindersung/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/fetch/nosniff/resources/worker.py
|
219
|
def main(request, response):
type = request.GET.first("type", None)
content = "// nothing to see here"
content += "\n"
content += "this.postMessage('hi')"
response.add_required_headers = False
response.writer.write_status(200)
response.writer.write_header("x-content-type-options", "nosniff")
response.writer.write_header("content-length", len(content))
if(type != None):
response.writer.write_header("content-type", type)
response.writer.end_headers()
response.writer.write(content)
|
PXke/invenio
|
refs/heads/dev-pu
|
invenio/celery/registry.py
|
4
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from flask.ext.registry import ModuleAutoDiscoveryRegistry, RegistryProxy
tasks = RegistryProxy('tasks', ModuleAutoDiscoveryRegistry, 'tasks')
|
davgibbs/django
|
refs/heads/master
|
tests/validation/test_validators.py
|
320
|
from __future__ import unicode_literals
from . import ValidationTestCase
from .models import ModelToValidate
class TestModelsWithValidators(ValidationTestCase):
def test_custom_validator_passes_for_correct_value(self):
mtv = ModelToValidate(number=10, name='Some Name', f_with_custom_validator=42)
self.assertIsNone(mtv.full_clean())
def test_custom_validator_raises_error_for_incorrect_value(self):
mtv = ModelToValidate(number=10, name='Some Name', f_with_custom_validator=12)
self.assertFailsValidation(mtv.full_clean, ['f_with_custom_validator'])
self.assertFieldFailsValidationWithMessage(
mtv.full_clean,
'f_with_custom_validator',
['This is not the answer to life, universe and everything!']
)
|
uppsaladatavetare/foobar-api
|
refs/heads/develop
|
src/foobar/migrations/0020_auto_20170302_1359.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-02 13:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('foobar', '0019_auto_20170221_1547'),
]
operations = [
migrations.AlterField(
model_name='account',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
]
|
FabianKnapp/nexmon
|
refs/heads/master
|
buildtools/gcc-arm-none-eabi-5_4-2016q2-linux-x86/arm-none-eabi/lib/armv7e-m/softfp/libstdc++.a-gdb.py
|
6
|
# -*- python -*-
# Copyright (C) 2009-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/home/build/work/GCC-5-0-build/install-native/share/gcc-arm-none-eabi'
libdir = '/home/build/work/GCC-5-0-build/install-native/arm-none-eabi/lib/armv7e-m/softfp'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
|
Validus-Kernel/kernel_oneplus2
|
refs/heads/cm-13.0
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
|
4653
|
# EventClass.py
#
# This is a library defining some event type classes, which could
# be used by other scripts to analyze the perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users can add more SW/HW
# event classes based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type other than by
# the size of the raw buffer: a raw PEBS event with load latency data is
# 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
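# Usage sketch (hypothetical caller): a perf trace script's sample handler
# could do `ev = create_event(name, comm, dso, symbol, raw_buf); ev.show()`
# to dispatch on the raw buffer size and print the basic event fields.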
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lies
# in the four 64 bit words written after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
|
wuhengzhi/chromium-crosswalk
|
refs/heads/master
|
third_party/WebKit/Source/devtools/scripts/generate_protocol_externs.py
|
15
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
type_traits = {
"any": "*",
"string": "string",
"integer": "number",
"number": "number",
"boolean": "boolean",
"array": "!Array.<*>",
"object": "!Object",
}
promisified_domains = {
"Accessibility",
"Animation",
"CSS",
"Emulation",
"Profiler"
}
ref_types = {}
def full_qualified_type_id(domain_name, type_id):
if type_id.find(".") == -1:
return "%s.%s" % (domain_name, type_id)
return type_id
def fix_camel_case(name):
refined = re.sub(r'-(\w)', lambda pat: pat.group(1).upper(), name)
refined = to_title_case(refined)
return re.sub(r'(?i)HTML|XML|WML|API', lambda pat: pat.group(0).upper(), refined)
def to_title_case(name):
return name[:1].upper() + name[1:]
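# e.g. fix_camel_case("enable-rum-api") gives "EnableRumAPI": dash-separated
# parts are camel-cased, the result is title-cased, and known acronyms
# (HTML, XML, WML, API) are upper-cased.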
def generate_enum(name, json):
enum_members = []
for member in json["enum"]:
enum_members.append(" %s: \"%s\"" % (fix_camel_case(member), member))
return "\n/** @enum {string} */\n%s = {\n%s\n};\n" % (name, (",\n".join(enum_members)))
def param_type(domain_name, param):
if "type" in param:
if param["type"] == "array":
items = param["items"]
return "!Array.<%s>" % param_type(domain_name, items)
else:
return type_traits[param["type"]]
if "$ref" in param:
type_id = full_qualified_type_id(domain_name, param["$ref"])
if type_id in ref_types:
return ref_types[type_id]
else:
print "Type not found: " + type_id
return "!! Type not found: " + type_id
def generate_protocol_externs(output_path, input_path):
input_file = open(input_path, "r")
json_string = input_file.read()
json_string = json_string.replace(": true", ": True")
json_string = json_string.replace(": false", ": False")
json_api = eval(json_string)["domains"]
output_file = open(output_path, "w")
output_file.write(
"""
var InspectorBackend = {}
var Protocol = {};
/** @typedef {string}*/
Protocol.Error;
""")
for domain in json_api:
domain_name = domain["domain"]
if "types" in domain:
for type in domain["types"]:
type_id = full_qualified_type_id(domain_name, type["id"])
ref_types[type_id] = "%sAgent.%s" % (domain_name, type["id"])
for domain in json_api:
domain_name = domain["domain"]
promisified = domain_name in promisified_domains
output_file.write("\n\n/**\n * @constructor\n*/\n")
output_file.write("Protocol.%sAgent = function(){};\n" % domain_name)
if "commands" in domain:
for command in domain["commands"]:
output_file.write("\n/**\n")
params = []
has_return_value = "returns" in command
explicit_parameters = promisified and has_return_value
if ("parameters" in command):
for in_param in command["parameters"]:
# No parameters are optional in the case of a promisified domain with a return value.
if (not explicit_parameters and "optional" in in_param):
params.append("opt_%s" % in_param["name"])
output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, in_param), in_param["name"]))
else:
params.append(in_param["name"])
output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, in_param), in_param["name"]))
returns = []
returns.append("?Protocol.Error")
if ("error" in command):
returns.append("%s=" % param_type(domain_name, command["error"]))
if (has_return_value):
for out_param in command["returns"]:
if ("optional" in out_param):
returns.append("%s=" % param_type(domain_name, out_param))
else:
returns.append("%s" % param_type(domain_name, out_param))
callback_return_type = "void="
if explicit_parameters:
callback_return_type = "T"
elif promisified:
callback_return_type = "T="
output_file.write(" * @param {function(%s):%s} opt_callback\n" % (", ".join(returns), callback_return_type))
if (promisified):
output_file.write(" * @return {!Promise.<T>}\n")
output_file.write(" * @template T\n")
params.append("opt_callback")
output_file.write(" */\n")
output_file.write("Protocol.%sAgent.prototype.%s = function(%s) {}\n" % (domain_name, command["name"], ", ".join(params)))
output_file.write("/** @param {function(%s):void=} opt_callback */\n" % ", ".join(returns))
output_file.write("Protocol.%sAgent.prototype.invoke_%s = function(obj, opt_callback) {}\n" % (domain_name, command["name"]))
output_file.write("\n\n\nvar %sAgent = function(){};\n" % domain_name)
if "types" in domain:
for type in domain["types"]:
if type["type"] == "object":
typedef_args = []
if "properties" in type:
for property in type["properties"]:
suffix = ""
if ("optional" in property):
suffix = "|undefined"
if "enum" in property:
enum_name = "%sAgent.%s%s" % (domain_name, type["id"], to_title_case(property["name"]))
output_file.write(generate_enum(enum_name, property))
typedef_args.append("%s:(%s%s)" % (property["name"], enum_name, suffix))
else:
typedef_args.append("%s:(%s%s)" % (property["name"], param_type(domain_name, property), suffix))
if (typedef_args):
output_file.write("\n/** @typedef {!{%s}} */\n%sAgent.%s;\n" % (", ".join(typedef_args), domain_name, type["id"]))
else:
output_file.write("\n/** @typedef {!Object} */\n%sAgent.%s;\n" % (domain_name, type["id"]))
elif type["type"] == "string" and "enum" in type:
output_file.write(generate_enum("%sAgent.%s" % (domain_name, type["id"]), type))
elif type["type"] == "array":
output_file.write("\n/** @typedef {!Array.<!%s>} */\n%sAgent.%s;\n" % (param_type(domain_name, type["items"]), domain_name, type["id"]))
else:
output_file.write("\n/** @typedef {%s} */\n%sAgent.%s;\n" % (type_traits[type["type"]], domain_name, type["id"]))
output_file.write("/** @interface */\n")
output_file.write("%sAgent.Dispatcher = function() {};\n" % domain_name)
if "events" in domain:
for event in domain["events"]:
params = []
if ("parameters" in event):
output_file.write("/**\n")
for param in event["parameters"]:
if ("optional" in param):
params.append("opt_%s" % param["name"])
output_file.write(" * @param {%s=} opt_%s\n" % (param_type(domain_name, param), param["name"]))
else:
params.append(param["name"])
output_file.write(" * @param {%s} %s\n" % (param_type(domain_name, param), param["name"]))
output_file.write(" */\n")
output_file.write("%sAgent.Dispatcher.prototype.%s = function(%s) {};\n" % (domain_name, event["name"], ", ".join(params)))
output_file.write("\n/** @constructor\n * @param {!Object.<string, !Object>} agentsMap\n */\n")
output_file.write("Protocol.Agents = function(agentsMap){this._agentsMap;};\n")
output_file.write("/**\n * @param {string} domain\n * @param {!Object} dispatcher\n */\n")
output_file.write("Protocol.Agents.prototype.registerDispatcher = function(domain, dispatcher){};\n")
for domain in json_api:
domain_name = domain["domain"]
uppercase_length = 0
while uppercase_length < len(domain_name) and domain_name[uppercase_length].isupper():
uppercase_length += 1
output_file.write("/** @return {!Protocol.%sAgent}*/\n" % domain_name)
output_file.write("Protocol.Agents.prototype.%s = function(){};\n" % (domain_name[:uppercase_length].lower() + domain_name[uppercase_length:] + "Agent"))
output_file.write("/**\n * @param {!%sAgent.Dispatcher} dispatcher\n */\n" % domain_name)
output_file.write("Protocol.Agents.prototype.register%sDispatcher = function(dispatcher) {}\n" % domain_name)
output_file.close()
if __name__ == "__main__":
import sys
import os.path
program_name = os.path.basename(__file__)
if len(sys.argv) < 4 or sys.argv[1] != "-o":
sys.stderr.write("Usage: %s -o OUTPUT_FILE INPUT_FILE\n" % program_name)
exit(1)
output_path = sys.argv[2]
input_path = sys.argv[3]
generate_protocol_externs(output_path, input_path)
|
fmaschler/networkit
|
refs/heads/SCD-weighted
|
networkit/algebraic.py
|
2
|
""" This module deals with the conversion of graphs into matrices and linear algebra operations on graphs """
__author__ = "Christian Staudt"
# local imports
# external imports
try:
import scipy.sparse
import scipy.sparse.linalg # needed for eigsh/eigs below
import scipy.sparse.csgraph # needed for the laplacian helper below
except ImportError:
print("module 'scipy' not available -- some functionality will be restricted")
try:
import numpy as np
except ImportError:
print("module 'numpy' not available -- some functionality will be restricted")
def column(matrix, i):
return [row[i] for row in matrix]
def adjacencyMatrix(G, matrixType="sparse"):
""" Get the adjacency matrix of the graph `G`.
Parameters
----------
G : Graph
The graph.
matrixType : string
represent"sparse" or "dense"
Returns
-------
:py:class:`scipy.sparse.csr_matrix`
The adjacency matrix of the graph.
"""
n = G.upperNodeIdBound()
if matrixType is "sparse":
A = scipy.sparse.lil_matrix((n,n))
elif matrixType is "dense":
A = np.zeros(shape=(n,n))
else:
raise InputError("unknown matrix type: '{0}'".format(matrixType))
# TODO: replace .edges() with efficient iterations
if G.isWeighted():
if G.isDirected():
def processEdge(u,v,w,id):
A[u, v] = w
else:
def processEdge(u,v,w,id):
A[u, v] = w
A[v, u] = w
else:
if G.isDirected():
def processEdge(u,v,w,id):
A[u, v] = 1
else:
def processEdge(u,v,w,id):
A[u, v] = 1
A[v, u] = 1
G.forEdges(processEdge)
if matrixType == "sparse":
A = A.tocsr() # convert to CSR for more efficient arithmetic operations
return A
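# Usage sketch (assuming a networkit Graph `G` is available):
#   A = adjacencyMatrix(G)            # scipy.sparse CSR matrix
#   D = adjacencyMatrix(G, "dense")   # numpy ndarray of shape (n, n)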
def laplacianMatrix(G):
""" Get the laplacian matrix of the graph `G`.
Parameters
----------
G : Graph
The graph.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
"""
A = adjacencyMatrix(G)
return scipy.sparse.csgraph.laplacian(A)
def PageRankMatrix(G, damp=0.85):
"""
Builds the PageRank matrix of the undirected Graph `G`. This matrix corresponds with the
PageRank matrix used in the C++ backend.
Parameters
----------
G : Graph
The graph.
damp:
Damping factor of the PageRank algorithm (0.85 by default)
Returns
-------
pr : ndarray
The N x N page rank matrix of graph.
"""
A = adjacencyMatrix(G)
n = G.numberOfNodes()
stochastify = scipy.sparse.lil_matrix((n,n))
for v in G.nodes():
neighbors = G.degree(v)
if neighbors == 0:
stochastify[v,v] = 0.0 # TODO: check correctness
else:
stochastify[v,v] = 1.0 / neighbors
stochastify = stochastify.tocsr()
stochastic = A * stochastify
dampened = stochastic * damp
teleport = scipy.sparse.identity(G.numberOfNodes(), format="csr") * ((1 - damp) / G.numberOfNodes())
return dampened + teleport
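# Usage sketch (assuming a networkit Graph `G`):
#   P = PageRankMatrix(G)             # default damping factor of 0.85
#   P90 = PageRankMatrix(G, damp=0.9)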
def symmetricEigenvectors(matrix, cutoff=-1, reverse=False):
"""
Computes eigenvectors and -values of symmetric matrices.
Parameters
----------
matrix : sparse matrix
The matrix to compute the eigenvectors of
cutoff : int
The maximum (or minimum) magnitude of the eigenvectors needed
reverse : boolean
If set to true, the smaller eigenvalues will be computed before the larger ones
Returns
-------
pr : ( [ float ], [ ndarray ] )
A tuple of ordered lists, the first containing the eigenvalues in descending (ascending) magnitude, the
second one holding the corresponding eigenvectors
"""
if cutoff == -1:
cutoff = matrix.shape[0] - 2
if reverse:
mode = "SA"
else:
mode = "LA"
w, v = scipy.sparse.linalg.eigsh(matrix, cutoff + 1, which=mode)
orderlist = zip(w, range(0, len(w)))
orderlist = sorted(orderlist)
orderedW = column(orderlist, 0)
orderedV = [v[:,i] for i in column(orderlist, 1)]
return (orderedW, orderedV)
def eigenvectors(matrix, cutoff=-1, reverse=False):
"""
Computes eigenvectors and -values of matrices.
Parameters
----------
matrix : sparse matrix
The matrix to compute the eigenvectors of
cutoff : int
The maximum (or minimum) magnitude of the eigenvectors needed
reverse : boolean
If set to true, the smaller eigenvalues will be computed before the larger ones
Returns
-------
pr : ( [ float ], [ ndarray ] )
A tuple of ordered lists, the first containing the eigenvalues in descending (ascending) magnitude, the
second one holding the corresponding eigenvectors
"""
if cutoff == -1:
cutoff = matrix.shape[0] - 2
if reverse:
mode = "SR"
else:
mode = "LR"
w, v = scipy.sparse.linalg.eigs(matrix, cutoff + 1, which=mode)
orderlist = zip(w, range(0, len(w)))
orderlist = sorted(orderlist)
orderedW = column(orderlist, 0)
orderedV = [v[:,i] for i in column(orderlist, 1)]
return (orderedW, orderedV)
def laplacianEigenvectors(G, cutoff=-1, reverse=False):
if G.isDirected():
return eigenvectors(laplacianMatrix(G), cutoff=cutoff, reverse=reverse)
else:
return symmetricEigenvectors(laplacianMatrix(G), cutoff=cutoff, reverse=reverse)
def adjacencyEigenvectors(G, cutoff=-1, reverse=False):
if G.isDirected():
return eigenvectors(adjacencyMatrix(G), cutoff=cutoff, reverse=reverse)
else:
return symmetricEigenvectors(adjacencyMatrix(G), cutoff=cutoff, reverse=reverse)
def laplacianEigenvector(G, order, reverse=False):
if G.isDirected():
spectrum = eigenvectors(laplacianMatrix(G), cutoff=order, reverse=reverse)
else:
spectrum = symmetricEigenvectors(laplacianMatrix(G), cutoff=order, reverse=reverse)
return (spectrum[0][order], spectrum[1][order])
def adjacencyEigenvector(G, order, reverse=False):
if G.isDirected():
spectrum = eigenvectors(adjacencyMatrix(G), cutoff=order, reverse=reverse)
else:
spectrum = symmetricEigenvectors(adjacencyMatrix(G), cutoff=order, reverse=reverse)
return (spectrum[0][order], spectrum[1][order])
|
jgeskens/django
|
refs/heads/master
|
django/contrib/gis/admin/__init__.py
|
637
|
# Getting the normal admin routines, classes, and `site` instance.
from django.contrib.admin import autodiscover, site, AdminSite, ModelAdmin, StackedInline, TabularInline, HORIZONTAL, VERTICAL
# Geographic admin options classes and widgets.
from django.contrib.gis.admin.options import GeoModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
try:
from django.contrib.gis.admin.options import OSMGeoAdmin
HAS_OSM = True
except ImportError:
HAS_OSM = False
|
t794104/ansible
|
refs/heads/devel
|
lib/ansible/modules/identity/ipa/ipa_sudocmd.py
|
74
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_sudocmd
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo command
description:
- Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
options:
sudocmd:
description:
- Sudo Command.
aliases: ['name']
required: true
description:
description:
- A description of this command.
state:
description: State to ensure
default: present
choices: ['present', 'absent', 'enabled', 'disabled']
extends_documentation_fragment: ipa.documentation
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure sudo command exists
- ipa_sudocmd:
name: su
description: Allow to run su via sudo
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure sudo command does not exist
- ipa_sudocmd:
name: su
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
sudocmd:
description: Sudo command as return from IPA API
returned: always
type: dict
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class SudoCmdIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
def sudocmd_find(self, name):
return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
def sudocmd_add(self, name, item):
return self._post_json(method='sudocmd_add', name=name, item=item)
def sudocmd_mod(self, name, item):
return self._post_json(method='sudocmd_mod', name=name, item=item)
def sudocmd_del(self, name):
return self._post_json(method='sudocmd_del', name=name)
def get_sudocmd_dict(description=None):
data = {}
if description is not None:
data['description'] = description
return data
def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd):
return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd)
def ensure(module, client):
name = module.params['sudocmd']
state = module.params['state']
module_sudocmd = get_sudocmd_dict(description=module.params['description'])
ipa_sudocmd = client.sudocmd_find(name=name)
changed = False
if state == 'present':
if not ipa_sudocmd:
changed = True
if not module.check_mode:
client.sudocmd_add(name=name, item=module_sudocmd)
else:
diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_sudocmd.get(key)
client.sudocmd_mod(name=name, item=data)
else:
if ipa_sudocmd:
changed = True
if not module.check_mode:
client.sudocmd_del(name=name)
return changed, client.sudocmd_find(name=name)
def main():
argument_spec = ipa_argument_spec()
argument_spec.update(description=dict(type='str'),
state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
sudocmd=dict(type='str', required=True, aliases=['name']))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
client = SudoCmdIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, sudocmd = ensure(module, client)
module.exit_json(changed=changed, sudocmd=sudocmd)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
josepedro/acidentes_em_rodovias_refatoracao
|
refs/heads/master
|
acidentes_em_rodovias/manage.py
|
1
|
#!/usr/bin/env python
import os
import sys
"""
./manage.py makemessages --all
After setting LOCALE_PATHS in settings.py, this command
generates one django.po for each language in the locale path.
"""
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE", "acidentes_em_rodovias.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
fusetools/Fuse.SublimePlugin
|
refs/heads/master
|
building.py
|
1
|
import subprocess
import threading
import os
import sublime
from .settings import getFusePathFromSettings, getSetting
from .log import log
class BuildManager:
def __init__(self, fuseNotFoundHandler):
self.builds = {}
self.fuseNotFoundHandler = fuseNotFoundHandler
self.previousBuildCommand = None
self._closeOldOutputViews()
def preview(self, target, path):
fusePath = getFusePathFromSettings()
start_preview = [fusePath, "preview", "--target=" + target, "--name=Sublime_Text_3", path]
name = target.capitalize() + " Preview"
self._start(target, start_preview, name, None)
def build(self, target, run, working_dir, error_handler):
platform = str(sublime.platform())
if self._isUnsupported(platform, target):
error_handler(target + " builds are not available on " + platform + ".")
return
name = target.capitalize() + " Build"
cmd = self._tryCreateBuildCommand(target, run)
if not cmd:
error_handler("No Fuse build target set.\n\nGo to Tools/Build With... to choose one.\n\nFuture attempts to build will use that.")
return
self.previousBuildCommand = cmd
self._start(target, cmd, name, working_dir)
def _tryCreateBuildCommand(self, target, run):
if target != "Default":
return [getFusePathFromSettings(), "build", "-t=" + target, "-c=Release"] + (["-r"] if run else [])
if self.previousBuildCommand:
return self.previousBuildCommand
return None
def _start(self, target, cmd, name, working_dir):
if name in self.builds:
self.builds[name].stop()
build = BuildInstance(cmd, name, working_dir, self.fuseNotFoundHandler)
self.builds[name] = build
build.start()
def _isUnsupported(self, platform, target):
unsupported = {
"windows" : [ "ios", "cmake"],
"osx" : ["dotnet", "msvc"]
}
return platform.lower() in unsupported and target.lower() in unsupported[platform]
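# e.g. _isUnsupported("osx", "msvc") is True, while _isUnsupported("linux", "ios") is False.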
def _closeOldOutputViews(self):
for window in sublime.windows():
for view in window.views():
if view.settings().has("is_fuse_output_view"):
view.close()
class BuildInstance(threading.Thread):
def __init__(self, cmd, title, working_dir, fuseNotFoundHandler):
threading.Thread.__init__(self)
self.cmd = cmd
self.daemon = True
self.output = OutputView(title) if getSetting("fuse_show_build_results") else NullOutputView()
self.fuseNotFoundHandler = fuseNotFoundHandler
self.process = None
self.working_dir = working_dir
def run(self):
log().info("Opening subprocess %s", str(self.cmd))
try:
creationflags = 0x08000000 if os.name == "nt" else 0
self.process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, creationflags=creationflags, cwd=self.working_dir)
except:
self.fuseNotFoundHandler()
self.output.close()
return
for line in iter(self.process.stdout.readline,b''):
self.output.append(line.decode("utf-8").replace('\r',''))
self.process.wait()
def stop(self):
if self.process:
try:
self.process.kill()
except ProcessLookupError:
pass #It died by itself, which is fine
self.output.close()
class OutputView:
def __init__(self, title):
self.title = title
window = sublime.active_window()
self.view = window.new_file()
self.view.set_scratch(True)
self.view.set_name(title)
self.view.settings().set("is_fuse_output_view", True)
def append(self, line):
self.view.run_command("append", {"characters": line})
def close(self):
try:
window = self.view.window()
groupIndex, viewIndex = window.get_view_index(self.view)
window.run_command("close_by_index", { "group": groupIndex, "index": viewIndex })
except:
pass #Failing to close a tab is not critical
class NullOutputView:
def append(self, line):
pass
def close(self):
pass
|
Imaginashion/cloud-vision
|
refs/heads/master
|
.fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/site-packages/requests/packages/chardet/constants.py
|
3007
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
|
piMoll/SEILAPLAN
|
refs/heads/master
|
lib/reportlab/pdfbase/pdfutils.py
|
1
|
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://hg.reportlab.com/hg-public/reportlab/log/tip/src/reportlab/pdfbase/pdfutils.py
__version__='3.3.0'
__doc__=''
# pdfutils.py - everything to do with images, streams,
# compression, and some constants
import sys
import os
import binascii
from reportlab import rl_config
from reportlab.lib.utils import getBytesIO, ImageReader, isUnicode, isPy3
from reportlab.lib.rl_accel import asciiBase85Encode, asciiBase85Decode
def _chunker(src,dst=[],chunkSize=60):
for i in range(0,len(src),chunkSize):
dst.append(src[i:i+chunkSize])
return dst
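# Note: the mutable default argument `dst=[]` is shared across calls; callers
# in this module always pass an explicit list, e.g. _chunker(encoded, code).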
##########################################################
#
# Image compression helpers. Preprocessing a directory
# of images will offer a vast speedup.
#
##########################################################
_mode2cs = {'RGB':'RGB', 'CMYK': 'CMYK', 'L': 'G'}
_mode2bpp = {'RGB': 3, 'CMYK':4, 'L':1}
def makeA85Image(filename,IMG=None, detectJpeg=False):
import zlib
img = ImageReader(filename)
if IMG is not None:
IMG.append(img)
if detectJpeg and img.jpeg_fh():
return None
imgwidth, imgheight = img.getSize()
raw = img.getRGBData()
code = []
append = code.append
# this describes what is in the image itself
append('BI')
append('/W %s /H %s /BPC 8 /CS /%s /F [/A85 /Fl]' % (imgwidth, imgheight,_mode2cs[img.mode]))
append('ID')
#use a flate filter and Ascii Base 85
assert len(raw) == imgwidth * imgheight*_mode2bpp[img.mode], "Wrong amount of data for image"
compressed = zlib.compress(raw) #this bit is very fast...
encoded = asciiBase85Encode(compressed) #...sadly this may not be
#append in blocks of 60 characters
_chunker(encoded,code)
append('EI')
return code
def makeRawImage(filename,IMG=None,detectJpeg=False):
import zlib
img = ImageReader(filename)
if IMG is not None:
IMG.append(img)
if detectJpeg and img.jpeg_fh():
return None
imgwidth, imgheight = img.getSize()
raw = img.getRGBData()
code = []
append = code.append
# this describes what is in the image itself
append('BI')
append('/W %s /H %s /BPC 8 /CS /%s /F [/Fl]' % (imgwidth, imgheight,_mode2cs[img.mode]))
append('ID')
#use a flate filter
assert len(raw) == imgwidth * imgheight*_mode2bpp[img.mode], "Wrong amount of data for image"
compressed = zlib.compress(raw) #this bit is very fast...
#append in blocks of 60 characters
_chunker(compressed,code)
append('EI')
return code
def cacheImageFile(filename, returnInMemory=0, IMG=None):
"Processes image as if for encoding, saves to a file with .a85 extension."
cachedname = os.path.splitext(filename)[0] + (rl_config.useA85 and '.a85' or '.bin')
if filename==cachedname:
if cachedImageExists(filename):
from reportlab.lib.utils import open_for_read
if returnInMemory: return filter(None,open_for_read(cachedname).read().split('\r\n'))
else:
raise IOError('No such cached image %s' % filename)
else:
if rl_config.useA85:
code = makeA85Image(filename,IMG)
else:
code = makeRawImage(filename,IMG)
if returnInMemory: return code
#save it to a file
f = open(cachedname,'wb')
f.write('\r\n'.join(code)+'\r\n')
f.close()
if rl_config.verbose:
print('cached image as %s' % cachedname)
def preProcessImages(spec):
"""Preprocesses one or more image files.
Accepts either a filespec ('C:\\mydir\\*.jpg') or a list
of image filenames, crunches them all to save time. Run this
to save huge amounts of time when repeatedly building image
documents."""
import glob
if isinstance(spec,str):
filelist = glob.glob(spec)
else: #list or tuple OK
filelist = spec
for filename in filelist:
if cachedImageExists(filename):
if rl_config.verbose:
print('cached version of %s already exists' % filename)
else:
cacheImageFile(filename)
def cachedImageExists(filename):
"""Determines if a cached image already exists for a given file.
Determines if a cached image exists which has the same name
and equal or newer date to the given file."""
cachedname = os.path.splitext(filename)[0] + (rl_config.useA85 and '.a85' or '.bin')
if os.path.isfile(cachedname):
#see if it is newer
original_date = os.stat(filename)[8]
cached_date = os.stat(cachedname)[8]
if original_date > cached_date:
return 0
else:
return 1
else:
return 0
##############################################################
#
# PDF Helper functions
#
##############################################################
def _normalizeLineEnds(text,desired='\r\n',unlikely='\x00\x01\x02\x03'):
"""Normalizes different line end character(s).
Ensures all instances of CR, LF and CRLF end up as
the specified one."""
return (text
.replace('\r\n', unlikely)
.replace('\r', unlikely)
.replace('\n', unlikely)
.replace(unlikely, desired))
def _AsciiHexEncode(input):
"""Encodes input using ASCII-Hex coding.
This is a verbose encoding used for binary data within
a PDF file. One byte binary becomes two bytes of ASCII.
Helper function used by images."""
if isUnicode(input):
input = input.encode('utf-8')
output = getBytesIO()
output.write(binascii.b2a_hex(input))
output.write(b'>')
return output.getvalue()
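# Example (sketch): _AsciiHexEncode('AB') yields '4142>' (bytes on Python 3):
# each input byte becomes two hex digits, followed by the '>' terminator.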
def _AsciiHexDecode(input):
"""Decodes input using ASCII-Hex coding.
Not used except to provide a test of the inverse function."""
#strip out all whitespace
if not isUnicode(input):
input = input.decode('utf-8')
stripped = ''.join(input.split())
assert stripped[-1] == '>', 'Invalid terminator for Ascii Hex Stream'
stripped = stripped[:-1] #chop off terminator
assert len(stripped) % 2 == 0, 'Ascii Hex stream has odd number of bytes'
return ''.join([chr(int(stripped[i:i+2],16)) for i in range(0,len(stripped),2)])
def _wrap(input, columns=60):
"Wraps input at a given column size by inserting \r\n characters."
output = []
length = len(input)
i = 0
pos = columns * i
while pos < length:
output.append(input[pos:pos+columns])
i = i + 1
pos = columns * i
#avoid HP printer problem
if len(output[-1])==1:
output[-2:] = [output[-2][:-1],output[-2][-1]+output[-1]]
return '\r\n'.join(output)
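# Example (sketch): _wrap('abcdef', columns=2) returns 'ab\r\ncd\r\nef'.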
#########################################################################
#
# JPEG processing code - contributed by Eric Johnson
#
#########################################################################
# Read data from the JPEG file. We should probably be using PIL to
# get this information for us -- but this way is more fun!
# Returns (width, height, color components) as a triple
# This is based on Thomas Merz's code from GhostScript (viewjpeg.ps)
def readJPEGInfo(image):
"Read width, height and number of components from open JPEG file."
import struct
from reportlab.pdfbase.pdfdoc import PDFError
#Acceptable JPEG Markers:
# SOF0=baseline, SOF1=extended sequential or SOF2=progressive
validMarkers = [0xC0, 0xC1, 0xC2]
#JPEG markers without additional parameters
noParamMarkers = \
[ 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0x01 ]
#Unsupported JPEG Markers
unsupportedMarkers = \
[ 0xC3, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCD, 0xCE, 0xCF ]
#read JPEG marker segments until we find SOFn marker or EOF
dpi = (72,72)
done = 0
while not done:
x = struct.unpack('B', image.read(1))
if x[0] == 0xFF: #found marker
x = struct.unpack('B', image.read(1))
#print('marker=%2x' % x[0])
if x[0] in validMarkers:
image.seek(2, 1) #skip segment length
x = struct.unpack('B', image.read(1)) #data precision
if x[0] != 8:
raise PDFError('JPEG must have 8 bits per component')
y = struct.unpack('BB', image.read(2))
height = (y[0] << 8) + y[1]
y = struct.unpack('BB', image.read(2))
width = (y[0] << 8) + y[1]
y = struct.unpack('B', image.read(1))
color = y[0]
return width, height, color, dpi
elif x[0]==0xE0:
x = struct.unpack('BB', image.read(2))
n = (x[0] << 8) + x[1] - 2
x = image.read(n)
y = struct.unpack('BB', x[10:12])
x = struct.unpack('BB', x[8:10])
dpi = ((x[0]<<8) + x[1],(y[0]<<8)+y[1])
elif x[0] in unsupportedMarkers:
raise PDFError('JPEG Unsupported JPEG marker: %0.2x' % x[0])
elif x[0] not in noParamMarkers:
#skip segments with parameters
#read length and skip the data
x = struct.unpack('BB', image.read(2))
image.seek( (x[0] << 8) + x[1] - 2, 1)
class _fusc:
def __init__(self,k, n):
assert k, 'Argument k should be a non empty string'
self._k = k
self._klen = len(k)
self._n = int(n) or 7
def encrypt(self,s):
return self.__rotate(asciiBase85Encode(''.join(map(chr,self.__fusc(list(map(ord,s)))))),self._n)
def decrypt(self,s):
return ''.join(map(chr,self.__fusc(list(map(ord,asciiBase85Decode(self.__rotate(s,-self._n)))))))
def __rotate(self,s,n):
l = len(s)
if n<0: n = l+n
n %= l
if not n: return s
return s[-n:]+s[:l-n]
def __fusc(self,s):
slen = len(s)
return list(map(lambda x,y: x ^ y,s,list(map(ord,((int(slen/self._klen)+1)*self._k)[:slen]))))
|
Yas3r/OWASP-ZSC
|
refs/heads/master
|
lib/generator/linux_arm/exc.py
|
20
|
#!/usr/bin/env python
'''
OWASP ZSC | ZCR Shellcoder
ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo
'''
def run(filename):
return 'N'
|
liamgh/liamgreenhughes-sl4a-tf101
|
refs/heads/master
|
python/src/Demo/sockets/broadcast.py
|
47
|
# Send UDP broadcast packets
MYPORT = 50000
import sys, time
from socket import *
s = socket(AF_INET, SOCK_DGRAM)
s.bind(('', 0))
s.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
while 1:
data = repr(time.time()) + '\n'
s.sendto(data, ('<broadcast>', MYPORT))
time.sleep(2)
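# A matching receiver (sketch) would create its own SOCK_DGRAM socket,
# bind it to ('', MYPORT) and read the packets with recvfrom() in a loop.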
|
arahuja/scikit-learn
|
refs/heads/master
|
sklearn/neural_network/tests/test_rbm.py
|
142
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that much iters
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
kadubarbosa/hydra1
|
refs/heads/master
|
mcmc_flush.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 17:39:22 2013
@author: cbarbosa
Delete models of MCMC
"""
import os
import shutil
from config import *
if __name__ == "__main__":
os.chdir(os.path.join(home, "single2"))
folders = [x for x in os.listdir(".") if os.path.isdir(x)]
for folder in folders:
if folder in ["figs", "logs", "mc_logs"]:
continue
else:
shutil.rmtree(folder)
|
hynnet/openwrt-mt7620
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_asynchat.py
|
94
|
# test asynchat
import asyncore, asynchat, socket, time
import unittest
import sys
from test import test_support
try:
import threading
except ImportError:
threading = None
HOST = test_support.HOST
SERVER_QUIT = 'QUIT\n'
if threading:
class echo_server(threading.Thread):
# parameter to determine the number of bytes passed back to the
# client each send
chunk_size = 1
def __init__(self, event):
threading.Thread.__init__(self)
self.event = event
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = test_support.bind_port(self.sock)
# This will be set if the client wants us to wait before echoing data
# back.
self.start_resend_event = None
def run(self):
self.sock.listen(1)
self.event.set()
conn, client = self.sock.accept()
self.buffer = ""
# collect data until quit message is seen
while SERVER_QUIT not in self.buffer:
data = conn.recv(1)
if not data:
break
self.buffer = self.buffer + data
# remove the SERVER_QUIT message
self.buffer = self.buffer.replace(SERVER_QUIT, '')
if self.start_resend_event:
self.start_resend_event.wait()
# re-send entire set of collected data
try:
# this may fail on some tests, such as test_close_when_done, since
# the client closes the channel when it's done sending
while self.buffer:
n = conn.send(self.buffer[:self.chunk_size])
time.sleep(0.001)
self.buffer = self.buffer[n:]
except:
pass
conn.close()
self.sock.close()
class echo_client(asynchat.async_chat):
def __init__(self, terminator, server_port):
asynchat.async_chat.__init__(self)
self.contents = []
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((HOST, server_port))
self.set_terminator(terminator)
self.buffer = ''
def handle_connect(self):
pass
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
def collect_incoming_data(self, data):
self.buffer += data
def found_terminator(self):
self.contents.append(self.buffer)
self.buffer = ""
def start_echo_server():
event = threading.Event()
s = echo_server(event)
s.start()
event.wait()
event.clear()
time.sleep(0.01) # Give server time to start accepting.
return s, event
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestAsynchat(unittest.TestCase):
usepoll = False
def setUp (self):
self._threads = test_support.threading_setup()
def tearDown (self):
test_support.threading_cleanup(*self._threads)
def line_terminator_check(self, term, server_chunk):
event = threading.Event()
s = echo_server(event)
s.chunk_size = server_chunk
s.start()
event.wait()
event.clear()
time.sleep(0.01) # Give server time to start accepting.
c = echo_client(term, s.port)
c.push("hello ")
c.push("world%s" % term)
c.push("I'm not dead yet!%s" % term)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, ["hello world", "I'm not dead yet!"])
# the line terminator tests below check receiving variously-sized
# chunks back from the server in order to exercise all branches of
# async_chat.handle_read
def test_line_terminator1(self):
# test one-character terminator
for l in (1,2,3):
self.line_terminator_check('\n', l)
def test_line_terminator2(self):
# test two-character terminator
for l in (1,2,3):
self.line_terminator_check('\r\n', l)
def test_line_terminator3(self):
# test three-character terminator
for l in (1,2,3):
self.line_terminator_check('qqq', l)
def numeric_terminator_check(self, termlen):
# Try reading a fixed number of bytes
s, event = start_echo_server()
c = echo_client(termlen, s.port)
data = "hello world, I'm not dead yet!\n"
c.push(data)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, [data[:termlen]])
def test_numeric_terminator1(self):
# check that ints & longs both work (since type is
# explicitly checked in async_chat.handle_read)
self.numeric_terminator_check(1)
self.numeric_terminator_check(1L)
def test_numeric_terminator2(self):
self.numeric_terminator_check(6L)
def test_none_terminator(self):
# Try reading a fixed number of bytes
s, event = start_echo_server()
c = echo_client(None, s.port)
data = "hello world, I'm not dead yet!\n"
c.push(data)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, [])
self.assertEqual(c.buffer, data)
def test_simple_producer(self):
s, event = start_echo_server()
c = echo_client('\n', s.port)
data = "hello world\nI'm not dead yet!\n"
p = asynchat.simple_producer(data+SERVER_QUIT, buffer_size=8)
c.push_with_producer(p)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, ["hello world", "I'm not dead yet!"])
def test_string_producer(self):
s, event = start_echo_server()
c = echo_client('\n', s.port)
data = "hello world\nI'm not dead yet!\n"
c.push_with_producer(data+SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, ["hello world", "I'm not dead yet!"])
def test_empty_line(self):
# checks that empty lines are handled correctly
s, event = start_echo_server()
c = echo_client('\n', s.port)
c.push("hello world\n\nI'm not dead yet!\n")
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join()
self.assertEqual(c.contents, ["hello world", "", "I'm not dead yet!"])
def test_close_when_done(self):
s, event = start_echo_server()
s.start_resend_event = threading.Event()
c = echo_client('\n', s.port)
c.push("hello world\nI'm not dead yet!\n")
c.push(SERVER_QUIT)
c.close_when_done()
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
# Only allow the server to start echoing data back to the client after
# the client has closed its connection. This prevents a race condition
# where the server echoes all of its data before we can check that it
# got any down below.
s.start_resend_event.set()
s.join()
self.assertEqual(c.contents, [])
# the server might have been able to send a byte or two back, but this
# at least checks that it received something and didn't just fail
# (which could still result in the client not having received anything)
self.assertTrue(len(s.buffer) > 0)
class TestAsynchat_WithPoll(TestAsynchat):
usepoll = True
class TestHelperFunctions(unittest.TestCase):
def test_find_prefix_at_end(self):
self.assertEqual(asynchat.find_prefix_at_end("qwerty\r", "\r\n"), 1)
self.assertEqual(asynchat.find_prefix_at_end("qwertydkjf", "\r\n"), 0)
class TestFifo(unittest.TestCase):
def test_basic(self):
f = asynchat.fifo()
f.push(7)
f.push('a')
self.assertEqual(len(f), 2)
self.assertEqual(f.first(), 7)
self.assertEqual(f.pop(), (1, 7))
self.assertEqual(len(f), 1)
self.assertEqual(f.first(), 'a')
self.assertEqual(f.is_empty(), False)
self.assertEqual(f.pop(), (1, 'a'))
self.assertEqual(len(f), 0)
self.assertEqual(f.is_empty(), True)
self.assertEqual(f.pop(), (0, None))
def test_given_list(self):
f = asynchat.fifo(['x', 17, 3])
self.assertEqual(len(f), 3)
self.assertEqual(f.pop(), (1, 'x'))
self.assertEqual(f.pop(), (1, 17))
self.assertEqual(f.pop(), (1, 3))
self.assertEqual(f.pop(), (0, None))
def test_main(verbose=None):
test_support.run_unittest(TestAsynchat, TestAsynchat_WithPoll,
TestHelperFunctions, TestFifo)
if __name__ == "__main__":
test_main(verbose=True)
|
dsfsdgsbngfggb/odoo
|
refs/heads/8.0
|
openerp/addons/test_converter/tests/__init__.py
|
259
|
# -*- coding: utf-8 -*-
from . import test_html, test_gbf
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
erkrishna9/odoo
|
refs/heads/master
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/About.py
|
90
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
from com.sun.star.task import XJobExecutor
if __name__<>'package':
from lib.gui import *
class About(unohelper.Base, XJobExecutor):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
self.win = DBModalDialog(60, 50, 175, 115, "About OpenERP Report Designer")
fdBigFont = createUnoStruct("com.sun.star.awt.FontDescriptor")
fdBigFont.Width = 20
fdBigFont.Height = 25
fdBigFont.Weight = 120
fdBigFont.Family= 3
oLabelTitle1 = self.win.addFixedText("lblTitle1", 1, 1, 35, 30)
oLabelTitle1.Model.TextColor = 16056320
oLabelTitle1.Model.FontDescriptor = fdBigFont
oLabelTitle1.Model.FontRelief = 1
oLabelTitle1.Text = "Open"
oLabelTitle2 = self.win.addFixedText("lblTitle2", 35, 1, 30, 30)
oLabelTitle2.Model.TextColor = 1
oLabelTitle2.Model.FontDescriptor = fdBigFont
oLabelTitle2.Model.FontRelief = 1
oLabelTitle2.Text = "ERP"
oLabelProdDesc = self.win.addFixedText("lblProdDesc", 1, 30, 173, 75)
oLabelProdDesc.Model.TextColor = 1
fdBigFont.Width = 10
fdBigFont.Height = 11
fdBigFont.Weight = 76
oLabelProdDesc.Model.FontDescriptor = fdBigFont
oLabelProdDesc.Model.Align = 1
oLabelProdDesc.Model.FontRelief = 1
oLabelProdDesc.Model.MultiLine = True
oLabelProdDesc.Text = "This package helps you to create or modify\nreports in OpenERP. Once connected to the\nserver, you can design your template of reports\nusing fields and expressions and browsing the\ncomplete structure of OpenERP object database."
oLabelFooter = self.win.addFixedText("lblFooter", -1, -1, 173, 25)
oLabelFooter.Model.TextColor = 255
#oLabelFooter.Model.BackgroundColor = 1
oLabelFooter.Model.Border = 2
oLabelFooter.Model.BorderColor = 255
fdBigFont.Width = 8
fdBigFont.Height = 9
fdBigFont.Weight = 100
oLabelFooter.Model.FontDescriptor = fdBigFont
oLabelFooter.Model.Align = 1
oLabelFooter.Model.FontRelief = 1
oLabelFooter.Model.MultiLine = True
sMessage = "OpenERP Report Designer v1.0 \nCopyright 2007-TODAY Tiny sprl \nThis product is free software, under the GNU Affero General Public License."
oLabelFooter.Text = sMessage
self.win.doModalDialog("",None)
if __name__<>"package" and __name__=="__main__":
About(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( About, "org.openoffice.openerp.report.about", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
odooindia/odoo
|
refs/heads/master
|
addons/gamification/__openerp__.py
|
62
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Gamification',
'version': '1.0',
'author': 'OpenERP SA',
    'category': 'Human Resources',
'depends': ['mail', 'email_template', 'web_kanban_gauge'],
'description': """
Gamification process
====================
The Gamification module provides ways to evaluate and motivate the users of OpenERP.
The users can be evaluated using goals and numerical objectives to reach.
**Goals** are assigned through **challenges** to evaluate and compare members of a team with each other and through time.
For non-numerical achievements, **badges** can be granted to users. From a simple "thank you" to an exceptional achievement, a badge is an easy way to express gratitude to a user for their good work.
Both goals and badges are flexible and can be adapted to a large range of modules and actions. When installed, this module creates easy goals to help new users discover OpenERP and configure their user profile.
""",
'data': [
'wizard/update_goal.xml',
'wizard/grant_badge.xml',
'views/badge.xml',
'views/challenge.xml',
'views/goal.xml',
'data/cron.xml',
'security/gamification_security.xml',
'security/ir.model.access.csv',
'data/goal_base.xml',
'data/badge.xml',
'views/gamification.xml',
],
'installable': True,
'application': True,
'auto_install': False,
'qweb': ['static/src/xml/gamification.xml'],
}
|
IsCoolEntertainment/debpkg_python-boto
|
refs/heads/master
|
boto/gs/connection.py
|
2
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.gs.bucket import Bucket
from boto.s3.connection import S3Connection
from boto.s3.connection import SubdomainCallingFormat
from boto.s3.connection import check_lowercase_bucketname
from boto.utils import get_utf8_value
class Location(object):
DEFAULT = 'US'
EU = 'EU'
class GSConnection(S3Connection):
DefaultHost = 'storage.googleapis.com'
QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'
def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host=DefaultHost, debug=0, https_connection_factory=None,
calling_format=SubdomainCallingFormat(), path='/',
suppress_consec_slashes=True):
super(GSConnection, self).__init__(gs_access_key_id, gs_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory, calling_format, path,
"google", Bucket,
suppress_consec_slashes=suppress_consec_slashes)
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None,
storage_class='STANDARD'):
"""
Creates a new bucket. By default it's located in the USA. You can
        pass Location.EU to create the bucket in the EU. You can also pass
a LocationConstraint for where the bucket should be located, and
a StorageClass describing how the data should be stored.
:type bucket_name: string
:param bucket_name: The name of the new bucket.
:type headers: dict
:param headers: Additional headers to pass along with the request to GCS.
:type location: :class:`boto.gs.connection.Location`
:param location: The location of the new bucket.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GCS.
:type storage_class: string
:param storage_class: Either 'STANDARD' or 'DURABLE_REDUCED_AVAILABILITY'.
"""
check_lowercase_bucketname(bucket_name)
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header : policy}
if not location:
location = Location.DEFAULT
location_elem = ('<LocationConstraint>%s</LocationConstraint>'
% location)
if storage_class:
storage_class_elem = ('<StorageClass>%s</StorageClass>'
% storage_class)
else:
storage_class_elem = ''
data = ('<CreateBucketConfiguration>%s%s</CreateBucketConfiguration>'
% (location_elem, storage_class_elem))
response = self.make_request(
'PUT', get_utf8_value(bucket_name), headers=headers,
data=get_utf8_value(data))
body = response.read()
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body)
def get_bucket(self, bucket_name, validate=True, headers=None):
"""
Retrieves a bucket by name.
If the bucket does not exist, an ``S3ResponseError`` will be raised. If
you are unsure if the bucket exists or not, you can use the
``S3Connection.lookup`` method, which will either return a valid bucket
or ``None``.
:type bucket_name: string
:param bucket_name: The name of the bucket
:type headers: dict
        :param headers: Additional headers to pass along with the request to
            GCS.
:type validate: boolean
:param validate: If ``True``, it will try to fetch all keys within the
given bucket. (Default: ``True``)
"""
bucket = self.bucket_class(self, bucket_name)
if validate:
bucket.get_all_keys(headers, maxkeys=0)
return bucket
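# A minimal, hypothetical usage sketch of GSConnection (not part of boto); the
# credential values and bucket name below are placeholders:
#
#   conn = GSConnection(gs_access_key_id='GOOGXXXXXXXX',
#                       gs_secret_access_key='secret-key')
#   bucket = conn.create_bucket('example-bucket', location=Location.EU,
#                               storage_class='DURABLE_REDUCED_AVAILABILITY')
#   same_bucket = conn.get_bucket('example-bucket', validate=True)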
|
protocol7/python-koans
|
refs/heads/master
|
python 2/koans/about_lambdas.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based slightly on the lambdas section of AboutBlocks in the Ruby Koans
#
from runner.koan import *
class AboutLambdas(Koan):
def test_lambdas_can_be_assigned_to_variables_and_called_explicitly(self):
add_one = lambda n: n + 1
self.assertEqual(11, add_one(10))
# ------------------------------------------------------------------
def make_order(self, order):
return lambda qty: str(qty) + " " + order + "s"
def test_accessing_lambda_via_assignment(self):
sausages = self.make_order('sausage')
eggs = self.make_order('egg')
self.assertEqual("3 sausages", sausages(3))
self.assertEqual("2 eggs", eggs(2))
def test_accessing_lambda_without_assignment(self):
self.assertEqual("39823 spams", self.make_order('spam')(39823))
|
RichardLeeK/MachineLearning
|
refs/heads/master
|
img_process/tester.py
|
1
|
import sys
sys.path.insert(0, 'D:/Sources/Python Source Code')
import img_process.signal_to_img as si
file = open('D:/Richard/CBFV/Auto-encoder/001040SE_interpolated.csv')
lines = file.readlines()
file.close()
signal_map = {}
for line in lines:
  sl = line.split(',')
  cur_sig = []
  for v in sl[1:]:
    cur_sig.append(float(v))
  # keep the parsed signal keyed by its id (the first CSV column)
  signal_map[int(sl[0])] = cur_sig
# convert the last parsed signal to an image
val = si.interpolated_signal_to_img(cur_sig)
|
nadley/Sick-Beard
|
refs/heads/development
|
bs4/builder/_htmlparser.py
|
120
|
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import (
HTMLParser,
HTMLParseError,
)
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
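# For example, this evaluates to False for a sys.version_info of (2, 7, 3) or
# (3, 2, 2), and to True for (3, 2, 3) and any later release.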
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
self.soup.handle_starttag(name, None, None, dict(attrs))
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
if data.endswith("?") and data.lower().startswith("xml"):
# "An XHTML processing instruction using the trailing '?'
# will cause the '?' to be included in data." - HTMLParser
# docs.
#
# Strip the question mark so we don't end up with two
# question marks.
data = data[:-1]
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError, e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
|
wimnat/ansible
|
refs/heads/devel
|
test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py
|
35
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured.
- This is NOT ICMP ping, this is just a trivial test module.
options: {}
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Test we can logon to 'webservers' and execute python with json lib.
ansible webservers -m ping
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
data=dict(required=False, default=None),
),
supports_check_mode=True
)
result = dict(ping='pong')
if module.params['data']:
if module.params['data'] == 'crash':
raise Exception("boom")
result['ping'] = module.params['data']
result['location'] = 'role: bar'
module.exit_json(**result)
if __name__ == '__main__':
main()
|
TeachAtTUM/edx-platform
|
refs/heads/master
|
common/test/acceptance/tests/lms/test_lms_user_preview.py
|
11
|
# -*- coding: utf-8 -*-
"""
Tests the "preview" selector in the LMS that allows changing between Staff, Learner, and Content Groups.
"""
from textwrap import dedent
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.instructor_dashboard import InstructorDashboardPage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.tests.helpers import UniqueCourseTest, create_user_partition_json
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, MINIMUM_STATIC_PARTITION_ID, Group
@attr(shard=10)
class StaffViewTest(UniqueCourseTest):
"""
Tests that verify the staff view.
"""
USERNAME = "STAFF_TESTER"
EMAIL = "johndoe@example.com"
def setUp(self):
super(StaffViewTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with sections/problems, tabs, updates, and handouts
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.populate_course_fixture(self.course_fixture) # pylint: disable=no-member
self.course_fixture.install()
# Auto-auth register for the course.
# Do this as global staff so that you will see the Staff View
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=True).visit()
def _goto_staff_page(self):
"""
Open staff page with assertion
"""
self.courseware_page.visit()
staff_page = StaffCoursewarePage(self.browser, self.course_id)
self.assertEqual(staff_page.staff_view_mode, 'Staff')
return staff_page
@attr(shard=10)
class CourseWithoutContentGroupsTest(StaffViewTest):
"""
Setup for tests that have no content restricted to specific content groups.
"""
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 2 problems.
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=problem_data),
XBlockFixtureDesc('problem', 'Test Problem 2', data=problem_data)
)
)
)
@attr(shard=10)
class StaffViewToggleTest(CourseWithoutContentGroupsTest):
"""
Tests for the staff view toggle button.
"""
def test_instructor_tab_visibility(self):
"""
Test that the instructor tab is hidden when viewing as a student.
"""
course_page = self._goto_staff_page()
self.assertTrue(course_page.has_tab('Instructor'))
course_page.set_staff_view_mode('Learner')
self.assertEqual(course_page.staff_view_mode, 'Learner')
self.assertFalse(course_page.has_tab('Instructor'))
@attr(shard=10)
class StaffDebugTest(CourseWithoutContentGroupsTest):
"""
Tests that verify the staff debug info.
"""
def test_reset_attempts_empty(self):
"""
Test that we reset even when there is no student state
"""
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(
u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg,
)
def test_delete_state_empty(self):
"""
Test that we delete properly even when there isn't state to delete.
"""
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(
u'Successfully deleted student state for user {}'.format(self.USERNAME), msg,
)
def test_reset_attempts_state(self):
"""
Successfully reset the student attempts
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(
u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg,
)
def test_rescore_problem(self):
"""
Rescore the student
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user {}'.format(self.USERNAME), msg)
def test_rescore_problem_if_higher(self):
"""
Rescore the student
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore_if_higher()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem to improve score for user {}'.format(self.USERNAME), msg)
def test_student_state_delete(self):
"""
Successfully delete the student state with an answer
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state for user {}'.format(self.USERNAME), msg)
def test_student_by_email(self):
"""
Successfully reset the student attempts using their email address
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts(self.EMAIL)
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts for user {}'.format(self.EMAIL), msg)
def test_bad_student(self):
"""
Test negative response with invalid user
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state('INVALIDUSER')
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Failed to delete student state for user. User does not exist.', msg)
def test_reset_attempts_for_problem_loaded_via_ajax(self):
"""
Successfully reset the student attempts for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts for user {}'.format(self.USERNAME), msg)
def test_rescore_state_for_problem_loaded_via_ajax(self):
"""
Rescore the student for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user {}'.format(self.USERNAME), msg)
def test_student_state_delete_for_problem_loaded_via_ajax(self):
"""
Successfully delete the student state for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state for user {}'.format(self.USERNAME), msg)
class CourseWithContentGroupsTest(StaffViewTest):
"""
Verifies that changing the "View this course as" selector works properly for content groups.
"""
def setUp(self):
super(CourseWithContentGroupsTest, self).setUp()
# pylint: disable=protected-access
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
MINIMUM_STATIC_PARTITION_ID,
'Configuration alpha,beta',
'Content Group Partition',
[
Group(MINIMUM_STATIC_PARTITION_ID + 1, 'alpha'),
Group(MINIMUM_STATIC_PARTITION_ID + 2, 'beta')
],
scheme="cohort"
)
],
},
})
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 3 problems.
One problem is visible to all, one problem is visible only to Group "alpha", and
one problem is visible only to Group "beta".
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<choiceresponse>
<label>Choose Yes.</label>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
self.alpha_text = "VISIBLE TO ALPHA"
self.beta_text = "VISIBLE TO BETA"
self.audit_text = "VISIBLE TO AUDIT"
self.everyone_text = "VISIBLE TO EVERYONE"
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'problem',
self.alpha_text,
data=problem_data,
metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 1]}}
),
XBlockFixtureDesc(
'problem',
self.beta_text,
data=problem_data,
metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 2]}}
),
XBlockFixtureDesc(
'problem',
self.audit_text,
data=problem_data,
# Below 1 is the hardcoded group ID for "Audit"
metadata={"group_access": {ENROLLMENT_TRACK_PARTITION_ID: [1]}}
),
XBlockFixtureDesc(
'problem',
self.everyone_text,
data=problem_data
)
)
)
)
)
@attr(shard=10)
def test_staff_sees_all_problems(self):
"""
Scenario: Staff see all problems
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
Then I see all the problems, regardless of their group_access property
"""
course_page = self._goto_staff_page()
verify_expected_problem_visibility(
self,
course_page,
[self.alpha_text, self.beta_text, self.audit_text, self.everyone_text]
)
@attr(shard=3)
def test_student_not_in_content_group(self):
"""
Scenario: When previewing as a learner, only content visible to all is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner
Then I see only problems visible to all users
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner')
verify_expected_problem_visibility(self, course_page, [self.everyone_text])
@attr(shard=3)
def test_as_student_in_alpha(self):
"""
Scenario: When previewing as a learner in group alpha, only content visible to alpha is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner in group alpha
Then I see only problems visible to group alpha
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner in alpha')
verify_expected_problem_visibility(self, course_page, [self.alpha_text, self.everyone_text])
@attr(shard=3)
def test_as_student_in_beta(self):
"""
Scenario: When previewing as a learner in group beta, only content visible to beta is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner in group beta
Then I see only problems visible to group beta
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner in beta')
verify_expected_problem_visibility(self, course_page, [self.beta_text, self.everyone_text])
@attr(shard=3)
def test_as_student_in_audit(self):
"""
Scenario: When previewing as a learner in the audit enrollment track, only content visible to audit is shown
Given I have a course with an enrollment_track user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Learner in audit enrollment track
Then I see only problems visible to audit enrollment track
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Learner in Audit')
verify_expected_problem_visibility(self, course_page, [self.audit_text, self.everyone_text])
def create_cohorts_and_assign_students(self, student_a_username, student_b_username):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one learner.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
cohort_management_page.is_cohorted = True
def add_cohort_with_student(cohort_name, content_group, student):
""" Create cohort and assign learner to it. """
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort Alpha", "alpha", student_a_username)
add_cohort_with_student("Cohort Beta", "beta", student_b_username)
cohort_management_page.wait_for_ajax()
@attr(shard=3)
def test_as_specific_student(self):
student_a_username = 'tass_student_a'
student_b_username = 'tass_student_b'
AutoAuthPage(self.browser, username=student_a_username, course_id=self.course_id, no_login=True).visit()
AutoAuthPage(self.browser, username=student_b_username, course_id=self.course_id, no_login=True).visit()
self.create_cohorts_and_assign_students(student_a_username, student_b_username)
# Masquerade as learner in alpha cohort:
course_page = self._goto_staff_page()
course_page.set_staff_view_mode_specific_student(student_a_username)
verify_expected_problem_visibility(self, course_page, [self.alpha_text, self.audit_text, self.everyone_text])
# Masquerade as learner in beta cohort:
course_page.set_staff_view_mode_specific_student(student_b_username)
verify_expected_problem_visibility(self, course_page, [self.beta_text, self.audit_text, self.everyone_text])
@attr('a11y')
def test_course_page(self):
"""
Run accessibility audit for course staff pages.
"""
course_page = self._goto_staff_page()
course_page.a11y_audit.config.set_rules({
'ignore': [
'aria-allowed-attr', # TODO: AC-559
'aria-roles', # TODO: AC-559,
'aria-valid-attr', # TODO: AC-559
'color-contrast', # TODO: AC-559
'link-href', # TODO: AC-559
'section', # TODO: AC-559
]
})
course_page.a11y_audit.check_for_accessibility_errors()
def verify_expected_problem_visibility(test, courseware_page, expected_problems):
"""
Helper method that checks that the expected problems are visible on the current page.
"""
test.assertEqual(
len(expected_problems), courseware_page.num_xblock_components, "Incorrect number of visible problems"
)
for index, expected_problem in enumerate(expected_problems):
test.assertIn(expected_problem, courseware_page.xblock_components[index].text)
|
Schibum/sndlatr
|
refs/heads/master
|
gae/pytz/tests/test_pytz_appengine.py
|
1
|
"""
Test the appengine-specific components
"""
import pytz
import logging
import unittest
class pytzAppengineTest(unittest.TestCase):
"""
Check that loading works as expected and we see the appropriate model
instances
"""
def test_pytz_appengine(self):
"ensure we are using pytz-appengine"
self.assertTrue(pytz.APPENGINE_PYTZ)
def test_zones(self):
"""Check that the models do what we expect"""
from pytz import NDB_NAMESPACE, Zoneinfo
from google.appengine.ext import ndb
est = pytz.timezone('Canada/Eastern')
logging.error(est)
EXPECT_ZONES = 589 # this may change with each iteration
zones = Zoneinfo.query(namespace=NDB_NAMESPACE).count()
self.assertEqual(zones, EXPECT_ZONES)
|
levigross/pyscanner
|
refs/heads/master
|
mytests/django/conf/locale/cy/formats.py
|
433
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'g:i:s A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
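# For illustration (values are hypothetical renderings, month names are
# localized by Django): DATE_FORMAT = 'd F Y' formats a date as zero-padded
# day, full month name and four-digit year, e.g. "25 October 2013", and
# TIME_FORMAT = 'g:i:s A' formats a time as e.g. "5:39:22 PM".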
|
kevinaangstadt/rapid
|
refs/heads/master
|
test/batchSim.py
|
1
|
# Minor changes by Kevin Angstadt
# to conform with rapidsim output
from argparse import ArgumentParser
from collections import defaultdict
from os import popen
from re import compile
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4).pprint
def batchSim(ANML_file, stimulus_file, args='-e'):
"""Python binding to batchSim functional ANML simulation
Usage: If you want literal interpretation of your input symbols,
pass an empty string as the third argument. Otherwise, it will assume
escaped characters, e.g. '\xb0\xff\05'"""
ANML_file = './' + ANML_file
stimulus_file = './' + stimulus_file
cmd = ' '.join(['batchSim', args, ANML_file, stimulus_file])
#print cmd
cmd_output = popen(cmd).read()
if '-e' in args:
num_bytes_to_read = 4
else:
num_bytes_to_read = 1
in_symbols_to_offsets = {}
with open(stimulus_file,'r') as f:
offset_count = 0
in_symbol = f.read(num_bytes_to_read)
while in_symbol:
in_symbols_to_offsets[offset_count] = in_symbol
in_symbol = f.read(num_bytes_to_read)
offset_count = 1 + offset_count
f.close()
offsets_to_out_symbols = defaultdict(list)
last_offset = None
offsetElementRE = compile(r'Element id:\s(?P<element_id>.*)\sreporting at index\s(?P<element>\d+)')
for line_num,line in enumerate(cmd_output.splitlines()):
foo = offsetElementRE.search(line)
if foo:
match_element, offset = foo.groups()
offsets_to_out_symbols[offset].append(match_element)
return (in_symbols_to_offsets,offsets_to_out_symbols)
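# A small, hypothetical usage sketch of batchSim() (file names are placeholders).
# The first mapping goes input offset -> symbol read from the stimulus file; the
# second goes reporting offset (a string) -> list of reporting element ids.
#   ins, outs = batchSim('machine.anml', 'stimulus.bin', args='')
#   for offset, elements in sorted(outs.items(), key=lambda t: int(t[0])):
#       print offset, '->', len(elements)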
if __name__ == '__main__':
parser = ArgumentParser(description=__doc__)
parser.add_argument('-a', '--ANML_filename', required=True,
help='ANML file filename')
parser.add_argument('-s', '--stimulus_file', required=True,
help='Stimulus file filename')
args = parser.parse_args()
in_symbols_to_offsets,offsets_to_out_symbols = batchSim(args.ANML_filename,args.stimulus_file,'')
items_list = offsets_to_out_symbols.items()
items_list.sort(key = lambda tup: int(tup[0]))
for (key,value) in items_list:
print key, '->', len(value)
#pp(in_symbols_to_offsets)
#pp(offsets_to_out_symbols)
|
Jorge-Rodriguez/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/openbsd_pkg.py
|
79
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrik Lundin <patrik@sigterm.se>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openbsd_pkg
author:
- Patrik Lundin (@eest)
version_added: "1.1"
short_description: Manage packages on OpenBSD
description:
- Manage packages on OpenBSD using the pkg tools.
requirements:
- python >= 2.5
options:
name:
description:
- A name or a list of names of the packages.
required: yes
state:
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
choices: [ absent, latest, present ]
default: present
build:
description:
- Build the package from source instead of downloading and installing
a binary. Requires that the port source tree is already installed.
Automatically builds and installs the 'sqlports' package, if it is
not already installed.
type: bool
default: 'no'
version_added: "2.1"
ports_dir:
description:
- When used in combination with the C(build) option, allows overriding
the default ports source directory.
default: /usr/ports
version_added: "2.1"
clean:
description:
- When updating or removing packages, delete the extra configuration
file(s) in the old packages which are annotated with @extra in
the packaging-list.
type: bool
default: 'no'
version_added: "2.3"
quick:
description:
- Replace or delete packages quickly; do not bother with checksums
before removing normal files.
type: bool
default: 'no'
version_added: "2.3"
notes:
  - When used with a `loop:`, each package will be processed individually;
    it is much more efficient to pass the list directly to the `name` option.
'''
EXAMPLES = '''
- name: Make sure nmap is installed
openbsd_pkg:
name: nmap
state: present
- name: Make sure nmap is the latest version
openbsd_pkg:
name: nmap
state: latest
- name: Make sure nmap is not installed
openbsd_pkg:
name: nmap
state: absent
- name: Make sure nmap is installed, build it from source if it is not
openbsd_pkg:
name: nmap
state: present
build: yes
- name: Specify a pkg flavour with '--'
openbsd_pkg:
name: vim--no_x11
state: present
- name: Specify the default flavour to avoid ambiguity errors
openbsd_pkg:
name: vim--
state: present
- name: Specify a package branch (requires at least OpenBSD 6.0)
openbsd_pkg:
name: python%3.5
state: present
- name: Update all packages on the system
openbsd_pkg:
name: '*'
state: latest
- name: Purge a package and its configuration files
openbsd_pkg:
name: mpd
clean: yes
state: absent
- name: Quickly remove a package without checking checksums
openbsd_pkg:
name: qt5
quick: yes
state: absent
'''
import os
import platform
import re
import shlex
import sqlite3
from distutils.version import StrictVersion
from ansible.module_utils.basic import AnsibleModule
# Function used for executing commands.
def execute_command(cmd, module):
# Break command line into arguments.
# This makes run_command() use shell=False which we need to not cause shell
# expansion of special characters like '*'.
cmd_args = shlex.split(cmd)
return module.run_command(cmd_args)
# Function used to find out if a package is currently installed.
def get_package_state(names, pkg_spec, module):
info_cmd = 'pkg_info -Iq'
for name in names:
command = "%s inst:%s" % (info_cmd, name)
rc, stdout, stderr = execute_command(command, module)
if stderr:
module.fail_json(msg="failed in get_package_state(): " + stderr)
if stdout:
# If the requested package name is just a stem, like "python", we may
# find multiple packages with that name.
pkg_spec[name]['installed_names'] = [installed_name for installed_name in stdout.splitlines()]
module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
pkg_spec[name]['installed_state'] = True
else:
pkg_spec[name]['installed_state'] = False
# Function used to make sure a package is present.
def package_present(names, pkg_spec, module):
build = module.params['build']
for name in names:
# It is possible package_present() has been called from package_latest().
# In that case we do not want to operate on the whole list of names,
# only the leftovers.
if pkg_spec['package_latest_leftovers']:
if name not in pkg_spec['package_latest_leftovers']:
module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
continue
else:
module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
if module.check_mode:
install_cmd = 'pkg_add -Imn'
else:
if build is True:
port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
if os.path.isdir(port_dir):
if pkg_spec[name]['flavor']:
flavors = pkg_spec[name]['flavor'].replace('-', ' ')
install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
elif pkg_spec[name]['subpackage']:
install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
pkg_spec[name]['subpackage'])
else:
install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
else:
module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
else:
install_cmd = 'pkg_add -Im'
if pkg_spec[name]['installed_state'] is False:
# Attempt to install the package
if build is True and not module.check_mode:
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, module, use_unsafe_shell=True)
else:
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
# The behaviour of pkg_add is a bit different depending on if a
# specific version is supplied or not.
#
# When a specific version is supplied the return code will be 0 when
# a package is found and 1 when it is not. If a version is not
# supplied the tool will exit 0 in both cases.
#
# It is important to note that "version" relates to the
# packages-specs(7) notion of a version. If using the branch syntax
# (like "python%3.5") even though a branch name may look like a
# version string it is not used an one by pkg_add.
if pkg_spec[name]['version'] or build is True:
# Depend on the return code.
module.debug("package_present(): depending on return code for name '%s'" % name)
if pkg_spec[name]['rc']:
pkg_spec[name]['changed'] = False
else:
# Depend on stderr instead.
module.debug("package_present(): depending on stderr for name '%s'" % name)
if pkg_spec[name]['stderr']:
# There is a corner case where having an empty directory in
# installpath prior to the right location will result in a
# "file:/local/package/directory/ is empty" message on stderr
                    # while still installing the package, so we need to look
                    # for a message like "packagename-1.0: ok" just in case.
match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
if match:
# It turns out we were able to install the package.
module.debug("package_present(): we were able to install package for name '%s'" % name)
else:
# We really did fail, fake the return code.
module.debug("package_present(): we really did fail for name '%s'" % name)
pkg_spec[name]['rc'] = 1
pkg_spec[name]['changed'] = False
else:
module.debug("package_present(): stderr was not set for name '%s'" % name)
if pkg_spec[name]['rc'] == 0:
pkg_spec[name]['changed'] = True
else:
pkg_spec[name]['rc'] = 0
pkg_spec[name]['stdout'] = ''
pkg_spec[name]['stderr'] = ''
pkg_spec[name]['changed'] = False
# Function used to make sure a package is the latest available version.
def package_latest(names, pkg_spec, module):
if module.params['build'] is True:
module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
upgrade_cmd = 'pkg_add -um'
if module.check_mode:
upgrade_cmd += 'n'
if module.params['clean']:
upgrade_cmd += 'c'
if module.params['quick']:
upgrade_cmd += 'q'
for name in names:
if pkg_spec[name]['installed_state'] is True:
# Attempt to upgrade the package.
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
# Look for output looking something like "nmap-6.01->6.25: ok" to see if
# something changed (or would have changed). Use \W to delimit the match
# from progress meter output.
pkg_spec[name]['changed'] = False
for installed_name in pkg_spec[name]['installed_names']:
module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
if match:
module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
pkg_spec[name]['changed'] = True
break
# FIXME: This part is problematic. Based on the issues mentioned (and
# handled) in package_present() it is not safe to blindly trust stderr
# as an indicator that the command failed, and in the case with
# empty installpath directories this will break.
#
# For now keep this safeguard here, but ignore it if we managed to
# parse out a successful update above. This way we will report a
# successful run when we actually modify something but fail
# otherwise.
if pkg_spec[name]['changed'] is not True:
if pkg_spec[name]['stderr']:
pkg_spec[name]['rc'] = 1
else:
# Note packages that need to be handled by package_present
module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
pkg_spec['package_latest_leftovers'].append(name)
# If there were any packages that were not installed we call
# package_present() which will handle those.
if pkg_spec['package_latest_leftovers']:
module.debug("package_latest(): calling package_present() to handle leftovers")
package_present(names, pkg_spec, module)
# Function used to make sure a package is not installed.
def package_absent(names, pkg_spec, module):
remove_cmd = 'pkg_delete -I'
if module.check_mode:
remove_cmd += 'n'
if module.params['clean']:
remove_cmd += 'c'
if module.params['quick']:
remove_cmd += 'q'
for name in names:
if pkg_spec[name]['installed_state'] is True:
# Attempt to remove the package.
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
if pkg_spec[name]['rc'] == 0:
pkg_spec[name]['changed'] = True
else:
pkg_spec[name]['changed'] = False
else:
pkg_spec[name]['rc'] = 0
pkg_spec[name]['stdout'] = ''
pkg_spec[name]['stderr'] = ''
pkg_spec[name]['changed'] = False
# Function used to parse the package name based on packages-specs(7).
# The general name structure is "stem-version[-flavors]".
#
# Names containing "%" are a special variation not part of the
# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
# description.
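# For illustration, hypothetical names in the style of the EXAMPLES above parse as:
#   "nmap-6.25"   -> stem "nmap", version "6.25"                (style: version)
#   "vim--no_x11" -> stem "vim", no version, flavor "no_x11"    (style: versionless)
#   "python%3.5"  -> stem "python", branch "3.5"                (style: stem)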
def parse_package_name(names, pkg_spec, module):
# Initialize empty list of package_latest() leftovers.
pkg_spec['package_latest_leftovers'] = []
for name in names:
module.debug("parse_package_name(): parsing name: %s" % name)
# Do some initial matches so we can base the more advanced regex on that.
version_match = re.search("-[0-9]", name)
versionless_match = re.search("--", name)
# Stop if someone is giving us a name that both has a version and is
# version-less at the same time.
if version_match and versionless_match:
module.fail_json(msg="package name both has a version and is version-less: " + name)
# All information for a given name is kept in the pkg_spec keyed by that name.
pkg_spec[name] = {}
# If name includes a version.
if version_match:
match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
if match:
pkg_spec[name]['stem'] = match.group('stem')
pkg_spec[name]['version_separator'] = '-'
pkg_spec[name]['version'] = match.group('version')
pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
pkg_spec[name]['flavor'] = match.group('flavor')
pkg_spec[name]['branch'] = match.group('branch')
pkg_spec[name]['style'] = 'version'
module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
"flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
else:
module.fail_json(msg="unable to parse package name at version_match: " + name)
# If name includes no version but is version-less ("--").
elif versionless_match:
match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
if match:
pkg_spec[name]['stem'] = match.group('stem')
pkg_spec[name]['version_separator'] = '-'
pkg_spec[name]['version'] = None
pkg_spec[name]['flavor_separator'] = '-'
pkg_spec[name]['flavor'] = match.group('flavor')
pkg_spec[name]['branch'] = match.group('branch')
pkg_spec[name]['style'] = 'versionless'
module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
else:
module.fail_json(msg="unable to parse package name at versionless_match: " + name)
# If name includes no version, and is not version-less, it is all a
# stem, possibly with a branch (%branchname) tacked on at the
# end.
else:
match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
if match:
pkg_spec[name]['stem'] = match.group('stem')
pkg_spec[name]['version_separator'] = None
pkg_spec[name]['version'] = None
pkg_spec[name]['flavor_separator'] = None
pkg_spec[name]['flavor'] = None
pkg_spec[name]['branch'] = match.group('branch')
pkg_spec[name]['style'] = 'stem'
module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
else:
module.fail_json(msg="unable to parse package name at else: " + name)
# Verify that the managed host is new enough to support branch syntax.
if pkg_spec[name]['branch']:
branch_release = "6.0"
if StrictVersion(platform.release()) < StrictVersion(branch_release):
module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
# Sanity check that there are no trailing dashes in flavor.
# Try to stop strange stuff early so we can be strict later.
if pkg_spec[name]['flavor']:
match = re.search("-$", pkg_spec[name]['flavor'])
if match:
module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
# Function used for figuring out the port path.
def get_package_source_path(name, pkg_spec, module):
pkg_spec[name]['subpackage'] = None
if pkg_spec[name]['stem'] == 'sqlports':
return 'databases/sqlports'
else:
# try for an exact match first
sqlports_db_file = '/usr/local/share/sqlports'
if not os.path.isfile(sqlports_db_file):
module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
conn = sqlite3.connect(sqlports_db_file)
first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
query = first_part_of_query + ' = ?'
module.debug("package_package_source_path(): exact query: %s" % query)
cursor = conn.execute(query, (name,))
results = cursor.fetchall()
# next, try for a fuzzier match
if len(results) < 1:
looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
query = first_part_of_query + ' LIKE ?'
if pkg_spec[name]['flavor']:
looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
cursor = conn.execute(query, (looking_for,))
elif pkg_spec[name]['style'] == 'versionless':
query += ' AND fullpkgname NOT LIKE ?'
module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
else:
module.debug("package_package_source_path(): fuzzy query: %s" % query)
cursor = conn.execute(query, (looking_for,))
results = cursor.fetchall()
# error if we don't find exactly 1 match
conn.close()
if len(results) < 1:
module.fail_json(msg="could not find a port by the name '%s'" % name)
if len(results) > 1:
matches = map(lambda x: x[1], results)
module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
# there's exactly 1 match, so figure out the subpackage, if any, then return
fullpkgpath = results[0][0]
parts = fullpkgpath.split(',')
if len(parts) > 1 and parts[1][0] == '-':
pkg_spec[name]['subpackage'] = parts[1]
return parts[0]
# Function used for upgrading all installed packages.
def upgrade_packages(pkg_spec, module):
if module.check_mode:
upgrade_cmd = 'pkg_add -Imnu'
else:
upgrade_cmd = 'pkg_add -Imu'
# Create a minimal pkg_spec entry for '*' to store return values.
pkg_spec['*'] = {}
# Attempt to upgrade all packages.
pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)
# Try to find any occurrence of a package changing version like:
# "bzip2-1.0.6->1.0.6p0: ok".
match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
if match:
pkg_spec['*']['changed'] = True
else:
pkg_spec['*']['changed'] = False
# It seems we can not trust the return value, so depend on the presence of
# stderr to know if something failed.
if pkg_spec['*']['stderr']:
pkg_spec['*']['rc'] = 1
else:
pkg_spec['*']['rc'] = 0
# ===========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='list', required=True),
state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
build=dict(type='bool', default=False),
ports_dir=dict(type='path', default='/usr/ports'),
quick=dict(type='bool', default=False),
clean=dict(type='bool', default=False),
),
supports_check_mode=True
)
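# A hedged usage sketch (not from the original file): assuming this module
# is installed under a name such as "openbsd_pkg", a playbook task could
# look like:
#
#   - openbsd_pkg:
#       name: rsync
#       state: present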
name = module.params['name']
state = module.params['state']
build = module.params['build']
ports_dir = module.params['ports_dir']
rc = 0
stdout = ''
stderr = ''
result = {}
result['name'] = name
result['state'] = state
result['build'] = build
# The data structure used to keep track of package information.
pkg_spec = {}
if build is True:
if not os.path.isdir(ports_dir):
module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
# Build sqlports if it's not installed yet.
parse_package_name(['sqlports'], pkg_spec, module)
get_package_state(['sqlports'], pkg_spec, module)
if not pkg_spec['sqlports']['installed_state']:
module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
package_present(['sqlports'], pkg_spec, module)
asterisk_name = False
for n in name:
if n == '*':
if len(name) != 1:
module.fail_json(msg="the package name '*' can not be mixed with other names")
asterisk_name = True
if asterisk_name:
if state != 'latest':
module.fail_json(msg="the package name '*' is only valid when using state=latest")
else:
# Perform an upgrade of all installed packages.
upgrade_packages(pkg_spec, module)
else:
# Parse package names and put results in the pkg_spec dictionary.
parse_package_name(name, pkg_spec, module)
# Not sure how the branch syntax is supposed to play together
# with build mode. Disable it for now.
for n in name:
if pkg_spec[n]['branch'] and module.params['build'] is True:
module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
# Get state for all package names.
get_package_state(name, pkg_spec, module)
# Perform requested action.
if state in ['installed', 'present']:
package_present(name, pkg_spec, module)
elif state in ['absent', 'removed']:
package_absent(name, pkg_spec, module)
elif state == 'latest':
package_latest(name, pkg_spec, module)
# The combined changed status for all requested packages. If anything
# is changed this is set to True.
combined_changed = False
# The combined failed status for all requested packages. If anything
# failed this is set to True.
combined_failed = False
# We combine all error messages in this comma separated string, for example:
# "msg": "Can't find nmapp\n, Can't find nmappp\n"
combined_error_message = ''
# Loop over all requested package names and check if anything failed or
# changed.
for n in name:
if pkg_spec[n]['rc'] != 0:
combined_failed = True
if pkg_spec[n]['stderr']:
if combined_error_message:
combined_error_message += ", %s" % pkg_spec[n]['stderr']
else:
combined_error_message = pkg_spec[n]['stderr']
else:
if combined_error_message:
combined_error_message += ", %s" % pkg_spec[n]['stdout']
else:
combined_error_message = pkg_spec[n]['stdout']
if pkg_spec[n]['changed'] is True:
combined_changed = True
# If combined_failed is set, at least one of the requested packages
# failed, so report the combined error message.
if combined_failed:
module.fail_json(msg=combined_error_message, **result)
result['changed'] = combined_changed
module.exit_json(**result)
if __name__ == '__main__':
main()
|
lepricon49/headphones
|
refs/heads/master
|
lib/unidecode/x063.py
|
252
|
data = (
'Bo ', # 0x00
'Chi ', # 0x01
'Gua ', # 0x02
'Zhi ', # 0x03
'Kuo ', # 0x04
'Duo ', # 0x05
'Duo ', # 0x06
'Zhi ', # 0x07
'Qie ', # 0x08
'An ', # 0x09
'Nong ', # 0x0a
'Zhen ', # 0x0b
'Ge ', # 0x0c
'Jiao ', # 0x0d
'Ku ', # 0x0e
'Dong ', # 0x0f
'Ru ', # 0x10
'Tiao ', # 0x11
'Lie ', # 0x12
'Zha ', # 0x13
'Lu ', # 0x14
'Die ', # 0x15
'Wa ', # 0x16
'Jue ', # 0x17
'Mushiru ', # 0x18
'Ju ', # 0x19
'Zhi ', # 0x1a
'Luan ', # 0x1b
'Ya ', # 0x1c
'Zhua ', # 0x1d
'Ta ', # 0x1e
'Xie ', # 0x1f
'Nao ', # 0x20
'Dang ', # 0x21
'Jiao ', # 0x22
'Zheng ', # 0x23
'Ji ', # 0x24
'Hui ', # 0x25
'Xun ', # 0x26
'Ku ', # 0x27
'Ai ', # 0x28
'Tuo ', # 0x29
'Nuo ', # 0x2a
'Cuo ', # 0x2b
'Bo ', # 0x2c
'Geng ', # 0x2d
'Ti ', # 0x2e
'Zhen ', # 0x2f
'Cheng ', # 0x30
'Suo ', # 0x31
'Suo ', # 0x32
'Keng ', # 0x33
'Mei ', # 0x34
'Long ', # 0x35
'Ju ', # 0x36
'Peng ', # 0x37
'Jian ', # 0x38
'Yi ', # 0x39
'Ting ', # 0x3a
'Shan ', # 0x3b
'Nuo ', # 0x3c
'Wan ', # 0x3d
'Xie ', # 0x3e
'Cha ', # 0x3f
'Feng ', # 0x40
'Jiao ', # 0x41
'Wu ', # 0x42
'Jun ', # 0x43
'Jiu ', # 0x44
'Tong ', # 0x45
'Kun ', # 0x46
'Huo ', # 0x47
'Tu ', # 0x48
'Zhuo ', # 0x49
'Pou ', # 0x4a
'Le ', # 0x4b
'Ba ', # 0x4c
'Han ', # 0x4d
'Shao ', # 0x4e
'Nie ', # 0x4f
'Juan ', # 0x50
'Ze ', # 0x51
'Song ', # 0x52
'Ye ', # 0x53
'Jue ', # 0x54
'Bu ', # 0x55
'Huan ', # 0x56
'Bu ', # 0x57
'Zun ', # 0x58
'Yi ', # 0x59
'Zhai ', # 0x5a
'Lu ', # 0x5b
'Sou ', # 0x5c
'Tuo ', # 0x5d
'Lao ', # 0x5e
'Sun ', # 0x5f
'Bang ', # 0x60
'Jian ', # 0x61
'Huan ', # 0x62
'Dao ', # 0x63
'[?] ', # 0x64
'Wan ', # 0x65
'Qin ', # 0x66
'Peng ', # 0x67
'She ', # 0x68
'Lie ', # 0x69
'Min ', # 0x6a
'Men ', # 0x6b
'Fu ', # 0x6c
'Bai ', # 0x6d
'Ju ', # 0x6e
'Dao ', # 0x6f
'Wo ', # 0x70
'Ai ', # 0x71
'Juan ', # 0x72
'Yue ', # 0x73
'Zong ', # 0x74
'Chen ', # 0x75
'Chui ', # 0x76
'Jie ', # 0x77
'Tu ', # 0x78
'Ben ', # 0x79
'Na ', # 0x7a
'Nian ', # 0x7b
'Nuo ', # 0x7c
'Zu ', # 0x7d
'Wo ', # 0x7e
'Xi ', # 0x7f
'Xian ', # 0x80
'Cheng ', # 0x81
'Dian ', # 0x82
'Sao ', # 0x83
'Lun ', # 0x84
'Qing ', # 0x85
'Gang ', # 0x86
'Duo ', # 0x87
'Shou ', # 0x88
'Diao ', # 0x89
'Pou ', # 0x8a
'Di ', # 0x8b
'Zhang ', # 0x8c
'Gun ', # 0x8d
'Ji ', # 0x8e
'Tao ', # 0x8f
'Qia ', # 0x90
'Qi ', # 0x91
'Pai ', # 0x92
'Shu ', # 0x93
'Qian ', # 0x94
'Ling ', # 0x95
'Yi ', # 0x96
'Ya ', # 0x97
'Jue ', # 0x98
'Zheng ', # 0x99
'Liang ', # 0x9a
'Gua ', # 0x9b
'Yi ', # 0x9c
'Huo ', # 0x9d
'Shan ', # 0x9e
'Zheng ', # 0x9f
'Lue ', # 0xa0
'Cai ', # 0xa1
'Tan ', # 0xa2
'Che ', # 0xa3
'Bing ', # 0xa4
'Jie ', # 0xa5
'Ti ', # 0xa6
'Kong ', # 0xa7
'Tui ', # 0xa8
'Yan ', # 0xa9
'Cuo ', # 0xaa
'Zou ', # 0xab
'Ju ', # 0xac
'Tian ', # 0xad
'Qian ', # 0xae
'Ken ', # 0xaf
'Bai ', # 0xb0
'Shou ', # 0xb1
'Jie ', # 0xb2
'Lu ', # 0xb3
'Guo ', # 0xb4
'Haba ', # 0xb5
'[?] ', # 0xb6
'Zhi ', # 0xb7
'Dan ', # 0xb8
'Mang ', # 0xb9
'Xian ', # 0xba
'Sao ', # 0xbb
'Guan ', # 0xbc
'Peng ', # 0xbd
'Yuan ', # 0xbe
'Nuo ', # 0xbf
'Jian ', # 0xc0
'Zhen ', # 0xc1
'Jiu ', # 0xc2
'Jian ', # 0xc3
'Yu ', # 0xc4
'Yan ', # 0xc5
'Kui ', # 0xc6
'Nan ', # 0xc7
'Hong ', # 0xc8
'Rou ', # 0xc9
'Pi ', # 0xca
'Wei ', # 0xcb
'Sai ', # 0xcc
'Zou ', # 0xcd
'Xuan ', # 0xce
'Miao ', # 0xcf
'Ti ', # 0xd0
'Nie ', # 0xd1
'Cha ', # 0xd2
'Shi ', # 0xd3
'Zong ', # 0xd4
'Zhen ', # 0xd5
'Yi ', # 0xd6
'Shun ', # 0xd7
'Heng ', # 0xd8
'Bian ', # 0xd9
'Yang ', # 0xda
'Huan ', # 0xdb
'Yan ', # 0xdc
'Zuan ', # 0xdd
'An ', # 0xde
'Xu ', # 0xdf
'Ya ', # 0xe0
'Wo ', # 0xe1
'Ke ', # 0xe2
'Chuai ', # 0xe3
'Ji ', # 0xe4
'Ti ', # 0xe5
'La ', # 0xe6
'La ', # 0xe7
'Cheng ', # 0xe8
'Kai ', # 0xe9
'Jiu ', # 0xea
'Jiu ', # 0xeb
'Tu ', # 0xec
'Jie ', # 0xed
'Hui ', # 0xee
'Geng ', # 0xef
'Chong ', # 0xf0
'Shuo ', # 0xf1
'She ', # 0xf2
'Xie ', # 0xf3
'Yuan ', # 0xf4
'Qian ', # 0xf5
'Ye ', # 0xf6
'Cha ', # 0xf7
'Zha ', # 0xf8
'Bei ', # 0xf9
'Yao ', # 0xfa
'[?] ', # 0xfb
'[?] ', # 0xfc
'Lan ', # 0xfd
'Wen ', # 0xfe
'Qin ', # 0xff
)
|
huguesv/PTVS
|
refs/heads/master
|
Python/Product/Miniconda/Miniconda3-x64/Lib/asyncio/queues.py
|
11
|
__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')
import collections
import heapq
from . import events
from . import locks
class QueueEmpty(Exception):
"""Raised when Queue.get_nowait() is called on an empty Queue."""
pass
class QueueFull(Exception):
"""Raised when the Queue.put_nowait() method is called on a full Queue."""
pass
class Queue:
"""A queue, useful for coordinating producer and consumer coroutines.
If maxsize is less than or equal to zero, the queue size is infinite. If it
is an integer greater than 0, then "await put()" will block when the
queue reaches maxsize, until an item is removed by get().
Unlike the standard library Queue, you can reliably know this Queue's size
with qsize(), since your single-threaded asyncio application won't be
interrupted between calling qsize() and doing an operation on the Queue.
"""
def __init__(self, maxsize=0, *, loop=None):
if loop is None:
self._loop = events.get_event_loop()
else:
self._loop = loop
self._maxsize = maxsize
# Futures.
self._getters = collections.deque()
# Futures.
self._putters = collections.deque()
self._unfinished_tasks = 0
self._finished = locks.Event(loop=self._loop)
self._finished.set()
self._init(maxsize)
# These three are overridable in subclasses.
def _init(self, maxsize):
self._queue = collections.deque()
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
# End of the overridable methods.
def _wakeup_next(self, waiters):
# Wake up the next waiter (if any) that isn't cancelled.
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
break
def __repr__(self):
return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'
def __str__(self):
return f'<{type(self).__name__} {self._format()}>'
def _format(self):
result = f'maxsize={self._maxsize!r}'
if getattr(self, '_queue', None):
result += f' _queue={list(self._queue)!r}'
if self._getters:
result += f' _getters[{len(self._getters)}]'
if self._putters:
result += f' _putters[{len(self._putters)}]'
if self._unfinished_tasks:
result += f' tasks={self._unfinished_tasks}'
return result
def qsize(self):
"""Number of items in the queue."""
return len(self._queue)
@property
def maxsize(self):
"""Number of items allowed in the queue."""
return self._maxsize
def empty(self):
"""Return True if the queue is empty, False otherwise."""
return not self._queue
def full(self):
"""Return True if there are maxsize items in the queue.
Note: if the Queue was initialized with maxsize=0 (the default),
then full() is never True.
"""
if self._maxsize <= 0:
return False
else:
return self.qsize() >= self._maxsize
async def put(self, item):
"""Put an item into the queue.
Put an item into the queue. If the queue is full, wait until a free
slot is available before adding item.
"""
while self.full():
putter = self._loop.create_future()
self._putters.append(putter)
try:
await putter
except:
putter.cancel() # Just in case putter is not done yet.
try:
# Clean self._putters from canceled putters.
self._putters.remove(putter)
except ValueError:
# The putter could be removed from self._putters by a
# previous get_nowait call.
pass
if not self.full() and not putter.cancelled():
# We were woken up by get_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._putters)
raise
return self.put_nowait(item)
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise QueueFull.
"""
if self.full():
raise QueueFull
self._put(item)
self._unfinished_tasks += 1
self._finished.clear()
self._wakeup_next(self._getters)
async def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
"""
while self.empty():
getter = self._loop.create_future()
self._getters.append(getter)
try:
await getter
except:
getter.cancel() # Just in case getter is not done yet.
try:
# Clean self._getters from canceled getters.
self._getters.remove(getter)
except ValueError:
# The getter could be removed from self._getters by a
# previous put_nowait call.
pass
if not self.empty() and not getter.cancelled():
# We were woken up by put_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._getters)
raise
return self.get_nowait()
def get_nowait(self):
"""Remove and return an item from the queue.
Return an item if one is immediately available, else raise QueueEmpty.
"""
if self.empty():
raise QueueEmpty
item = self._get()
self._wakeup_next(self._putters)
return item
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
Raises ValueError if called more times than there were items placed in
the queue.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()
async def join(self):
"""Block until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer calls task_done() to
indicate that the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
if self._unfinished_tasks > 0:
await self._finished.wait()
class PriorityQueue(Queue):
"""A subclass of Queue; retrieves entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
"""
def _init(self, maxsize):
self._queue = []
def _put(self, item, heappush=heapq.heappush):
heappush(self._queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self._queue)
class LifoQueue(Queue):
"""A subclass of Queue that retrieves most recently added entries first."""
def _init(self, maxsize):
self._queue = []
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.pop()
|
rimbalinux/MSISDNArea
|
refs/heads/master
|
django/conf/locale/sk/formats.py
|
3
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. F Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
makokal/funzo
|
refs/heads/master
|
docs/conf.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import sphinx_rtd_theme
# sys.path.insert(0, os.path.abspath('../sphinxext'))
# sys.path.insert(0, os.path.abspath('../funzo'))
# sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode', # create HTML file of source code and link to it
# 'sphinx.ext.linkcode', # link to github, see linkcode_resolve() below
'numpydoc',
'sphinx_gallery.gen_gallery'
]
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs' : '../examples',
# path where to save gallery generated examples
'gallery_dirs' : 'auto_examples'
}
# Generate the plots for the gallery
plot_gallery = False
# See https://github.com/rtfd/readthedocs.org/issues/283
mathjax_path = ('https://cdn.mathjax.org/mathjax/latest/MathJax.js?'
'config=TeX-AMS-MML_HTMLorMML')
# see http://stackoverflow.com/q/12206334/562769
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'funzo'
copyright = u'2015–2016, Billy Okal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import funzo
# The short X.Y version.
version = '.'.join(funzo.__version__.split('.', 2)[:2])
# The full version, including alpha/beta/rc tags.
release = funzo.__version__
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
def setup(app):
app.add_stylesheet("fix_rtd.css")
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'funzodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'funzo.tex', u'funzo Documentation',
u'Billy Okal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'funzo', u'funzo Documentation',
[u'Billy Okal'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'funzo', u'funzo Documentation',
u'Billy Okal', 'funzo',
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
affansyed/bcc
|
refs/heads/master
|
tools/execsnoop.py
|
1
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# execsnoop Trace new processes via exec() syscalls.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: execsnoop [-h] [-t] [-x] [-n NAME]
#
# This currently will print up to a maximum of 19 arguments, plus the process
# name, so 20 fields in total (MAXARG).
#
# This won't catch all new processes: an application may fork() but not exec().
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 07-Feb-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
import argparse
import ctypes as ct
import re
import time
from collections import defaultdict
# arguments
examples = """examples:
./execsnoop # trace all exec() syscalls
./execsnoop -x # include failed exec()s
./execsnoop -t # include timestamps
./execsnoop -n main # only print command lines containing "main"
"""
parser = argparse.ArgumentParser(
description="Trace exec() syscalls",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-x", "--fails", action="store_true",
help="include failed exec()s")
parser.add_argument("-n", "--name",
help="only print commands matching this name (regex), any arg")
args = parser.parse_args()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/fs.h>
#define MAXARG 20
#define ARGSIZE 128
enum event_type {
EVENT_ARG,
EVENT_RET,
};
struct data_t {
u32 pid; // PID as in the userspace term (i.e. task->tgid in kernel)
char comm[TASK_COMM_LEN];
enum event_type type;
char argv[ARGSIZE];
int retval;
};
BPF_PERF_OUTPUT(events);
static int __submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data)
{
bpf_probe_read(data->argv, sizeof(data->argv), ptr);
events.perf_submit(ctx, data, sizeof(struct data_t));
return 1;
}
static int submit_arg(struct pt_regs *ctx, void *ptr, struct data_t *data)
{
const char *argp = NULL;
bpf_probe_read(&argp, sizeof(argp), ptr);
if (argp) {
return __submit_arg(ctx, (void *)(argp), data);
}
return 0;
}
int kprobe__sys_execve(struct pt_regs *ctx, struct filename *filename,
const char __user *const __user *__argv,
const char __user *const __user *__envp)
{
// create data here and pass to submit_arg to save stack space (#555)
struct data_t data = {};
data.pid = bpf_get_current_pid_tgid() >> 32;
bpf_get_current_comm(&data.comm, sizeof(data.comm));
data.type = EVENT_ARG;
__submit_arg(ctx, (void *)filename, &data);
int i = 1; // skip first arg, as we submitted filename
// unrolled loop to walk argv[] (MAXARG)
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++; // X
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++;
if (submit_arg(ctx, (void *)&__argv[i], &data) == 0) goto out; i++; // XX
// handle truncated argument list
char ellipsis[] = "...";
__submit_arg(ctx, (void *)ellipsis, &data);
out:
return 0;
}
int kretprobe__sys_execve(struct pt_regs *ctx)
{
struct data_t data = {};
data.pid = bpf_get_current_pid_tgid() >> 32;
bpf_get_current_comm(&data.comm, sizeof(data.comm));
data.type = EVENT_RET;
data.retval = PT_REGS_RC(ctx);
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
"""
# initialize BPF
b = BPF(text=bpf_text)
# header
if args.timestamp:
print("%-8s" % ("TIME(s)"), end="")
print("%-16s %-6s %-6s %3s %s" % ("PCOMM", "PID", "PPID", "RET", "ARGS"))
TASK_COMM_LEN = 16 # linux/sched.h
ARGSIZE = 128 # should match #define in C above
class Data(ct.Structure):
_fields_ = [
("pid", ct.c_uint),
("comm", ct.c_char * TASK_COMM_LEN),
("type", ct.c_int),
("argv", ct.c_char * ARGSIZE),
("retval", ct.c_int),
]
class EventType(object):
EVENT_ARG = 0
EVENT_RET = 1
start_ts = time.time()
argv = defaultdict(list)
# TODO: This is best-effort PPID matching. Short-lived processes may exit
# before we get a chance to read the PPID. This should be replaced with
# fetching PPID via C when available (#364).
def get_ppid(pid):
try:
with open("/proc/%d/status" % pid) as status:
for line in status:
if line.startswith("PPid:"):
return int(line.split()[1])
except IOError:
pass
return 0
# process event
def print_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data)).contents
skip = False
if event.type == EventType.EVENT_ARG:
argv[event.pid].append(event.argv)
elif event.type == EventType.EVENT_RET:
if event.retval != 0 and not args.fails:
skip = True
if args.name and not re.search(args.name, event.comm):
skip = True
if not skip:
if args.timestamp:
print("%-8.3f" % (time.time() - start_ts), end="")
ppid = get_ppid(event.pid)
print("%-16s %-6s %-6s %3s %s" % (event.comm, event.pid,
ppid if ppid > 0 else "?", event.retval,
' '.join(argv[event.pid])))
del(argv[event.pid])
# loop with callback to print_event
b["events"].open_perf_buffer(print_event)
while 1:
b.kprobe_poll()
|
dfdx2/django
|
refs/heads/master
|
tests/postgres_tests/fields.py
|
61
|
"""
Indirection layer for PostgreSQL-specific fields, so the tests don't fail when
run with a backend other than PostgreSQL.
"""
from django.db import models
try:
from django.contrib.postgres.fields import (
ArrayField, BigIntegerRangeField, CICharField, CIEmailField,
CITextField, DateRangeField, DateTimeRangeField, FloatRangeField,
HStoreField, IntegerRangeField, JSONField,
)
from django.contrib.postgres.search import SearchVectorField
except ImportError:
class DummyArrayField(models.Field):
def __init__(self, base_field, size=None, **kwargs):
super().__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs.update({
'base_field': '',
'size': 1,
})
return name, path, args, kwargs
class DummyJSONField(models.Field):
def __init__(self, encoder=None, **kwargs):
super().__init__(**kwargs)
ArrayField = DummyArrayField
BigIntegerRangeField = models.Field
CICharField = models.Field
CIEmailField = models.Field
CITextField = models.Field
DateRangeField = models.Field
DateTimeRangeField = models.Field
FloatRangeField = models.Field
HStoreField = models.Field
IntegerRangeField = models.Field
JSONField = DummyJSONField
SearchVectorField = models.Field
|
DrMeers/django
|
refs/heads/master
|
django/contrib/gis/management/commands/ogrinspect.py
|
17
|
import inspect
from optparse import make_option
from django.contrib.gis import gdal
from django.core.management.base import LabelCommand, CommandError
def layer_option(option, opt, value, parser):
"""
Callback for `make_option` for the `ogrinspect` `layer_key`
keyword option which may be an integer or a string.
"""
try:
dest = int(value)
except ValueError:
dest = value
setattr(parser.values, option.dest, dest)
def list_option(option, opt, value, parser):
"""
Callback for `make_option` for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
if value.lower() == 'true':
dest = True
else:
dest = [s for s in value.split(',')]
setattr(parser.values, option.dest, dest)
class Command(LabelCommand):
help = ('Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
'a GeoDjango model with the given model name. For example:\n'
' ./manage.py ogrinspect zipcode.shp Zipcode')
args = '[data_source] [model_name]'
option_list = LabelCommand.option_list + (
make_option('--blank', dest='blank', type='string', action='callback',
callback=list_option, default=False,
help='Use a comma separated list of OGR field names to add '
'the `blank=True` option to the field definition. Set with '
'`true` to apply to all applicable fields.'),
make_option('--decimal', dest='decimal', type='string', action='callback',
callback=list_option, default=False,
help='Use a comma separated list of OGR float fields to '
'generate `DecimalField` instead of the default '
'`FloatField`. Set to `true` to apply to all OGR float fields.'),
make_option('--geom-name', dest='geom_name', type='string', default='geom',
help='Specifies the model name for the Geometry Field '
'(defaults to `geom`)'),
make_option('--layer', dest='layer_key', type='string', action='callback',
callback=layer_option, default=0,
help='The key for specifying which layer in the OGR data '
'source to use. Defaults to 0 (the first layer). May be '
'an integer or a string identifier for the layer.'),
make_option('--multi-geom', action='store_true', dest='multi_geom', default=False,
help='Treat the geometry in the data source as a geometry collection.'),
make_option('--name-field', dest='name_field',
help='Specifies a field name to return for the `__unicode__`/`__str__` function.'),
make_option('--no-imports', action='store_false', dest='imports', default=True,
help='Do not include `from django.contrib.gis.db import models` '
'statement.'),
make_option('--null', dest='null', type='string', action='callback',
callback=list_option, default=False,
help='Use a comma separated list of OGR field names to add '
'the `null=True` option to the field definition. Set with '
'`true` to apply to all applicable fields.'),
make_option('--srid', dest='srid',
help='The SRID to use for the Geometry Field. If it can be '
'determined, the SRID of the data source is used.'),
make_option('--mapping', action='store_true', dest='mapping',
help='Generate mapping dictionary for use with `LayerMapping`.')
)
requires_system_checks = False
def handle(self, *args, **options):
try:
data_source, model_name = args
except ValueError:
raise CommandError('Invalid arguments, must provide: %s' % self.args)
if not gdal.HAS_GDAL:
raise CommandError('GDAL is required to inspect geospatial data sources.')
# Getting the OGR DataSource from the string parameter.
try:
ds = gdal.DataSource(data_source)
except gdal.OGRException as msg:
raise CommandError(msg)
# Returning the output of ogrinspect with the given arguments
# and options.
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
# Filter options to params accepted by `_ogrinspect`
ogr_options = dict((k, v) for k, v in options.items()
if k in inspect.getargspec(_ogrinspect).args and v is not None)
output = [s for s in _ogrinspect(ds, model_name, **ogr_options)]
if options['mapping']:
# Constructing the keyword arguments for `mapping`, and
# calling it on the data source.
kwargs = {'geom_name': options['geom_name'],
'layer_key': options['layer_key'],
'multi_geom': options['multi_geom'],
}
mapping_dict = mapping(ds, **kwargs)
# This extra legwork is so that the dictionary definition comes
# out in the same order as the fields in the model definition.
rev_mapping = dict((v, k) for k, v in mapping_dict.items())
output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
'%s_mapping = {' % model_name.lower()])
output.extend(" '%s' : '%s'," % (rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields)
output.extend(" '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}')
return '\n'.join(output) + '\n'
|
YeelerG/scrapy
|
refs/heads/master
|
scrapy/contrib/linkextractors/regex.py
|
144
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.linkextractors.regex` is deprecated, "
"use `scrapy.linkextractors.regex` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.linkextractors.regex import *
|
acarmel/CouchPotatoServer
|
refs/heads/master
|
libs/html5lib/treewalkers/pulldom.py
|
1729
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
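# ignore_until holds the node of a void element that has already been
# emitted as an EmptyTag; events are skipped until its matching
# END_ELEMENT has been consumed.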
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
|
poeks/twitterbelle
|
refs/heads/master
|
appengine_django/tests/core_test.py
|
13
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that the core module functionality is present and functioning."""
import unittest
from appengine_django import appid
from appengine_django import have_appserver
class AppengineDjangoTest(unittest.TestCase):
"""Tests that the helper module has been correctly installed."""
def testAppidProvided(self):
"""Tests that application ID and configuration has been loaded."""
self.assert_(appid is not None)
def testAppserverDetection(self):
"""Tests that the appserver detection flag is present and correct."""
# It seems highly unlikely that these tests would ever be run from within
# an appserver.
self.assertEqual(have_appserver, False)
|
p2pu/learning-circles
|
refs/heads/master
|
studygroups/migrations/0052_course_created_by.py
|
1
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('studygroups', '0051_auto_20160115_0737'),
]
operations = [
migrations.AddField(
model_name='course',
name='created_by',
field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE),
),
]
|
Thor77/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/tunein.py
|
26
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
from ..compat import compat_urlparse
class TuneInBaseIE(InfoExtractor):
_API_BASE_URL = 'http://tunein.com/tuner/tune/'
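# Each subclass supplies _API_URL_QUERY (for example
# '?tuneType=Station&stationId=%s'); it is appended to the base URL above
# with the content id interpolated.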
def _real_extract(self, url):
content_id = self._match_id(url)
content_info = self._download_json(
self._API_BASE_URL + self._API_URL_QUERY % content_id,
content_id, note='Downloading JSON metadata')
title = content_info['Title']
thumbnail = content_info.get('Logo')
location = content_info.get('Location')
streams_url = content_info.get('StreamUrl')
if not streams_url:
raise ExtractorError('No downloadable streams found', expected=True)
if not streams_url.startswith('http://'):
streams_url = compat_urlparse.urljoin(url, streams_url)
streams = self._download_json(
streams_url, content_id, note='Downloading stream data',
transform_source=lambda s: re.sub(r'^\s*\((.*)\);\s*$', r'\1', s))['Streams']
is_live = None
formats = []
for stream in streams:
if stream.get('Type') == 'Live':
is_live = True
reliability = stream.get('Reliability')
format_note = (
'Reliability: %d%%' % reliability
if reliability is not None else None)
formats.append({
'preference': (
0 if reliability is None or reliability > 90
else 1),
'abr': stream.get('Bandwidth'),
'ext': stream.get('MediaType').lower(),
'acodec': stream.get('MediaType'),
'vcodec': 'none',
'url': stream.get('Url'),
'source_preference': reliability,
'format_note': format_note,
})
self._sort_formats(formats)
return {
'id': content_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'location': location,
'is_live': is_live,
}
class TuneInClipIE(TuneInBaseIE):
IE_NAME = 'tunein:clip'
_VALID_URL = r'https?://(?:www\.)?tunein\.com/station/.*?audioClipId\=(?P<id>\d+)'
_API_URL_QUERY = '?tuneType=AudioClip&audioclipId=%s'
_TESTS = [
{
'url': 'http://tunein.com/station/?stationId=246119&audioClipId=816',
'md5': '99f00d772db70efc804385c6b47f4e77',
'info_dict': {
'id': '816',
'title': '32m',
'ext': 'mp3',
},
},
]
class TuneInStationIE(TuneInBaseIE):
IE_NAME = 'tunein:station'
_VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-s|station/.*?StationId\=)(?P<id>\d+)'
_API_URL_QUERY = '?tuneType=Station&stationId=%s'
@classmethod
def suitable(cls, url):
return False if TuneInClipIE.suitable(url) else super(TuneInStationIE, cls).suitable(url)
_TESTS = [
{
'url': 'http://tunein.com/radio/Jazz24-885-s34682/',
'info_dict': {
'id': '34682',
'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
'ext': 'mp3',
'location': 'Tacoma, WA',
},
'params': {
'skip_download': True, # live stream
},
},
]
class TuneInProgramIE(TuneInBaseIE):
IE_NAME = 'tunein:program'
_VALID_URL = r'https?://(?:www\.)?tunein\.com/(?:radio/.*?-p|program/.*?ProgramId\=)(?P<id>\d+)'
_API_URL_QUERY = '?tuneType=Program&programId=%s'
_TESTS = [
{
'url': 'http://tunein.com/radio/Jazz-24-p2506/',
'info_dict': {
'id': '2506',
'title': 'Jazz 24 on 91.3 WUKY-HD3',
'ext': 'mp3',
'location': 'Lexington, KY',
},
'params': {
'skip_download': True, # live stream
},
},
]
class TuneInTopicIE(TuneInBaseIE):
IE_NAME = 'tunein:topic'
_VALID_URL = r'https?://(?:www\.)?tunein\.com/topic/.*?TopicId\=(?P<id>\d+)'
_API_URL_QUERY = '?tuneType=Topic&topicId=%s'
_TESTS = [
{
'url': 'http://tunein.com/topic/?TopicId=101830576',
'md5': 'c31a39e6f988d188252eae7af0ef09c9',
'info_dict': {
'id': '101830576',
'title': 'Votez pour moi du 29 octobre 2015 (29/10/15)',
'ext': 'mp3',
'location': 'Belgium',
},
},
]
class TuneInShortenerIE(InfoExtractor):
IE_NAME = 'tunein:shortener'
IE_DESC = False # Do not list
_VALID_URL = r'https?://tun\.in/(?P<id>[A-Za-z0-9]+)'
_TEST = {
# test redirection
'url': 'http://tun.in/ser7s',
'info_dict': {
'id': '34682',
'title': 'Jazz 24 on 88.5 Jazz24 - KPLU-HD2',
'ext': 'mp3',
'location': 'Tacoma, WA',
},
'params': {
'skip_download': True, # live stream
},
}
def _real_extract(self, url):
redirect_id = self._match_id(url)
# The server doesn't support HEAD requests
urlh = self._request_webpage(
url, redirect_id, note='Downloading redirect page')
url = urlh.geturl()
self.to_screen('Following redirect: %s' % url)
return self.url_result(url)
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/feed_service/transports/__init__.py
|
5
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import FeedServiceTransport
from .grpc import FeedServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[FeedServiceTransport]]
_transport_registry['grpc'] = FeedServiceGrpcTransport
__all__ = (
'FeedServiceTransport',
'FeedServiceGrpcTransport',
)
|
Mazecreator/tensorflow
|
refs/heads/master
|
tensorflow/contrib/tensor_forest/python/ops/stats_ops.py
|
68
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stats ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import gen_stats_ops
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops.gen_stats_ops import finalize_tree
from tensorflow.contrib.tensor_forest.python.ops.gen_stats_ops import grow_tree_v4
from tensorflow.contrib.tensor_forest.python.ops.gen_stats_ops import process_input_v4
# pylint: enable=unused-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver
_stats_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_stats_ops.so"))
ops.NotDifferentiable("FertileStatsVariable")
ops.NotDifferentiable("FertileStatsSerialize")
ops.NotDifferentiable("FertileStatsDeserialize")
ops.NotDifferentiable("GrowTreeV4")
ops.NotDifferentiable("ProcessInputV4")
ops.NotDifferentiable("FinalizeTree")
class FertileStatsVariableSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for FertileStatsVariable."""
def __init__(self, params, stats_handle, create_op, name):
"""Creates a FertileStatsVariableSavable object.
Args:
params: A TensorForestParams object.
stats_handle: handle to the tree variable.
create_op: the op to initialize the variable.
name: the name to save the tree variable under.
"""
self.params = params
tensor = gen_stats_ops.fertile_stats_serialize(
stats_handle, params=params.serialized_params_proto)
# slice_spec is useful for saving a slice from a variable.
# It's not meaningful the tree variable. So we just pass an empty value.
slice_spec = ""
specs = [saver.BaseSaverBuilder.SaveSpec(tensor, slice_spec, name),]
super(FertileStatsVariableSavable,
self).__init__(stats_handle, specs, name)
self._stats_handle = stats_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree variable.
"""
with ops.control_dependencies([self._create_op]):
return gen_stats_ops.fertile_stats_deserialize(
self._stats_handle, restored_tensors[0],
params=self.params.serialized_params_proto)
def fertile_stats_variable(params, stats_config, name,
container=None):
r"""Creates a stats object and returns a handle to it.
Args:
params: A TensorForestParams object.
stats_config: A `Tensor` of type `string`. Serialized proto of the stats.
name: A name for the variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the stats.
"""
with ops.name_scope(name, "FertileStatsVariable") as name:
resource_handle = gen_stats_ops.fertile_stats_resource_handle_op(
container, shared_name=name, name=name)
create_op = gen_stats_ops.create_fertile_stats_variable(
resource_handle, stats_config,
params=params.serialized_params_proto)
is_initialized_op = gen_stats_ops.fertile_stats_is_initialized_op(
resource_handle)
# Adds the variable to the savable list.
saveable = FertileStatsVariableSavable(params, resource_handle, create_op,
resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
resources.register_resource(resource_handle, create_op, is_initialized_op)
return resource_handle
|
yawnosnorous/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/symtable.py
|
55
|
"""Interface to the compiler's internal symbol tables"""
import _symtable
from _symtable import (USE, DEF_GLOBAL, DEF_LOCAL, DEF_PARAM,
DEF_IMPORT, DEF_BOUND, OPT_IMPORT_STAR, SCOPE_OFF, SCOPE_MASK, FREE,
LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL)
import weakref
__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
def symtable(code, filename, compile_type):
raw = _symtable.symtable(code, filename, compile_type)
for top in raw.values():
if top.name == 'top':
break
return _newSymbolTable(top, filename)
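# Typical use (illustrative):
#   table = symtable("x = 1", "<example>", "exec")
#   table.lookup("x").is_local()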
class SymbolTableFactory:
def __init__(self):
self.__memo = weakref.WeakValueDictionary()
def new(self, table, filename):
if table.type == _symtable.TYPE_FUNCTION:
return Function(table, filename)
if table.type == _symtable.TYPE_CLASS:
return Class(table, filename)
return SymbolTable(table, filename)
def __call__(self, table, filename):
key = table, filename
obj = self.__memo.get(key, None)
if obj is None:
obj = self.__memo[key] = self.new(table, filename)
return obj
_newSymbolTable = SymbolTableFactory()
class SymbolTable(object):
def __init__(self, raw_table, filename):
self._table = raw_table
self._filename = filename
self._symbols = {}
def __repr__(self):
if self.__class__ == SymbolTable:
kind = ""
else:
kind = "%s " % self.__class__.__name__
if self._table.name == "global":
return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
else:
return "<{0}SymbolTable for {1} in {2}>".format(kind,
self._table.name,
self._filename)
def get_type(self):
if self._table.type == _symtable.TYPE_MODULE:
return "module"
if self._table.type == _symtable.TYPE_FUNCTION:
return "function"
if self._table.type == _symtable.TYPE_CLASS:
return "class"
assert self._table.type in (1, 2, 3), \
"unexpected type: {0}".format(self._table.type)
def get_id(self):
return self._table.id
def get_name(self):
return self._table.name
def get_lineno(self):
return self._table.lineno
def is_optimized(self):
return bool(self._table.type == _symtable.TYPE_FUNCTION
and not self._table.optimized)
def is_nested(self):
return bool(self._table.nested)
def has_children(self):
return bool(self._table.children)
def has_exec(self):
"""Return true if the scope uses exec. Deprecated method."""
return False
def has_import_star(self):
"""Return true if the scope uses import *"""
return bool(self._table.optimized & OPT_IMPORT_STAR)
def get_identifiers(self):
return self._table.symbols.keys()
def lookup(self, name):
sym = self._symbols.get(name)
if sym is None:
flags = self._table.symbols[name]
namespaces = self.__check_children(name)
sym = self._symbols[name] = Symbol(name, flags, namespaces)
return sym
def get_symbols(self):
return [self.lookup(ident) for ident in self.get_identifiers()]
def __check_children(self, name):
return [_newSymbolTable(st, self._filename)
for st in self._table.children
if st.name == name]
def get_children(self):
return [_newSymbolTable(st, self._filename)
for st in self._table.children]
class Function(SymbolTable):
# Default values for instance variables
__params = None
__locals = None
__frees = None
__globals = None
def __idents_matching(self, test_func):
return tuple([ident for ident in self.get_identifiers()
if test_func(self._table.symbols[ident])])
def get_parameters(self):
if self.__params is None:
self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
return self.__params
def get_locals(self):
if self.__locals is None:
locs = (LOCAL, CELL)
test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs
self.__locals = self.__idents_matching(test)
return self.__locals
def get_globals(self):
if self.__globals is None:
glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
self.__globals = self.__idents_matching(test)
return self.__globals
def get_frees(self):
if self.__frees is None:
is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
self.__frees = self.__idents_matching(is_free)
return self.__frees
class Class(SymbolTable):
__methods = None
def get_methods(self):
if self.__methods is None:
d = {}
for st in self._table.children:
d[st.name] = 1
self.__methods = tuple(d)
return self.__methods
class Symbol(object):
def __init__(self, name, flags, namespaces=None):
self.__name = name
self.__flags = flags
self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope()
self.__namespaces = namespaces or ()
def __repr__(self):
return "<symbol {0!r}>".format(self.__name)
def get_name(self):
return self.__name
def is_referenced(self):
return bool(self.__flags & _symtable.USE)
def is_parameter(self):
return bool(self.__flags & DEF_PARAM)
def is_global(self):
return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT))
def is_declared_global(self):
return bool(self.__scope == GLOBAL_EXPLICIT)
def is_local(self):
return bool(self.__flags & DEF_BOUND)
def is_free(self):
return bool(self.__scope == FREE)
def is_imported(self):
return bool(self.__flags & DEF_IMPORT)
def is_assigned(self):
return bool(self.__flags & DEF_LOCAL)
def is_namespace(self):
"""Returns true if name binding introduces new namespace.
If the name is used as the target of a function or class
statement, this will be true.
Note that a single name can be bound to multiple objects. If
is_namespace() is true, the name may also be bound to other
objects, like an int or list, that does not introduce a new
namespace.
"""
return bool(self.__namespaces)
def get_namespaces(self):
"""Return a list of namespaces bound to this name"""
return self.__namespaces
def get_namespace(self):
"""Returns the single namespace bound to this name.
Raises ValueError if the name is bound to multiple namespaces.
"""
if len(self.__namespaces) != 1:
raise ValueError("name is bound to multiple namespaces")
return self.__namespaces[0]
if __name__ == "__main__":
import os, sys
src = open(sys.argv[0]).read()
mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
for ident in mod.get_identifiers():
info = mod.lookup(ident)
print(info, info.is_local(), info.is_namespace())
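# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal demonstration on a tiny snippet: the first child of the module
# table is the Function table for f, whose parameter/local queries derive from
# the flag masks imported above. Output ordering of get_locals() is not
# guaranteed.
if __name__ == "__main__":
    _demo_src = "def f(a):\n    b = a + 1\n    return b\n"
    _demo_mod = symtable(_demo_src, "<demo>", "exec")
    _f_scope = _demo_mod.get_children()[0]
    print(_f_scope.get_parameters())  # ('a',)
    print(_f_scope.get_locals())      # typically ('a', 'b')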
|
jsirois/pants
|
refs/heads/master
|
src/python/pants/testutil/rule_runner.py
|
1
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import multiprocessing
import os
from contextlib import contextmanager
from dataclasses import dataclass
from io import StringIO
from pathlib import Path, PurePath
from tempfile import mkdtemp
from types import CoroutineType, GeneratorType
from typing import Any, Callable, Iterable, Iterator, Mapping, Sequence, Tuple, Type, TypeVar, cast
from colors import blue, cyan, green, magenta, red, yellow
from pants.base.build_root import BuildRoot
from pants.base.deprecated import deprecated
from pants.base.specs_parser import SpecsParser
from pants.build_graph.build_configuration import BuildConfiguration
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.engine.addresses import Address
from pants.engine.console import Console
from pants.engine.environment import CompleteEnvironment
from pants.engine.fs import PathGlobs, PathGlobsAndRoot, Snapshot, Workspace
from pants.engine.goal import Goal
from pants.engine.internals.native_engine import PyExecutor
from pants.engine.internals.scheduler import SchedulerSession
from pants.engine.internals.selectors import Get, Params
from pants.engine.internals.session import SessionValues
from pants.engine.process import InteractiveRunner
from pants.engine.rules import QueryRule as QueryRule
from pants.engine.rules import Rule
from pants.engine.target import Target, WrappedTarget
from pants.engine.unions import UnionMembership
from pants.init.engine_initializer import EngineInitializer
from pants.init.logging import initialize_stdio, stdio_destination
from pants.option.global_options import ExecutionOptions, GlobalOptions
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.source import source_root
from pants.testutil.option_util import create_options_bootstrapper
from pants.util.collections import assert_single_element
from pants.util.contextutil import temporary_dir, temporary_file
from pants.util.dirutil import (
recursive_dirname,
safe_file_dump,
safe_mkdir,
safe_mkdtemp,
safe_open,
)
from pants.util.ordered_set import FrozenOrderedSet
# -----------------------------------------------------------------------------------------------
# `RuleRunner`
# -----------------------------------------------------------------------------------------------
_O = TypeVar("_O")
_EXECUTOR = PyExecutor(
core_threads=multiprocessing.cpu_count(), max_threads=multiprocessing.cpu_count() * 4
)
@dataclass(frozen=True)
class GoalRuleResult:
exit_code: int
stdout: str
stderr: str
@staticmethod
def noop() -> GoalRuleResult:
return GoalRuleResult(0, stdout="", stderr="")
# This is not frozen because we need to update the `scheduler` when setting options.
@dataclass
class RuleRunner:
build_root: str
options_bootstrapper: OptionsBootstrapper
build_config: BuildConfiguration
scheduler: SchedulerSession
def __init__(
self,
*,
rules: Iterable | None = None,
target_types: Iterable[type[Target]] | None = None,
objects: dict[str, Any] | None = None,
context_aware_object_factories: dict[str, Any] | None = None,
isolated_local_store: bool = False,
ca_certs_path: str | None = None,
) -> None:
self.build_root = os.path.realpath(mkdtemp(suffix="_BUILD_ROOT"))
safe_mkdir(self.build_root, clean=True)
safe_mkdir(self.pants_workdir)
BuildRoot().path = self.build_root
# TODO: Redesign rule registration for tests to be more ergonomic and to make this less
# special-cased.
all_rules = (
*(rules or ()),
*source_root.rules(),
QueryRule(WrappedTarget, [Address]),
QueryRule(UnionMembership, []),
)
build_config_builder = BuildConfiguration.Builder()
build_config_builder.register_aliases(
BuildFileAliases(
objects=objects, context_aware_object_factories=context_aware_object_factories
)
)
build_config_builder.register_rules(all_rules)
build_config_builder.register_target_types(target_types or ())
self.build_config = build_config_builder.create()
self.environment = CompleteEnvironment({})
self.options_bootstrapper = create_options_bootstrapper()
options = self.options_bootstrapper.full_options(self.build_config)
global_options = self.options_bootstrapper.bootstrap_options.for_global_scope()
local_store_dir = (
os.path.realpath(safe_mkdtemp())
if isolated_local_store
else global_options.local_store_dir
)
local_execution_root_dir = global_options.local_execution_root_dir
named_caches_dir = global_options.named_caches_dir
graph_session = EngineInitializer.setup_graph_extended(
pants_ignore_patterns=GlobalOptions.compute_pants_ignore(
self.build_root, global_options
),
use_gitignore=False,
local_store_dir=local_store_dir,
local_execution_root_dir=local_execution_root_dir,
named_caches_dir=named_caches_dir,
build_root=self.build_root,
build_configuration=self.build_config,
executor=_EXECUTOR,
execution_options=ExecutionOptions.from_options(options, self.environment),
ca_certs_path=ca_certs_path,
native_engine_visualize_to=None,
).new_session(
build_id="buildid_for_test",
session_values=SessionValues(
{
OptionsBootstrapper: self.options_bootstrapper,
CompleteEnvironment: self.environment,
}
),
)
self.scheduler = graph_session.scheduler_session
def __repr__(self) -> str:
return f"RuleRunner(build_root={self.build_root})"
@property
def pants_workdir(self) -> str:
return os.path.join(self.build_root, ".pants.d")
@property
def rules(self) -> FrozenOrderedSet[Rule]:
return self.build_config.rules
@property
def target_types(self) -> FrozenOrderedSet[Type[Target]]:
return self.build_config.target_types
@property
def union_membership(self) -> UnionMembership:
"""An instance of `UnionMembership` with all the test's registered `UnionRule`s."""
return self.request(UnionMembership, [])
def new_session(self, build_id: str) -> None:
"""Mutates this RuleRunner to begin a new Session with the same Scheduler."""
self.scheduler = self.scheduler.scheduler.new_session(build_id)
def request(self, output_type: Type[_O], inputs: Iterable[Any]) -> _O:
result = assert_single_element(
self.scheduler.product_request(output_type, [Params(*inputs)])
)
return cast(_O, result)
def run_goal_rule(
self,
goal: Type[Goal],
*,
global_args: Iterable[str] | None = None,
args: Iterable[str] | None = None,
env: Mapping[str, str] | None = None,
env_inherit: set[str] | None = None,
) -> GoalRuleResult:
merged_args = (*(global_args or []), goal.name, *(args or []))
self.set_options(merged_args, env=env, env_inherit=env_inherit)
raw_specs = self.options_bootstrapper.full_options_for_scopes(
[*GlobalOptions.known_scope_infos(), *goal.subsystem_cls.known_scope_infos()]
).specs
specs = SpecsParser(self.build_root).parse_specs(raw_specs)
stdout, stderr = StringIO(), StringIO()
console = Console(stdout=stdout, stderr=stderr)
exit_code = self.scheduler.run_goal_rule(
goal,
Params(
specs,
console,
Workspace(self.scheduler),
InteractiveRunner(self.scheduler),
),
)
console.flush()
return GoalRuleResult(exit_code, stdout.getvalue(), stderr.getvalue())
def set_options(
self,
args: Iterable[str],
*,
env: Mapping[str, str] | None = None,
env_inherit: set[str] | None = None,
) -> None:
"""Update the engine session with new options and/or environment variables.
The environment variables will be used to set the `CompleteEnvironment`, which is the
environment variables captured by the parent Pants process. Some rules use this to be able
to read arbitrary env vars. Any options that start with `PANTS_` will also be used to set
options.
Environment variables listed in `env_inherit` and not in `env` will be inherited from the test
        runner's environment (os.environ).
This will override any previously configured values.
"""
env = {
**{k: os.environ[k] for k in (env_inherit or set()) if k in os.environ},
**(env or {}),
}
self.options_bootstrapper = create_options_bootstrapper(args=args, env=env)
self.environment = CompleteEnvironment(env)
self.scheduler = self.scheduler.scheduler.new_session(
build_id="buildid_for_test",
session_values=SessionValues(
{
OptionsBootstrapper: self.options_bootstrapper,
CompleteEnvironment: self.environment,
}
),
)
def _invalidate_for(self, *relpaths):
"""Invalidates all files from the relpath, recursively up to the root.
Many python operations implicitly create parent directories, so we assume that touching a
file located below directories that do not currently exist will result in their creation.
"""
files = {f for relpath in relpaths for f in recursive_dirname(relpath)}
return self.scheduler.invalidate_files(files)
def create_dir(self, relpath: str) -> str:
"""Creates a directory under the buildroot.
:API: public
relpath: The relative path to the directory from the build root.
"""
path = os.path.join(self.build_root, relpath)
safe_mkdir(path)
self._invalidate_for(relpath)
return path
def create_file(self, relpath: str, contents: bytes | str = "", mode: str = "w") -> str:
"""Writes to a file under the buildroot.
:API: public
relpath: The relative path to the file from the build root.
        contents: A string containing the contents of the file - '' by default.
mode: The mode to write to the file in - over-write by default.
"""
path = os.path.join(self.build_root, relpath)
with safe_open(path, mode=mode) as fp:
fp.write(contents)
self._invalidate_for(relpath)
return path
def create_files(self, path: str, files: Iterable[str]) -> None:
"""Writes to a file under the buildroot with contents same as file name.
:API: public
        path: The relative path to the directory from the build root.
files: List of file names.
"""
for f in files:
self.create_file(os.path.join(path, f), contents=f)
def add_to_build_file(
self, relpath: str | PurePath, target: str, *, overwrite: bool = False
) -> str:
"""Adds the given target specification to the BUILD file at relpath.
:API: public
relpath: The relative path to the BUILD file from the build root.
target: A string containing the target definition as it would appear in a BUILD file.
overwrite: Whether to overwrite vs. append to the BUILD file.
"""
build_path = (
relpath if PurePath(relpath).name.startswith("BUILD") else PurePath(relpath, "BUILD")
)
mode = "w" if overwrite else "a"
return self.create_file(str(build_path), target, mode=mode)
def make_snapshot(self, files: Mapping[str, str | bytes]) -> Snapshot:
"""Makes a snapshot from a map of file name to file content."""
with temporary_dir() as temp_dir:
for file_name, content in files.items():
mode = "wb" if isinstance(content, bytes) else "w"
safe_file_dump(os.path.join(temp_dir, file_name), content, mode=mode)
return self.scheduler.capture_snapshots(
(PathGlobsAndRoot(PathGlobs(("**",)), temp_dir),)
)[0]
def make_snapshot_of_empty_files(self, files: Iterable[str]) -> Snapshot:
"""Makes a snapshot with empty content for each file.
        This is a convenience around `RuleRunner.make_snapshot`, which allows specifying the content
for each file.
"""
return self.make_snapshot({fp: "" for fp in files})
def get_target(self, address: Address) -> Target:
"""Find the target for a given address.
This requires that the target actually exists, i.e. that you called
`rule_runner.add_to_build_file()`.
"""
return self.request(WrappedTarget, [address]).target
# -----------------------------------------------------------------------------------------------
# `run_rule_with_mocks()`
# -----------------------------------------------------------------------------------------------
# TODO(#6742): Improve the type signature by using generics and type vars. `mock` should be
# `Callable[[InputType], OutputType]`.
@dataclass(frozen=True)
class MockGet:
output_type: Type
input_type: Type
mock: Callable[[Any], Any]
# TODO: Improve the type hints so that the return type can be inferred.
def run_rule_with_mocks(
rule: Callable,
*,
rule_args: Sequence[Any] | None = None,
mock_gets: Sequence[MockGet] | None = None,
union_membership: UnionMembership | None = None,
):
"""A test helper function that runs an @rule with a set of arguments and mocked Get providers.
An @rule named `my_rule` that takes one argument and makes no `Get` requests can be invoked
like so:
```
return_value = run_rule_with_mocks(my_rule, rule_args=[arg1])
```
In the case of an @rule that makes Get requests, things get more interesting: the
`mock_gets` argument must be provided as a sequence of `MockGet`s. Each MockGet takes the Product
and Subject type, along with a one-argument function that takes a subject value and returns a
product value.
So in the case of an @rule named `my_co_rule` that takes one argument and makes Get requests
for a product type `Listing` with subject type `Dir`, the invoke might look like:
```
return_value = run_rule_with_mocks(
my_co_rule,
rule_args=[arg1],
mock_gets=[
MockGet(
output_type=Listing,
input_type=Dir,
mock=lambda dir_subject: Listing(..),
),
],
)
```
If any of the @rule's Get requests involve union members, you should pass a `UnionMembership`
mapping the union base to any union members you'd like to test. For example, if your rule has
`await Get(TestResult, TargetAdaptor, target_adaptor)`, you may pass
`UnionMembership({TargetAdaptor: PythonTestsTargetAdaptor})` to this function.
:returns: The return value of the completed @rule.
"""
task_rule = getattr(rule, "rule", None)
if task_rule is None:
raise TypeError(f"Expected to receive a decorated `@rule`; got: {rule}")
if rule_args is not None and len(rule_args) != len(task_rule.input_selectors):
raise ValueError(
f"Rule expected to receive arguments of the form: {task_rule.input_selectors}; got: {rule_args}"
)
if mock_gets is not None and len(mock_gets) != len(task_rule.input_gets):
raise ValueError(
f"Rule expected to receive Get providers for {task_rule.input_gets}; got: {mock_gets}"
)
res = rule(*(rule_args or ()))
if not isinstance(res, (CoroutineType, GeneratorType)):
return res
def get(product, subject):
provider = next(
(
mock_get.mock
for mock_get in mock_gets
if mock_get.output_type == product
and (
mock_get.input_type == type(subject)
or (
union_membership
and union_membership.is_member(mock_get.input_type, subject)
)
)
),
None,
)
if provider is None:
raise AssertionError(
f"Rule requested: Get{(product, type(subject), subject)}, which cannot be satisfied."
)
return provider(subject)
rule_coroutine = res
rule_input = None
while True:
try:
res = rule_coroutine.send(rule_input)
if isinstance(res, Get):
rule_input = get(res.output_type, res.input)
elif type(res) in (tuple, list):
rule_input = [get(g.output_type, g.input) for g in res]
else:
return res
except StopIteration as e:
if e.args:
return e.value
@contextmanager
def mock_console(
options_bootstrapper: OptionsBootstrapper,
*,
stdin_content: bytes | str | None = None,
) -> Iterator[Tuple[Console, StdioReader]]:
global_bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
@contextmanager
def stdin_context():
if stdin_content is None:
yield open("/dev/null", "r")
else:
with temporary_file(binary_mode=isinstance(stdin_content, bytes)) as stdin_file:
stdin_file.write(stdin_content)
stdin_file.close()
yield open(stdin_file.name, "r")
with initialize_stdio(global_bootstrap_options), stdin_context() as stdin, temporary_file(
binary_mode=False
) as stdout, temporary_file(binary_mode=False) as stderr, stdio_destination(
stdin_fileno=stdin.fileno(),
stdout_fileno=stdout.fileno(),
stderr_fileno=stderr.fileno(),
):
# NB: We yield a Console without overriding the destination argument, because we have
# already done a sys.std* level replacement. The replacement is necessary in order for
# InteractiveProcess to have native file handles to interact with.
yield Console(use_colors=global_bootstrap_options.colors), StdioReader(
_stdout=Path(stdout.name), _stderr=Path(stderr.name)
)
@dataclass
class StdioReader:
_stdout: Path
_stderr: Path
def get_stdout(self) -> str:
"""Return all data that has been flushed to stdout so far."""
return self._stdout.read_text()
def get_stderr(self) -> str:
"""Return all data that has been flushed to stderr so far."""
return self._stderr.read_text()
class MockConsole:
"""An implementation of pants.engine.console.Console which captures output."""
@deprecated("2.5.0.dev0", hint_message="Use the mock_console contextmanager instead.")
def __init__(self, use_colors=True):
self.stdout = StringIO()
self.stderr = StringIO()
self.use_colors = use_colors
def write_stdout(self, payload):
self.stdout.write(payload)
def write_stderr(self, payload):
self.stderr.write(payload)
def print_stdout(self, payload):
print(payload, file=self.stdout)
def print_stderr(self, payload):
print(payload, file=self.stderr)
def _safe_color(self, text: str, color: Callable[[str], str]) -> str:
return color(text) if self.use_colors else text
def blue(self, text: str) -> str:
return self._safe_color(text, blue)
def cyan(self, text: str) -> str:
return self._safe_color(text, cyan)
def green(self, text: str) -> str:
return self._safe_color(text, green)
def magenta(self, text: str) -> str:
return self._safe_color(text, magenta)
def red(self, text: str) -> str:
return self._safe_color(text, red)
def yellow(self, text: str) -> str:
return self._safe_color(text, yellow)
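# --- Hedged usage sketch (not part of the original module) -------------------
# How a test typically drives RuleRunner, assuming the test registers its own
# rules and target types (MyTarget, MyProduct, MyParams and the address below
# are illustrative only):
#
#   rule_runner = RuleRunner(
#       rules=[*my_rules(), QueryRule(MyProduct, [MyParams])],
#       target_types=[MyTarget],
#   )
#   rule_runner.add_to_build_file("src/example", "my_target(name='example')")
#   tgt = rule_runner.get_target(Address("src/example", target_name="example"))
#   product = rule_runner.request(MyProduct, [MyParams(...)])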
|
nian0114/AGNi-pureCM-SC03E
|
refs/heads/pureCM-smdk4412-I930x-CM11
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
40223145c2g18/40223145
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/VFS_import.py
|
738
|
import os
import sys
from browser import doc
#_scripts=doc.createElement('script')
#_scripts.src="/src/py_VFS.js"
#_scripts.type="text/javascript"
#doc.get(tag='head')[0].appendChild(_scripts)
VFS=dict(JSObject(__BRYTHON__.py_VFS))
class VFSModuleFinder:
def __init__(self, path_entry):
print("in VFSModuleFinder")
if path_entry.startswith('/libs') or path_entry.startswith('/Lib'):
self.path_entry=path_entry
else:
raise ImportError()
def __str__(self):
return '<%s for "%s">' % (self.__class__.__name__, self.path_entry)
def find_module(self, fullname, path=None):
path = path or self.path_entry
#print('looking for "%s" in %s ...' % (fullname, path))
for _ext in ['js', 'pyj', 'py']:
_filepath=os.path.join(self.path_entry, '%s.%s' % (fullname, _ext))
if _filepath in VFS:
print("module found at %s:%s" % (_filepath, fullname))
return VFSModuleLoader(_filepath, fullname)
print('module %s not found' % fullname)
raise ImportError()
return None
class VFSModuleLoader:
"""Load source for modules"""
def __init__(self, filepath, name):
self._filepath=filepath
self._name=name
def get_source(self):
if self._filepath in VFS:
return JSObject(readFromVFS(self._filepath))
        raise ImportError('could not find source for %s' % self._name)
def is_package(self):
return '.' in self._name
def load_module(self):
if self._name in sys.modules:
#print('reusing existing module from previous import of "%s"' % fullname)
mod = sys.modules[self._name]
return mod
_src=self.get_source()
if self._filepath.endswith('.js'):
mod=JSObject(import_js_module(_src, self._filepath, self._name))
elif self._filepath.endswith('.py'):
mod=JSObject(import_py_module(_src, self._filepath, self._name))
elif self._filepath.endswith('.pyj'):
mod=JSObject(import_pyj_module(_src, self._filepath, self._name))
else:
raise ImportError('Invalid Module: %s' % self._filepath)
# Set a few properties required by PEP 302
mod.__file__ = self._filepath
mod.__name__ = self._name
mod.__path__ = os.path.abspath(self._filepath)
mod.__loader__ = self
mod.__package__ = '.'.join(self._name.split('.')[:-1])
if self.is_package():
print('adding path for package')
# Set __path__ for packages
# so we can find the sub-modules.
mod.__path__ = [ self.path_entry ]
else:
print('imported as regular module')
print('creating a new module object for "%s"' % self._name)
sys.modules.setdefault(self._name, mod)
JSObject(__BRYTHON__.imported)[self._name]=mod
return mod
JSObject(__BRYTHON__.path_hooks.insert(0, VFSModuleFinder))
|
guoyu07/metagoofil
|
refs/heads/master
|
hachoir_core/field/vector.py
|
95
|
from hachoir_core.field import Field, FieldSet, ParserError
class GenericVector(FieldSet):
def __init__(self, parent, name, nb_items, item_class, item_name="item", description=None):
# Sanity checks
assert issubclass(item_class, Field)
assert isinstance(item_class.static_size, (int, long))
if not(0 < nb_items):
raise ParserError('Unable to create empty vector "%s" in %s' \
% (name, parent.path))
size = nb_items * item_class.static_size
self.__nb_items = nb_items
self._item_class = item_class
self._item_name = item_name
FieldSet.__init__(self, parent, name, description, size=size)
def __len__(self):
return self.__nb_items
def createFields(self):
name = self._item_name + "[]"
parser = self._item_class
for index in xrange(len(self)):
yield parser(self, name)
class UserVector(GenericVector):
"""
To implement:
    - item_name: name of a field without [] (e.g. "color" becomes "color[0]"),
default value is "item"
- item_class: class of an item
"""
item_class = None
item_name = "item"
def __init__(self, parent, name, nb_items, description=None):
GenericVector.__init__(self, parent, name, nb_items, self.item_class, self.item_name, description)
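# --- Hedged illustration (not part of the original module) -------------------
# A minimal concrete UserVector, assuming hachoir_core.field provides UInt16
# (as it does in hachoir 1.x): parses nb_items 16-bit "color" entries.
from hachoir_core.field import UInt16

class ColorVector(UserVector):
    item_class = UInt16
    item_name = "color"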
|
ReganBell/QReview
|
refs/heads/master
|
networkx/exception.py
|
41
|
# -*- coding: utf-8 -*-
"""
**********
Exceptions
**********
Base exceptions and errors for NetworkX.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult(dschult@colgate.edu)\nLoïc Séguin-C. <loicseguin@gmail.com>"""
# Copyright (C) 2004-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Exception handling
# the root of all Exceptions
class NetworkXException(Exception):
"""Base class for exceptions in NetworkX."""
class NetworkXError(NetworkXException):
"""Exception for a serious error in NetworkX"""
class NetworkXPointlessConcept(NetworkXException):
"""Harary, F. and Read, R. "Is the Null Graph a Pointless Concept?"
In Graphs and Combinatorics Conference, George Washington University.
New York: Springer-Verlag, 1973.
"""
class NetworkXAlgorithmError(NetworkXException):
"""Exception for unexpected termination of algorithms."""
class NetworkXUnfeasible(NetworkXAlgorithmError):
"""Exception raised by algorithms trying to solve a problem
instance that has no feasible solution."""
class NetworkXNoPath(NetworkXUnfeasible):
"""Exception for algorithms that should return a path when running
on graphs where such a path does not exist."""
class NetworkXUnbounded(NetworkXAlgorithmError):
"""Exception raised by algorithms trying to solve a maximization
or a minimization problem instance that is unbounded."""
class NetworkXNotImplemented(NetworkXException):
"""Exception raised by algorithms not implemented for a type of graph."""
|
wolfram74/numerical_methods_iserles_notes
|
refs/heads/master
|
venv/lib/python2.7/site-packages/IPython/utils/zmqrelated.py
|
4
|
"""Utilities for checking zmq versions."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from IPython.utils.version import check_version
def check_for_zmq(minimum_version, required_by='Someone'):
try:
import zmq
except ImportError:
raise ImportError("%s requires pyzmq >= %s"%(required_by, minimum_version))
pyzmq_version = zmq.__version__
if not check_version(pyzmq_version, minimum_version):
raise ImportError("%s requires pyzmq >= %s, but you have %s"%(
required_by, minimum_version, pyzmq_version))
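# --- Hedged usage sketch (not part of the original module) -------------------
# Typical call site: fail fast with a clear message when pyzmq is missing or
# too old. The minimum version below is illustrative only.
if __name__ == "__main__":
    try:
        check_for_zmq("13", required_by="this demo")
        print("pyzmq is new enough")
    except ImportError as err:
        print(err)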
|
bobrock/eden
|
refs/heads/master
|
modules/tests/member/__init__.py
|
27
|
from create_member import *
from member_search import *
|
cyberark-bizdev/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ecs_service_facts.py
|
7
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_service_facts
short_description: list or describe services in ecs
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
description:
- Lists or describes services in ecs.
version_added: "2.1"
author:
- "Mark Chance (@java1guy)"
- "Darek Kaczynski (@kaczynskid)"
requirements: [ json, botocore, boto3 ]
options:
details:
description:
- Set this to true if you want detailed information about the services.
required: false
default: 'false'
choices: ['true', 'false']
cluster:
description:
- The cluster ARNS in which to list the services.
required: false
default: 'default'
service:
description:
- The service to get details for (required if details is true)
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Describe a single service in detail
- ecs_service_facts:
cluster: test-cluster
service: console-test-service
details: true
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
'''
RETURN = '''
services:
description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
returned: success
type: complex
contains:
clusterArn:
            description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
returned: always
type: string
desiredCount:
description: The desired number of instantiations of the task definition to keep running on the service.
returned: always
type: int
loadBalancers:
description: A list of load balancer objects
returned: always
type: complex
contains:
loadBalancerName:
                    description: The name of the load balancer.
returned: always
type: string
containerName:
description: The name of the container to associate with the load balancer.
returned: always
type: string
containerPort:
description: The port on the container to associate with the load balancer.
returned: always
type: int
pendingCount:
description: The number of tasks in the cluster that are in the PENDING state.
returned: always
type: int
runningCount:
description: The number of tasks in the cluster that are in the RUNNING state.
returned: always
type: int
serviceArn:
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
returned: always
type: string
serviceName:
description: A user-generated string used to identify the service
returned: always
type: string
status:
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
returned: always
type: string
taskDefinition:
description: The ARN of a task definition to use for tasks in the service.
returned: always
type: string
deployments:
description: list of service deployments
returned: always
type: list of complex
events:
            description: list of service events
returned: always
type: list of complex
''' # NOQA
try:
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
class EcsServiceManager:
"""Handles ECS Services"""
def __init__(self, module):
self.module = module
# self.ecs = boto3.client('ecs')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
# def list_clusters(self):
# return self.client.list_clusters()
# {'failures': [],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
# 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default',
# 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
# {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
# 'clusters': []}
def list_services(self, cluster):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
response = self.ecs.list_services(**fn_args)
relevant_response = dict(services=response['serviceArns'])
return relevant_response
def describe_services(self, cluster, services):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
fn_args['services'] = services.split(",")
response = self.ecs.describe_services(**fn_args)
relevant_response = {'services': []}
for service in response.get('services', []):
relevant_response['services'].append(self.extract_service_from(service))
if 'failures' in response and len(response['failures']) > 0:
relevant_response['services_not_running'] = response['failures']
return relevant_response
def extract_service_from(self, service):
# some fields are datetime which is not JSON serializable
# make them strings
if 'deployments' in service:
for d in service['deployments']:
if 'createdAt' in d:
d['createdAt'] = str(d['createdAt'])
if 'updatedAt' in d:
d['updatedAt'] = str(d['updatedAt'])
if 'events' in service:
for e in service['events']:
if 'createdAt' in e:
e['createdAt'] = str(e['createdAt'])
return service
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
details=dict(required=False, type='bool', default=False),
cluster=dict(required=False, type='str'),
service=dict(required=False, type='str')
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
show_details = module.params.get('details', False)
task_mgr = EcsServiceManager(module)
if show_details:
if 'service' not in module.params or not module.params['service']:
module.fail_json(msg="service must be specified for ecs_service_facts")
ecs_facts = task_mgr.describe_services(module.params['cluster'], module.params['service'])
else:
ecs_facts = task_mgr.list_services(module.params['cluster'])
ecs_facts_result = dict(changed=False, ansible_facts=ecs_facts)
module.exit_json(**ecs_facts_result)
if __name__ == '__main__':
main()
|
Cinntax/home-assistant
|
refs/heads/dev
|
homeassistant/config_entries.py
|
1
|
"""Manage config entries in Home Assistant."""
import asyncio
import logging
import functools
import uuid
from typing import Any, Callable, List, Optional, Set
import weakref
import attr
from homeassistant import data_entry_flow, loader
from homeassistant.core import callback, HomeAssistant
from homeassistant.exceptions import HomeAssistantError, ConfigEntryNotReady
from homeassistant.setup import async_setup_component, async_process_deps_reqs
from homeassistant.util.decorator import Registry
from homeassistant.helpers import entity_registry
# mypy: allow-untyped-defs
_LOGGER = logging.getLogger(__name__)
_UNDEF = object()
SOURCE_USER = "user"
SOURCE_DISCOVERY = "discovery"
SOURCE_IMPORT = "import"
HANDLERS = Registry()
STORAGE_KEY = "core.config_entries"
STORAGE_VERSION = 1
# Deprecated since 0.73
PATH_CONFIG = ".config_entries.json"
SAVE_DELAY = 1
# The config entry has been set up successfully
ENTRY_STATE_LOADED = "loaded"
# There was an error while trying to set up this config entry
ENTRY_STATE_SETUP_ERROR = "setup_error"
# There was an error while trying to migrate the config entry to a new version
ENTRY_STATE_MIGRATION_ERROR = "migration_error"
# The config entry was not ready to be set up yet, but might be later
ENTRY_STATE_SETUP_RETRY = "setup_retry"
# The config entry has not been loaded
ENTRY_STATE_NOT_LOADED = "not_loaded"
# An error occurred when trying to unload the entry
ENTRY_STATE_FAILED_UNLOAD = "failed_unload"
UNRECOVERABLE_STATES = (ENTRY_STATE_MIGRATION_ERROR, ENTRY_STATE_FAILED_UNLOAD)
DISCOVERY_NOTIFICATION_ID = "config_entry_discovery"
DISCOVERY_SOURCES = ("ssdp", "zeroconf", SOURCE_DISCOVERY, SOURCE_IMPORT)
EVENT_FLOW_DISCOVERED = "config_entry_discovered"
CONN_CLASS_CLOUD_PUSH = "cloud_push"
CONN_CLASS_CLOUD_POLL = "cloud_poll"
CONN_CLASS_LOCAL_PUSH = "local_push"
CONN_CLASS_LOCAL_POLL = "local_poll"
CONN_CLASS_ASSUMED = "assumed"
CONN_CLASS_UNKNOWN = "unknown"
class ConfigError(HomeAssistantError):
"""Error while configuring an account."""
class UnknownEntry(ConfigError):
"""Unknown entry specified."""
class OperationNotAllowed(ConfigError):
"""Raised when a config entry operation is not allowed."""
class ConfigEntry:
"""Hold a configuration entry."""
__slots__ = (
"entry_id",
"version",
"domain",
"title",
"data",
"options",
"system_options",
"source",
"connection_class",
"state",
"_setup_lock",
"update_listeners",
"_async_cancel_retry_setup",
)
def __init__(
self,
version: int,
domain: str,
title: str,
data: dict,
source: str,
connection_class: str,
system_options: dict,
options: Optional[dict] = None,
entry_id: Optional[str] = None,
state: str = ENTRY_STATE_NOT_LOADED,
) -> None:
"""Initialize a config entry."""
# Unique id of the config entry
self.entry_id = entry_id or uuid.uuid4().hex
# Version of the configuration.
self.version = version
# Domain the configuration belongs to
self.domain = domain
# Title of the configuration
self.title = title
# Config data
self.data = data
# Entry options
self.options = options or {}
# Entry system options
self.system_options = SystemOptions(**system_options)
# Source of the configuration (user, discovery, cloud)
self.source = source
# Connection class
self.connection_class = connection_class
# State of the entry (LOADED, NOT_LOADED)
self.state = state
# Listeners to call on update
self.update_listeners: List = []
# Function to cancel a scheduled retry
self._async_cancel_retry_setup: Optional[Callable[[], Any]] = None
async def async_setup(
self,
hass: HomeAssistant,
*,
integration: Optional[loader.Integration] = None,
tries: int = 0,
) -> None:
"""Set up an entry."""
if integration is None:
integration = await loader.async_get_integration(hass, self.domain)
try:
component = integration.get_component()
except ImportError as err:
_LOGGER.error(
"Error importing integration %s to set up %s config entry: %s",
integration.domain,
self.domain,
err,
)
if self.domain == integration.domain:
self.state = ENTRY_STATE_SETUP_ERROR
return
if self.domain == integration.domain:
try:
integration.get_platform("config_flow")
except ImportError as err:
_LOGGER.error(
"Error importing platform config_flow from integration %s to set up %s config entry: %s",
integration.domain,
self.domain,
err,
)
self.state = ENTRY_STATE_SETUP_ERROR
return
# Perform migration
if not await self.async_migrate(hass):
self.state = ENTRY_STATE_MIGRATION_ERROR
return
try:
result = await component.async_setup_entry( # type: ignore
hass, self
)
if not isinstance(result, bool):
_LOGGER.error(
"%s.async_setup_entry did not return boolean", integration.domain
)
result = False
except ConfigEntryNotReady:
self.state = ENTRY_STATE_SETUP_RETRY
wait_time = 2 ** min(tries, 4) * 5
tries += 1
_LOGGER.warning(
"Config entry for %s not ready yet. Retrying in %d seconds.",
self.domain,
wait_time,
)
async def setup_again(now):
"""Run setup again."""
self._async_cancel_retry_setup = None
await self.async_setup(hass, integration=integration, tries=tries)
self._async_cancel_retry_setup = hass.helpers.event.async_call_later(
wait_time, setup_again
)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error setting up entry %s for %s", self.title, integration.domain
)
result = False
# Only store setup result as state if it was not forwarded.
if self.domain != integration.domain:
return
if result:
self.state = ENTRY_STATE_LOADED
else:
self.state = ENTRY_STATE_SETUP_ERROR
async def async_unload(
self, hass: HomeAssistant, *, integration: Optional[loader.Integration] = None
) -> bool:
"""Unload an entry.
Returns if unload is possible and was successful.
"""
if integration is None:
integration = await loader.async_get_integration(hass, self.domain)
component = integration.get_component()
if integration.domain == self.domain:
if self.state in UNRECOVERABLE_STATES:
return False
if self.state != ENTRY_STATE_LOADED:
if self._async_cancel_retry_setup is not None:
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
self.state = ENTRY_STATE_NOT_LOADED
return True
supports_unload = hasattr(component, "async_unload_entry")
if not supports_unload:
if integration.domain == self.domain:
self.state = ENTRY_STATE_FAILED_UNLOAD
return False
try:
result = await component.async_unload_entry( # type: ignore
hass, self
)
assert isinstance(result, bool)
# Only adjust state if we unloaded the component
if result and integration.domain == self.domain:
self.state = ENTRY_STATE_NOT_LOADED
return result
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error unloading entry %s for %s", self.title, integration.domain
)
if integration.domain == self.domain:
self.state = ENTRY_STATE_FAILED_UNLOAD
return False
async def async_remove(self, hass: HomeAssistant) -> None:
"""Invoke remove callback on component."""
integration = await loader.async_get_integration(hass, self.domain)
component = integration.get_component()
if not hasattr(component, "async_remove_entry"):
return
try:
await component.async_remove_entry( # type: ignore
hass, self
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error calling entry remove callback %s for %s",
self.title,
integration.domain,
)
async def async_migrate(self, hass: HomeAssistant) -> bool:
"""Migrate an entry.
Returns True if config entry is up-to-date or has been migrated.
"""
handler = HANDLERS.get(self.domain)
if handler is None:
_LOGGER.error(
"Flow handler not found for entry %s for %s", self.title, self.domain
)
return False
# Handler may be a partial
while isinstance(handler, functools.partial):
handler = handler.func
if self.version == handler.VERSION:
return True
integration = await loader.async_get_integration(hass, self.domain)
component = integration.get_component()
supports_migrate = hasattr(component, "async_migrate_entry")
if not supports_migrate:
_LOGGER.error(
"Migration handler not found for entry %s for %s",
self.title,
self.domain,
)
return False
try:
result = await component.async_migrate_entry( # type: ignore
hass, self
)
if not isinstance(result, bool):
_LOGGER.error(
"%s.async_migrate_entry did not return boolean", self.domain
)
return False
if result:
# pylint: disable=protected-access
hass.config_entries._async_schedule_save() # type: ignore
return result
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error migrating entry %s for %s", self.title, self.domain
)
return False
def add_update_listener(self, listener: Callable) -> Callable:
"""Listen for when entry is updated.
Listener: Callback function(hass, entry)
Returns function to unlisten.
"""
weak_listener = weakref.ref(listener)
self.update_listeners.append(weak_listener)
return lambda: self.update_listeners.remove(weak_listener)
def as_dict(self):
"""Return dictionary version of this entry."""
return {
"entry_id": self.entry_id,
"version": self.version,
"domain": self.domain,
"title": self.title,
"data": self.data,
"options": self.options,
"system_options": self.system_options.as_dict(),
"source": self.source,
"connection_class": self.connection_class,
}
class ConfigEntries:
"""Manage the configuration entries.
An instance of this object is available via `hass.config_entries`.
"""
def __init__(self, hass: HomeAssistant, hass_config: dict) -> None:
"""Initialize the entry manager."""
self.hass = hass
self.flow = data_entry_flow.FlowManager(
hass, self._async_create_flow, self._async_finish_flow
)
self.options = OptionsFlowManager(hass)
self._hass_config = hass_config
self._entries: List[ConfigEntry] = []
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
EntityRegistryDisabledHandler(hass).async_setup()
@callback
def async_domains(self) -> List[str]:
"""Return domains for which we have entries."""
seen: Set[str] = set()
result = []
for entry in self._entries:
if entry.domain not in seen:
seen.add(entry.domain)
result.append(entry.domain)
return result
@callback
def async_get_entry(self, entry_id: str) -> Optional[ConfigEntry]:
"""Return entry with matching entry_id."""
for entry in self._entries:
if entry_id == entry.entry_id:
return entry
return None
@callback
def async_entries(self, domain: Optional[str] = None) -> List[ConfigEntry]:
"""Return all entries or entries for a specific domain."""
if domain is None:
return list(self._entries)
return [entry for entry in self._entries if entry.domain == domain]
async def async_remove(self, entry_id):
"""Remove an entry."""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state in UNRECOVERABLE_STATES:
unload_success = entry.state != ENTRY_STATE_FAILED_UNLOAD
else:
unload_success = await self.async_unload(entry_id)
await entry.async_remove(self.hass)
self._entries.remove(entry)
self._async_schedule_save()
dev_reg, ent_reg = await asyncio.gather(
self.hass.helpers.device_registry.async_get_registry(),
self.hass.helpers.entity_registry.async_get_registry(),
)
dev_reg.async_clear_config_entry(entry_id)
ent_reg.async_clear_config_entry(entry_id)
return {"require_restart": not unload_success}
async def async_initialize(self) -> None:
"""Initialize config entry config."""
# Migrating for config entries stored before 0.73
config = await self.hass.helpers.storage.async_migrator(
self.hass.config.path(PATH_CONFIG),
self._store,
old_conf_migrate_func=_old_conf_migrator,
)
if config is None:
self._entries = []
return
self._entries = [
ConfigEntry(
version=entry["version"],
domain=entry["domain"],
entry_id=entry["entry_id"],
data=entry["data"],
source=entry["source"],
title=entry["title"],
# New in 0.79
connection_class=entry.get("connection_class", CONN_CLASS_UNKNOWN),
# New in 0.89
options=entry.get("options"),
# New in 0.98
system_options=entry.get("system_options", {}),
)
for entry in config["entries"]
]
async def async_setup(self, entry_id: str) -> bool:
"""Set up a config entry.
Return True if entry has been successfully loaded.
"""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state != ENTRY_STATE_NOT_LOADED:
raise OperationNotAllowed
# Setup Component if not set up yet
if entry.domain in self.hass.config.components:
await entry.async_setup(self.hass)
else:
# Setting up the component will set up all its config entries
result = await async_setup_component(
self.hass, entry.domain, self._hass_config
)
if not result:
return result
return entry.state == ENTRY_STATE_LOADED
async def async_unload(self, entry_id: str) -> bool:
"""Unload a config entry."""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state in UNRECOVERABLE_STATES:
raise OperationNotAllowed
return await entry.async_unload(self.hass)
async def async_reload(self, entry_id: str) -> bool:
"""Reload an entry.
If an entry was not loaded, will just load.
"""
unload_result = await self.async_unload(entry_id)
if not unload_result:
return unload_result
return await self.async_setup(entry_id)
@callback
def async_update_entry(
self, entry, *, data=_UNDEF, options=_UNDEF, system_options=_UNDEF
):
"""Update a config entry."""
if data is not _UNDEF:
entry.data = data
if options is not _UNDEF:
entry.options = options
if system_options is not _UNDEF:
entry.system_options.update(**system_options)
for listener_ref in entry.update_listeners:
listener = listener_ref()
self.hass.async_create_task(listener(self.hass, entry))
self._async_schedule_save()
async def async_forward_entry_setup(self, entry, domain):
"""Forward the setup of an entry to a different component.
        By default an entry is set up with the component it belongs to. If that
        component also has related platforms, the component will have to
        forward the entry to be set up by that component.
You don't want to await this coroutine if it is called as part of the
setup of a component, because it can cause a deadlock.
"""
# Setup Component if not set up yet
if domain not in self.hass.config.components:
result = await async_setup_component(self.hass, domain, self._hass_config)
if not result:
return False
integration = await loader.async_get_integration(self.hass, domain)
await entry.async_setup(self.hass, integration=integration)
async def async_forward_entry_unload(self, entry, domain):
"""Forward the unloading of an entry to a different component."""
# It was never loaded.
if domain not in self.hass.config.components:
return True
integration = await loader.async_get_integration(self.hass, domain)
return await entry.async_unload(self.hass, integration=integration)
async def _async_finish_flow(self, flow, result):
"""Finish a config flow and add an entry."""
# Remove notification if no other discovery config entries in progress
if not any(
ent["context"]["source"] in DISCOVERY_SOURCES
for ent in self.hass.config_entries.flow.async_progress()
if ent["flow_id"] != flow.flow_id
):
self.hass.components.persistent_notification.async_dismiss(
DISCOVERY_NOTIFICATION_ID
)
if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
return result
entry = ConfigEntry(
version=result["version"],
domain=result["handler"],
title=result["title"],
data=result["data"],
options={},
system_options={},
source=flow.context["source"],
connection_class=flow.CONNECTION_CLASS,
)
self._entries.append(entry)
self._async_schedule_save()
await self.async_setup(entry.entry_id)
result["result"] = entry
return result
async def _async_create_flow(self, handler_key, *, context, data):
"""Create a flow for specified handler.
Handler key is the domain of the component that we want to set up.
"""
try:
integration = await loader.async_get_integration(self.hass, handler_key)
except loader.IntegrationNotFound:
_LOGGER.error("Cannot find integration %s", handler_key)
raise data_entry_flow.UnknownHandler
# Make sure requirements and dependencies of component are resolved
await async_process_deps_reqs(self.hass, self._hass_config, integration)
try:
integration.get_platform("config_flow")
except ImportError as err:
_LOGGER.error(
"Error occurred loading config flow for integration %s: %s",
handler_key,
err,
)
raise data_entry_flow.UnknownHandler
handler = HANDLERS.get(handler_key)
if handler is None:
raise data_entry_flow.UnknownHandler
source = context["source"]
# Create notification.
if source in DISCOVERY_SOURCES:
self.hass.bus.async_fire(EVENT_FLOW_DISCOVERED)
self.hass.components.persistent_notification.async_create(
title="New devices discovered",
message=(
"We have discovered new devices on your network. "
"[Check it out](/config/integrations)"
),
notification_id=DISCOVERY_NOTIFICATION_ID,
)
flow = handler()
flow.init_step = source
return flow
def _async_schedule_save(self) -> None:
"""Save the entity registry to a file."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self):
"""Return data to save."""
return {"entries": [entry.as_dict() for entry in self._entries]}
async def _old_conf_migrator(old_config):
"""Migrate the pre-0.73 config format to the latest version."""
return {"entries": old_config}
class ConfigFlow(data_entry_flow.FlowHandler):
"""Base class for config flows with some helpers."""
def __init_subclass__(cls, domain=None, **kwargs):
"""Initialize a subclass, register if possible."""
super().__init_subclass__(**kwargs) # type: ignore
if domain is not None:
HANDLERS.register(domain)(cls)
CONNECTION_CLASS = CONN_CLASS_UNKNOWN
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
raise data_entry_flow.UnknownHandler
@callback
def _async_current_entries(self):
"""Return current entries."""
return self.hass.config_entries.async_entries(self.handler)
@callback
def _async_in_progress(self):
"""Return other in progress flows for current domain."""
return [
flw
for flw in self.hass.config_entries.flow.async_progress()
if flw["handler"] == self.handler and flw["flow_id"] != self.flow_id
]
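# --- Hedged sketch (not part of the original module) -------------------------
# Because __init_subclass__ above registers the class in HANDLERS, a concrete
# flow only needs to declare its domain; everything below is illustrative:
#
#   class ExampleConfigFlow(ConfigFlow, domain="example"):
#       CONNECTION_CLASS = CONN_CLASS_LOCAL_POLL
#
#       async def async_step_user(self, user_input=None):
#           if user_input is not None:
#               return self.async_create_entry(title="Example", data=user_input)
#           return self.async_show_form(step_id="user")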
class OptionsFlowManager:
"""Flow to set options for a configuration entry."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the options manager."""
self.hass = hass
self.flow = data_entry_flow.FlowManager(
hass, self._async_create_flow, self._async_finish_flow
)
async def _async_create_flow(self, entry_id, *, context, data):
"""Create an options flow for a config entry.
        The entry_id is used as flow.handler, which maps the flow back to its entry.
"""
entry = self.hass.config_entries.async_get_entry(entry_id)
if entry is None:
return
if entry.domain not in HANDLERS:
raise data_entry_flow.UnknownHandler
flow = HANDLERS[entry.domain].async_get_options_flow(entry)
return flow
async def _async_finish_flow(self, flow, result):
"""Finish an options flow and update options for configuration entry.
        flow.handler holds the entry_id, which maps the flow back to its entry.
"""
entry = self.hass.config_entries.async_get_entry(flow.handler)
if entry is None:
return
self.hass.config_entries.async_update_entry(entry, options=result["data"])
result["result"] = True
return result
class OptionsFlow(data_entry_flow.FlowHandler):
"""Base class for config option flows."""
pass
@attr.s(slots=True)
class SystemOptions:
"""Config entry system options."""
disable_new_entities = attr.ib(type=bool, default=False)
def update(self, *, disable_new_entities):
"""Update properties."""
self.disable_new_entities = disable_new_entities
def as_dict(self):
"""Return dictionary version of this config entrys system options."""
return {"disable_new_entities": self.disable_new_entities}
class EntityRegistryDisabledHandler:
"""Handler to handle when entities related to config entries updating disabled_by."""
RELOAD_AFTER_UPDATE_DELAY = 30
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the handler."""
self.hass = hass
self.registry: Optional[entity_registry.EntityRegistry] = None
self.changed: Set[str] = set()
self._remove_call_later: Optional[Callable[[], None]] = None
@callback
def async_setup(self) -> None:
"""Set up the disable handler."""
self.hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, self._handle_entry_updated
)
async def _handle_entry_updated(self, event):
"""Handle entity registry entry update."""
if (
event.data["action"] != "update"
or "disabled_by" not in event.data["changes"]
):
return
if self.registry is None:
self.registry = await entity_registry.async_get_registry(self.hass)
entity_entry = self.registry.async_get(event.data["entity_id"])
if (
# Stop if no entry found
entity_entry is None
# Stop if entry not connected to config entry
or entity_entry.config_entry_id is None
# Stop if the entry got disabled. In that case the entity handles it
# themselves.
or entity_entry.disabled_by
):
return
config_entry = self.hass.config_entries.async_get_entry(
entity_entry.config_entry_id
)
if config_entry.entry_id not in self.changed and await support_entry_unload(
self.hass, config_entry.domain
):
self.changed.add(config_entry.entry_id)
if not self.changed:
return
# We are going to delay reloading on *every* entity registry change so that
# if a user is happily clicking along, it will only reload at the end.
if self._remove_call_later:
self._remove_call_later()
self._remove_call_later = self.hass.helpers.event.async_call_later(
self.RELOAD_AFTER_UPDATE_DELAY, self._handle_reload
)
async def _handle_reload(self, _now):
"""Handle a reload."""
self._remove_call_later = None
to_reload = self.changed
self.changed = set()
_LOGGER.info(
"Reloading config entries because disabled_by changed in entity registry: %s",
", ".join(self.changed),
)
await asyncio.gather(
*[self.hass.config_entries.async_reload(entry_id) for entry_id in to_reload]
)
async def support_entry_unload(hass: HomeAssistant, domain: str) -> bool:
"""Test if a domain supports entry unloading."""
integration = await loader.async_get_integration(hass, domain)
component = integration.get_component()
return hasattr(component, "async_unload_entry")
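# --- Hedged usage sketch (not part of the original module) -------------------
# What an integration typically does with async_forward_entry_setup /
# async_forward_entry_unload; per the docstring above, the forward must not be
# awaited from within the component's own setup, hence async_create_task.
# The "sensor" platform below is illustrative only.
#
#   async def async_setup_entry(hass, entry):
#       hass.async_create_task(
#           hass.config_entries.async_forward_entry_setup(entry, "sensor")
#       )
#       return True
#
#   async def async_unload_entry(hass, entry):
#       return await hass.config_entries.async_forward_entry_unload(entry, "sensor")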
|
tillahoffmann/tensorflow
|
refs/heads/master
|
tensorflow/contrib/gan/python/features/python/clip_weights.py
|
74
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to clip weights."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import clip_weights_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.features.python.clip_weights_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = clip_weights_impl.__all__
remove_undocumented(__name__, __all__)
|
tqtran7/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/firewalls/tabs.py
|
48
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls import tables
FirewallsTable = tables.FirewallsTable
PoliciesTable = tables.PoliciesTable
RulesTable = tables.RulesTable
class RulesTab(tabs.TableTab):
table_classes = (RulesTable,)
name = _("Firewall Rules")
slug = "rules"
template_name = "horizon/common/_detail_table.html"
def get_rulestable_data(self):
try:
tenant_id = self.request.user.tenant_id
request = self.tab_group.request
rules = api.fwaas.rule_list_for_tenant(request, tenant_id)
except Exception:
rules = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve rules list.'))
return rules
class PoliciesTab(tabs.TableTab):
table_classes = (PoliciesTable,)
name = _("Firewall Policies")
slug = "policies"
template_name = "horizon/common/_detail_table.html"
def get_policiestable_data(self):
try:
tenant_id = self.request.user.tenant_id
request = self.tab_group.request
policies = api.fwaas.policy_list_for_tenant(request, tenant_id)
except Exception:
policies = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve policies list.'))
return policies
class FirewallsTab(tabs.TableTab):
table_classes = (FirewallsTable,)
name = _("Firewalls")
slug = "firewalls"
template_name = "horizon/common/_detail_table.html"
def get_firewallstable_data(self):
try:
tenant_id = self.request.user.tenant_id
request = self.tab_group.request
firewalls = api.fwaas.firewall_list_for_tenant(request, tenant_id)
if api.neutron.is_extension_supported(request,
'fwaasrouterinsertion'):
routers = api.neutron.router_list(request, tenant_id=tenant_id)
for fw in firewalls:
router_list = [r for r in routers
if r['id'] in fw['router_ids']]
fw.get_dict()['routers'] = router_list
except Exception:
firewalls = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve firewall list.'))
return firewalls
class RuleDetailsTab(tabs.Tab):
name = _("Firewall Rule Details")
slug = "ruledetails"
template_name = "project/firewalls/_rule_details.html"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, request):
rid = self.tab_group.kwargs['rule_id']
try:
rule = api.fwaas.rule_get(request, rid)
except Exception:
exceptions.handle(request,
_('Unable to retrieve rule details.'),
redirect=self.failure_url)
return {'rule': rule}
class PolicyDetailsTab(tabs.Tab):
name = _("Firewall Policy Details")
slug = "policydetails"
template_name = "project/firewalls/_policy_details.html"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, request):
pid = self.tab_group.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(request, pid)
except Exception:
exceptions.handle(request,
_('Unable to retrieve policy details.'),
redirect=self.failure_url)
return {'policy': policy}
class FirewallDetailsTab(tabs.Tab):
name = _("Firewall Details")
slug = "firewalldetails"
template_name = "project/firewalls/_firewall_details.html"
failure_url = reverse_lazy('horizon:project:firewalls:index')
def get_context_data(self, request):
fid = self.tab_group.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(request, fid)
body = {'firewall': firewall}
if api.neutron.is_extension_supported(request,
'fwaasrouterinsertion'):
tenant_id = self.request.user.tenant_id
tenant_routers = api.neutron.router_list(request,
tenant_id=tenant_id)
router_ids = firewall.get_dict()['router_ids']
routers = [r for r in tenant_routers
if r['id'] in router_ids]
body['routers'] = routers
except Exception:
exceptions.handle(request,
_('Unable to retrieve firewall details.'),
redirect=self.failure_url)
return body
class FirewallTabs(tabs.TabGroup):
slug = "fwtabs"
tabs = (FirewallsTab, PoliciesTab, RulesTab)
sticky = True
class RuleDetailsTabs(tabs.TabGroup):
slug = "ruletabs"
tabs = (RuleDetailsTab,)
class PolicyDetailsTabs(tabs.TabGroup):
slug = "policytabs"
tabs = (PolicyDetailsTab,)
class FirewallDetailsTabs(tabs.TabGroup):
slug = "firewalltabs"
tabs = (FirewallDetailsTab,)
|
sbidoul/pip
|
refs/heads/main
|
src/pip/_vendor/pep517/colorlog.py
|
56
|
"""Nicer log formatting with colours.
Code copied from Tornado, Apache licensed.
"""
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
try:
import curses
except ImportError:
curses = None
def _stderr_supports_color():
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
class LogFormatter(logging.Formatter):
"""Log formatter with colour support
"""
DEFAULT_COLORS = {
logging.INFO: 2, # Green
logging.WARNING: 3, # Yellow
logging.ERROR: 1, # Red
logging.CRITICAL: 1,
}
def __init__(self, color=True, datefmt=None):
r"""
:arg bool color: Enables color support.
:arg string fmt: Log message format.
It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color
code
:arg string datefmt: Datetime format.
            Used for formatting ``%(asctime)s`` placeholder in ``prefix_fmt``.
.. versionchanged:: 3.2
Added ``fmt`` and ``datefmt`` arguments.
"""
logging.Formatter.__init__(self, datefmt=datefmt)
self._colors = {}
if color and _stderr_supports_color():
# The curses module has some str/bytes confusion in
# python3. Until version 3.2.3, most methods return
# bytes, but only accept strings. In addition, we want to
# output these strings with the logging module, which
# works with unicode strings. The explicit calls to
# unicode() below are harmless in python2 but will do the
# right conversion in python 3.
fg_color = (curses.tigetstr("setaf") or
curses.tigetstr("setf") or "")
if (3, 0) < sys.version_info < (3, 2, 3):
fg_color = str(fg_color, "ascii")
for levelno, code in self.DEFAULT_COLORS.items():
self._colors[levelno] = str(
curses.tparm(fg_color, code), "ascii")
self._normal = str(curses.tigetstr("sgr0"), "ascii")
scr = curses.initscr()
self.termwidth = scr.getmaxyx()[1]
curses.endwin()
else:
self._normal = ''
# Default width is usually 80, but too wide is
# worse than too narrow
self.termwidth = 70
def formatMessage(self, record):
mlen = len(record.message)
right_text = '{initial}-{name}'.format(initial=record.levelname[0],
name=record.name)
if mlen + len(right_text) < self.termwidth:
space = ' ' * (self.termwidth - (mlen + len(right_text)))
else:
space = ' '
if record.levelno in self._colors:
start_color = self._colors[record.levelno]
end_color = self._normal
else:
start_color = end_color = ''
return record.message + space + start_color + right_text + end_color
def enable_colourful_output(level=logging.INFO):
handler = logging.StreamHandler()
handler.setFormatter(LogFormatter())
logging.root.addHandler(handler)
logging.root.setLevel(level)
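# Illustrative usage sketch (assumption, not part of the vendored module; the
# logger name below is arbitrary): attach the colour-aware formatter to the root
# logger and emit a record. Colours are only used when stderr is a
# colour-capable TTY; otherwise the right-aligned "<level initial>-<name>"
# suffix is still plain text.
if __name__ == '__main__':
    enable_colourful_output(logging.DEBUG)
    logging.getLogger('pep517.example').warning('colourful logging enabled')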
|
superberny70/pelisalacarta
|
refs/heads/develop
|
python/main-classic/lib/sambatools/pyasn1/codec/ber/decoder.py
|
10
|
# BER decoder
from pyasn1 import debug, error
from pyasn1.codec.ber import eoo
from pyasn1.compat.octets import oct2int, isOctetsType
from pyasn1.type import tag, univ, char, useful, tagmap
class AbstractDecoder:
protoComponent = None
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
raise error.PyAsn1Error('Decoder not implemented for %s' % (tagSet,))
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,))
class AbstractSimpleDecoder(AbstractDecoder):
tagFormats = (tag.tagFormatSimple,)
def _createComponent(self, asn1Spec, tagSet, value=None):
if tagSet[0][1] not in self.tagFormats:
raise error.PyAsn1Error('Invalid tag format %s for %s' % (tagSet[0], self.protoComponent.prettyPrintType()))
if asn1Spec is None:
return self.protoComponent.clone(value, tagSet)
elif value is None:
return asn1Spec
else:
return asn1Spec.clone(value)
class AbstractConstructedDecoder(AbstractDecoder):
tagFormats = (tag.tagFormatConstructed,)
def _createComponent(self, asn1Spec, tagSet, value=None):
if tagSet[0][1] not in self.tagFormats:
raise error.PyAsn1Error('Invalid tag format %s for %s' % (tagSet[0], self.protoComponent.prettyPrintType()))
if asn1Spec is None:
return self.protoComponent.clone(tagSet)
else:
return asn1Spec.clone()
class ExplicitTagDecoder(AbstractSimpleDecoder):
protoComponent = univ.Any('')
tagFormats = (tag.tagFormatConstructed,)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
if substrateFun:
return substrateFun(
self._createComponent(asn1Spec, tagSet, ''),
substrate, length
)
head, tail = substrate[:length], substrate[length:]
value, _ = decodeFun(head, asn1Spec, tagSet, length)
return value, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
if substrateFun:
return substrateFun(
self._createComponent(asn1Spec, tagSet, ''),
substrate, length
)
value, substrate = decodeFun(substrate, asn1Spec, tagSet, length)
terminator, substrate = decodeFun(substrate, allowEoo=True)
if eoo.endOfOctets.isSameTypeWith(terminator) and \
terminator == eoo.endOfOctets:
return value, substrate
else:
raise error.PyAsn1Error('Missing end-of-octets terminator')
explicitTagDecoder = ExplicitTagDecoder()
class IntegerDecoder(AbstractSimpleDecoder):
protoComponent = univ.Integer(0)
precomputedValues = {
'\x00': 0,
'\x01': 1,
'\x02': 2,
'\x03': 3,
'\x04': 4,
'\x05': 5,
'\x06': 6,
'\x07': 7,
'\x08': 8,
'\x09': 9,
'\xff': -1,
'\xfe': -2,
'\xfd': -3,
'\xfc': -4,
'\xfb': -5
}
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if not head:
return self._createComponent(asn1Spec, tagSet, 0), tail
if head in self.precomputedValues:
value = self.precomputedValues[head]
else:
firstOctet = oct2int(head[0])
if firstOctet & 0x80:
value = -1
else:
value = 0
for octet in head:
value = value << 8 | oct2int(octet)
return self._createComponent(asn1Spec, tagSet, value), tail
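        # Worked example for the accumulation loop above (illustrative): the
        # two-octet substrate '\xfe\x00' has its sign bit set, so value starts
        # at -1 and accumulates as ((-1 << 8) | 0xfe) -> -2, then
        # ((-2 << 8) | 0x00) -> -512, i.e. 0xFE00 read as a 16-bit two's
        # complement integer.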
class BooleanDecoder(IntegerDecoder):
protoComponent = univ.Boolean(0)
def _createComponent(self, asn1Spec, tagSet, value=None):
return IntegerDecoder._createComponent(self, asn1Spec, tagSet, value and 1 or 0)
class BitStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.BitString(())
tagFormats = (tag.tagFormatSimple, tag.tagFormatConstructed)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if tagSet[0][1] == tag.tagFormatSimple: # XXX what tag to check?
if not head:
raise error.PyAsn1Error('Empty substrate')
trailingBits = oct2int(head[0])
if trailingBits > 7:
raise error.PyAsn1Error(
'Trailing bits overflow %s' % trailingBits
)
head = head[1:]
lsb = p = 0; l = len(head)-1; b = []
while p <= l:
if p == l:
lsb = trailingBits
j = 7
o = oct2int(head[p])
while j >= lsb:
b.append((o>>j)&0x01)
j = j - 1
p = p + 1
return self._createComponent(asn1Spec, tagSet, b), tail
r = self._createComponent(asn1Spec, tagSet, ())
if substrateFun:
return substrateFun(r, substrate, length)
while head:
component, head = decodeFun(head, self.protoComponent)
r = r + component
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet, '')
if substrateFun:
return substrateFun(r, substrate, length)
while substrate:
component, substrate = decodeFun(substrate, self.protoComponent,
allowEoo=True)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
r = r + component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return r, substrate
class OctetStringDecoder(AbstractSimpleDecoder):
protoComponent = univ.OctetString('')
tagFormats = (tag.tagFormatSimple, tag.tagFormatConstructed)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if tagSet[0][1] == tag.tagFormatSimple: # XXX what tag to check?
return self._createComponent(asn1Spec, tagSet, head), tail
r = self._createComponent(asn1Spec, tagSet, '')
if substrateFun:
return substrateFun(r, substrate, length)
while head:
component, head = decodeFun(head, self.protoComponent)
r = r + component
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet, '')
if substrateFun:
return substrateFun(r, substrate, length)
while substrate:
component, substrate = decodeFun(substrate, self.protoComponent,
allowEoo=True)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
r = r + component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return r, substrate
class NullDecoder(AbstractSimpleDecoder):
protoComponent = univ.Null('')
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
r = self._createComponent(asn1Spec, tagSet)
if head:
raise error.PyAsn1Error('Unexpected %d-octet substrate for Null' % length)
return r, tail
class ObjectIdentifierDecoder(AbstractSimpleDecoder):
protoComponent = univ.ObjectIdentifier(())
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if not head:
raise error.PyAsn1Error('Empty substrate')
oid = ()
index = 0
substrateLen = len(head)
while index < substrateLen:
subId = oct2int(head[index])
index += 1
if subId < 128:
oid = oid + (subId,)
elif subId > 128:
# Construct subid from a number of octets
nextSubId = subId
subId = 0
while nextSubId >= 128:
subId = (subId << 7) + (nextSubId & 0x7F)
if index >= substrateLen:
raise error.SubstrateUnderrunError(
'Short substrate for sub-OID past %s' % (oid,)
)
nextSubId = oct2int(head[index])
index += 1
oid = oid + ((subId << 7) + nextSubId,)
elif subId == 128:
# ASN.1 spec forbids leading zeros (0x80) in OID
# encoding, tolerating it opens a vulnerability. See
# http://www.cosic.esat.kuleuven.be/publications/article-1432.pdf
# page 7
raise error.PyAsn1Error('Invalid octet 0x80 in OID encoding')
# Decode two leading arcs
if 0 <= oid[0] <= 39:
oid = (0,) + oid
elif 40 <= oid[0] <= 79:
oid = (1, oid[0]-40) + oid[1:]
elif oid[0] >= 80:
oid = (2, oid[0]-80) + oid[1:]
else:
raise error.PyAsn1Error('Malformed first OID octet: %s' % head[0])
return self._createComponent(asn1Spec, tagSet, oid), tail
class RealDecoder(AbstractSimpleDecoder):
protoComponent = univ.Real()
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if not head:
return self._createComponent(asn1Spec, tagSet, 0.0), tail
fo = oct2int(head[0]); head = head[1:]
if fo & 0x80: # binary encoding
if not head:
raise error.PyAsn1Error("Incomplete floating-point value")
n = (fo & 0x03) + 1
if n == 4:
n = oct2int(head[0])
head = head[1:]
eo, head = head[:n], head[n:]
if not eo or not head:
raise error.PyAsn1Error('Real exponent screwed')
e = oct2int(eo[0]) & 0x80 and -1 or 0
while eo: # exponent
e <<= 8
e |= oct2int(eo[0])
eo = eo[1:]
b = fo >> 4 & 0x03 # base bits
if b > 2:
raise error.PyAsn1Error('Illegal Real base')
if b == 1: # encbase = 8
e *= 3
elif b == 2: # encbase = 16
e *= 4
p = 0
while head: # value
p <<= 8
p |= oct2int(head[0])
head = head[1:]
if fo & 0x40: # sign bit
p = -p
sf = fo >> 2 & 0x03 # scale bits
p *= 2**sf
value = (p, 2, e)
elif fo & 0x40: # infinite value
value = fo & 0x01 and '-inf' or 'inf'
elif fo & 0xc0 == 0: # character encoding
if not head:
raise error.PyAsn1Error("Incomplete floating-point value")
try:
if fo & 0x3 == 0x1: # NR1
value = (int(head), 10, 0)
elif fo & 0x3 == 0x2: # NR2
value = float(head)
elif fo & 0x3 == 0x3: # NR3
value = float(head)
else:
raise error.SubstrateUnderrunError(
'Unknown NR (tag %s)' % fo
)
except ValueError:
raise error.SubstrateUnderrunError(
'Bad character Real syntax'
)
else:
raise error.SubstrateUnderrunError(
'Unknown encoding (tag %s)' % fo
)
return self._createComponent(asn1Spec, tagSet, value), tail
class SequenceDecoder(AbstractConstructedDecoder):
protoComponent = univ.Sequence()
def _getComponentTagMap(self, r, idx):
try:
return r.getComponentTagMapNearPosition(idx)
except error.PyAsn1Error:
return
def _getComponentPositionByType(self, r, t, idx):
return r.getComponentPositionNearType(t, idx)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
r = self._createComponent(asn1Spec, tagSet)
idx = 0
if substrateFun:
return substrateFun(r, substrate, length)
while head:
asn1Spec = self._getComponentTagMap(r, idx)
component, head = decodeFun(head, asn1Spec)
idx = self._getComponentPositionByType(
r, component.getEffectiveTagSet(), idx
)
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
r.setDefaultComponents()
r.verifySizeSpec()
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
idx = 0
while substrate:
asn1Spec = self._getComponentTagMap(r, idx)
component, substrate = decodeFun(substrate, asn1Spec, allowEoo=True)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
idx = self._getComponentPositionByType(
r, component.getEffectiveTagSet(), idx
)
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
r.setDefaultComponents()
r.verifySizeSpec()
return r, substrate
class SequenceOfDecoder(AbstractConstructedDecoder):
protoComponent = univ.SequenceOf()
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
asn1Spec = r.getComponentType()
idx = 0
while head:
component, head = decodeFun(head, asn1Spec)
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
r.verifySizeSpec()
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
asn1Spec = r.getComponentType()
idx = 0
while substrate:
component, substrate = decodeFun(substrate, asn1Spec, allowEoo=True)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
r.setComponentByPosition(idx, component, asn1Spec is None)
idx = idx + 1
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
r.verifySizeSpec()
return r, substrate
class SetDecoder(SequenceDecoder):
protoComponent = univ.Set()
def _getComponentTagMap(self, r, idx):
return r.getComponentTagMap()
def _getComponentPositionByType(self, r, t, idx):
nextIdx = r.getComponentPositionByType(t)
if nextIdx is None:
return idx
else:
return nextIdx
class SetOfDecoder(SequenceOfDecoder):
protoComponent = univ.SetOf()
class ChoiceDecoder(AbstractConstructedDecoder):
protoComponent = univ.Choice()
tagFormats = (tag.tagFormatSimple, tag.tagFormatConstructed)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
if r.getTagSet() == tagSet: # explicitly tagged Choice
component, head = decodeFun(
head, r.getComponentTagMap()
)
else:
component, head = decodeFun(
head, r.getComponentTagMap(), tagSet, length, state
)
if isinstance(component, univ.Choice):
effectiveTagSet = component.getEffectiveTagSet()
else:
effectiveTagSet = component.getTagSet()
r.setComponentByType(effectiveTagSet, component, 0, asn1Spec is None)
return r, tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
r = self._createComponent(asn1Spec, tagSet)
if substrateFun:
return substrateFun(r, substrate, length)
if r.getTagSet() == tagSet: # explicitly tagged Choice
component, substrate = decodeFun(substrate, r.getComponentTagMap())
# eat up EOO marker
eooMarker, substrate = decodeFun(substrate, allowEoo=True)
if not eoo.endOfOctets.isSameTypeWith(eooMarker) or \
eooMarker != eoo.endOfOctets:
raise error.PyAsn1Error('No EOO seen before substrate ends')
else:
component, substrate= decodeFun(
substrate, r.getComponentTagMap(), tagSet, length, state
)
if isinstance(component, univ.Choice):
effectiveTagSet = component.getEffectiveTagSet()
else:
effectiveTagSet = component.getTagSet()
r.setComponentByType(effectiveTagSet, component, 0, asn1Spec is None)
return r, substrate
class AnyDecoder(AbstractSimpleDecoder):
protoComponent = univ.Any()
tagFormats = (tag.tagFormatSimple, tag.tagFormatConstructed)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
if asn1Spec is None or \
asn1Spec is not None and tagSet != asn1Spec.getTagSet():
# untagged Any container, recover inner header substrate
length = length + len(fullSubstrate) - len(substrate)
substrate = fullSubstrate
if substrateFun:
return substrateFun(self._createComponent(asn1Spec, tagSet),
substrate, length)
head, tail = substrate[:length], substrate[length:]
return self._createComponent(asn1Spec, tagSet, value=head), tail
def indefLenValueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet,
length, state, decodeFun, substrateFun):
if asn1Spec is not None and tagSet == asn1Spec.getTagSet():
# tagged Any type -- consume header substrate
header = ''
else:
# untagged Any, recover header substrate
header = fullSubstrate[:-len(substrate)]
r = self._createComponent(asn1Spec, tagSet, header)
# Any components do not inherit initial tag
asn1Spec = self.protoComponent
if substrateFun:
return substrateFun(r, substrate, length)
while substrate:
component, substrate = decodeFun(substrate, asn1Spec, allowEoo=True)
if eoo.endOfOctets.isSameTypeWith(component) and \
component == eoo.endOfOctets:
break
r = r + component
else:
raise error.SubstrateUnderrunError(
'No EOO seen before substrate ends'
)
return r, substrate
# character string types
class UTF8StringDecoder(OctetStringDecoder):
protoComponent = char.UTF8String()
class NumericStringDecoder(OctetStringDecoder):
protoComponent = char.NumericString()
class PrintableStringDecoder(OctetStringDecoder):
protoComponent = char.PrintableString()
class TeletexStringDecoder(OctetStringDecoder):
protoComponent = char.TeletexString()
class VideotexStringDecoder(OctetStringDecoder):
protoComponent = char.VideotexString()
class IA5StringDecoder(OctetStringDecoder):
protoComponent = char.IA5String()
class GraphicStringDecoder(OctetStringDecoder):
protoComponent = char.GraphicString()
class VisibleStringDecoder(OctetStringDecoder):
protoComponent = char.VisibleString()
class GeneralStringDecoder(OctetStringDecoder):
protoComponent = char.GeneralString()
class UniversalStringDecoder(OctetStringDecoder):
protoComponent = char.UniversalString()
class BMPStringDecoder(OctetStringDecoder):
protoComponent = char.BMPString()
# "useful" types
class ObjectDescriptorDecoder(OctetStringDecoder):
protoComponent = useful.ObjectDescriptor()
class GeneralizedTimeDecoder(OctetStringDecoder):
protoComponent = useful.GeneralizedTime()
class UTCTimeDecoder(OctetStringDecoder):
protoComponent = useful.UTCTime()
tagMap = {
univ.Integer.tagSet: IntegerDecoder(),
univ.Boolean.tagSet: BooleanDecoder(),
univ.BitString.tagSet: BitStringDecoder(),
univ.OctetString.tagSet: OctetStringDecoder(),
univ.Null.tagSet: NullDecoder(),
univ.ObjectIdentifier.tagSet: ObjectIdentifierDecoder(),
univ.Enumerated.tagSet: IntegerDecoder(),
univ.Real.tagSet: RealDecoder(),
univ.Sequence.tagSet: SequenceDecoder(), # conflicts with SequenceOf
univ.Set.tagSet: SetDecoder(), # conflicts with SetOf
univ.Choice.tagSet: ChoiceDecoder(), # conflicts with Any
# character string types
char.UTF8String.tagSet: UTF8StringDecoder(),
char.NumericString.tagSet: NumericStringDecoder(),
char.PrintableString.tagSet: PrintableStringDecoder(),
char.TeletexString.tagSet: TeletexStringDecoder(),
char.VideotexString.tagSet: VideotexStringDecoder(),
char.IA5String.tagSet: IA5StringDecoder(),
char.GraphicString.tagSet: GraphicStringDecoder(),
char.VisibleString.tagSet: VisibleStringDecoder(),
char.GeneralString.tagSet: GeneralStringDecoder(),
char.UniversalString.tagSet: UniversalStringDecoder(),
char.BMPString.tagSet: BMPStringDecoder(),
# useful types
useful.ObjectDescriptor.tagSet: ObjectDescriptorDecoder(),
useful.GeneralizedTime.tagSet: GeneralizedTimeDecoder(),
useful.UTCTime.tagSet: UTCTimeDecoder()
}
# Type-to-codec map for ambiguous ASN.1 types
typeMap = {
univ.Set.typeId: SetDecoder(),
univ.SetOf.typeId: SetOfDecoder(),
univ.Sequence.typeId: SequenceDecoder(),
univ.SequenceOf.typeId: SequenceOfDecoder(),
univ.Choice.typeId: ChoiceDecoder(),
univ.Any.typeId: AnyDecoder()
}
( stDecodeTag, stDecodeLength, stGetValueDecoder, stGetValueDecoderByAsn1Spec,
stGetValueDecoderByTag, stTryAsExplicitTag, stDecodeValue,
stDumpRawValue, stErrorCondition, stStop ) = [x for x in range(10)]
class Decoder:
defaultErrorState = stErrorCondition
# defaultErrorState = stDumpRawValue
defaultRawDecoder = AnyDecoder()
supportIndefLength = True
def __init__(self, tagMap, typeMap={}):
self.__tagMap = tagMap
self.__typeMap = typeMap
# Tag & TagSet objects caches
self.__tagCache = {}
self.__tagSetCache = {}
def __call__(self, substrate, asn1Spec=None, tagSet=None,
length=None, state=stDecodeTag, recursiveFlag=1,
substrateFun=None, allowEoo=False):
if debug.logger & debug.flagDecoder:
debug.logger('decoder called at scope %s with state %d, working with up to %d octets of substrate: %s' % (debug.scope, state, len(substrate), debug.hexdump(substrate)))
fullSubstrate = substrate
while state != stStop:
if state == stDecodeTag:
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on tag decoding'
)
if not isOctetsType(substrate) and \
not isinstance(substrate, univ.OctetString):
raise error.PyAsn1Error('Bad octet stream type')
# Decode tag
firstOctet = substrate[0]
substrate = substrate[1:]
if firstOctet in self.__tagCache:
lastTag = self.__tagCache[firstOctet]
else:
t = oct2int(firstOctet)
# Look for end-of-octets sentinel
if t == 0:
if substrate and oct2int(substrate[0]) == 0:
if allowEoo and self.supportIndefLength:
debug.logger and debug.logger & debug.flagDecoder and debug.logger('end-of-octets sentinel found')
value, substrate = eoo.endOfOctets, substrate[1:]
state = stStop
continue
else:
raise error.PyAsn1Error('Unexpected end-of-contents sentinel')
else:
raise error.PyAsn1Error('Zero tag encountered')
tagClass = t&0xC0
tagFormat = t&0x20
tagId = t&0x1F
if tagId == 0x1F:
tagId = 0
while 1:
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on long tag decoding'
)
t = oct2int(substrate[0])
tagId = tagId << 7 | (t&0x7F)
substrate = substrate[1:]
if not t&0x80:
break
lastTag = tag.Tag(
tagClass=tagClass, tagFormat=tagFormat, tagId=tagId
)
if tagId < 31:
# cache short tags
self.__tagCache[firstOctet] = lastTag
if tagSet is None:
if firstOctet in self.__tagSetCache:
tagSet = self.__tagSetCache[firstOctet]
else:
# base tag not recovered
tagSet = tag.TagSet((), lastTag)
if firstOctet in self.__tagCache:
self.__tagSetCache[firstOctet] = tagSet
else:
tagSet = lastTag + tagSet
state = stDecodeLength
debug.logger and debug.logger & debug.flagDecoder and debug.logger('tag decoded into %s, decoding length' % tagSet)
if state == stDecodeLength:
# Decode length
if not substrate:
raise error.SubstrateUnderrunError(
'Short octet stream on length decoding'
)
firstOctet = oct2int(substrate[0])
if firstOctet == 128:
size = 1
length = -1
elif firstOctet < 128:
length, size = firstOctet, 1
else:
size = firstOctet & 0x7F
# encoded in size bytes
length = 0
lengthString = substrate[1:size+1]
# missing check on maximum size, which shouldn't be a
# problem, we can handle more than is possible
if len(lengthString) != size:
raise error.SubstrateUnderrunError(
'%s<%s at %s' %
(size, len(lengthString), tagSet)
)
for char in lengthString:
length = (length << 8) | oct2int(char)
size = size + 1
substrate = substrate[size:]
if length != -1 and len(substrate) < length:
raise error.SubstrateUnderrunError(
'%d-octet short' % (length - len(substrate))
)
if length == -1 and not self.supportIndefLength:
                    raise error.PyAsn1Error('Indefinite length encoding not supported by this codec')
state = stGetValueDecoder
debug.logger and debug.logger & debug.flagDecoder and debug.logger('value length decoded into %d, payload substrate is: %s' % (length, debug.hexdump(length == -1 and substrate or substrate[:length])))
if state == stGetValueDecoder:
if asn1Spec is None:
state = stGetValueDecoderByTag
else:
state = stGetValueDecoderByAsn1Spec
#
# There're two ways of creating subtypes in ASN.1 what influences
# decoder operation. These methods are:
# 1) Either base types used in or no IMPLICIT tagging has been
# applied on subtyping.
# 2) Subtype syntax drops base type information (by means of
# IMPLICIT tagging.
# The first case allows for complete tag recovery from substrate
# while the second one requires original ASN.1 type spec for
# decoding.
#
# In either case a set of tags (tagSet) is coming from substrate
# in an incremental, tag-by-tag fashion (this is the case of
# EXPLICIT tag which is most basic). Outermost tag comes first
# from the wire.
#
if state == stGetValueDecoderByTag:
if tagSet in self.__tagMap:
concreteDecoder = self.__tagMap[tagSet]
else:
concreteDecoder = None
if concreteDecoder:
state = stDecodeValue
else:
_k = tagSet[:1]
if _k in self.__tagMap:
concreteDecoder = self.__tagMap[_k]
else:
concreteDecoder = None
if concreteDecoder:
state = stDecodeValue
else:
state = stTryAsExplicitTag
if debug.logger and debug.logger & debug.flagDecoder:
debug.logger('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state == stDecodeValue and 'value' or 'as explicit tag'))
debug.scope.push(concreteDecoder is None and '?' or concreteDecoder.protoComponent.__class__.__name__)
if state == stGetValueDecoderByAsn1Spec:
if isinstance(asn1Spec, (dict, tagmap.TagMap)):
if tagSet in asn1Spec:
__chosenSpec = asn1Spec[tagSet]
else:
__chosenSpec = None
if debug.logger and debug.logger & debug.flagDecoder:
debug.logger('candidate ASN.1 spec is a map of:')
for t, v in asn1Spec.getPosMap().items():
debug.logger(' %s -> %s' % (t, v.__class__.__name__))
if asn1Spec.getNegMap():
debug.logger('but neither of: ')
for t, v in asn1Spec.getNegMap().items():
debug.logger(' %s -> %s' % (t, v.__class__.__name__))
debug.logger('new candidate ASN.1 spec is %s, chosen by %s' % (__chosenSpec is None and '<none>' or __chosenSpec.prettyPrintType(), tagSet))
else:
__chosenSpec = asn1Spec
debug.logger and debug.logger & debug.flagDecoder and debug.logger('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__)
if __chosenSpec is not None and (
tagSet == __chosenSpec.getTagSet() or \
tagSet in __chosenSpec.getTagMap()
):
# use base type for codec lookup to recover untagged types
baseTagSet = __chosenSpec.baseTagSet
if __chosenSpec.typeId is not None and \
__chosenSpec.typeId in self.__typeMap:
# ambiguous type
concreteDecoder = self.__typeMap[__chosenSpec.typeId]
debug.logger and debug.logger & debug.flagDecoder and debug.logger('value decoder chosen for an ambiguous type by type ID %s' % (__chosenSpec.typeId,))
elif baseTagSet in self.__tagMap:
# base type or tagged subtype
concreteDecoder = self.__tagMap[baseTagSet]
debug.logger and debug.logger & debug.flagDecoder and debug.logger('value decoder chosen by base %s' % (baseTagSet,))
else:
concreteDecoder = None
if concreteDecoder:
asn1Spec = __chosenSpec
state = stDecodeValue
else:
state = stTryAsExplicitTag
else:
concreteDecoder = None
state = stTryAsExplicitTag
if debug.logger and debug.logger & debug.flagDecoder:
debug.logger('codec %s chosen by ASN.1 spec, decoding %s' % (state == stDecodeValue and concreteDecoder.__class__.__name__ or "<none>", state == stDecodeValue and 'value' or 'as explicit tag'))
debug.scope.push(__chosenSpec is None and '?' or __chosenSpec.__class__.__name__)
if state == stTryAsExplicitTag:
if tagSet and \
tagSet[0][1] == tag.tagFormatConstructed and \
tagSet[0][0] != tag.tagClassUniversal:
# Assume explicit tagging
concreteDecoder = explicitTagDecoder
state = stDecodeValue
else:
concreteDecoder = None
state = self.defaultErrorState
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state == stDecodeValue and 'value' or 'as failure'))
if state == stDumpRawValue:
concreteDecoder = self.defaultRawDecoder
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__)
state = stDecodeValue
if state == stDecodeValue:
if recursiveFlag == 0 and not substrateFun: # legacy
substrateFun = lambda a,b,c: (a,b[:c])
if length == -1: # indef length
value, substrate = concreteDecoder.indefLenValueDecoder(
fullSubstrate, substrate, asn1Spec, tagSet, length,
stGetValueDecoder, self, substrateFun
)
else:
value, substrate = concreteDecoder.valueDecoder(
fullSubstrate, substrate, asn1Spec, tagSet, length,
stGetValueDecoder, self, substrateFun
)
state = stStop
debug.logger and debug.logger & debug.flagDecoder and debug.logger('codec %s yields type %s, value:\n%s\n...remaining substrate is: %s' % (concreteDecoder.__class__.__name__, value.__class__.__name__, value.prettyPrint(), substrate and debug.hexdump(substrate) or '<none>'))
if state == stErrorCondition:
raise error.PyAsn1Error(
'%s not in asn1Spec: %s' % (tagSet, asn1Spec)
)
if debug.logger and debug.logger & debug.flagDecoder:
debug.scope.pop()
debug.logger('decoder left scope %s, call completed' % debug.scope)
return value, substrate
decode = Decoder(tagMap, typeMap)
# XXX
# non-recursive decoding; return position rather than substrate
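# Illustrative usage sketch (assumption, not part of the original module):
# decode a BER-encoded INTEGER 5 -- tag 0x02, length 0x01, value 0x05. The
# module-level `decode` callable returns the decoded ASN.1 object and any
# remaining substrate octets.
if __name__ == '__main__':
    decoded_value, remaining = decode('\x02\x01\x05')
    print('decoded %s, remaining substrate %r' % (decoded_value.prettyPrint(), remaining))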
|
enkimute/ganja.js
|
refs/heads/master
|
codegen/python/mink.py
|
1
|
"""3D Projective Geometric Algebra.
Written by a generator written by enki.
"""
__author__ = 'Enki'
import math
class MINK:
def __init__(self, value=0, index=0):
"""Initiate a new MINK.
        Optionally, the component at the given index can be set to value.
"""
self.mvec = [0] * 4
self._base = ["1", "e1", "e2", "e12"]
if (value != 0):
self.mvec[index] = value
@classmethod
def fromarray(cls, array):
"""Initiate a new MINK from an array-like object.
The first axis of the array is assumed to correspond to the elements
of the algebra, and needs to have the same length. Any other dimensions
are left unchanged, and should have simple operations such as addition
and multiplication defined. NumPy arrays are therefore a perfect
candidate.
:param array: array-like object whose length is the dimension of the algebra.
:return: new instance of MINK.
"""
self = cls()
if len(array) != len(self):
raise TypeError('length of array must be identical to the dimension '
'of the algebra.')
self.mvec = array
return self
def __str__(self):
if isinstance(self.mvec, list):
res = ' + '.join(filter(None, [("%.7f" % x).rstrip("0").rstrip(".")+(["",self._base[i]][i>0]) if abs(x) > 0.000001 else None for i,x in enumerate(self)]))
else: # Assume array-like, redirect str conversion
res = str(self.mvec)
if (res == ''):
return "0"
return res
def __getitem__(self, key):
return self.mvec[key]
def __setitem__(self, key, value):
self.mvec[key] = value
def __len__(self):
return len(self.mvec)
def __invert__(a):
"""MINK.Reverse
Reverse the order of the basis blades.
"""
res = a.mvec.copy()
res[0]=a[0]
res[1]=a[1]
res[2]=a[2]
res[3]=-a[3]
return MINK.fromarray(res)
def Dual(a):
"""MINK.Dual
Poincare duality operator.
"""
res = a.mvec.copy()
res[0]=a[3]
res[1]=-a[2]
res[2]=-a[1]
res[3]=a[0]
return MINK.fromarray(res)
def Conjugate(a):
"""MINK.Conjugate
Clifford Conjugation
"""
res = a.mvec.copy()
res[0]=a[0]
res[1]=-a[1]
res[2]=-a[2]
res[3]=-a[3]
return MINK.fromarray(res)
def Involute(a):
"""MINK.Involute
Main involution
"""
res = a.mvec.copy()
res[0]=a[0]
res[1]=-a[1]
res[2]=-a[2]
res[3]=a[3]
return MINK.fromarray(res)
def __mul__(a,b):
"""MINK.Mul
The geometric product.
"""
if type(b) in (int, float):
return a.muls(b)
res = a.mvec.copy()
res[0]=b[0]*a[0]+b[1]*a[1]-b[2]*a[2]+b[3]*a[3]
res[1]=b[1]*a[0]+b[0]*a[1]+b[3]*a[2]-b[2]*a[3]
res[2]=b[2]*a[0]+b[3]*a[1]+b[0]*a[2]-b[1]*a[3]
res[3]=b[3]*a[0]+b[2]*a[1]-b[1]*a[2]+b[0]*a[3]
return MINK.fromarray(res)
__rmul__=__mul__
def __xor__(a,b):
res = a.mvec.copy()
res[0]=b[0]*a[0]
res[1]=b[1]*a[0]+b[0]*a[1]
res[2]=b[2]*a[0]+b[0]*a[2]
res[3]=b[3]*a[0]+b[2]*a[1]-b[1]*a[2]+b[0]*a[3]
return MINK.fromarray(res)
def __and__(a,b):
res = a.mvec.copy()
res[3]=1*(a[3]*b[3])
res[2]=-1*(a[2]*-1*b[3]+a[3]*b[2]*-1)
res[1]=1*(a[1]*b[3]+a[3]*b[1])
res[0]=1*(a[0]*b[3]+a[1]*b[2]*-1-a[2]*-1*b[1]+a[3]*b[0])
return MINK.fromarray(res)
def __or__(a,b):
res = a.mvec.copy()
res[0]=b[0]*a[0]+b[1]*a[1]-b[2]*a[2]+b[3]*a[3]
res[1]=b[1]*a[0]+b[0]*a[1]+b[3]*a[2]-b[2]*a[3]
res[2]=b[2]*a[0]+b[3]*a[1]+b[0]*a[2]-b[1]*a[3]
res[3]=b[3]*a[0]+b[0]*a[3]
return MINK.fromarray(res)
def __add__(a,b):
"""MINK.Add
Multivector addition
"""
if type(b) in (int, float):
return a.adds(b)
res = a.mvec.copy()
res[0] = a[0]+b[0]
res[1] = a[1]+b[1]
res[2] = a[2]+b[2]
res[3] = a[3]+b[3]
return MINK.fromarray(res)
__radd__=__add__
def __sub__(a,b):
"""MINK.Sub
Multivector subtraction
"""
if type(b) in (int, float):
return a.subs(b)
res = a.mvec.copy()
res[0] = a[0]-b[0]
res[1] = a[1]-b[1]
res[2] = a[2]-b[2]
res[3] = a[3]-b[3]
return MINK.fromarray(res)
def __rsub__(a,b):
"""MINK.Sub
Multivector subtraction
"""
return b + -1 * a
def smul(a,b):
res = a.mvec.copy()
res[0] = a*b[0]
res[1] = a*b[1]
res[2] = a*b[2]
res[3] = a*b[3]
return MINK.fromarray(res)
def muls(a,b):
res = a.mvec.copy()
res[0] = a[0]*b
res[1] = a[1]*b
res[2] = a[2]*b
res[3] = a[3]*b
return MINK.fromarray(res)
def sadd(a,b):
res = a.mvec.copy()
res[0] = a+b[0]
res[1] = b[1]
res[2] = b[2]
res[3] = b[3]
return MINK.fromarray(res)
def adds(a,b):
res = a.mvec.copy()
res[0] = a[0]+b
res[1] = a[1]
res[2] = a[2]
res[3] = a[3]
return MINK.fromarray(res)
def ssub(a,b):
res = a.mvec.copy()
res[0] = a-b[0]
res[1] = -b[1]
res[2] = -b[2]
res[3] = -b[3]
return MINK.fromarray(res)
def subs(a,b):
res = a.mvec.copy()
res[0] = a[0]-b
res[1] = a[1]
res[2] = a[2]
res[3] = a[3]
return MINK.fromarray(res)
def norm(a):
return abs((a * a.Conjugate())[0])**0.5
def inorm(a):
return a.Dual().norm()
def normalized(a):
return a * (1 / a.norm())
e1 = MINK(1.0, 1)
e2 = MINK(1.0, 2)
e12 = MINK(1.0, 3)
if __name__ == '__main__':
print("e1*e1 :", str(e1*e1))
print("pss :", str(e12))
print("pss*pss :", str(e12*e12))
|
xpertsavenue/WiringOP-Zero-Python
|
refs/heads/master
|
examples/two-mcp23017.py
|
3
|
# Turns on each pin of an mcp23017 on address 0x20 ( quick2wire IO expander )
import wiringpi
pin_base = 65
i2c_addr = 0x20
i2c_addr_2 = 0x21
#pins = [65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80]
wiringpi.wiringPiSetup()
wiringpi.mcp23017Setup(pin_base,i2c_addr)
wiringpi.mcp23017Setup(pin_base+16,i2c_addr_2)
#for pin in pins:
for pin in range(65,96):
wiringpi.pinMode(pin,1)
wiringpi.digitalWrite(pin,1)
# wiringpi.delay(1000)
# wiringpi.digitalWrite(pin,0)
|
bovenyan/hsa
|
refs/heads/master
|
hsa-python/examples/example_utils/network_loader.py
|
4
|
'''
Copyright 2012, Stanford University. This file is licensed under GPL v2 plus
a special exception, as described in included LICENSE_EXCEPTION.txt.
Created on Jul 25, 2012
@author: Peyman Kazemian
'''
from examples.example_utils.emulated_tf import emulated_tf
from headerspace.tf import TF
import json
def load_network(settings):
n = net_loader(settings)
ntf = n.load_ntf()
ttf = n.load_ttf()
(name_to_id,id_to_name) = n.load_port_map()
return (ntf,ttf,name_to_id,id_to_name)
class net_loader(object):
def __init__(self,settings):
'''
@settings has the following key value pairs
@required rtr_names: list of router names
@required num_layers
@required fwd_engine_layer
@required input_path: path of tf files
        @required switch_id_multiplier
@required port_type_multiplier
@required out_port_type_const
        @optional remove_duplicates: True or False - if duplicates should be
removed after each step. (def: False)
'''
self.settings = settings
def load_ntf(self):
'''
        Load transfer functions into an emulated transfer function with the configured number of layers.
'''
if "remove_duplicates" in self.settings.keys() and \
self.settings["remove_duplicates"]:
emul_tf = emulated_tf(self.settings["num_layers"],True)
else:
emul_tf = emulated_tf(self.settings["num_layers"],False)
emul_tf.set_fwd_engine_stage(self.settings["fwd_engine_layer"])
emul_tf.set_multipliers(self.settings["switch_id_multiplier"], \
self.settings["port_type_multiplier"], \
self.settings["out_port_type_const"])
for rtr_name in self.settings["rtr_names"]:
f = TF(1)
f.load_from_json("%s/%s.tf.json"%(self.settings["input_path"],
rtr_name))
if "hash_table" in self.settings.keys():
f.activate_hash_table(self.settings["hash_table"])
emul_tf.append_tf(f)
emul_tf.length = f.length
return emul_tf
def load_ttf(self):
'''
loads topology transfer function
'''
f = TF(1)
f.load_from_json("%s/topology.tf.json"%self.settings["input_path"])
return f
def load_port_map(self):
'''
        Load the map from box-port name to port ID, and the reverse map from port ID to box-port name.
'''
f = open("%s/port_map.json"%self.settings["input_path"],'r')
map = json.load(f)
id_to_name = {}
for rtr in map.keys():
for port in map[rtr]:
port_num = map[rtr][port]
map[rtr][port] = port_num
id_to_name[str(port_num)] = "%s-%s"%(rtr,port)
if "out_port_type_const" in self.settings.keys() and \
self.settings["out_port_type_const"] > 0:
out_port = port_num + self.settings["port_type_multiplier"]\
* self.settings["out_port_type_const"]
id_to_name[str(out_port)] = "%s-%s"%(rtr,port)
return (map,id_to_name)
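# Illustrative settings dict (assumption: the key names follow the docstring and
# the lookups above, while the values are made up for the example):
# example_settings = {
#     "rtr_names": ["rtr_1", "rtr_2"],
#     "num_layers": 3,
#     "fwd_engine_layer": 2,
#     "input_path": "tf_files",
#     "switch_id_multiplier": 100000,
#     "port_type_multiplier": 10000,
#     "out_port_type_const": 1,
#     "remove_duplicates": False,
# }
# (ntf, ttf, name_to_id, id_to_name) = load_network(example_settings)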
|
javierri/Metodo-Ordenamiento-de-Arreglos
|
refs/heads/master
|
MetodoInsercion.py
|
1
|
# Insertion sort method in Python
# Notes: none
# Author: Javier Rivera (UNEFA Mérida)
def OrdInsercion(arreglo):
i = 1
    for elem in arreglo[1:]: # Walk the array elements from position 1 to the end
j = i - 1
        while (j >= 0): # Walk the array from position elem-1 down to 0
            # Find the position where elem will be inserted
if (arreglo[j] < elem):
del arreglo[i]
arreglo.insert(j+1,elem)
break
            # Insert at the start (position 0) if it is smaller than the first element
if (j == 0 and arreglo[0] >= elem):
del arreglo[i]
arreglo.insert(0,elem)
j = j - 1
i = i + 1
# MAIN
arreglo = [5,3,4,2,6,1,7,9,3,8,6]
print arreglo
OrdInsercion(arreglo)
print arreglo
|
xindaya/bosen
|
refs/heads/release_1.1
|
app/sparsecoding/script/kill.py
|
14
|
#!/usr/bin/env python
import os, sys
if len(sys.argv) != 2:
print "usage: %s <hostfile>" % sys.argv[0]
sys.exit(1)
host_file = sys.argv[1]
prog_name = "sparsecoding_main"
# Get host IPs
with open(host_file, "r") as f:
hostlines = f.read().splitlines()
host_ips = [line.split()[1] for line in hostlines]
ssh_cmd = (
"ssh "
"-o StrictHostKeyChecking=no "
"-o UserKnownHostsFile=/dev/null "
"-o LogLevel=quiet "
)
for ip in host_ips:
cmd = ssh_cmd + ip + " killall -q " + prog_name
os.system(cmd)
print "Done killing"
|
sanghinitin/golismero
|
refs/heads/master
|
golismero/api/data/information/dns.py
|
8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This package contains the classes that represent
the different types of DNS queries and responses.
"""
# Required since "dns" is both an external module and the name of this file.
from __future__ import absolute_import
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: http://golismero-project.com
Golismero project mail: contact@golismero-project.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from . import Capture
from .text import Text
from .. import identity
from ..resource.domain import Domain
from ..resource.email import Email
from ..resource.ip import IP
from ...config import Config
#------------------------------------------------------------------------------
class DnsSEC(object):
"""
DNSSEC utility functions.
"""
RSAMD5 = 1
DH = 2
DSA = 3
ECC = 4
RSASHA1 = 5
DSANSEC3SHA1 = 6
RSASHA1NSEC3SHA1 = 7
RSASHA256 = 8
RSASHA512 = 10
INDIRECT = 252
PRIVATEDNS = 253
PRIVATEOID = 254
ALGORITHM_BY_TEXT = {
'RSAMD5' : RSAMD5,
'DH' : DH,
'DSA' : DSA,
'ECC' : ECC,
'RSASHA1' : RSASHA1,
'DSANSEC3SHA1' : DSANSEC3SHA1,
'RSASHA1NSEC3SHA1' : RSASHA1NSEC3SHA1,
'RSASHA256' : RSASHA256,
'RSASHA512' : RSASHA512,
'INDIRECT' : INDIRECT,
'PRIVATEDNS' : PRIVATEDNS,
'PRIVATEOID' : PRIVATEOID,
}
ALGORITHM_BY_NUM = {
RSAMD5 : 'RSAMD5',
DH :'DH',
DSA : 'DSA',
ECC : 'ECC' ,
RSASHA1 : 'RSASHA1',
DSANSEC3SHA1 : 'DSANSEC3SHA1',
RSASHA1NSEC3SHA1 : 'RSASHA1NSEC3SHA1',
RSASHA256 : 'RSASHA256',
RSASHA512 : 'RSASHA512',
INDIRECT : 'INDIRECT',
PRIVATEDNS : 'PRIVATEDNS',
PRIVATEOID : 'PRIVATEOID',
}
#--------------------------------------------------------------------------
@staticmethod
def algorithm_to_text(alg):
"""
:return: From a numeric value, returns a text with the name of the algorithm.
:rtype: str
"""
if not isinstance(alg, int):
raise TypeError("Expected int, got %r instead" % type(alg))
if alg not in DnsSEC.ALGORITHM_BY_TEXT.values():
raise TypeError("Invalid algorithm '%s'" % alg)
return DnsSEC.ALGORITHM_BY_NUM[alg]
#--------------------------------------------------------------------------
@staticmethod
def text_to_algorithm(alg):
"""
        :return: From a text name, returns the integer value of the algorithm.
:rtype: int
"""
if not isinstance(alg, basestring):
raise TypeError("Expected string, got %r instead" % type(alg))
if alg not in DnsSEC.ALGORITHM_BY_TEXT:
raise TypeError("Invalid algorithm '%s'" % alg)
return DnsSEC.ALGORITHM_BY_TEXT[alg]
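    # Illustrative round trip (added for clarity, not part of the original API):
    #   DnsSEC.text_to_algorithm('RSASHA1') -> 5
    #   DnsSEC.algorithm_to_text(5)         -> 'RSASHA1'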
#------------------------------------------------------------------------------
class DnsRegister(Capture):
"""
Base class for DNS Registers.
"""
data_subtype = "information/dns/abstract"
# Types of registers
DNS_TYPES = (
'A',
'AAAA',
'AFSDB',
'CERT',
'CNAME',
'DNSKEY',
'DS',
'HINFO',
'IPSECKEY',
'ISDN',
'LOC',
'MX',
'NAPTR',
'NS',
'NSAP',
'NSEC',
'NSEC3',
'NSEC3PARAM',
'PTR',
'RP',
'RRSIG',
'SOA',
'SPF',
'SRV',
'TXT',
'WKS',
'X25'
)
NONE = 0
A = 1
NS = 2
MD = 3
MF = 4
CNAME = 5
SOA = 6
MB = 7
MG = 8
MR = 9
NULL = 10
WKS = 11
PTR = 12
HINFO = 13
MINFO = 14
MX = 15
TXT = 16
RP = 17
AFSDB = 18
X25 = 19
ISDN = 20
RT = 21
NSAP = 22
NSAP_PTR = 23
SIG = 24
KEY = 25
PX = 26
GPOS = 27
AAAA = 28
LOC = 29
NXT = 30
SRV = 33
NAPTR = 35
KX = 36
CERT = 37
A6 = 38
DNAME = 39
OPT = 41
APL = 42
DS = 43
SSHFP = 44
IPSECKEY = 45
RRSIG = 46
NSEC = 47
DNSKEY = 48
DHCID = 49
NSEC3 = 50
NSEC3PARAM = 51
HIP = 55
SPF = 99
UNSPEC = 103
TKEY = 249
TSIG = 250
IXFR = 251
AXFR = 252
MAILB = 253
MAILA = 254
ANY = 255
TA = 32768
DLV = 32769
_by_text = {
'NONE' : NONE,
'A' : A,
'NS' : NS,
'MD' : MD,
'MF' : MF,
'CNAME' : CNAME,
'SOA' : SOA,
'MB' : MB,
'MG' : MG,
'MR' : MR,
'NULL' : NULL,
'WKS' : WKS,
'PTR' : PTR,
'HINFO' : HINFO,
'MINFO' : MINFO,
'MX' : MX,
'TXT' : TXT,
'RP' : RP,
'AFSDB' : AFSDB,
'X25' : X25,
'ISDN' : ISDN,
'RT' : RT,
'NSAP' : NSAP,
'NSAP-PTR' : NSAP_PTR,
'SIG' : SIG,
'KEY' : KEY,
'PX' : PX,
'GPOS' : GPOS,
'AAAA' : AAAA,
'LOC' : LOC,
'NXT' : NXT,
'SRV' : SRV,
'NAPTR' : NAPTR,
'KX' : KX,
'CERT' : CERT,
'A6' : A6,
'DNAME' : DNAME,
'OPT' : OPT,
'APL' : APL,
'DS' : DS,
'SSHFP' : SSHFP,
'IPSECKEY' : IPSECKEY,
'RRSIG' : RRSIG,
'NSEC' : NSEC,
'DNSKEY' : DNSKEY,
'DHCID' : DHCID,
'NSEC3' : NSEC3,
'NSEC3PARAM' : NSEC3PARAM,
'HIP' : HIP,
'SPF' : SPF,
'UNSPEC' : UNSPEC,
'TKEY' : TKEY,
'TSIG' : TSIG,
'IXFR' : IXFR,
'AXFR' : AXFR,
'MAILB' : MAILB,
'MAILA' : MAILA,
'ANY' : ANY,
'TA' : TA,
'DLV' : DLV,
}
#--------------------------------------------------------------------------
def __init__(self, **kwargs):
"""
        :param type: Type of DNS register. Valid types are listed in the DNS_TYPES tuple.
:type type: str
"""
self._type = kwargs.get('type', None)
# Checks for types
if self._type is None:
raise TypeError("Missing argument: 'type'")
if not isinstance(self._type, basestring):
raise TypeError("Expected str, got %s" % type(self._type))
super(DnsRegister, self).__init__()
#--------------------------------------------------------------------------
@property
def display_name(self):
name = super(DnsRegister, self).display_name
if name.startswith("Dns "):
name = "DNS " + name[4:]
return name
#--------------------------------------------------------------------------
@identity
def type(self):
"""
:return: Type of DNS register.
:rtype: str
"""
return self._type
#--------------------------------------------------------------------------
@property
def type_int(self):
"""
:return: Integer value for the DNS register type.
:rtype: int
"""
return self._by_text[self.type]
#--------------------------------------------------------------------------
@staticmethod
def name2id(id):
"""
        Get the numeric code of a DNS record type from its name.
        :param id: the name of the DNS record type: A, AAAA, CNAME...
        :type id: str
        :return: the numeric code of the DNS record type.
        :rtype: int
"""
return DnsRegister._by_text[id]
#--------------------------------------------------------------------------
@staticmethod
def id2name(name):
"""
        Get the name of a DNS record type from its numeric code.
        :param name: the numeric code of the DNS record type.
        :type name: int
        :return: the name of the DNS record type: A, AAAA, CNAME...
        :rtype: str
"""
m_by_value = dict([(y, x) for x, y in DnsRegister._by_text.iteritems()])
return m_by_value[name]
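    # Illustrative round trip (added for clarity, not part of the original API):
    #   DnsRegister.name2id('MX') -> 15
    #   DnsRegister.id2name(15)   -> 'MX'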
#------------------------------------------------------------------------------
class DNSRegisterAlgorithm(DnsRegister):
data_subtype = "information/dns/abstract"
#--------------------------------------------------------------------------
def __init__(self, algorithm, **kwargs):
"""
:param algorithm: The DNSSEC algorithm for the certificate. Allowed values are in the DnsSEC.ALGORITHM_BY_TEXT dict.
:type algorithm: str | int
"""
#
# Check the algorithm
#
if isinstance(algorithm, basestring):
self.__algorithm_value = DnsSEC.text_to_algorithm(algorithm)
self.__algorithm_name = DnsSEC.algorithm_to_text(self.__algorithm_value)
elif isinstance(algorithm, int):
self.__algorithm_name = DnsSEC.algorithm_to_text(algorithm)
self.__algorithm_value = DnsSEC.text_to_algorithm(self.__algorithm_name)
else:
raise TypeError("Expected str or int, got %r instead" % type(algorithm))
super(DNSRegisterAlgorithm, self).__init__(**kwargs)
#--------------------------------------------------------------------------
@identity
def algorithm_name(self):
"""
:return: Name of the DNSSEC algorithm.
:rtype: str
"""
return self.__algorithm_name
#--------------------------------------------------------------------------
@property
def algorithm_value(self):
"""
:return: Integer with the DNSSEC algorithm value.
:rtype: int
"""
return self.__algorithm_value
#------------------------------------------------------------------------------
class DnsRegisterA(DnsRegister):
"""
Register type 'A'.
"""
#--------------------------------------------------------------------------
def __init__(self, address, **kwargs):
"""
:param address: The IPv4 address.
:type address: str
"""
if not isinstance(address, basestring):
raise TypeError("Expected str, got %s" % type(address))
self.__address = address
# Set type of register and the other options
super(DnsRegisterA, self).__init__(type="A", **kwargs)
#--------------------------------------------------------------------------
@identity
def address(self):
"""
:return: The IPv4 address.
:rtype: str
"""
return self.__address
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [IP(self.address)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.address in scope
#------------------------------------------------------------------------------
class DnsRegisterAAAA(DnsRegister):
"""
Register type 'AAAA'
"""
#--------------------------------------------------------------------------
def __init__(self, address, **kwargs):
"""
:param address: The IPv6 address.
:type address: str
"""
if not isinstance(address, basestring):
raise TypeError("Expected str, got %s" % type(address))
self.__address = address
# Set type of register and the other options
super(DnsRegisterAAAA, self).__init__(type="AAAA", **kwargs)
#--------------------------------------------------------------------------
@identity
def address(self):
"""
:return: The IPv6 address.
:rtype: str
"""
return self.__address
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [IP(self.address)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.address in scope
#------------------------------------------------------------------------------
class DnsRegisterAFSDB(DnsRegister):
"""
Register type 'AFSDB'
"""
#--------------------------------------------------------------------------
def __init__(self, subtype, hostname, **kwargs):
"""
:param subtype: the subtype value
:type subtype: int.
:param hostname: the hostname value
:type hostname: str
"""
if not isinstance(subtype, int):
raise TypeError("Expected int, got %s" % type(subtype))
if not isinstance(hostname, basestring):
raise TypeError("Expected str, got %s" % type(hostname))
self.__subtype = subtype
self.__hostname = hostname
# Set type of register and the other options
super(DnsRegisterAFSDB, self).__init__(type="AFSDB", **kwargs)
#--------------------------------------------------------------------------
@identity
def subtype(self):
"""
:return: the subtype value
        :rtype: int
"""
return self.__subtype
#--------------------------------------------------------------------------
@identity
def hostname(self):
"""
:return: the hostname value
:rtype: str
"""
return self.__hostname
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [Domain(self.hostname)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.hostname in scope
#------------------------------------------------------------------------------
class DnsRegisterCERT(DNSRegisterAlgorithm):
"""
Register type 'CERT'
"""
CERT_TYPE_BY_VAL = {
1 : 'PKIX',
2 : 'SPKI',
3 : 'PGP',
253 : 'URI',
254 : 'OID',
}
CERT_TYPE_BY_NAME = {
'PKIX' : 1,
'SPKI' : 2,
'PGP' : 3,
'URI' : 253,
'OID' : 254,
}
#--------------------------------------------------------------------------
def __init__(self, algorithm, certificate, certificate_type, key_tag, **kwargs):
"""
:param algorithm: the DNSSEC algorithm for the certificate. Allowed values are in DnsSEC.ALGORITHM_BY_TEXT dict.
:type algorithm: str | int
:param certificate: the certificate
:type certificate: str
        :param certificate_type: The type of the certificate. Allowed values are the keys of DnsRegisterCERT.CERT_TYPE_BY_NAME (str) or DnsRegisterCERT.CERT_TYPE_BY_VAL (int).
:type certificate_type: int | str
:param key_tag: the key tag.
:type key_tag: int
"""
#
# Check the certificate type
#
if isinstance(certificate_type, basestring):
self.__cert_type_value = DnsRegisterCERT.text_to_cert(certificate_type)
self.__cert_type_name = DnsRegisterCERT.cert_to_text(self.__cert_type_value)
elif isinstance(certificate_type, int):
self.__cert_type_name = DnsRegisterCERT.cert_to_text(certificate_type)
self.__cert_type_value = DnsRegisterCERT.text_to_cert(self.__cert_type_name)
else:
raise TypeError("Not a valid certificate_type, got %s" % type(certificate_type))
if not isinstance(certificate, basestring):
raise TypeError("Expected str, got %s" % type(certificate))
if not isinstance(key_tag, int):
raise TypeError("Expected int, got '%s'" % type(key_tag))
self.__certificate = certificate
self.__key_tag = key_tag
# Set type of register and the other options
super(DnsRegisterCERT, self).__init__(algorithm, type="CERT", **kwargs)
#--------------------------------------------------------------------------
@identity
def certificate(self):
"""
:return: string with the certificate
:rtype: str
"""
return self.__certificate
#--------------------------------------------------------------------------
@identity
def certificate_type_name(self):
"""
:return: string with the name of the type of certificate
:rtype: str
"""
return self.__cert_type_name
#--------------------------------------------------------------------------
@property
def certificate_type_value(self):
"""
:return: integer value of the type of certificate
:rtype: int
"""
return self.__cert_type_value
#--------------------------------------------------------------------------
@identity
def key_tag(self):
"""
:return: The key tag
:rtype: int
"""
return self.__key_tag
#--------------------------------------------------------------------------
@staticmethod
def cert_to_text(cert):
"""
:return: From a numeric value, returns a text with the name of the type of cert.
:rtype: str
"""
if not isinstance(cert, int):
raise TypeError("Expected int, got '%s'" % type(cert))
        if cert not in DnsRegisterCERT.CERT_TYPE_BY_VAL:
            raise TypeError("Invalid certificate type '%s'" % cert)
return DnsRegisterCERT.CERT_TYPE_BY_VAL[cert]
#--------------------------------------------------------------------------
@staticmethod
def text_to_cert(cert):
"""
        :return: From the text name, returns the integer value of the type of cert.
:rtype: int
"""
if not isinstance(cert, basestring):
raise TypeError("Expected basestring, got '%s'" % type(cert))
        if cert not in DnsRegisterCERT.CERT_TYPE_BY_NAME:
            raise TypeError("Invalid certificate type '%s'" % cert)
return DnsRegisterCERT.CERT_TYPE_BY_NAME[cert]
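#------------------------------------------------------------------------------
# Hedged sketch: the two static helpers above are inverses of each other over
# the CERT_TYPE_BY_NAME / CERT_TYPE_BY_VAL tables.
def _example_cert_type_round_trip():
    value = DnsRegisterCERT.text_to_cert("PGP")   # -> 3
    name = DnsRegisterCERT.cert_to_text(value)    # -> "PGP"
    return name == "PGP" and value == 3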
#------------------------------------------------------------------------------
class DnsRegisterCNAME(DnsRegister):
"""
Register type 'CNAME'
"""
#--------------------------------------------------------------------------
def __init__(self, target, **kwargs):
"""
:param target: name of the pointer host.
:type target: str
"""
if not isinstance(target, basestring):
raise TypeError("Expected str, got %s" % type(target))
self.__target = target
# Set type of register and the other options
super(DnsRegisterCNAME, self).__init__(type="CNAME", **kwargs)
#--------------------------------------------------------------------------
@identity
def target(self):
"""
:return: name of the pointer host.
:rtype: str
"""
return self.__target
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [Domain(self.target)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.target in scope
#------------------------------------------------------------------------------
class DnsRegisterDNSKEY(DNSRegisterAlgorithm):
"""
Register type 'DNSKEY'
"""
#--------------------------------------------------------------------------
def __init__(self, algorithm, flags, key, protocol, **kwargs):
"""
:param algorithm: the DNSSEC algorithm for the certificate. Allowed values are in DnsSEC.ALGORITHM_BY_TEXT dict.
:type algorithm: str | int
:param flags: the key flags
:type flags: int
:param key: String with the public key
:type key: str
:param protocol: the protocol for which this key may be used.
:type protocol: int
"""
if not isinstance(flags, int):
raise TypeError("Expected int, got '%s'" % type(flags))
if not isinstance(key, basestring):
raise TypeError("Expected basestring, got '%s'" % type(key))
if not isinstance(protocol, int):
raise TypeError("Expected int, got '%s'" % type(protocol))
self.__flags = flags
self.__key = key
self.__protocol = protocol
# Set type of register and the other options
super(DnsRegisterDNSKEY, self).__init__(algorithm, type="DNSKEY", **kwargs)
#--------------------------------------------------------------------------
@identity
def flags(self):
"""
:return: flags for the record
:rtype: int
"""
return self.__flags
#--------------------------------------------------------------------------
@identity
def key(self):
"""
:return: String with the public key
:rtype: str
"""
return self.__key
#--------------------------------------------------------------------------
@identity
def protocol(self):
"""
:return: the protocol for which this key may be used.
:rtype: int
"""
return self.__protocol
#------------------------------------------------------------------------------
class DnsRegisterDS(DNSRegisterAlgorithm):
"""
Register type 'DS'
"""
#--------------------------------------------------------------------------
def __init__(self, algorithm, digest, digest_type, key_tag, **kwargs):
"""
:param algorithm: the DNSSEC algorithm for the certificate. Allowed values are in DnsSEC.ALGORITHM_BY_TEXT dict.
:type algorithm: str | int
:param digest: string with the digest
:type digest: str
:param digest_type: the digest type
:type digest_type: int
:param key_tag: the key tag.
:type key_tag: int
"""
if not isinstance(digest, str):
raise TypeError("Expected str, got '%s'" % type(digest))
if not isinstance(digest_type, int):
raise TypeError("Expected int, got '%s'" % type(digest_type))
if not isinstance(key_tag, int):
raise TypeError("Expected int, got '%s'" % type(key_tag))
self.__digest = digest
self.__digest_type = digest_type
self.__key_tag = key_tag
# Set type of register and the other options
super(DnsRegisterDS, self).__init__(algorithm, type="DS", **kwargs)
#--------------------------------------------------------------------------
@identity
def key_tag(self):
"""
:return: The key tag
:rtype: int
"""
return self.__key_tag
#--------------------------------------------------------------------------
@identity
def digest(self):
"""
:return: string with the digest
:rtype: str
"""
return self.__digest
#--------------------------------------------------------------------------
@identity
def digest_type(self):
"""
:return: the digest type
:rtype: int
"""
return self.__digest_type
#------------------------------------------------------------------------------
class DnsRegisterHINFO(DnsRegister):
"""
Register type 'HINFO'
"""
#--------------------------------------------------------------------------
def __init__(self, cpu, os, **kwargs):
"""
:param cpu: the CPU type.
:type cpu: str.
:param os: the OS type
:type os: str
"""
if not isinstance(cpu, basestring):
raise TypeError("Expected str, got %s" % type(cpu))
if not isinstance(os, basestring):
raise TypeError("Expected str, got %s" % type(os))
self.__cpu = cpu
self.__os = os
# Set type of register and the other options
super(DnsRegisterHINFO, self).__init__(type="HINFO", **kwargs)
#--------------------------------------------------------------------------
@identity
def cpu(self):
"""
:return: the CPU type
:rtype: str
"""
return self.__cpu
#--------------------------------------------------------------------------
@identity
def os(self):
"""
:return: the OS type
:rtype: str
"""
return self.__os
# XXX TODO: add a discovered host fingerprint here
#------------------------------------------------------------------------------
class DnsRegisterIPSECKEY(DNSRegisterAlgorithm):
"""
Register type 'IPSECKEY'
"""
# TODO: discover the gateway address
#--------------------------------------------------------------------------
def __init__(self, algorithm, gateway, gateway_type, key, precedence, **kwargs):
"""
:param algorithm: the DNSSEC algorithm for the certificate. Allowed values are in DnsSEC.ALGORITHM_BY_TEXT dict.
:type algorithm: str | int
:param gateway: gateway address
:type gateway: None, IPv4 address, IPV6 address, or domain name
:param gateway_type: the gateway type
:type gateway_type: int
:param key: the public key.
:type key: str
:param precedence: the precedence for this key data.
:type precedence: int
"""
        if not isinstance(gateway, basestring):
            raise TypeError("Expected str, got '%s'" % type(gateway))
        if isinstance(gateway_type, int):
            if gateway_type < 0 or gateway_type > 3:
                raise TypeError("Gateway type must be in 0-3 range")
        else:
            raise TypeError("Expected int, got '%s'" % type(gateway_type))
        if not isinstance(key, basestring):
            raise TypeError("Expected str, got '%s'" % type(key))
        if not isinstance(precedence, int):
            raise TypeError("Expected int, got '%s'" % type(precedence))
self.__gateway = gateway
self.__gateway_type = gateway_type
self.__key = key
self.__precedence = precedence
# Set type of register and the other options
super(DnsRegisterIPSECKEY, self).__init__(algorithm, type="IPSECKEY", **kwargs)
#--------------------------------------------------------------------------
@identity
def gateway(self):
"""
:return: gateway address
:rtype: None, IPv4 address, IPV6 address, or domain name
"""
return self.__gateway
#--------------------------------------------------------------------------
@identity
def gateway_type(self):
"""
:return: the gateway type
:rtype: int
"""
return self.__gateway_type
#--------------------------------------------------------------------------
@identity
def key(self):
"""
:return: the public key
:rtype: str
"""
return self.__key
#--------------------------------------------------------------------------
@identity
def precedence(self):
"""
:return: the precedence of this key data
:rtype: int
"""
return self.__precedence
#------------------------------------------------------------------------------
class DnsRegisterISDN(DnsRegister):
"""
Register type 'ISDN'
"""
#--------------------------------------------------------------------------
def __init__(self, address, subaddress = "", **kwargs):
"""
:param address: the ISDN address.
:type address: str
:param subaddress: the ISDN subaddress.
:type subaddress: str
"""
if not isinstance(address, basestring):
raise TypeError("Expected str, got %s" % type(address))
if not isinstance(subaddress, basestring):
raise TypeError("Expected basestring, got '%s'" % type(subaddress))
self.__address = address
self.__subaddress = subaddress
# Set type of register and the other options
super(DnsRegisterISDN, self).__init__(type="ISDN", **kwargs)
#--------------------------------------------------------------------------
@identity
def address(self):
"""
:return: the ISDN address
:rtype: str
"""
return self.__address
#--------------------------------------------------------------------------
@identity
def subaddress(self):
"""
:return: the ISDN subaddress (or '' if not present)
:rtype: str
"""
return self.__subaddress
#------------------------------------------------------------------------------
class DnsRegisterLOC(DnsRegister):
"""
Register type 'LOC'
"""
# TODO: discover a geolocation information type here
#--------------------------------------------------------------------------
def __init__(self, latitude, longitude, altitude, coordinates, **kwargs):
"""
:param latitude: tuple specifying the degrees, minutes, seconds, and milliseconds of the coordinate.
:type latitude: tuple(int, int, int, int)
        :param longitude: tuple specifying the degrees, minutes, seconds, and milliseconds of the coordinate.
:type longitude: tuple(int, int, int, int)
:param altitude: altitude
:type altitude: float
:param coordinates: string with the geolocation coordinates
:type coordinates: str
"""
if not isinstance(coordinates, basestring):
raise TypeError("Expected str, got %s" % type(coordinates))
if isinstance(latitude, tuple):
for t in latitude:
if not isinstance(t, int):
raise TypeError("Expected int, got '%s'" % type(t))
else:
raise TypeError("Expected float, got '%s'" % type(latitude))
if isinstance(longitude, tuple):
for t in longitude:
if not isinstance(t, int):
raise TypeError("Expected int, got '%s'" % type(t))
else:
raise TypeError("Expected float, got '%s'" % type(longitude))
if not isinstance(altitude, float):
raise TypeError("Expected float, got '%s'" % type(altitude))
self.__latitude = latitude
self.__longitude = longitude
self.__altitude = altitude
self.__coordinates = coordinates
# Set type of register and the other options
super(DnsRegisterLOC, self).__init__(type="LOC", **kwargs)
#--------------------------------------------------------------------------
@identity
def coordinates(self):
"""
        :return: string with the physical coordinates
:rtype: str
"""
return self.__coordinates
#--------------------------------------------------------------------------
@identity
def latitude(self):
"""
:return: tuple specifying the degrees, minutes, seconds, and milliseconds of the coordinate.
:rtype: (int, int, int, int)
"""
return self.__latitude
#--------------------------------------------------------------------------
@identity
def longitude(self):
"""
:return: tuple specifying the degrees, minutes, seconds, and milliseconds of the coordinate.
:rtype: (int, int, int, int)
"""
return self.__longitude
#--------------------------------------------------------------------------
@identity
def altitude(self):
"""
:return: altitude
:rtype: float
"""
return self.__altitude
#------------------------------------------------------------------------------
class DnsRegisterMX(DnsRegister):
"""
Register type 'MX'
"""
#--------------------------------------------------------------------------
def __init__(self, exchange, preference, **kwargs):
"""
        :param exchange: string with the name of the exchange server
:type exchange: str
:param preference: the preference value
:type preference: int
"""
if not isinstance(exchange, basestring):
raise TypeError("Expected basestring, got '%s'" % type(exchange))
if not isinstance(preference, int):
raise TypeError("Expected int, got '%s'" % type(preference))
self.__exchange = exchange
self.__preference = preference
# Set type of register and the other options
super(DnsRegisterMX, self).__init__(type="MX", **kwargs)
#--------------------------------------------------------------------------
@identity
def exchange(self):
"""
:return: string with the name of exchange server.
:rtype: str
"""
return self.__exchange
#--------------------------------------------------------------------------
@identity
def preference(self):
"""
:return: integer with the preference
:rtype: int
"""
return self.__preference
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [Domain(self.exchange)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.exchange in scope
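#------------------------------------------------------------------------------
# Hedged usage sketch (not executed): an MX register only contributes a Domain
# to discovery when its exchange host is inside the audit scope. The host name
# below is hypothetical.
def _example_dns_register_mx():
    reg = DnsRegisterMX("mail.example.com", 10)
    return reg.exchange, reg.preference, reg.discovered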
#------------------------------------------------------------------------------
class DnsRegisterNAPTR(DnsRegister):
"""
Register type 'NAPTR'
"""
#--------------------------------------------------------------------------
def __init__(self, order, preference, regex, replacement, service, **kwargs):
"""
:param order: the order
:type order: int
:param preference: the preference
:type preference: int
:param regex: regular expression
:type regex: str
        :param replacement: replacement name
:type replacement: str
:param service: service name
:type service: str
"""
if not isinstance(order, int):
raise TypeError("Expected int, got '%s'" % type(order))
if not isinstance(preference, int):
raise TypeError("Expected int, got '%s'" % type(preference))
if not isinstance(regex, basestring):
raise TypeError("Expected basestring, got '%s'" % type(regex))
        if not isinstance(replacement, str):
            raise TypeError("Expected str, got '%s'" % type(replacement))
        if not isinstance(service, basestring):
            raise TypeError("Expected str, got '%s'" % type(service))
self.__order = order
self.__preference = preference
self.__regex = regex
self.__replacement = replacement
self.__service = service
# Set type of register and the other options
super(DnsRegisterNAPTR, self).__init__(type="NAPTR", **kwargs)
#--------------------------------------------------------------------------
@identity
def order(self):
"""
:return: the order
:rtype: int
"""
return self.__order
#--------------------------------------------------------------------------
@identity
def preference(self):
"""
:return: the preference
:rtype: int
"""
return self.__preference
#--------------------------------------------------------------------------
@identity
def regex(self):
"""
:return: regular expression
:rtype: str
"""
return self.__regex
#--------------------------------------------------------------------------
@identity
def replacement(self):
"""
        :return: The replacement name
:rtype: str
"""
return self.__replacement
#--------------------------------------------------------------------------
@identity
def service(self):
"""
:return: service name
:rtype: str
"""
return self.__service
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [Domain(self.replacement)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.replacement in scope
#------------------------------------------------------------------------------
class DnsRegisterNS(DnsRegister):
"""
Register type 'NS'
"""
#--------------------------------------------------------------------------
def __init__(self, target, **kwargs):
"""
:param target: server target
:type target: str
"""
if not isinstance(target, basestring):
raise TypeError("Expected basestring, got '%s'" % type(target))
self.__target = target
# Set type of register and the other options
super(DnsRegisterNS, self).__init__(type="NS", **kwargs)
#--------------------------------------------------------------------------
@identity
def target(self):
"""
:return: The target server
:rtype: str
"""
return self.__target
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [Domain(self.target)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.target in scope
#------------------------------------------------------------------------------
class DnsRegisterNSAP(DnsRegister):
"""
Register type 'NSAP'
"""
#--------------------------------------------------------------------------
def __init__(self, address, **kwargs):
"""
        :param address: an NSAP address
:type address: str
"""
if not isinstance(address, basestring):
raise TypeError("Expected basestring, got '%s'" % type(address))
self.__address = address
# Set type of register and the other options
super(DnsRegisterNSAP, self).__init__(type="NSAP", **kwargs)
#--------------------------------------------------------------------------
@identity
def address(self):
"""
        :return: an NSAP address
:rtype: str
"""
return self.__address
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [IP(self.address)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.address in scope
#------------------------------------------------------------------------------
class DnsRegisterNSEC(DnsRegister):
"""
Register type 'NSEC'
"""
#--------------------------------------------------------------------------
def __init__(self, next, **kwargs):
"""
:param next: the next server name
:type next: str
"""
if not isinstance(next, basestring):
raise TypeError("Expected basestring, got '%s'" % type(next))
self.__next = next
# Set type of register and the other options
super(DnsRegisterNSEC, self).__init__(type="NSEC", **kwargs)
#--------------------------------------------------------------------------
@identity
def next(self):
"""
:return: the next server name
:rtype: str
"""
return self.__next
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [Domain(self.next)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.next in scope
#------------------------------------------------------------------------------
class DnsRegisterNSEC3(DNSRegisterAlgorithm):
"""
Register type 'NSEC3'
"""
#--------------------------------------------------------------------------
def __init__(self, algorithm, flags, iterations, salt, **kwargs):
"""
:param algorithm: the DNSSEC algorithm for the certificate. Allowed values are in DnsSEC.ALGORITHM_BY_TEXT dict.
:type algorithm: str | int
:param flags: the flags
:type flags: int
:param iterations: the number of iterations
:type iterations: int
:param salt: the salt
:type salt: str
"""
if not isinstance(flags, int):
raise TypeError("Expected int, got '%s'" % type(flags))
if not isinstance(iterations, int):
raise TypeError("Expected int, got '%s'" % type(iterations))
if not isinstance(salt, str):
raise TypeError("Expected str, got '%s'" % type(salt))
self.__flags = flags
self.__iterations = iterations
self.__salt = salt
# Set type of register and the other options
super(DnsRegisterNSEC3, self).__init__(algorithm, type="NSEC3", **kwargs)
#--------------------------------------------------------------------------
@identity
def salt(self):
"""
:return: the salt
:rtype: str
"""
return self.__salt
#--------------------------------------------------------------------------
@identity
def iterations(self):
"""
:return: the number of iterations
:rtype: int
"""
return self.__iterations
#--------------------------------------------------------------------------
@identity
def flags(self):
"""
:return: the flags
:rtype: int
"""
return self.__flags
#------------------------------------------------------------------------------
class DnsRegisterNSEC3PARAM(DNSRegisterAlgorithm):
"""
Register type 'NSEC3PARAM'
"""
#--------------------------------------------------------------------------
def __init__(self, algorithm, flags, iterations, salt, **kwargs):
"""
:param algorithm: the DNSSEC algorithm for the certificate. Allowed values are in DnsSEC.ALGORITHM_BY_TEXT dict.
:type algorithm: str | int
:param flags: the flags
:type flags: int
:param iterations: the number of iterations
:type iterations: int
:param salt: the salt
:type salt: str
"""
if not isinstance(flags, int):
raise TypeError("Expected int, got '%s'" % type(flags))
if not isinstance(iterations, int):
raise TypeError("Expected int, got '%s'" % type(iterations))
if not isinstance(salt, str):
raise TypeError("Expected str, got '%s'" % type(salt))
self.__flags = flags
self.__iterations = iterations
self.__salt = salt
# Set type of register and the other options
super(DnsRegisterNSEC3PARAM, self).__init__(algorithm, type="NSEC3PARAM", **kwargs)
#--------------------------------------------------------------------------
@identity
def salt(self):
"""
:return: the salt
:rtype: str
"""
return self.__salt
#--------------------------------------------------------------------------
@identity
def iterations(self):
"""
:return: the number of iterations
:rtype: int
"""
return self.__iterations
#--------------------------------------------------------------------------
@identity
def flags(self):
"""
:return: the flags
:rtype: int
"""
return self.__flags
#------------------------------------------------------------------------------
class DnsRegisterPTR(DnsRegister):
"""
Register type 'PTR'
"""
#--------------------------------------------------------------------------
def __init__(self, target, **kwargs):
"""
:param target: server target
:type target: str
"""
if not isinstance(target, basestring):
raise TypeError("Expected basestring, got '%s'" % type(target))
self.__target = target
# Set type of register and the other options
super(DnsRegisterPTR, self).__init__(type="PTR", **kwargs)
#--------------------------------------------------------------------------
@identity
def target(self):
"""
:return: The target server
:rtype: str
"""
return self.__target
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [Domain(self.target)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.target in scope
#------------------------------------------------------------------------------
class DnsRegisterRP(DnsRegister):
"""
Register type 'RP'
"""
#--------------------------------------------------------------------------
def __init__(self, mbox, txt, **kwargs):
"""
        :param mbox: The responsible person's mailbox, as a string
:type mbox: str
:param txt: The owner name of a node with TXT records, or the root name if no TXT records are associated with this RP.
:type txt: str
"""
if not isinstance(mbox, basestring):
raise TypeError("Expected basestring, got '%s'" % type(mbox))
if not isinstance(txt, basestring):
raise TypeError("Expected basestring, got '%s'" % type(txt))
self.__mbox = mbox
self.__txt = txt
# Set type of register and the other options
super(DnsRegisterRP, self).__init__(type="RP", **kwargs)
#--------------------------------------------------------------------------
@identity
def txt(self):
"""
:return: The owner name of a node with TXT records, or the root name
:rtype: str
"""
return self.__txt
#--------------------------------------------------------------------------
@identity
def mbox(self):
"""
        :return: The responsible person's mailbox, as a string
:rtype: str
"""
return self.__mbox
#--------------------------------------------------------------------------
@property
def discovered(self):
result = []
if self.mbox in Config.audit_scope:
try:
result.append( Email(self.mbox) )
except Exception:
# Some people put arbitrary text instead.
pass
if self.txt in Config.audit_scope:
try:
result.append( Domain(self.txt) )
except Exception:
# Same here.
pass
return result
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.mbox in scope or self.txt in scope
#------------------------------------------------------------------------------
class DnsRegisterRRSIG(DNSRegisterAlgorithm):
"""
Register type 'RRSIG'
"""
#--------------------------------------------------------------------------
def __init__(self, algorithm, expiration, interception, key_tag, labels, original_ttl, signer, type_covered, **kwargs):
"""
:param algorithm: the DNSSEC algorithm for the certificate. Allowed values are in DnsSEC.ALGORITHM_BY_TEXT dict.
:type algorithm: str | int
:param expiration: signature expiration time
:type expiration: long
:param interception: signature interception time
:type interception: long
:param key_tag: the key tag.
:type key_tag: int
:param labels: number of labels
:type labels: int
:param original_ttl: the original TTL
:type original_ttl: long
:param signer: the signer
:type signer: str
:param type_covered: the rdata type this signature covers
:type type_covered: int
"""
        if not isinstance(expiration, long):
            raise TypeError("Expected long, got '%s'" % type(expiration))
        if not isinstance(interception, long):
            raise TypeError("Expected long, got '%s'" % type(interception))
        if not isinstance(key_tag, int):
            raise TypeError("Expected int, got '%s'" % type(key_tag))
        if not isinstance(labels, int):
            raise TypeError("Expected int, got '%s'" % type(labels))
        if not isinstance(original_ttl, long):
            raise TypeError("Expected long, got '%s'" % type(original_ttl))
        if not isinstance(signer, str):
            raise TypeError("Expected str, got '%s'" % type(signer))
        if not isinstance(type_covered, int):
            raise TypeError("Expected int, got '%s'" % type(type_covered))
        self.__expiration = expiration
        self.__interception = interception
        self.__key_tag = key_tag
        self.__labels = labels
        self.__original_ttl = original_ttl
        self.__signer = signer
        self.__type_covered = type_covered
        # Set type of register and the other options
        super(DnsRegisterRRSIG, self).__init__(algorithm, type="RRSIG", **kwargs)
#--------------------------------------------------------------------------
@identity
def type_covered(self):
"""
:return: the rdata type this signature covers
:rtype: int
"""
return self.__type_covered
#--------------------------------------------------------------------------
@identity
def labels(self):
"""
:return: number of labels
:rtype: int
"""
return self.__labels
#--------------------------------------------------------------------------
@identity
def original_ttl(self):
"""
:return: the original TTL
:rtype: long
"""
return self.__original_ttl
#--------------------------------------------------------------------------
@identity
def expiration(self):
"""
:return: signature expiration time
:rtype: long
"""
return self.__expiration
#--------------------------------------------------------------------------
@identity
def interception(self):
"""
:return: signature interception time
:rtype: long
"""
return self.__interception
#--------------------------------------------------------------------------
    @identity
    def key_tag(self):
        """
        :return: The key tag
        :rtype: int
        """
        return self.__key_tag
    #--------------------------------------------------------------------------
    @identity
    def signer(self):
        """
        :return: the signer
        :rtype: str
        """
        return self.__signer
#------------------------------------------------------------------------------
class DnsRegisterSIG(DnsRegisterRRSIG):
"""
    Register type 'SIG'
"""
data_subtype = "dns_register_sig"
#------------------------------------------------------------------------------
class DnsRegisterSOA(DnsRegister):
"""
Register type 'SOA'
"""
#--------------------------------------------------------------------------
def __init__(self, mname, rname, refresh, expire, **kwargs):
"""
:param mname: the SOA MNAME (master name) field
:type mname: str
:param rname: the SOA RNAME (responsible name) field
:type rname: str
:param refresh: The zone's refresh value (in seconds)
:type refresh: int
:param expire: The zone's expiration value (in seconds)
:type expire: int
"""
if not isinstance(mname, basestring):
raise TypeError("Expected str, got %s" % type(mname))
if not isinstance(rname, basestring):
raise TypeError("Expected str, got %s" % type(rname))
if not isinstance(refresh, int):
raise TypeError("Expected int, got '%s'" % type(refresh))
if not isinstance(expire, int):
raise TypeError("Expected int, got '%s'" % type(expire))
self.__mname = mname
self.__rname = rname
self.__refresh = refresh
self.__expire = expire
# Set type of register and the other options
super(DnsRegisterSOA, self).__init__(type="SOA", **kwargs)
#--------------------------------------------------------------------------
@identity
def mname(self):
"""
:return: the SOA MNAME (master name) field
:rtype: str
"""
return self.__mname
#--------------------------------------------------------------------------
@identity
def rname(self):
"""
:return: the SOA RNAME (responsible name) field
:rtype: str
"""
return self.__rname
#--------------------------------------------------------------------------
@identity
def refresh(self):
"""
:return: The zone's refresh value (in seconds)
:rtype: int
"""
return self.__refresh
#--------------------------------------------------------------------------
@identity
def expire(self):
"""
:return: The zone's expiration value (in seconds)
:rtype: int
"""
return self.__expire
#--------------------------------------------------------------------------
@property
def discovered(self):
result = []
if self.mname in Config.audit_scope:
result.append( Domain(self.mname) )
if self.rname in Config.audit_scope:
result.append( Domain(self.rname) )
return result
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.mname in scope or self.rname in scope
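#------------------------------------------------------------------------------
# Hedged usage sketch (not executed): an SOA register may yield up to two
# Domain objects, depending on which of MNAME / RNAME fall inside the audit
# scope. All values below are hypothetical.
def _example_dns_register_soa():
    reg = DnsRegisterSOA("ns1.example.com", "hostmaster.example.com",
                         3600, 604800)
    return reg.discovered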
#------------------------------------------------------------------------------
class DnsRegisterTXT(DnsRegister):
"""
Register type 'TXT'
"""
#--------------------------------------------------------------------------
def __init__(self, strings, **kwargs):
"""
        :param strings: list of text strings
:type strings: list(str)
"""
if isinstance(strings, list):
for l in strings:
if not isinstance(l, basestring):
raise TypeError("Expected str, got %s" % type(l))
else:
raise TypeError("Expected str, got %s" % type(strings))
self.__strings = strings
# Set type of register and the other options
super(DnsRegisterTXT, self).__init__(type="TXT", **kwargs)
#--------------------------------------------------------------------------
@identity
def strings(self):
"""
:return: list of the text strings
:rtype: list(str)
"""
return self.__strings
#--------------------------------------------------------------------------
@property
def discovered(self):
return [Text("\n".join(self.strings))]
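#------------------------------------------------------------------------------
# Hedged usage sketch (not executed): the strings of a TXT register are joined
# into a single Text object for discovery. The record content is hypothetical.
def _example_dns_register_txt():
    reg = DnsRegisterTXT(["v=spf1 include:_spf.example.com ~all"])
    return reg.discovered   # -> [Text("v=spf1 include:_spf.example.com ~all")]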
#------------------------------------------------------------------------------
class DnsRegisterSPF(DnsRegister):
"""
Register type 'SPF'
"""
#--------------------------------------------------------------------------
def __init__(self, strings, **kwargs):
"""
        :param strings: list of text strings
:type strings: list(str)
"""
if isinstance(strings, list):
for l in strings:
if not isinstance(l, basestring):
raise TypeError("Expected str, got %s" % type(l))
else:
raise TypeError("Expected str, got %s" % type(strings))
self.__strings = strings
# Set type of register and the other options
super(DnsRegisterSPF, self).__init__(type="SPF", **kwargs)
#--------------------------------------------------------------------------
@identity
def strings(self):
"""
:return: list of the text strings
:rtype: list(str)
"""
return self.__strings
#--------------------------------------------------------------------------
@property
def discovered(self):
return [Text("\n".join(self.strings))]
#------------------------------------------------------------------------------
class DnsRegisterSRV(DnsRegister):
"""
Register type 'SRV'
"""
#--------------------------------------------------------------------------
def __init__(self, target, priority, weight, port, **kwargs):
"""
:param target: the target host name
:type target: str
:param priority: the priority
:type priority: int
:param weight: the weight
:type weight: int
:param port: the port of the service
:type port: int
"""
if not isinstance(target, basestring):
raise TypeError("Expected basestring, got '%s'" % type(target))
if not isinstance(priority, int):
raise TypeError("Expected int, got '%s'" % type(priority))
if not isinstance(weight, int):
raise TypeError("Expected int, got '%s'" % type(weight))
if not isinstance(port, int):
raise TypeError("Expected int, got '%s'" % type(port))
self.__target = target
self.__priority = priority
self.__weight = weight
self.__port = port
# Set type of register and the other options
super(DnsRegisterSRV, self).__init__(type="SRV", **kwargs)
#--------------------------------------------------------------------------
@identity
def target(self):
"""
:return: the target host name
:rtype: str
"""
return self.__target
#--------------------------------------------------------------------------
@identity
def priority(self):
"""
:return: the priority
:rtype: int
"""
return self.__priority
#--------------------------------------------------------------------------
@identity
def weight(self):
"""
:return: the weight
:rtype: int
"""
return self.__weight
#--------------------------------------------------------------------------
@identity
def port(self):
"""
:return: the port of the service
:rtype: int
"""
return self.__port
#--------------------------------------------------------------------------
@property
def discovered(self):
# TODO discover the port
if self.is_in_scope():
return [Domain(self.target)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.target in scope
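#------------------------------------------------------------------------------
# Hedged usage sketch (not executed): an SRV register carries the full service
# endpoint, but only the target host is fed back into discovery for now. The
# values below are hypothetical.
def _example_dns_register_srv():
    reg = DnsRegisterSRV("sipserver.example.com", 10, 60, 5060)
    return reg.target, reg.port, reg.discovered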
#------------------------------------------------------------------------------
class DnsRegisterWKS(DnsRegister):
"""
Register type 'WKS'
"""
#--------------------------------------------------------------------------
def __init__(self, address, protocol, bitmap, **kwargs):
"""
:param address: the address
:type address: str
:param protocol: the protocol.
:type protocol: int
:param bitmap: the bitmap.
:type bitmap: str
"""
if not isinstance(address, basestring):
raise TypeError("Expected basestring, got '%s'" % type(address))
        if not isinstance(protocol, int):
            raise TypeError("Expected int, got '%s'" % type(protocol))
if not isinstance(bitmap, basestring):
raise TypeError("Expected basestring, got '%s'" % type(bitmap))
self.__address = address
self.__protocol = protocol
self.__bitmap = bitmap
# Set type of register and the other options
super(DnsRegisterWKS, self).__init__(type="WKS", **kwargs)
#--------------------------------------------------------------------------
@identity
def address(self):
"""
:return: the address
:rtype: str
"""
return self.__address
#--------------------------------------------------------------------------
@identity
def protocol(self):
"""
:return: the protocol
:rtype: int
"""
return self.__protocol
#--------------------------------------------------------------------------
@identity
def bitmap(self):
"""
:return: the bitmap
:rtype: str
"""
return self.__bitmap
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [IP(self.address)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.address in scope
#------------------------------------------------------------------------------
class DnsRegisterX25(DnsRegister):
"""
Register type 'X25'
"""
#--------------------------------------------------------------------------
def __init__(self, address, **kwargs):
"""
:param address: the PSDN address
:type address: str
"""
if not isinstance(address, basestring):
raise TypeError("Expected str, got %s" % type(address))
self.__address = address
# Set type of register and the other options
super(DnsRegisterX25, self).__init__(type="X25", **kwargs)
#--------------------------------------------------------------------------
@identity
def address(self):
"""
:return: the PSDN address
:rtype: str
"""
return self.__address
#--------------------------------------------------------------------------
@property
def discovered(self):
if self.is_in_scope():
return [IP(self.address)]
return []
#--------------------------------------------------------------------------
def is_in_scope(self, scope = None):
if scope is None:
scope = Config.audit_scope
return self.address in scope
|
912/M-new
|
refs/heads/master
|
virtualenvironment/tourism_industry/lib/python2.7/site-packages/pip/_vendor/requests/certs.py
|
961
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
certs.py
~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
def where():
"""Return the preferred certificate bundle."""
# vendored bundle inside Requests
return os.path.join(os.path.dirname(__file__), 'cacert.pem')
if __name__ == '__main__':
print(where())
|
smarinac/root
|
refs/heads/master
|
math/vc/makeTest.py
|
25
|
#!/usr/bin/env python
#
# scripts to replace a string in a set of a files
#
import sys, re, os
with open("out.txt", "wt") as out:
for line in open("arithmetics.cpp"):
out.write(line.replace('main', 'arithmetics'))
|
Pavlos1/SensoringJMSS
|
refs/heads/master
|
read_arduino_data.py
|
1
|
#!/usr/local/bin/python
# Written by Pavel
# License: BSD
#import mraa
import serial
import sqlite3
import time
import os
import re
import sys
import traceback
error_count = 0
#uart = mraa.Uart(0)
#os.system("/sbin/modprobe cdc-acm")
devices = [f for f in os.listdir("/dev") if re.match(r"^ttyUSB[0-9]$", f)] + [f for f in os.listdir("/dev") if re.match(r"^ttyACM[0-9]$", f)]
print "USB TTYs found:", devices
selected_device = "/dev/"+sorted(devices)[0]
ser = serial.Serial(selected_device, 9600)
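# Hedged sketch (defined here but not wired into the loop below): the same
# insert can be done with a parameterized query, which avoids formatting the
# values into the SQL string by hand. The "data" table layout is assumed to be
# (time, light, volume, temperature, humidity) as in the loop below.
def insert_reading_example(db_path, light, volume, temperature, humidity):
    con = sqlite3.connect(db_path)
    with con:
        con.execute("insert into data values (?,?,?,?,?)",
                    (int(time.time()), int(light), int(volume),
                     float(temperature), float(humidity)))
    con.close()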
while True:
try:
raw_data = str(ser.readline()).rstrip("\n").rstrip("\r").split("|")
print raw_data
if len(raw_data) != 4:
print "wrong length"
continue
        # SQL data format is (time, light, volume, temperature, humidity)
con = sqlite3.connect("sensor_data.db")
with con:
cur = con.cursor()
cur.execute("insert into data values (%d,%d,%d,%f,%f)" %(int(time.time()), int(raw_data[0]), int(raw_data[1]), float(raw_data[2]), float(raw_data[3])))
con.close()
except:
error_count += 1
print "Something went wrong. Probably race condition. Continuing... %d" %error_count
if error_count >= 50:
print "Bailing..."
os.system("/sbin/reboot")
|
Pingmin/linux
|
refs/heads/master
|
tools/testing/selftests/tc-testing/tdc_config.py
|
132
|
"""
# SPDX-License-Identifier: GPL-2.0
tdc_config.py - tdc user-specified values
Copyright (C) 2017 Lucas Bates <lucasb@mojatatu.com>
"""
# Dictionary containing all values that can be substituted in executable
# commands.
NAMES = {
# Substitute your own tc path here
'TC': '/sbin/tc',
# Substitute your own ip path here
'IP': '/sbin/ip',
# Name of veth devices to be created for the namespace
'DEV0': 'v0p0',
'DEV1': 'v0p1',
'DEV2': '',
'DUMMY': 'dummy1',
'BATCH_FILE': './batch.txt',
'BATCH_DIR': 'tmp',
# Length of time in seconds to wait before terminating a command
'TIMEOUT': 12,
# Name of the namespace to use
'NS': 'tcut',
# Directory containing eBPF test programs
'EBPFDIR': './bpf'
}
ENVIR = { }
# put customizations in tdc_config_local.py
try:
from tdc_config_local import *
except ImportError as ie:
pass
try:
NAMES.update(EXTRA_NAMES)
except NameError as ne:
pass
|
FluffySurvivor/yogstation
|
refs/heads/dev
|
bot/requests/packages/chardet/utf8prober.py
|
2918
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
|
wistoch/meego-app-browser
|
refs/heads/master
|
tools/python/google/process_utils.py
|
186
|
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shared process-related utility functions."""
import errno
import os
import subprocess
import sys
class CommandNotFound(Exception): pass
TASKKILL = os.path.join(os.environ['WINDIR'], 'system32', 'taskkill.exe')
TASKKILL_PROCESS_NOT_FOUND_ERR = 128
# On windows 2000 there is no taskkill.exe, we need to have pskill somewhere
# in the path.
PSKILL = 'pskill.exe'
PSKILL_PROCESS_NOT_FOUND_ERR = -1
def KillAll(executables):
"""Tries to kill all copies of each process in the processes list. Returns
an error if any running processes couldn't be killed.
"""
result = 0
if os.path.exists(TASKKILL):
command = [TASKKILL, '/f', '/im']
process_not_found_err = TASKKILL_PROCESS_NOT_FOUND_ERR
else:
command = [PSKILL, '/t']
process_not_found_err = PSKILL_PROCESS_NOT_FOUND_ERR
for name in executables:
new_error = RunCommand(command + [name])
# Ignore "process not found" error.
if new_error != 0 and new_error != process_not_found_err:
result = new_error
return result
def RunCommandFull(command, verbose=True, collect_output=False,
print_output=True):
"""Runs the command list.
Prints the given command (which should be a list of one or more strings).
If specified, prints its stderr (and optionally stdout) to stdout,
line-buffered, converting line endings to CRLF (see note below). If
specified, collects the output as a list of lines and returns it. Waits
for the command to terminate and returns its status.
Args:
command: the full command to run, as a list of one or more strings
verbose: if True, combines all output (stdout and stderr) into stdout.
Otherwise, prints only the command's stderr to stdout.
collect_output: if True, collects the output of the command as a list of
lines and returns it
print_output: if True, prints the output of the command
Returns:
A tuple consisting of the process's exit status and output. If
collect_output is False, the output will be [].
Raises:
CommandNotFound if the command executable could not be found.
"""
print '\n' + subprocess.list2cmdline(command).replace('\\', '/') + '\n', ###
if verbose:
out = subprocess.PIPE
err = subprocess.STDOUT
else:
out = file(os.devnull, 'w')
err = subprocess.PIPE
try:
proc = subprocess.Popen(command, stdout=out, stderr=err, bufsize=1)
except OSError, e:
if e.errno == errno.ENOENT:
raise CommandNotFound('Unable to find "%s"' % command[0])
raise
output = []
if verbose:
read_from = proc.stdout
else:
read_from = proc.stderr
line = read_from.readline()
while line:
line = line.rstrip()
if collect_output:
output.append(line)
if print_output:
# Windows Python converts \n to \r\n automatically whenever it
# encounters it written to a text file (including stdout). The only
# way around it is to write to a binary file, which isn't feasible for
# stdout. So we end up with \r\n here even though we explicitly write
# \n. (We could write \r instead, which doesn't get converted to \r\n,
# but that's probably more troublesome for people trying to read the
# files.)
print line + '\n',
# Python on windows writes the buffer only when it reaches 4k. This is
# not fast enough for all purposes.
sys.stdout.flush()
line = read_from.readline()
# Make sure the process terminates.
proc.wait()
if not verbose:
out.close()
return (proc.returncode, output)
def RunCommand(command, verbose=True):
"""Runs the command list, printing its output and returning its exit status.
Prints the given command (which should be a list of one or more strings),
then runs it and prints its stderr (and optionally stdout) to stdout,
line-buffered, converting line endings to CRLF. Waits for the command to
terminate and returns its status.
Args:
command: the full command to run, as a list of one or more strings
verbose: if True, combines all output (stdout and stderr) into stdout.
Otherwise, prints only the command's stderr to stdout.
Returns:
The process's exit status.
Raises:
CommandNotFound if the command executable could not be found.
"""
return RunCommandFull(command, verbose)[0]
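# Hedged usage sketch (never called): runs a simple command through RunCommand
# and reports whether it exited cleanly. The command shown is only an example.
def _example_run_command():
    status = RunCommand(['python', '--version'])
    return status == 0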
def RunCommandsInParallel(commands, verbose=True, collect_output=False,
print_output=True):
"""Runs a list of commands in parallel, waits for all commands to terminate
and returns their status. If specified, the ouput of commands can be
returned and/or printed.
Args:
commands: the list of commands to run, each as a list of one or more
strings.
verbose: if True, combines stdout and stderr into stdout.
Otherwise, prints only the command's stderr to stdout.
collect_output: if True, collects the output of the each command as a list
of lines and returns it.
print_output: if True, prints the output of each command.
Returns:
A list of tuples consisting of each command's exit status and output. If
collect_output is False, the output will be [].
Raises:
CommandNotFound if any of the command executables could not be found.
"""
command_num = len(commands)
outputs = [[] for i in xrange(command_num)]
procs = [None for i in xrange(command_num)]
eofs = [False for i in xrange(command_num)]
for command in commands:
print '\n' + subprocess.list2cmdline(command).replace('\\', '/') + '\n',
if verbose:
out = subprocess.PIPE
err = subprocess.STDOUT
else:
out = file(os.devnull, 'w')
err = subprocess.PIPE
for i in xrange(command_num):
try:
command = commands[i]
procs[i] = subprocess.Popen(command, stdout=out, stderr=err, bufsize=1)
except OSError, e:
if e.errno == errno.ENOENT:
raise CommandNotFound('Unable to find "%s"' % command[0])
raise
# We could consider terminating the processes already started.
# But Popen.kill() is only available in version 2.6.
# For now the clean up is done by KillAll.
while True:
eof_all = True
for i in xrange(command_num):
if eofs[i]:
continue
if verbose:
read_from = procs[i].stdout
else:
read_from = procs[i].stderr
line = read_from.readline()
if line:
eof_all = False
line = line.rstrip()
outputs[i].append(line)
if print_output:
# Windows Python converts \n to \r\n automatically whenever it
# encounters it written to a text file (including stdout). The only
# way around it is to write to a binary file, which isn't feasible
# for stdout. So we end up with \r\n here even though we explicitly
# write \n. (We could write \r instead, which doesn't get converted
# to \r\n, but that's probably more troublesome for people trying to
# read the files.)
print line + '\n',
else:
eofs[i] = True
if eof_all:
break
# Make sure the process terminates.
for i in xrange(command_num):
procs[i].wait()
if not verbose:
out.close()
return [(procs[i].returncode, outputs[i]) for i in xrange(command_num)]
|
chubbymaggie/pwndbg
|
refs/heads/master
|
pwndbg/compat.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compatibility functionality, for determining whether we are
running under Python2 or Python3, and resolving any
inconsistencies which arise from this.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
# Quickly determine which version is running
python2 = sys.version_info.major == 2
python3 = sys.version_info.major == 3
if python3:
basestring = str
else:
basestring = basestring
|
vovojh/gem5
|
refs/heads/master
|
tests/configs/realview-switcheroo-atomic.py
|
64
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
import switcheroo
root = LinuxArmFSSwitcheroo(
mem_class=SimpleMemory,
cpu_classes=(AtomicSimpleCPU, AtomicSimpleCPU)
).create_root()
# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/cloud/dataproc/v1/dataproc-v1-py/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dataproc_v1.types import workflow_templates
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import WorkflowTemplateServiceTransport, DEFAULT_CLIENT_INFO
class WorkflowTemplateServiceGrpcTransport(WorkflowTemplateServiceTransport):
"""gRPC backend transport for WorkflowTemplateService.
The API interface for managing Workflow Templates in the
Dataproc API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'dataproc.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
                ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'dataproc.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_workflow_template(self) -> Callable[
[workflow_templates.CreateWorkflowTemplateRequest],
workflow_templates.WorkflowTemplate]:
r"""Return a callable for the create workflow template method over gRPC.
        Creates a new workflow template.
Returns:
Callable[[~.CreateWorkflowTemplateRequest],
~.WorkflowTemplate]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_workflow_template' not in self._stubs:
self._stubs['create_workflow_template'] = self.grpc_channel.unary_unary(
'/google.cloud.dataproc.v1.WorkflowTemplateService/CreateWorkflowTemplate',
request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize,
response_deserializer=workflow_templates.WorkflowTemplate.deserialize,
)
return self._stubs['create_workflow_template']
@property
def get_workflow_template(self) -> Callable[
[workflow_templates.GetWorkflowTemplateRequest],
workflow_templates.WorkflowTemplate]:
r"""Return a callable for the get workflow template method over gRPC.
Retrieves the latest workflow template.
        Can retrieve a previously instantiated template by
        specifying the optional version parameter.
Returns:
Callable[[~.GetWorkflowTemplateRequest],
~.WorkflowTemplate]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_workflow_template' not in self._stubs:
self._stubs['get_workflow_template'] = self.grpc_channel.unary_unary(
'/google.cloud.dataproc.v1.WorkflowTemplateService/GetWorkflowTemplate',
request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize,
response_deserializer=workflow_templates.WorkflowTemplate.deserialize,
)
return self._stubs['get_workflow_template']
@property
def instantiate_workflow_template(self) -> Callable[
[workflow_templates.InstantiateWorkflowTemplateRequest],
operations_pb2.Operation]:
r"""Return a callable for the instantiate workflow template method over gRPC.
Instantiates a template and begins execution.
        The returned Operation can be used to track execution of
        the workflow by polling
        [operations.get][google.longrunning.Operations.GetOperation].
        The Operation will complete when the entire workflow is finished.
The running workflow can be aborted via
[operations.cancel][google.longrunning.Operations.CancelOperation].
This will cause any inflight jobs to be cancelled and
workflow-owned clusters to be deleted.
The [Operation.metadata][google.longrunning.Operation.metadata]
will be
`WorkflowMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata>`__.
Also see `Using
WorkflowMetadata <https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata>`__.
On successful completion,
[Operation.response][google.longrunning.Operation.response] will
be [Empty][google.protobuf.Empty].
Returns:
Callable[[~.InstantiateWorkflowTemplateRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'instantiate_workflow_template' not in self._stubs:
self._stubs['instantiate_workflow_template'] = self.grpc_channel.unary_unary(
'/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateWorkflowTemplate',
request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['instantiate_workflow_template']
@property
def instantiate_inline_workflow_template(self) -> Callable[
[workflow_templates.InstantiateInlineWorkflowTemplateRequest],
operations_pb2.Operation]:
r"""Return a callable for the instantiate inline workflow
template method over gRPC.
Instantiates a template and begins execution.
This method is equivalent to executing the sequence
[CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
[InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
[DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
        The returned Operation can be used to track execution of
        the workflow by polling
        [operations.get][google.longrunning.Operations.GetOperation].
        The Operation will complete when the entire workflow is finished.
The running workflow can be aborted via
[operations.cancel][google.longrunning.Operations.CancelOperation].
This will cause any inflight jobs to be cancelled and
workflow-owned clusters to be deleted.
The [Operation.metadata][google.longrunning.Operation.metadata]
will be
`WorkflowMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata>`__.
Also see `Using
WorkflowMetadata <https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata>`__.
On successful completion,
[Operation.response][google.longrunning.Operation.response] will
be [Empty][google.protobuf.Empty].
Returns:
Callable[[~.InstantiateInlineWorkflowTemplateRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'instantiate_inline_workflow_template' not in self._stubs:
self._stubs['instantiate_inline_workflow_template'] = self.grpc_channel.unary_unary(
'/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateInlineWorkflowTemplate',
request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['instantiate_inline_workflow_template']
@property
def update_workflow_template(self) -> Callable[
[workflow_templates.UpdateWorkflowTemplateRequest],
workflow_templates.WorkflowTemplate]:
r"""Return a callable for the update workflow template method over gRPC.
        Updates (replaces) a workflow template. The updated
        template must contain a version that matches the current
        server version.
Returns:
Callable[[~.UpdateWorkflowTemplateRequest],
~.WorkflowTemplate]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_workflow_template' not in self._stubs:
self._stubs['update_workflow_template'] = self.grpc_channel.unary_unary(
'/google.cloud.dataproc.v1.WorkflowTemplateService/UpdateWorkflowTemplate',
request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize,
response_deserializer=workflow_templates.WorkflowTemplate.deserialize,
)
return self._stubs['update_workflow_template']
@property
def list_workflow_templates(self) -> Callable[
[workflow_templates.ListWorkflowTemplatesRequest],
workflow_templates.ListWorkflowTemplatesResponse]:
r"""Return a callable for the list workflow templates method over gRPC.
Lists workflows that match the specified filter in
the request.
Returns:
Callable[[~.ListWorkflowTemplatesRequest],
~.ListWorkflowTemplatesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_workflow_templates' not in self._stubs:
self._stubs['list_workflow_templates'] = self.grpc_channel.unary_unary(
'/google.cloud.dataproc.v1.WorkflowTemplateService/ListWorkflowTemplates',
request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize,
response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize,
)
return self._stubs['list_workflow_templates']
@property
def delete_workflow_template(self) -> Callable[
[workflow_templates.DeleteWorkflowTemplateRequest],
empty_pb2.Empty]:
r"""Return a callable for the delete workflow template method over gRPC.
        Deletes a workflow template. It does not cancel
        in-progress workflows.
Returns:
Callable[[~.DeleteWorkflowTemplateRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_workflow_template' not in self._stubs:
self._stubs['delete_workflow_template'] = self.grpc_channel.unary_unary(
'/google.cloud.dataproc.v1.WorkflowTemplateService/DeleteWorkflowTemplate',
request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_workflow_template']
__all__ = (
'WorkflowTemplateServiceGrpcTransport',
)
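# Illustrative sketch (not part of the generated module): the transport can be
# constructed directly, and each property above returns a ready-made unary-unary
# stub. Only names defined in this module are used; request fields below are
# placeholders.
#
#     transport = WorkflowTemplateServiceGrpcTransport(host='dataproc.googleapis.com')
#     rpc = transport.list_workflow_templates
#     response = rpc(workflow_templates.ListWorkflowTemplatesRequest(parent='projects/...'))
#
# In typical use the higher-level WorkflowTemplateServiceClient owns an instance
# of this transport; constructing it by hand is mainly useful when supplying a
# custom gRPC channel via the ``channel`` argument.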
|
maemori/accon
|
refs/heads/master
|
docker/taiga/conf/local.py
|
1
|
from .common import *
from .celery import *
MEDIA_URL = "http://127.0.0.1/media/"
STATIC_URL = "http://127.0.0.1/static/"
ADMIN_MEDIA_PREFIX = "http://127.0.0.1/static/admin/"
SITES["front"]["scheme"] = "http"
SITES["front"]["domain"] = "127.0.0.1"
SECRET_KEY = "mysecret"
DEBUG = False
TEMPLATE_DEBUG = False
PUBLIC_REGISTER_ENABLED = True
DEFAULT_FROM_EMAIL = "no-reply@example.com"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_USE_TLS = False
EMAIL_HOST = "127.0.0.1"
EMAIL_PORT = 25
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_ENABLED = True
EVENTS_PUSH_BACKEND = "taiga.events.backends.rabbitmq.EventsPushBackend"
EVENTS_PUSH_BACKEND_OPTIONS = {"url": "amqp://taiga:PASSWORD@localhost:5672/taiga"}
|
iedparis8/django-helpdesk
|
refs/heads/master
|
views/feeds.py
|
6
|
"""
django-helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
views/feeds.py - A handful of staff-only RSS feeds to provide ticket details
to feed readers or similar software.
"""
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.shortcuts import get_object_or_404
from helpdesk.models import Ticket, FollowUp, Queue
class OpenTicketsByUser(Feed):
title_template = 'helpdesk/rss/ticket_title.html'
description_template = 'helpdesk/rss/ticket_description.html'
def get_object(self, request, user_name, queue_slug=None):
user = get_object_or_404(User, username=user_name)
if queue_slug:
queue = get_object_or_404(Queue, slug=queue_slug)
else:
queue = None
return {'user': user, 'queue': queue}
def title(self, obj):
if obj['queue']:
return _("Helpdesk: Open Tickets in queue %(queue)s for %(username)s") % {
'queue': obj['queue'].title,
'username': obj['user'].get_username(),
}
else:
return _("Helpdesk: Open Tickets for %(username)s") % {
'username': obj['user'].get_username(),
}
def description(self, obj):
if obj['queue']:
return _("Open and Reopened Tickets in queue %(queue)s for %(username)s") % {
'queue': obj['queue'].title,
'username': obj['user'].get_username(),
}
else:
return _("Open and Reopened Tickets for %(username)s") % {
'username': obj['user'].get_username(),
}
def link(self, obj):
if obj['queue']:
return u'%s?assigned_to=%s&queue=%s' % (
reverse('helpdesk_list'),
obj['user'].id,
obj['queue'].id,
)
else:
return u'%s?assigned_to=%s' % (
reverse('helpdesk_list'),
obj['user'].id,
)
def items(self, obj):
if obj['queue']:
return Ticket.objects.filter(
assigned_to=obj['user']
).filter(
queue=obj['queue']
).filter(
Q(status=Ticket.OPEN_STATUS) | Q(status=Ticket.REOPENED_STATUS)
)
else:
return Ticket.objects.filter(
assigned_to=obj['user']
).filter(
Q(status=Ticket.OPEN_STATUS) | Q(status=Ticket.REOPENED_STATUS)
)
def item_pubdate(self, item):
return item.created
def item_author_name(self, item):
if item.assigned_to:
return item.assigned_to.get_username()
else:
return _('Unassigned')
class UnassignedTickets(Feed):
title_template = 'helpdesk/rss/ticket_title.html'
description_template = 'helpdesk/rss/ticket_description.html'
title = _('Helpdesk: Unassigned Tickets')
description = _('Unassigned Open and Reopened tickets')
link = ''#%s?assigned_to=' % reverse('helpdesk_list')
def items(self, obj):
return Ticket.objects.filter(
assigned_to__isnull=True
).filter(
Q(status=Ticket.OPEN_STATUS) | Q(status=Ticket.REOPENED_STATUS)
)
def item_pubdate(self, item):
return item.created
def item_author_name(self, item):
if item.assigned_to:
return item.assigned_to.get_username()
else:
return _('Unassigned')
class RecentFollowUps(Feed):
title_template = 'helpdesk/rss/recent_activity_title.html'
description_template = 'helpdesk/rss/recent_activity_description.html'
title = _('Helpdesk: Recent Followups')
description = _('Recent FollowUps, such as e-mail replies, comments, attachments and resolutions')
link = '/tickets/' # reverse('helpdesk_list')
def items(self):
return FollowUp.objects.order_by('-date')[:20]
class OpenTicketsByQueue(Feed):
title_template = 'helpdesk/rss/ticket_title.html'
description_template = 'helpdesk/rss/ticket_description.html'
def get_object(self, request, queue_slug):
return get_object_or_404(Queue, slug=queue_slug)
def title(self, obj):
return _('Helpdesk: Open Tickets in queue %(queue)s') % {
'queue': obj.title,
}
def description(self, obj):
return _('Open and Reopened Tickets in queue %(queue)s') % {
'queue': obj.title,
}
def link(self, obj):
return '%s?queue=%s' % (
reverse('helpdesk_list'),
obj.id,
)
def items(self, obj):
return Ticket.objects.filter(
queue=obj
).filter(
Q(status=Ticket.OPEN_STATUS) | Q(status=Ticket.REOPENED_STATUS)
)
def item_pubdate(self, item):
return item.created
def item_author_name(self, item):
if item.assigned_to:
return item.assigned_to.get_username()
else:
return _('Unassigned')
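# Illustrative wiring sketch (not part of this module): these Feed classes are
# callable views and are typically exposed through URLconf entries; the pattern
# names below are hypothetical.
#
#     from django.conf.urls import url
#     from helpdesk.views.feeds import OpenTicketsByUser, UnassignedTickets
#
#     urlpatterns = [
#         url(r'^rss/user/(?P<user_name>[^/]+)/$', OpenTicketsByUser()),
#         url(r'^rss/unassigned/$', UnassignedTickets()),
#     ]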
|
benghaem/py-senmolib
|
refs/heads/master
|
examples/bpm/example_ecg_worker.py
|
2
|
#! /home/ben/programming/senmo/env/bin/python
from senmolib.components.worker import Worker
import numpy as np
class exampleEcgWorker(Worker):
def process(self, data_arr):
x_vals =[item[0] for item in data_arr]
y_vals =[item[1] for item in data_arr]
print(self.identity,"have x,y",x_vals[0],y_vals[0])
#threshold
th = 1
data_der = np.ediff1d(y_vals)
x_locs = []
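        # Peak detection (descriptive note): a sample counts as a peak when the
        # first difference goes from positive to non-positive (a local maximum)
        # and the sample value is at or above the threshold `th`.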
for i in range(1, len(y_vals)-1):
if data_der[i-1] > 0 and data_der[i] <= 0 and y_vals[i] >= th:
x_locs.append(x_vals[i])
print(self.identity,"computed",x_locs)
return x_locs
exampleEcgWorker().start()
|
CentOS-PaaS-SIG/linch-pin
|
refs/heads/develop
|
linchpin/provision/roles/azure/action_plugins/azure_rm_availabilityset.py
|
6
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
import linchpin.MockUtils.MockUtils as mock_utils
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
"""
Simple action plugin that returns the mocked output
when linchpin_mock is True
"""
super(ActionModule, self).run(tmp, task_vars)
module_args = self._task.args.copy()
linchpin_mock = task_vars['vars'].get('linchpin_mock',
False)
up = task_vars['vars'].get('state', 'present') == 'present'
if up and linchpin_mock:
return mock_utils.get_mock_data(module_args,
"azure_availabilityset.present")
elif not up and linchpin_mock:
return mock_utils.get_mock_data(module_args,
"azure_availabilityset.absent")
module_return = self._execute_module(module_args=module_args,
task_vars=task_vars, tmp=tmp)
return module_return
|
ustudio/ustudio-hmac-tornado
|
refs/heads/master
|
tests/client/test_authenticated_request.py
|
1
|
from tests.example_server import BaseHMACTestCase
from tornado.testing import gen_test
from hmacauth.client import authenticated_request
class TestAuthenticatedRequest(BaseHMACTestCase):
@gen_test
def test_signs_post_with_bytestring_body(self):
response = yield self.http_client.fetch(authenticated_request(
self.get_url("/authorized/argument"),
method="POST",
body=b"Some Body",
hmac_key="correct-key",
hmac_secret="secret"))
self.assertEqual(200, response.code)
@gen_test
def test_signs_post_with_unicode_body(self):
response = yield self.http_client.fetch(authenticated_request(
self.get_url("/authorized/argument"),
method="POST",
body="Some Body",
hmac_key="correct-key",
hmac_secret="secret"))
self.assertEqual(200, response.code)
@gen_test
def test_signs_explicit_get(self):
response = yield self.http_client.fetch(authenticated_request(
self.get_url("/authorized/argument"),
method="GET",
hmac_key="correct-key",
hmac_secret="secret"))
self.assertEqual(200, response.code)
@gen_test
def test_signs_implicit_get(self):
response = yield self.http_client.fetch(authenticated_request(
self.get_url("/authorized/argument"),
hmac_key="correct-key",
hmac_secret="secret"))
self.assertEqual(200, response.code)
@gen_test
def test_handles_path_only_url(self):
request = authenticated_request(
"/authorized/argument",
hmac_key="correct-key",
hmac_secret="secret")
request.url = self.get_url(request.url)
response = yield self.http_client.fetch(request)
self.assertEqual(200, response.code)
@gen_test
def test_includes_existing_headers_in_request(self):
response = yield self.http_client.fetch(authenticated_request(
self.get_url("/authorized/argument"),
headers={
"X-Ping": "Pong"
},
hmac_key="correct-key",
hmac_secret="secret"))
self.assertEqual(200, response.code)
self.assertEqual("Pong", response.body.decode("utf8"))
@gen_test
def test_signs_url_as_keyword_argument(self):
response = yield self.http_client.fetch(authenticated_request(
url=self.get_url("/authorized/argument"),
hmac_key="correct-key",
hmac_secret="secret"))
self.assertEqual(200, response.code)
@gen_test
def test_raises_exception_without_url_argument(self):
with self.assertRaises(TypeError):
yield self.http_client.fetch(authenticated_request(
hmac_key="correct-key",
hmac_secret="secret"))
@gen_test
def test_normalizes_and_signs_query_arguments(self):
response = yield self.http_client.fetch(authenticated_request(
url=self.get_url("/authorized/argument?bar=value%202&Foo=value%3f1&blank"),
hmac_key="correct-key",
hmac_secret="secret"))
self.assertEqual(200, response.code)
|
micadeyeye/Blongo
|
refs/heads/master
|
django/http/multipartparser.py
|
22
|
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_unicode
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
    An RFC 2388 multipart/form-data parser.
    ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handler:
An UploadHandler instance that performs operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
        # Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type)
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
#
# Content-Length should contain the length of the body we are about
# to receive.
#
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH',0)))
except (ValueError, TypeError):
# For now set it to 0; we'll try again later on down.
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict(MultiValueDict(), encoding=self._encoding), MultiValueDict()
limited_input_data = LimitBytes(self._input_data, self._content_length)
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(limited_input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(limited_input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
field_name = force_unicode(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_unicode(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_unicode(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
content_type_extra = meta_data.get('content-type', (0,{}))[1]
if content_type_extra is None:
content_type_extra = {}
try:
charset = content_type_extra.get('charset', None)
except:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra.copy())
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
try:
chunk = str(chunk).decode('base64')
except Exception, e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile, e:
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload, e:
if not e.connection_reset:
exhaust(limited_input_data)
else:
# Make sure that the request data is all fed
exhaust(limited_input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_unicode(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
class LazyStream(object):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = ''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = (size is not None and [size] or [self._remaining])[0]
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield ''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = self.next()
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = ''.join(parts())
return out
def next(self):
"""
Used when the exact number of bytes to read is unimportant.
        This procedure just returns whatever chunk is conveniently returned
        from the iterator. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = ''
else:
output = self._producer.next()
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = ''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
class ChunkIter(object):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def next(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class LimitBytes(object):
""" Limit bytes for a file object. """
def __init__(self, fileobject, length):
self._file = fileobject
self.remaining = length
def read(self, num_bytes=None):
"""
Read data from the underlying file.
If you ask for too much or there isn't anything left,
this will raise an InputStreamExhausted error.
"""
if self.remaining <= 0:
raise InputStreamExhausted()
if num_bytes is None:
num_bytes = self.remaining
else:
num_bytes = min(num_bytes, self.remaining)
self.remaining -= num_bytes
return self._file.read(num_bytes)
class InterBoundaryIter(object):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def next(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(object):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
    Future calls to .next() after locating the boundary will raise a
    StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def next(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = ''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
if not chunk[:-rollback]:# and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof = False):
"""
Finds a multipart boundary in data.
        Should no boundary exist in the data, None is returned. Otherwise
a tuple containing the indices of the following are returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
if data[max(0,end-1)] == '\n':
end -= 1
if data[max(0,end-1)] == '\r':
end -= 1
return end, next
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find('\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split('\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = '--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value. """
plist = _parse_header_params(';' + line)
key = plist.pop(0).lower()
pdict = {}
for p in plist:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
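# Illustrative usage sketch (not part of Django): feeding a request body through
# the parser by hand. MemoryFileUploadHandler is a real Django upload handler;
# in practice the handler list comes from request.upload_handlers.
#
#     from django.core.files.uploadhandler import MemoryFileUploadHandler
#
#     parser = MultiPartParser(request.META, request, [MemoryFileUploadHandler()],
#                              encoding='utf-8')
#     post, files = parser.parse()   # (QueryDict of fields, MultiValueDict of files)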
|
ychen820/microblog
|
refs/heads/master
|
y/google-cloud-sdk/.install/.backup/lib/requests/packages/urllib3/packages/six.py
|
2374
|
"""Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
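# Illustrative usage sketch (not part of six): the lazy attributes above let
# callers import renamed stdlib modules through a single namespace, e.g.
#
#     from six.moves import http_client, queue
#
# which resolves to httplib/Queue on Python 2 and http.client/queue on Python 3.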
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
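# Illustrative usage sketch (not part of six): with_metaclass lets a class use a
# metaclass with one syntax on both Python 2 and Python 3:
#
#     class Meta(type):
#         pass
#
#     class MyClass(with_metaclass(Meta)):
#         pass
#
# Either way, type(MyClass) is Meta on both interpreters.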
|
ibinti/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/hgext/convert/cvsps.py
|
91
|
# Mercurial built-in replacement for cvsps.
#
# Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import re
import cPickle as pickle
from mercurial import util
from mercurial.i18n import _
from mercurial import hook
from mercurial import util
class logentry(object):
'''Class logentry has the following attributes:
.author - author name as CVS knows it
.branch - name of branch this revision is on
.branches - revision tuple of branches starting at this revision
.comment - commit message
.commitid - CVS commitid or None
.date - the commit date as a (time, tz) tuple
.dead - true if file revision is dead
.file - Name of file
.lines - a tuple (+lines, -lines) or None
.parent - Previous revision of this entry
.rcs - name of file as returned from CVS
.revision - revision number as tuple
.tags - list of tags on the file
.synthetic - is this a synthetic "file ... added on ..." revision?
.mergepoint - the branch that has been merged from (if present in
rlog output) or None
.branchpoints - the branches that start at the current entry or empty
'''
def __init__(self, **entries):
self.synthetic = False
self.__dict__.update(entries)
def __repr__(self):
items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
return "%s(%s)"%(type(self).__name__, ", ".join(items))
class logerror(Exception):
pass
def getrepopath(cvspath):
"""Return the repository path from a CVS path.
>>> getrepopath('/foo/bar')
'/foo/bar'
>>> getrepopath('c:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:10/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:10c:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:c:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
'/foo/bar'
>>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
'/foo/bar'
>>> getrepopath('user@server/path/to/repository')
'/path/to/repository'
"""
# According to CVS manual, CVS paths are expressed like:
# [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
#
    # The CVS path is split into parts and the position of the first
    # occurrence of the '/' char after the '@' is located. The result is the
    # rest of the string from that '/' onward (inclusive).
parts = cvspath.split(':')
atposition = parts[-1].find('@')
start = 0
if atposition != -1:
start = atposition
repopath = parts[-1][parts[-1].find('/', start):]
return repopath
def createlog(ui, directory=None, root="", rlog=True, cache=None):
'''Collect the CVS rlog'''
# Because we store many duplicate commit log messages, reusing strings
# saves a lot of memory and pickle storage space.
_scache = {}
def scache(s):
"return a shared version of a string"
return _scache.setdefault(s, s)
ui.status(_('collecting CVS rlog\n'))
log = [] # list of logentry objects containing the CVS state
# patterns to match in CVS (r)log output, by state of use
re_00 = re.compile('RCS file: (.+)$')
re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
re_02 = re.compile('cvs (r?log|server): (.+)\n$')
re_03 = re.compile("(Cannot access.+CVSROOT)|"
"(can't create temporary directory.+)$")
re_10 = re.compile('Working file: (.+)$')
re_20 = re.compile('symbolic names:')
re_30 = re.compile('\t(.+): ([\\d.]+)$')
re_31 = re.compile('----------------------------$')
re_32 = re.compile('======================================='
'======================================$')
re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
r'(\s+commitid:\s+([^;]+);)?'
r'(.*mergepoint:\s+([^;]+);)?')
re_70 = re.compile('branches: (.+);$')
file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
    prefix = ''  # leading path to strip off what we get from CVS
if directory is None:
# Current working directory
# Get the real directory in the repository
try:
prefix = open(os.path.join('CVS','Repository')).read().strip()
directory = prefix
if prefix == ".":
prefix = ""
except IOError:
raise logerror(_('not a CVS sandbox'))
if prefix and not prefix.endswith(os.sep):
prefix += os.sep
# Use the Root file in the sandbox, if it exists
try:
root = open(os.path.join('CVS','Root')).read().strip()
except IOError:
pass
if not root:
root = os.environ.get('CVSROOT', '')
# read log cache if one exists
oldlog = []
date = None
if cache:
cachedir = os.path.expanduser('~/.hg.cvsps')
if not os.path.exists(cachedir):
os.mkdir(cachedir)
# The cvsps cache pickle needs a uniquified name, based on the
# repository location. The address may have all sort of nasties
# in it, slashes, colons and such. So here we take just the
# alphanumeric characters, concatenated in a way that does not
# mix up the various components, so that
# :pserver:user@server:/path
# and
# /pserver/user/server/path
# are mapped to different cache file names.
cachefile = root.split(":") + [directory, "cache"]
cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
cachefile = os.path.join(cachedir,
'.'.join([s for s in cachefile if s]))
if cache == 'update':
try:
ui.note(_('reading cvs log cache %s\n') % cachefile)
oldlog = pickle.load(open(cachefile))
for e in oldlog:
if not (util.safehasattr(e, 'branchpoints') and
util.safehasattr(e, 'commitid') and
util.safehasattr(e, 'mergepoint')):
ui.status(_('ignoring old cache\n'))
oldlog = []
break
ui.note(_('cache has %d log entries\n') % len(oldlog))
except Exception, e:
ui.note(_('error reading cache: %r\n') % e)
if oldlog:
date = oldlog[-1].date # last commit date as a (time,tz) tuple
date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
# build the CVS commandline
cmd = ['cvs', '-q']
if root:
cmd.append('-d%s' % root)
p = util.normpath(getrepopath(root))
if not p.endswith('/'):
p += '/'
if prefix:
# looks like normpath replaces "" by "."
prefix = p + util.normpath(prefix)
else:
prefix = p
cmd.append(['log', 'rlog'][rlog])
if date:
# no space between option and date string
cmd.append('-d>%s' % date)
cmd.append(directory)
# state machine begins here
tags = {} # dictionary of revisions on current file with their tags
branchmap = {} # mapping between branch names and revision numbers
state = 0
store = False # set when a new record can be appended
cmd = [util.shellquote(arg) for arg in cmd]
ui.note(_("running %s\n") % (' '.join(cmd)))
ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
pfp = util.popen(' '.join(cmd))
peek = pfp.readline()
while True:
line = peek
if line == '':
break
peek = pfp.readline()
if line.endswith('\n'):
line = line[:-1]
#ui.debug('state=%d line=%r\n' % (state, line))
if state == 0:
# initial state, consume input until we see 'RCS file'
match = re_00.match(line)
if match:
rcs = match.group(1)
tags = {}
if rlog:
filename = util.normpath(rcs[:-2])
if filename.startswith(prefix):
filename = filename[len(prefix):]
if filename.startswith('/'):
filename = filename[1:]
if filename.startswith('Attic/'):
filename = filename[6:]
else:
filename = filename.replace('/Attic/', '/')
state = 2
continue
state = 1
continue
match = re_01.match(line)
if match:
raise logerror(match.group(1))
match = re_02.match(line)
if match:
raise logerror(match.group(2))
if re_03.match(line):
raise logerror(line)
elif state == 1:
# expect 'Working file' (only when using log instead of rlog)
match = re_10.match(line)
assert match, _('RCS file must be followed by working file')
filename = util.normpath(match.group(1))
state = 2
elif state == 2:
# expect 'symbolic names'
if re_20.match(line):
branchmap = {}
state = 3
elif state == 3:
# read the symbolic names and store as tags
match = re_30.match(line)
if match:
rev = [int(x) for x in match.group(2).split('.')]
                # Convert magic branch number to an odd-numbered one, e.g. 1.1.0.2 -> 1.1.2
revn = len(rev)
if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
rev = rev[:-2] + rev[-1:]
rev = tuple(rev)
if rev not in tags:
tags[rev] = []
tags[rev].append(match.group(1))
branchmap[match.group(1)] = match.group(2)
elif re_31.match(line):
state = 5
elif re_32.match(line):
state = 0
elif state == 4:
# expecting '------' separator before first revision
if re_31.match(line):
state = 5
else:
assert not re_32.match(line), _('must have at least '
'some revisions')
elif state == 5:
# expecting revision number and possibly (ignored) lock indication
# we create the logentry here from values stored in states 0 to 4,
# as this state is re-entered for subsequent revisions of a file.
match = re_50.match(line)
assert match, _('expected revision number')
e = logentry(rcs=scache(rcs),
file=scache(filename),
revision=tuple([int(x) for x in
match.group(1).split('.')]),
branches=[],
parent=None,
commitid=None,
mergepoint=None,
branchpoints=set())
state = 6
elif state == 6:
# expecting date, author, state, lines changed
match = re_60.match(line)
assert match, _('revision must be followed by date line')
d = match.group(1)
if d[2] == '/':
# Y2K
d = '19' + d
if len(d.split()) != 3:
# cvs log dates always in GMT
d = d + ' UTC'
e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S'])
e.author = scache(match.group(2))
e.dead = match.group(3).lower() == 'dead'
if match.group(5):
if match.group(6):
e.lines = (int(match.group(5)), int(match.group(6)))
else:
e.lines = (int(match.group(5)), 0)
elif match.group(6):
e.lines = (0, int(match.group(6)))
else:
e.lines = None
if match.group(7): # cvs 1.12 commitid
e.commitid = match.group(8)
if match.group(9): # cvsnt mergepoint
myrev = match.group(10).split('.')
if len(myrev) == 2: # head
e.mergepoint = 'HEAD'
else:
myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
branches = [b for b in branchmap if branchmap[b] == myrev]
assert len(branches) == 1, ('unknown branch: %s'
% e.mergepoint)
e.mergepoint = branches[0]
e.comment = []
state = 7
elif state == 7:
# read the revision numbers of branches that start at this revision
# or store the commit log message otherwise
m = re_70.match(line)
if m:
e.branches = [tuple([int(y) for y in x.strip().split('.')])
for x in m.group(1).split(';')]
state = 8
elif re_31.match(line) and re_50.match(peek):
state = 5
store = True
elif re_32.match(line):
state = 0
store = True
else:
e.comment.append(line)
elif state == 8:
# store commit log message
if re_31.match(line):
cpeek = peek
if cpeek.endswith('\n'):
cpeek = cpeek[:-1]
if re_50.match(cpeek):
state = 5
store = True
else:
e.comment.append(line)
elif re_32.match(line):
state = 0
store = True
else:
e.comment.append(line)
# When a file is added on a branch B1, CVS creates a synthetic
# dead trunk revision 1.1 so that the branch has a root.
# Likewise, if you merge such a file to a later branch B2 (one
# that already existed when the file was added on B1), CVS
# creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
# these revisions now, but mark them synthetic so
# createchangeset() can take care of them.
if (store and
e.dead and
e.revision[-1] == 1 and # 1.1 or 1.1.x.1
len(e.comment) == 1 and
file_added_re.match(e.comment[0])):
ui.debug('found synthetic revision in %s: %r\n'
% (e.rcs, e.comment[0]))
e.synthetic = True
if store:
# clean up the results and save in the log.
store = False
e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
e.comment = scache('\n'.join(e.comment))
revn = len(e.revision)
if revn > 3 and (revn % 2) == 0:
e.branch = tags.get(e.revision[:-1], [None])[0]
else:
e.branch = None
# find the branches starting from this revision
branchpoints = set()
for branch, revision in branchmap.iteritems():
revparts = tuple([int(i) for i in revision.split('.')])
if len(revparts) < 2: # bad tags
continue
if revparts[-2] == 0 and revparts[-1] % 2 == 0:
# normal branch
if revparts[:-2] == e.revision:
branchpoints.add(branch)
elif revparts == (1, 1, 1): # vendor branch
if revparts in e.branches:
branchpoints.add(branch)
e.branchpoints = branchpoints
log.append(e)
if len(log) % 100 == 0:
ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
log.sort(key=lambda x: (x.rcs, x.revision))
# find parent revisions of individual files
versions = {}
for e in log:
branch = e.revision[:-1]
p = versions.get((e.rcs, branch), None)
if p is None:
p = e.revision[:-2]
e.parent = p
versions[(e.rcs, branch)] = e.revision
# update the log cache
if cache:
if log:
# join up the old and new logs
log.sort(key=lambda x: x.date)
if oldlog and oldlog[-1].date >= log[0].date:
raise logerror(_('log cache overlaps with new log entries,'
' re-run without cache.'))
log = oldlog + log
# write the new cachefile
ui.note(_('writing cvs log cache %s\n') % cachefile)
pickle.dump(log, open(cachefile, 'w'))
else:
log = oldlog
ui.status(_('%d log entries\n') % len(log))
hook.hook(ui, None, "cvslog", True, log=log)
return log
class changeset(object):
'''Class changeset has the following attributes:
.id - integer identifying this changeset (list index)
.author - author name as CVS knows it
.branch - name of branch this changeset is on, or None
.comment - commit message
.commitid - CVS commitid or None
.date - the commit date as a (time,tz) tuple
.entries - list of logentry objects in this changeset
.parents - list of one or two parent changesets
.tags - list of tags on this changeset
.synthetic - from synthetic revision "file ... added on branch ..."
.mergepoint- the branch that has been merged from or None
.branchpoints- the branches that start at the current entry or empty
'''
def __init__(self, **entries):
self.synthetic = False
self.__dict__.update(entries)
def __repr__(self):
items = ("%s=%r"%(k, self.__dict__[k]) for k in sorted(self.__dict__))
return "%s(%s)"%(type(self).__name__, ", ".join(items))
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
'''Convert log into changesets.'''
ui.status(_('creating changesets\n'))
# try to order commitids by date
mindate = {}
for e in log:
if e.commitid:
            mindate[e.commitid] = min(e.date, mindate.get(e.commitid, e.date))
# Merge changesets
log.sort(key=lambda x: (mindate.get(x.commitid), x.commitid, x.comment,
x.author, x.branch, x.date, x.branchpoints))
changesets = []
files = set()
c = None
for i, e in enumerate(log):
# Check if log entry belongs to the current changeset or not.
# Since CVS is file-centric, two different file revisions with
# different branchpoints should be treated as belonging to two
# different changesets (and the ordering is important and not
# honoured by cvsps at this point).
#
# Consider the following case:
# foo 1.1 branchpoints: [MYBRANCH]
# bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
#
# Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
# later version of foo may be in MYBRANCH2, so foo should be the
# first changeset and bar the next and MYBRANCH and MYBRANCH2
# should both start off of the bar changeset. No provisions are
# made to ensure that this is, in fact, what happens.
if not (c and e.branchpoints == c.branchpoints and
(# cvs commitids
(e.commitid is not None and e.commitid == c.commitid) or
(# no commitids, use fuzzy commit detection
(e.commitid is None or c.commitid is None) and
e.comment == c.comment and
e.author == c.author and
e.branch == c.branch and
((c.date[0] + c.date[1]) <=
(e.date[0] + e.date[1]) <=
(c.date[0] + c.date[1]) + fuzz) and
e.file not in files))):
c = changeset(comment=e.comment, author=e.author,
branch=e.branch, date=e.date,
entries=[], mergepoint=e.mergepoint,
branchpoints=e.branchpoints, commitid=e.commitid)
changesets.append(c)
files = set()
if len(changesets) % 100 == 0:
t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
ui.status(util.ellipsis(t, 80) + '\n')
c.entries.append(e)
files.add(e.file)
c.date = e.date # changeset date is date of latest commit in it
# Mark synthetic changesets
for c in changesets:
# Synthetic revisions always get their own changeset, because
# the log message includes the filename. E.g. if you add file3
# and file4 on a branch, you get four log entries and three
# changesets:
# "File file3 was added on branch ..." (synthetic, 1 entry)
# "File file4 was added on branch ..." (synthetic, 1 entry)
# "Add file3 and file4 to fix ..." (real, 2 entries)
# Hence the check for 1 entry here.
c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
# Sort files in each changeset
def entitycompare(l, r):
'Mimic cvsps sorting order'
l = l.file.split('/')
r = r.file.split('/')
nl = len(l)
nr = len(r)
n = min(nl, nr)
for i in range(n):
if i + 1 == nl and nl < nr:
return -1
elif i + 1 == nr and nl > nr:
return +1
elif l[i] < r[i]:
return -1
elif l[i] > r[i]:
return +1
return 0
for c in changesets:
c.entries.sort(entitycompare)
# Sort changesets by date
def cscmp(l, r):
d = sum(l.date) - sum(r.date)
if d:
return d
# detect vendor branches and initial commits on a branch
le = {}
for e in l.entries:
le[e.rcs] = e.revision
re = {}
for e in r.entries:
re[e.rcs] = e.revision
d = 0
for e in l.entries:
if re.get(e.rcs, None) == e.parent:
assert not d
d = 1
break
for e in r.entries:
if le.get(e.rcs, None) == e.parent:
assert not d
d = -1
break
return d
changesets.sort(cscmp)
# Collect tags
globaltags = {}
for c in changesets:
for e in c.entries:
for tag in e.tags:
# remember which is the latest changeset to have this tag
globaltags[tag] = c
for c in changesets:
tags = set()
for e in c.entries:
tags.update(e.tags)
# remember tags only if this is the latest changeset to have it
c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
# Find parent changesets, handle {{mergetobranch BRANCHNAME}}
# by inserting dummy changesets with two parents, and handle
# {{mergefrombranch BRANCHNAME}} by setting two parents.
if mergeto is None:
mergeto = r'{{mergetobranch ([-\w]+)}}'
if mergeto:
mergeto = re.compile(mergeto)
if mergefrom is None:
mergefrom = r'{{mergefrombranch ([-\w]+)}}'
if mergefrom:
mergefrom = re.compile(mergefrom)
versions = {} # changeset index where we saw any particular file version
branches = {} # changeset index where we saw a branch
n = len(changesets)
i = 0
while i < n:
c = changesets[i]
for f in c.entries:
versions[(f.rcs, f.revision)] = i
p = None
if c.branch in branches:
p = branches[c.branch]
else:
# first changeset on a new branch
# the parent is a changeset with the branch in its
# branchpoints such that it is the latest possible
# commit without any intervening, unrelated commits.
for candidate in xrange(i):
if c.branch not in changesets[candidate].branchpoints:
if p is not None:
break
continue
p = candidate
c.parents = []
if p is not None:
p = changesets[p]
# Ensure no changeset has a synthetic changeset as a parent.
while p.synthetic:
assert len(p.parents) <= 1, \
_('synthetic changeset cannot have multiple parents')
if p.parents:
p = p.parents[0]
else:
p = None
break
if p is not None:
c.parents.append(p)
if c.mergepoint:
if c.mergepoint == 'HEAD':
c.mergepoint = None
c.parents.append(changesets[branches[c.mergepoint]])
if mergefrom:
m = mergefrom.search(c.comment)
if m:
m = m.group(1)
if m == 'HEAD':
m = None
try:
candidate = changesets[branches[m]]
except KeyError:
ui.warn(_("warning: CVS commit message references "
"non-existent branch %r:\n%s\n")
% (m, c.comment))
if m in branches and c.branch != m and not candidate.synthetic:
c.parents.append(candidate)
if mergeto:
m = mergeto.search(c.comment)
if m:
if m.groups():
m = m.group(1)
if m == 'HEAD':
m = None
else:
m = None # if no group found then merge to HEAD
if m in branches and c.branch != m:
# insert empty changeset for merge
cc = changeset(
author=c.author, branch=m, date=c.date,
comment='convert-repo: CVS merge from branch %s'
% c.branch,
entries=[], tags=[],
parents=[changesets[branches[m]], c])
changesets.insert(i + 1, cc)
branches[m] = i + 1
# adjust our loop counters now we have inserted a new entry
n += 1
i += 2
continue
branches[c.branch] = i
i += 1
# Drop synthetic changesets (safe now that we have ensured no other
# changesets can have them as parents).
i = 0
while i < len(changesets):
if changesets[i].synthetic:
del changesets[i]
else:
i += 1
# Number changesets
for i, c in enumerate(changesets):
c.id = i + 1
ui.status(_('%d changeset entries\n') % len(changesets))
hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
return changesets
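# --- Editor's illustrative sketch (not part of Mercurial): the core of the
# fuzzy grouping rule used by createchangeset() above, restated in isolation.
# `entry` and `cset` stand in for logentry/changeset objects and carry only the
# fields this rule inspects; the real code additionally honours commitids and
# branchpoints.
def _belongs_to_changeset(entry, cset, files_seen, fuzz=60):
    """Return True if a log entry (taken from a date-sorted log) extends the
    current changeset: same comment, author and branch, commit time at most
    `fuzz` seconds after the changeset date, and file not already present."""
    return (entry.comment == cset.comment and
            entry.author == cset.author and
            entry.branch == cset.branch and
            cset.date[0] + cset.date[1] <=
            entry.date[0] + entry.date[1] <=
            cset.date[0] + cset.date[1] + fuzz and
            entry.file not in files_seen)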
def debugcvsps(ui, *args, **opts):
'''Read CVS rlog for current directory or named path in
repository, and convert the log to changesets based on matching
commit log entries and dates.
'''
if opts["new_cache"]:
cache = "write"
elif opts["update_cache"]:
cache = "update"
else:
cache = None
revisions = opts["revisions"]
try:
if args:
log = []
for d in args:
log += createlog(ui, d, root=opts["root"], cache=cache)
else:
log = createlog(ui, root=opts["root"], cache=cache)
except logerror, e:
ui.write("%r\n"%e)
return
changesets = createchangeset(ui, log, opts["fuzz"])
del log
# Print changesets (optionally filtered)
off = len(revisions)
branches = {} # latest version number in each branch
ancestors = {} # parent branch
for cs in changesets:
if opts["ancestors"]:
if cs.branch not in branches and cs.parents and cs.parents[0].id:
ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
cs.parents[0].id)
branches[cs.branch] = cs.id
# limit by branches
if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
continue
if not off:
# Note: trailing spaces on several lines here are needed to have
# bug-for-bug compatibility with cvsps.
ui.write('---------------------\n')
ui.write(('PatchSet %d \n' % cs.id))
ui.write(('Date: %s\n' % util.datestr(cs.date,
'%Y/%m/%d %H:%M:%S %1%2')))
ui.write(('Author: %s\n' % cs.author))
ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
ui.write(('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
','.join(cs.tags) or '(none)')))
if cs.branchpoints:
ui.write(('Branchpoints: %s \n') %
', '.join(sorted(cs.branchpoints)))
if opts["parents"] and cs.parents:
if len(cs.parents) > 1:
ui.write(('Parents: %s\n' %
(','.join([str(p.id) for p in cs.parents]))))
else:
ui.write(('Parent: %d\n' % cs.parents[0].id))
if opts["ancestors"]:
b = cs.branch
r = []
while b:
b, c = ancestors[b]
r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
if r:
ui.write(('Ancestors: %s\n' % (','.join(r))))
ui.write(('Log:\n'))
ui.write('%s\n\n' % cs.comment)
ui.write(('Members: \n'))
for f in cs.entries:
fn = f.file
if fn.startswith(opts["prefix"]):
fn = fn[len(opts["prefix"]):]
ui.write('\t%s:%s->%s%s \n' % (
fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
'.'.join([str(x) for x in f.revision]),
['', '(DEAD)'][f.dead]))
ui.write('\n')
# have we seen the start tag?
if revisions and off:
if revisions[0] == str(cs.id) or \
revisions[0] in cs.tags:
off = False
# see if we reached the end tag
if len(revisions) > 1 and not off:
if revisions[1] == str(cs.id) or \
revisions[1] in cs.tags:
break
|
wavesaudio/instl
|
refs/heads/master
|
pyinstl/instl_main.py
|
1
|
import sys
import os
import string
import appdirs
import tempfile
import random
import sqlite3
import datetime
import logging
import platform
from pathlib import Path
from functools import lru_cache
import json
from configVar import config_vars
from pybatch import PythonBatchRuntime
from pyinstl.cmdOptions import CommandLineOptions, read_command_line_options
from pyinstl.instlException import InstlException
import utils
#utils.set_max_open_files(2048)
from utils.log_utils import config_logger
log = logging.getLogger()
log.setLevel(logging.DEBUG)
current_os_names = utils.get_current_os_names()
os_family_name = current_os_names[0]
os_second_name = current_os_names[0]
if len(current_os_names) > 1:
os_second_name = current_os_names[1]
@lru_cache(maxsize=None)
def get_path_to_instl_app():
"""
    @return: the path to the instl application (the frozen executable, or the instl script when running from python)
"""
application_path = None
if getattr(sys, 'frozen', False):
application_path = Path(sys.executable).resolve()
elif __file__:
application_path = Path(__file__).resolve().parent.parent.joinpath('instl')
return application_path
@lru_cache(maxsize=None)
def get_instl_launch_command():
"""
    @return: the shell command used to launch instl (the quoted executable, or the python interpreter plus the instl script)
"""
launch_command = None
exec_path = get_path_to_instl_app()
if getattr(sys, 'frozen', False):
launch_command = utils.quoteme_double(os.fspath(exec_path))
elif __file__:
if os_family_name == "Win":
launch_command = " ".join((utils.quoteme_double(sys.executable), utils.quoteme_double(os.fspath(exec_path))))
else:
launch_command = utils.quoteme_double(os.fspath(exec_path))
return launch_command
@lru_cache(maxsize=None)
def get_data_folder():
""" get the path to where we can find data folders such as defaults or help
data folder should be the instl folder where either instl (in case running directly form python)
or instl.exe (in case running frozen). In both cases this is the parent folder of instl.
"""
application_path = get_path_to_instl_app()
data_folder = Path(application_path).parent
return data_folder
class InvocationReporter(PythonBatchRuntime):
def __init__(self, argv, **kwargs) -> None:
super().__init__(name="InvocationReporter", **kwargs) #TODO: ask Shai about the name arg
self.start_time = datetime.datetime.now()
self.random_invocation_name = ''.join(random.choice(string.ascii_lowercase) for i in range(16))
self.argv = argv.copy() # argument argv is usually sys.argv, which might change with recursive process calls
def enter_self(self) -> None:
try:
vendor_name = os.environ.setdefault("VENDOR_NAME", "Waves Audio")
app_name = os.environ.setdefault("APPLICATION_NAME", "Waves Central")
config_logger(argv=self.argv, config_vars=config_vars)
log.debug(f"===== {self.random_invocation_name} =====")
log.debug(f"Start: {self.start_time}")
log.debug(f"instl: {self.argv[0]}")
log.debug(f'argv: {" ".join(self.argv[1:])}')
except Exception as e:
log.warning(f'instl log file report start failed - {e}')
def exit_self(self, exit_return) -> None:
# self.doing = self.doing if self.doing else utils.get_latest_action_from_stack()
try:
end_time = datetime.datetime.now()
log.debug(f"Run time: {self.command_time_sec}")
log.debug(f"End: {end_time}")
log.debug(f"===== {self.random_invocation_name} =====")
except Exception as e:
log.warning(f'InvocationReporter.__exit__ internal exception - {e}')
def instl_own_main(argv):
""" Main instl entry point. Reads command line options and decides if to go into interactive or client mode.
"""
with InvocationReporter(argv, report_own_progress=False):
argv = argv.copy() # argument argv is usually sys.argv, which might change with recursive process calls
options = CommandLineOptions()
command_names = read_command_line_options(options, argv[1:])
initial_vars = {"__INSTL_EXE_PATH__": get_path_to_instl_app(),
"__CURR_WORKING_DIR__": utils.safe_getcwd(), # the working directory when instl was launched
"__INSTL_LAUNCH_COMMAND__": get_instl_launch_command(),
"__INSTL_DATA_FOLDER__": get_data_folder(),
"__INSTL_DEFAULTS_FOLDER__": "$(__INSTL_DATA_FOLDER__)/defaults",
"__INSTL_COMPILED__": str(getattr(sys, 'frozen', False)),
"__PYTHON_VERSION__": sys.version_info,
"__PLATFORM_NODE__": platform.node(),
"__PYSQLITE3_VERSION__": sqlite3.version,
"__SQLITE_VERSION__": sqlite3.sqlite_version,
"__COMMAND_NAMES__": command_names,
"__CURRENT_OS__": os_family_name,
"__CURRENT_OS_SECOND_NAME__": os_second_name,
"__CURRENT_OS_NAMES__": current_os_names,
"__CURRENT_OS_DESCRIPTION__": utils.get_os_description(),
"__SITE_DATA_DIR__": os.path.normpath(appdirs.site_data_dir()),
"__SITE_CONFIG_DIR__": os.path.normpath(appdirs.site_config_dir()),
"__USER_DATA_DIR__": os.path.normpath(appdirs.user_data_dir()),
"__USER_CONFIG_DIR__": os.path.normpath(appdirs.user_config_dir()),
"__USER_HOME_DIR__": os.path.normpath(os.path.expanduser("~")),
"__USER_DESKTOP_DIR__": os.path.normpath("$(__USER_HOME_DIR__)/Desktop"),
"__USER_TEMP_DIR__": os.path.normpath(os.path.join(tempfile.gettempdir(), "$(SYNC_BASE_URL_MAIN_ITEM)/$(REPO_NAME)")),
"__SYSTEM_LOG_FILE_PATH__": utils.get_system_log_file_path(),
"__INVOCATION_RANDOM_ID__": ''.join(random.choice(string.ascii_lowercase) for _ in range(16)),
"__SUDO_USER__": os.environ.get("SUDO_USER", "no set"),
# VENDOR_NAME, APPLICATION_NAME need to be set so logging can be redirected to the correct folder
"VENDOR_NAME": os.environ.get("VENDOR_NAME", "Waves Audio"),
"APPLICATION_NAME": os.environ.get("APPLICATION_NAME", "Waves Central"),
"__ARGV__": argv,
"ACTING_UID": -1,
"ACTING_GID": -1,
}
if os_family_name != "Win":
initial_vars.update(
{"__USER_ID__": str(os.getuid()),
"__GROUP_ID__": str(os.getgid())})
else:
initial_vars.update(
{"__USER_ID__": -1,
"__GROUP_ID__": -1,
"__WHO_LOCKS_FILE_DLL_PATH__": "$(__INSTL_DATA_FOLDER__)/who_locks_file.dll"})
instance = None
if options.__MAIN_COMMAND__ == "command-list":
from pyinstl.instlCommandList import run_commands_from_file
run_commands_from_file(initial_vars, options)
elif options.mode == "client": #shai, maybe add a log here? before all imports
log.debug("begin, importing instl object") #added by oren
from pyinstl.instlClient import InstlClientFactory
instance = InstlClientFactory(initial_vars, options.__MAIN_COMMAND__)
instance.progress("welcome to instl", instance.get_version_str(short=True), options.__MAIN_COMMAND__)
instance.init_from_cmd_line_options(options)
            instance.do_command()  # after all preparation is done, we execute the command itself
elif options.mode == "doit":
from pyinstl.instlDoIt import InstlDoIt
instance = InstlDoIt(initial_vars)
instance.progress("welcome to instl", instance.get_version_str(short=True), options.__MAIN_COMMAND__)
instance.init_from_cmd_line_options(options)
instance.do_command()
elif options.mode == "do_something":
from pyinstl.instlMisc import InstlMisc
instance = InstlMisc(initial_vars, options.__MAIN_COMMAND__)
instance.progress("welcome to instl", instance.get_version_str(short=True), options.__MAIN_COMMAND__)
instance.init_from_cmd_line_options(options)
instance.do_command()
elif not getattr(sys, 'frozen', False): # these modes are not available in compiled instl to avoid issues such as import errors for users
if options.mode == "admin":
if os_family_name not in ("Linux", "Mac"):
raise EnvironmentError("instl admin commands can only run under Mac or Linux")
from pyinstl.instlAdmin import InstlAdmin
instance = InstlAdmin(initial_vars)
instance.progress("welcome to instl", instance.get_version_str(short=True), options.__MAIN_COMMAND__)
instance.init_from_cmd_line_options(options)
instance.do_command()
elif options.mode == "interactive":
from pyinstl.instlClient import InstlClient
client = InstlClient(initial_vars)
client.init_from_cmd_line_options(options)
from pyinstl.instlAdmin import InstlAdmin
from pyinstl.instlInstanceBase_interactive import go_interactive
admin = InstlAdmin(initial_vars)
admin.init_from_cmd_line_options(options)
go_interactive(client, admin)
elif options.mode == "gui":
from pyinstl.instlGui import InstlGui
instance = InstlGui(initial_vars)
instance.init_from_cmd_line_options(options)
instance.do_command()
# make sure instance's dispose functions are called
if instance is not None:
instance.close()
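# --- Editor's illustrative sketch (not part of instl): a minimal launcher is
# assumed to simply forward the process arguments to instl_own_main(); the
# real entry script may perform additional setup.
if __name__ == "__main__":
    instl_own_main(sys.argv)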
|
drocco007/pyatl-english-numerals
|
refs/heads/master
|
test_english.py
|
1
|
from english_number import english_number
import pytest
def test_zero():
assert 'zero' == english_number(0)
def test_one():
assert 'one' == english_number(1)
@pytest.mark.parametrize('n, english',
[(2, 'two'),
(3, 'three'),
(4, 'four'),
(5, 'five'),
(6, 'six'),
(7, 'seven'),
(8, 'eight'),
(9, 'nine')]
)
def test_ones(n, english):
assert english == english_number(n)
@pytest.mark.parametrize('n, english',
[(10, 'ten'),
(11, 'eleven'),
(12, 'twelve'),
(13, 'thirteen'),
(14, 'fourteen'),
(15, 'fifteen'),
(16, 'sixteen'),
(17, 'seventeen'),
(18, 'eighteen'),
(19, 'nineteen')]
)
def test_tens(n, english):
assert english == english_number(n)
def test_twenty():
assert 'twenty' == english_number(20)
def test_twenty_one():
assert 'twenty-one' == english_number(21)
def test_twenty_two():
assert 'twenty-two' == english_number(22)
@pytest.mark.parametrize('n, english',
[(23, 'twenty-three'),
(24, 'twenty-four'),
(25, 'twenty-five'),
(26, 'twenty-six'),
(27, 'twenty-seven'),
(28, 'twenty-eight'),
(29, 'twenty-nine')]
)
def test_twenties(n, english):
assert english == english_number(n)
@pytest.mark.parametrize('n, english',
[(30, 'thirty'),
(31, 'thirty-one'),
(32, 'thirty-two'),
(33, 'thirty-three'),
(34, 'thirty-four'),
(35, 'thirty-five'),
(36, 'thirty-six'),
(37, 'thirty-seven'),
(38, 'thirty-eight'),
(39, 'thirty-nine')]
)
def test_thirties(n, english):
assert english == english_number(n)
@pytest.mark.parametrize('n, english',
[(99, 'ninety-nine'),
(93, 'ninety-three'),
(90, 'ninety'),
(82, 'eighty-two'),
(78, 'seventy-eight'),
(75, 'seventy-five'),
(70, 'seventy'),
(65, 'sixty-five'),
(56, 'fifty-six'),
(47, 'forty-seven'),]
)
def test_double_digit(n, english):
assert english == english_number(n)
def test_one_hundred():
assert 'one hundred' == english_number(100)
def test_one_hundred_one():
assert 'one hundred one' == english_number(101)
@pytest.mark.parametrize('n, english',
[(102, 'one hundred two'),
(103, 'one hundred three'),
(104, 'one hundred four'),
(105, 'one hundred five'),
(106, 'one hundred six'),
(107, 'one hundred seven'),
(108, 'one hundred eight'),
(109, 'one hundred nine')]
)
def test_one_hundred_ones(n, english):
assert english == english_number(n)
@pytest.mark.parametrize('n, english',
[(123, 'one hundred twenty-three'),
(124, 'one hundred twenty-four'),
(125, 'one hundred twenty-five'),
(126, 'one hundred twenty-six'),
(127, 'one hundred twenty-seven'),
(128, 'one hundred twenty-eight'),
(129, 'one hundred twenty-nine')]
)
def test_one_hundred_twenties(n, english):
assert english == english_number(n)
@pytest.mark.parametrize('n, english',
[(130, 'one hundred thirty'),
(131, 'one hundred thirty-one'),
(132, 'one hundred thirty-two'),
(133, 'one hundred thirty-three'),
(134, 'one hundred thirty-four'),
(135, 'one hundred thirty-five'),
(136, 'one hundred thirty-six'),
(137, 'one hundred thirty-seven'),
(138, 'one hundred thirty-eight'),
(139, 'one hundred thirty-nine')]
)
def test_one_hundred_thirties(n, english):
assert english == english_number(n)
@pytest.mark.parametrize('n, english',
[(199, 'one hundred ninety-nine'),
(193, 'one hundred ninety-three'),
(190, 'one hundred ninety'),
(182, 'one hundred eighty-two'),
(178, 'one hundred seventy-eight'),
(175, 'one hundred seventy-five'),
(170, 'one hundred seventy'),
(165, 'one hundred sixty-five'),
(156, 'one hundred fifty-six'),
(147, 'one hundred forty-seven'),]
)
def test_other_one_hundreds(n, english):
assert english == english_number(n)
@pytest.mark.parametrize('n, english',
[(200, 'two hundred'),
(201, 'two hundred one'),
(300, 'three hundred'),
(301, 'three hundred one'),
(400, 'four hundred'),
(401, 'four hundred one'),
(500, 'five hundred'),
(501, 'five hundred one'),
(600, 'six hundred'),
(601, 'six hundred one'),
(700, 'seven hundred'),
(701, 'seven hundred one'),
(800, 'eight hundred'),
(801, 'eight hundred one'),
(900, 'nine hundred'),
(901, 'nine hundred one'),]
)
def test_other_hundreds_zero_one(n, english):
assert english == english_number(n)
@pytest.mark.parametrize('n, english',
[(999, 'nine hundred ninety-nine'),
(893, 'eight hundred ninety-three'),
(790, 'seven hundred ninety'),
(682, 'six hundred eighty-two'),
(678, 'six hundred seventy-eight'),
(575, 'five hundred seventy-five'),
(470, 'four hundred seventy'),
(365, 'three hundred sixty-five'),
(256, 'two hundred fifty-six'),
(147, 'one hundred forty-seven'),]
)
def test_other_hundreds(n, english):
assert english == english_number(n)
def test_one_thousand():
assert 'one thousand' == english_number(1000)
def test_one_thousand_one():
assert 'one thousand one' == english_number(1001)
@pytest.mark.parametrize('n, english',
[(1002, 'one thousand two'),
(1003, 'one thousand three'),
(1004, 'one thousand four'),
(1005, 'one thousand five'),
(1006, 'one thousand six'),
(1007, 'one thousand seven'),
(1008, 'one thousand eight'),
(1009, 'one thousand nine'),
(1010, 'one thousand ten'),
(1011, 'one thousand eleven'),
(1012, 'one thousand twelve'),
(1013, 'one thousand thirteen'),
(1014, 'one thousand fourteen'),
(1015, 'one thousand fifteen'),
(1016, 'one thousand sixteen'),
(1017, 'one thousand seventeen'),
(1018, 'one thousand eighteen'),
(1019, 'one thousand nineteen')]
)
def test_one_thousand_ones(n, english):
assert english == english_number(n)
def test_party_like_its():
assert 'one thousand nine hundred ninety-nine' == english_number(1999)
def test_sobering_social_commentary():
assert 'one thousand nine hundred eighty-four' == english_number(1984)
def test_inception_of_dan():
assert 'one thousand nine hundred seventy-eight' == english_number(1978)
def test_war_of():
assert 'one thousand eight hundred twelve' == english_number(1812)
def test_spirit_of():
assert 'one thousand seven hundred seventy-six' == english_number(1776)
def test_happy_birthday_Bach():
assert 'one thousand six hundred eighty-five' == english_number(1685)
def test_two_thousand():
assert 'two thousand' == english_number(2000)
def test_two_thousand_one():
assert 'two thousand one' == english_number(2001)
def test_ten_thousand():
assert 'ten thousand' == english_number(10000)
@pytest.mark.parametrize('n, english',
[(99999, 'ninety-nine thousand nine hundred ninety-nine'),
(80000, 'eighty thousand'),
(65536, 'sixty-five thousand five hundred thirty-six'),
(32768, 'thirty-two thousand seven hundred sixty-eight'),
(16384, 'sixteen thousand three hundred eighty-four'),
(8192, 'eight thousand one hundred ninety-two'),
(4096, 'four thousand ninety-six'),
(2048, 'two thousand forty-eight'),
(1024, 'one thousand twenty-four'),]
)
def test_other_thousands(n, english):
assert english == english_number(n)
def test_one_hundred_thousand():
assert 'one hundred thousand' == english_number(100000)
@pytest.mark.parametrize('n, english',
[(200000, 'two hundred thousand'),
(300000, 'three hundred thousand'),
(400000, 'four hundred thousand'),
(500000, 'five hundred thousand'),
(600000, 'six hundred thousand'),
(700000, 'seven hundred thousand'),
(800000, 'eight hundred thousand'),
(900000, 'nine hundred thousand'),]
)
def test_other_hundred_thousands(n, english):
assert english == english_number(n)
def test_one_million():
assert 'one million' == english_number(1000000)
def test_big_million():
assert ('eight hundred sixty-seven million three hundred twenty-four '
'thousand eight hundred seventy-six') == english_number(867324876)
def test_one_billion():
assert 'one billion' == english_number(1000000000)
def test_big_billion():
assert ('four hundred thirty-five billion seven hundred twenty million '
'one hundred one thousand nine hundred forty-three') \
== english_number(435720101943)
def test_one_trillion():
assert 'one trillion' == english_number(1000000000000)
def test_big_trillion():
assert ('one hundred eleven trillion one hundred ten million one hundred '
'one') == english_number(111000110000101)
def test_one_quadrillion():
assert 'one quadrillion' == english_number(1000000000000000)
def test_googol():
assert 'two googol' == english_number(2*10**100)
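# --- Editor's illustrative sketch (not part of the original test module): a
# minimal english_number() covering only 0-99, written to match the
# conventions the tests above expect (hyphenated tens, no "and").  The real
# english_number module is not shown here and may be implemented differently.
_ONES = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
         'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
         'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']
_TENS = [None, None, 'twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy',
         'eighty', 'ninety']
def _english_number_under_100(n):
    if n < 20:
        return _ONES[n]
    tens, ones = divmod(n, 10)
    return _TENS[tens] + ('-' + _ONES[ones] if ones else '')
def test_editor_sketch_agrees_with_fixtures():
    assert _english_number_under_100(47) == 'forty-seven'
    assert _english_number_under_100(90) == 'ninety'
    assert _english_number_under_100(13) == 'thirteen'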
|
petertodd/bitcoin
|
refs/heads/master
|
qa/rpc-tests/skeleton.py
|
148
|
#!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Skeleton for python-based regression tests using
# JSON-RPC
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def run_test(nodes):
# Replace this as appropriate
for node in nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
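# --- Editor's illustrative sketch (not part of the original skeleton): one way
# run_test() above might be replaced in a real test.  The RPC calls used
# (getnewaddress, sendtoaddress, setgenerate, getreceivedbyaddress) are
# standard bitcoind regtest calls, but the flow and amounts are only an
# example and are not exercised by main().
def run_test_example(nodes):
    # Send 1 BTC from node 0 to node 1 and confirm it with one mined block.
    addr = nodes[1].getnewaddress()
    nodes[0].sendtoaddress(addr, 1.0)
    nodes[0].setgenerate(True, 1)
    sync_blocks(nodes)
    assert_equal(nodes[1].getreceivedbyaddress(addr), 1.0)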
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
(options, args) = parser.parse_args()
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
print("Initializing test directory "+options.tmpdir)
if not os.path.isdir(options.tmpdir):
os.makedirs(options.tmpdir)
initialize_chain(options.tmpdir)
nodes = start_nodes(2, options.tmpdir)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
run_test(nodes)
success = True
except AssertionError as e:
print("Assertion failed: "+e.message)
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_bitcoinds()
shutil.rmtree(options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
if __name__ == '__main__':
main()
|
XiaodunServerGroup/xiaodun-platform
|
refs/heads/master
|
docs/en_us/data/source/conf.py
|
10
|
# -*- coding: utf-8 -*-
#
import sys, os
# on_rtd is whether we are on readthedocs.org; this line of code was grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
sys.path.append(os.path.abspath('../../../'))
sys.path.append(os.path.abspath('../../'))
#from docs.shared.conf import *
sys.path.insert(0, os.path.abspath('.'))
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
#templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path.append('source/_static')
project = u'edX Data Documentation'
copyright = u'2014, edX'
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
#Added to turn off smart quotes so users can copy JSON values without problems.
html_use_smartypants = False
|
benspaulding/django
|
refs/heads/master
|
django/db/models/sql/constants.py
|
16
|
from collections import namedtuple
import re
# Valid query types (a set is used for speedy lookups).
QUERY_TERMS = set([
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
])
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
# Constants to make looking up tuple values clearer.
# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
JoinInfo = namedtuple('JoinInfo',
'table_name rhs_alias join_type lhs_alias '
'lhs_join_col rhs_join_col nullable')
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC'),
}
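# --- Editor's illustrative example (not part of Django): how these constants
# are typically consumed.  The lookup string below is arbitrary.
#
#     >>> 'author__name__iexact'.split(LOOKUP_SEP)
#     ['author', 'name', 'iexact']
#     >>> bool(ORDER_PATTERN.match('-created'))
#     True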
|
igel-kun/pyload
|
refs/heads/stable
|
module/plugins/hoster/PorntrexCom.py
|
1
|
# -*- coding: utf-8 -*-
import re
from ..internal.SimpleHoster import SimpleHoster
class PorntrexCom(SimpleHoster):
__name__ = "PorntrexCom"
__type__ = "hoster"
__version__ = "0.01"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?porntrex\.com/video/.+'
__config__ = [("activated", "bool", "Activated", True),
("chk_filesize", "bool", "Check file size", True),
("quality", "360p;480p;720p;1080p;1440p;2160p", "Quality Setting", "1080p")]
__description__ = """Porntrex.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("ondrej", "git@ondrej.it")]
NAME_PATTERN = r'<p class="title-video">(?P<N>.+?)</p>'
OFFLINE_PATTERN = r'<title>page not found</title>'
DISPOSITION = False
def setup(self):
self.multiDL = True
self.resume_download = False
def handle_free(self, pyfile):
html = self.load(pyfile.url)
quality = self.config.get("quality")
all_quality = ["2160p", "1440p", "1080p", "720p", "480p", "360p"]
for i in all_quality[all_quality.index(quality):]:
video_url = re.findall(r"https://www.porntrex.com/get_file/[\w\d/]+_{0}.mp4".format(i), html)
if video_url:
self.link = video_url[0]
break
if not self.link:
self.error(_("Video url not found"))
self.pyfile.name = re.search(self.NAME_PATTERN, html).group(1)
self.pyfile.name += "." + self.link.split(".")[-1]
|
blackzw/openwrt_sdk_dev1
|
refs/heads/master
|
staging_dir/host/lib/python2.7/Cookie.py
|
65
|
#!/usr/bin/env python
#
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy..
>>> import Cookie
Most of the time you start by creating a cookie. Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.
>>> C = Cookie.SimpleCookie()
>>> C = Cookie.SerialCookie()
>>> C = Cookie.SmartCookie()
[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create a Cookie object. Although deprecated, it
is still supported by the code. See the Backward Compatibility notes
for more information.]
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = Cookie.SmartCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = Cookie.SmartCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print C.output(header="Cookie:")
Cookie: rocky=road; Path=/cookie
>>> print C.output(attrs=[], header="Cookie:")
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = Cookie.SmartCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = Cookie.SmartCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print C
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = Cookie.SmartCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print C
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = Cookie.SmartCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
A Bit More Advanced
-------------------
As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics. This
section briefly discusses the differences.
SimpleCookie
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = Cookie.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
SerialCookie
The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available). As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned. (SerialCookie can yield some strange-looking cookie
values, however.)
>>> C = Cookie.SerialCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
SmartCookie
The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string. String objects are *not* serialized. Similarly,
when the load() method parses out values, it attempts to de-serialize
the value. If it fails, then it falls back to treating the value
as a string.
>>> C = Cookie.SmartCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
7
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
Backwards Compatibility
-----------------------
In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie. In
fact, this simply returns a SmartCookie.
>>> C = Cookie.Cookie()
>>> print C.__class__.__name__
SmartCookie
Finis.
""" #"
# ^
# |----helps out font-lock
#
# Import our required modules
#
import string
try:
from cPickle import dumps, loads
except ImportError:
from pickle import dumps, loads
import re, warnings
__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
"SmartCookie","Cookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
_idmap = ''.join(chr(x) for x in xrange(256))
def _quote(str, LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
#
# If the string does not need to be double-quoted,
# then just return the string. Otherwise, surround
# the string in doublequotes and precede quote (with a \)
# special characters.
#
if "" == translate(str, idmap, LegalChars):
return str
else:
return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
Omatch = _OctalPatt.search(str, i)
Qmatch = _QuotePatt.search(str, i)
if not Omatch and not Qmatch: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if Omatch: j = Omatch.start(0)
if Qmatch: k = Qmatch.start(0)
if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k+2
else: # OctalPatt matched
res.append(str[i:j])
res.append( chr( int(str[j+1:j+4], 8) ) )
i = j+4
return _nulljoin(res)
# end _unquote
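# --- Editor's illustrative example (not part of the original module): the
# quoting scheme described above, shown as a round trip with the default
# translation tables.
#
#     >>> _quote('fudge\n')
#     '"fudge\\012"'
#     >>> _unquote('"fudge\\012"')
#     'fudge\n'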
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header. By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header. The one optional argument is an offset from
# now, in seconds. For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
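# --- Editor's illustrative note (not part of the original module):
# _getdate(3600) yields an "expires" string one hour in the future, in the
# fixed format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT', e.g. something like
# 'Thu, 01-Jan-2015 13:00:00 GMT' (the actual value depends on the clock).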
#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
# so this class is used to keep the attributes associated
# with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
# is used to hold the network representation of the
# value. This is most useful when Python objects are
# pickled for network transit.
#
class Morsel(dict):
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = { "expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for K in self._reserved:
dict.__setitem__(self, K, "")
# end __init__
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
# end __setitem__
def isReservedKey(self, K):
return K.lower() in self._reserved
# end isReservedKey
def set(self, key, val, coded_val,
LegalChars=_LegalChars,
idmap=_idmap, translate=string.translate):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if "" != translate(key, idmap, LegalChars):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
# end set
def output(self, attrs=None, header = "Set-Cookie:"):
return "%s %s" % ( header, self.OutputString(attrs) )
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value) )
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % ( self.OutputString(attrs).replace('"',r'\"'), )
# end js_output()
def OutputString(self, attrs=None):
# Build up our result
#
result = []
RA = result.append
# First, the key=value pair
RA("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = self.items()
items.sort()
for K,V in items:
if V == "": continue
if K not in attrs: continue
if K == "expires" and type(V) == type(1):
RA("%s=%s" % (self._reserved[K], _getdate(V)))
elif K == "max-age" and type(V) == type(1):
RA("%s=%d" % (self._reserved[K], V))
elif K == "secure":
RA(str(self._reserved[K]))
elif K == "httponly":
RA(str(self._reserved[K]))
else:
RA("%s=%s" % (self._reserved[K], V))
# Return the result
return _semispacejoin(result)
# end OutputString
# end Morsel class
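
# Illustrative sketch, not part of the original module: Morsels are normally
# created for you by the cookie classes below; this never-called helper shows
# how one is obtained and rendered.  Output lines are approximations.
def _example_morsel_usage():
    C = SimpleCookie()
    C["sid"] = "abc123"
    m = C["sid"]                 # a Morsel holding key, value and coded_value
    m["path"] = "/"
    m["max-age"] = 3600
    print m.key, m.value         # -> sid abc123
    print m.output()             # -> Set-Cookie: sid=abc123; Max-Age=3600; Path=/
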
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(
r"(?x)" # This is a Verbose pattern
r"(?P<key>" # Start of group 'key'
""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
r")" # End of group 'key'
r"\s*=\s*" # Equal Sign
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
r"\s*;?" # Probably ending in a semi-colon
)
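
# Illustrative sketch, not part of the original module: what the pattern above
# extracts from a raw Cookie: header.  Each match carries a 'key' and a 'val'
# group; __ParseString() below walks the header match by match.  Never called
# at import time; the printed values are approximations.
def _example_cookie_pattern_usage():
    m = _CookiePattern.search('chips=ahoy; vienna=finger', 0)
    print m.group("key"), m.group("val")   # -> chips ahoy
    m = _CookiePattern.search('keebler="E=mc2; L=\\"Loves\\""', 0)
    print m.group("val")                   # -> the whole doublequoted (escaped) string
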
# At long last, here is the cookie class.
# Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(dict):
# A container class for a set of Morsels
#
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
    # end value_decode
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
# end value_encode
def __init__(self, input=None):
if input: self.load(input)
# end __init__
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
# end __set
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
# end __setitem__
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.output(attrs, header) )
return sep.join(result)
# end output
__str__ = output
def __repr__(self):
L = []
items = self.items()
items.sort()
for K,V in items:
L.append( '%s=%s' % (K,repr(V.value) ) )
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = self.items()
items.sort()
for K,V in items:
result.append( V.js_output(attrs) )
return _nulljoin(result)
# end js_output
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if type(rawdata) == type(""):
self.__ParseString(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for k, v in rawdata.items():
self[k] = v
return
# end load()
def __ParseString(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match: break # No more cookies
K,V = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if K[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[ K[1:] ] = V
elif K.lower() in Morsel._reserved:
if M:
M[ K ] = _unquote(V)
else:
rval, cval = self.value_decode(V)
self.__set(K, rval, cval)
M = self[K]
# end __ParseString
# end BaseCookie class
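
# Illustrative sketch, not part of the original module: BaseCookie keeps one
# Morsel per key and is customised by overriding value_decode()/value_encode(),
# exactly as the subclasses below do.  The class and helper here are our own
# toy names and are never used at import time; outputs are approximations.
class _ExampleUpperCookie(BaseCookie):
    """Toy subclass: coded values are upper-cased before being sent."""
    def value_encode(self, val):
        strval = str(val)
        return strval, _quote(strval.upper())

def _example_basecookie_usage():
    C = _ExampleUpperCookie()
    C["token"] = "abc"
    print C.output()                      # -> Set-Cookie: token=ABC
    C.load("chips=ahoy; vienna=finger")   # load() also accepts a raw header string
    print C["vienna"].value               # -> finger
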
class SimpleCookie(BaseCookie):
"""SimpleCookie
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote( val ), val
def value_encode(self, val):
strval = str(val)
return strval, _quote( strval )
# end SimpleCookie
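
# Illustrative sketch, not part of the original module: a typical SimpleCookie
# round trip -- parse an incoming Cookie: header, add a value, emit Set-Cookie:
# headers.  Never called at import time; outputs are approximations.
def _example_simplecookie_usage():
    C = SimpleCookie()
    C.load("fig=newton; sugar=wafer")
    print C["fig"].value                  # -> newton
    C["rocky"] = "road"
    C["rocky"]["path"] = "/cookie"
    print C.output()                      # one "Set-Cookie:" line per key, keys sorted
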
class SerialCookie(BaseCookie):
"""SerialCookie
SerialCookie supports arbitrary objects as cookie values. All
values are serialized (using cPickle) before being sent to the
client. All incoming values are assumed to be valid Pickle
representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
FORMAT, THEN AN EXCEPTION WILL BE RAISED.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("SerialCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
# This could raise an exception!
return loads( _unquote(val) ), val
def value_encode(self, val):
return val, _quote( dumps(val) )
# end SerialCookie

class SmartCookie(BaseCookie):
"""SmartCookie
SmartCookie supports arbitrary objects as cookie values. If the
object is a string, then it is quoted. If the object is not a
string, however, then SmartCookie will use cPickle to serialize
the object into a string representation.
Note: Large cookie values add overhead because they must be
retransmitted on every HTTP transaction.
Note: HTTP has a 2k limit on the size of a cookie. This class
does not check for this limit, so be careful!!!
"""
def __init__(self, input=None):
warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
DeprecationWarning)
BaseCookie.__init__(self, input)
# end __init__
def value_decode(self, val):
strval = _unquote(val)
try:
return loads(strval), val
except:
return strval, val
def value_encode(self, val):
if type(val) == type(""):
return val, _quote(val)
else:
return val, _quote( dumps(val) )
# end SmartCookie

###########################################################
# Backwards Compatibility: Don't break any existing code!
# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie
#
###########################################################

def _test():
import doctest, Cookie
return doctest.testmod(Cookie)
if __name__ == "__main__":
_test()
#Local Variables:
#tab-width: 4
#end:
|
adit-chandra/tensorflow
|
refs/heads/master
|
tensorflow/python/util/module_wrapper_test.py
|
8
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.util.module_wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import module_wrapper
from tensorflow.python.util import tf_inspect
from tensorflow.tools.compatibility import all_renames_v2

module_wrapper._PER_MODULE_WARNING_LIMIT = 5

class MockModule(types.ModuleType):
pass

class DeprecationWrapperTest(test.TestCase):
def testWrapperIsAModule(self):
module = MockModule('test')
wrapped_module = module_wrapper.TFModuleWrapper(module, 'test')
self.assertTrue(tf_inspect.ismodule(wrapped_module))
@test.mock.patch.object(logging, 'warning', autospec=True)
def testDeprecationWarnings(self, mock_warning):
module = MockModule('test')
module.foo = 1
module.bar = 2
module.baz = 3
all_renames_v2.symbol_renames['tf.test.bar'] = 'tf.bar2'
all_renames_v2.symbol_renames['tf.test.baz'] = 'tf.compat.v1.baz'
wrapped_module = module_wrapper.TFModuleWrapper(module, 'test')
self.assertTrue(tf_inspect.ismodule(wrapped_module))
self.assertEqual(0, mock_warning.call_count)
bar = wrapped_module.bar
self.assertEqual(1, mock_warning.call_count)
foo = wrapped_module.foo
self.assertEqual(1, mock_warning.call_count)
baz = wrapped_module.baz # pylint: disable=unused-variable
self.assertEqual(2, mock_warning.call_count)
baz = wrapped_module.baz
self.assertEqual(2, mock_warning.call_count)
# Check that values stayed the same
self.assertEqual(module.foo, foo)
self.assertEqual(module.bar, bar)
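
# Illustrative sketch, not part of the original tests: the pattern exercised
# above, pulled out of the unittest harness.  It relies only on names already
# used in this file; the fixture values are our own and the expected warning
# behaviour is an assumption based on the assertions above.
def _example_wrapper_usage():  # never collected or called by the test runner
    module = MockModule('demo')
    module.old_name = 42
    all_renames_v2.symbol_renames['tf.demo.old_name'] = 'tf.new_name'
    wrapped = module_wrapper.TFModuleWrapper(module, 'demo')
    return wrapped.old_name  # expected to log one deprecation warning (renamed symbol)
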
class LazyLoadingWrapperTest(test.TestCase):
def testLazyLoad(self):
module = MockModule('test')
apis = {'cmd': ('', 'cmd'), 'ABCMeta': ('abc', 'ABCMeta')}
wrapped_module = module_wrapper.TFModuleWrapper(
module, 'test', public_apis=apis, deprecation=False)
import cmd as _cmd # pylint: disable=g-import-not-at-top
from abc import ABCMeta as _ABCMeta # pylint: disable=g-import-not-at-top, g-importing-member
self.assertEqual(wrapped_module.cmd, _cmd)
self.assertEqual(wrapped_module.ABCMeta, _ABCMeta)
def testLazyLoadLocalOverride(self):
# Test that we can override and add fields to the wrapped module.
module = MockModule('test')
apis = {'cmd': ('', 'cmd')}
wrapped_module = module_wrapper.TFModuleWrapper(
module, 'test', public_apis=apis, deprecation=False)
import cmd as _cmd # pylint: disable=g-import-not-at-top
self.assertEqual(wrapped_module.cmd, _cmd)
setattr(wrapped_module, 'cmd', 1)
setattr(wrapped_module, 'cgi', 2)
self.assertEqual(wrapped_module.cmd, 1) # override
self.assertEqual(wrapped_module.cgi, 2) # add
def testLazyLoadDict(self):
# Test that we can override and add fields to the wrapped module.
module = MockModule('test')
apis = {'cmd': ('', 'cmd')}
wrapped_module = module_wrapper.TFModuleWrapper(
module, 'test', public_apis=apis, deprecation=False)
import cmd as _cmd # pylint: disable=g-import-not-at-top
# At first cmd key does not exist in __dict__
self.assertNotIn('cmd', wrapped_module.__dict__)
# After it is referred (lazyloaded), it gets added to __dict__
wrapped_module.cmd # pylint: disable=pointless-statement
self.assertEqual(wrapped_module.__dict__['cmd'], _cmd)
# When we call setattr, it also gets added to __dict__
setattr(wrapped_module, 'cmd2', _cmd)
self.assertEqual(wrapped_module.__dict__['cmd2'], _cmd)
def testLazyLoadWildcardImport(self):
# Test that public APIs are in __all__.
module = MockModule('test')
module._should_not_be_public = 5
apis = {'cmd': ('', 'cmd')}
wrapped_module = module_wrapper.TFModuleWrapper(
module, 'test', public_apis=apis, deprecation=False)
setattr(wrapped_module, 'hello', 1)
self.assertIn('hello', wrapped_module.__all__)
self.assertIn('cmd', wrapped_module.__all__)
self.assertNotIn('_should_not_be_public', wrapped_module.__all__)
def testLazyLoadCorrectLiteModule(self):
# If set, always load lite module from public API list.
module = MockModule('test')
apis = {'lite': ('', 'cmd')}
module.lite = 5
import cmd as _cmd # pylint: disable=g-import-not-at-top
wrapped_module = module_wrapper.TFModuleWrapper(
module, 'test', public_apis=apis, deprecation=False, has_lite=True)
self.assertEqual(wrapped_module.lite, _cmd)

if __name__ == '__main__':
test.main()
|