| repo_name (string, 5–100) | ref (string, 12–67) | path (string, 4–244) | copies (string, 1–8) | content (string, 0–1.05M, nullable ⌀) |
|---|---|---|---|---|
chokribr/inveniotest
|
refs/heads/master
|
modules/docextract/lib/refextract_engine.py
|
11
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is the main body of refextract. It is used to extract references from
fulltext PDF documents.
"""
__revision__ = "$Id$"
import re
import os
import subprocess
from itertools import chain
from invenio.refextract_config import (CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM,
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL,
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND,
CFG_REFEXTRACT_MARKER_CLOSING_VOLUME,
CFG_REFEXTRACT_MARKER_CLOSING_YEAR,
CFG_REFEXTRACT_MARKER_CLOSING_PAGE,
CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID,
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL,
CFG_REFEXTRACT_MARKER_CLOSING_TITLE,
CFG_REFEXTRACT_MARKER_CLOSING_SERIES)
# make refextract runnable without requiring the full Invenio installation:
from invenio.config import CFG_PATH_GFILE
from invenio.refextract_tag import (tag_reference_line,
sum_2_dictionaries,
identify_and_tag_DOI,
identify_and_tag_URLs,
find_numeration,
extract_series_from_volume)
from invenio.refextract_record import (build_record,
build_references)
from invenio.docextract_pdf import convert_PDF_to_plaintext
from invenio.docextract_utils import write_message
from invenio.refextract_kbs import get_kbs
from invenio.refextract_linker import find_referenced_recid
from invenio.refextract_re import (get_reference_line_numeration_marker_patterns,
regex_match_list,
re_tagged_citation,
re_numeration_no_ibid_txt,
re_roman_numbers,
re_recognised_numeration_for_title_plus_series)
description = """
Refextract tries to extract the reference section from a full-text document.
Extracted reference lines are processed and any recognised citations are
marked up using MARC XML. Recognises author names, URLs, DOIs, and also
journal titles and report numbers as per the relevant knowledge bases. Results
are output to the standard output stream by default, or instead to an XML file.
"""
# General initiation tasks:
# components relating to the standardisation and
# recognition of citations in reference lines:
def remove_reference_line_marker(line):
"""Trim a reference line's 'marker' from the beginning of the line.
@param line: (string) - the reference line.
@return: (tuple) containing two strings:
+ The reference line's marker (or, if there was not one,
a 'space' character).
+ The reference line with its marker removed from the
beginning.
"""
# Get patterns to identify reference-line marker patterns:
marker_patterns = get_reference_line_numeration_marker_patterns()
line = line.lstrip()
marker_match = regex_match_list(line, marker_patterns)
if marker_match is not None:
# found a marker:
marker_val = marker_match.group(u'mark')
# trim the marker from the start of the line:
line = line[marker_match.end():].lstrip()
else:
marker_val = u" "
return (marker_val, line)
def roman2arabic(num):
"""Convert numbers from roman to arabic
This function expects a string like XXII
and outputs an integer
"""
t = 0
p = 0
for r in num:
n = 10 ** (205558 % ord(r) % 7) % 9995
t += n - 2 * p % n
p = n
return t
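# Added commentary (not in the original module): the expression
# 10 ** (205558 % ord(r) % 7) % 9995 maps the characters 'I', 'V', 'X', 'L',
# 'C', 'D' and 'M' to 1, 5, 10, 50, 100, 500 and 1000 respectively, and the
# update "t += n - 2 * p % n" compensates for subtractive notation, so for
# example roman2arabic('IV') == 4 and roman2arabic('XXII') == 22.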
## Transformations
def format_volume(citation_elements):
"""format volume number (roman numbers to arabic)
When the volume number is expressed in roman numbers (CXXII),
they are converted to their equivalent in arabic numbers (42)
"""
re_roman = re.compile(re_roman_numbers + u'$', re.UNICODE)
for el in citation_elements:
if el['type'] == 'JOURNAL' and re_roman.match(el['volume']):
el['volume'] = str(roman2arabic(el['volume'].upper()))
return citation_elements
def handle_special_journals(citation_elements, kbs):
"""format special journals (like JHEP) volume number
JHEP needs the volume number prefixed with the year
e.g. JHEP 0301 instead of JHEP 01
"""
for el in citation_elements:
if el['type'] == 'JOURNAL' and el['title'] in kbs['special_journals'] \
and re.match(r'\d{1,2}$', el['volume']):
# Sometimes the page is omitted and the year is written in its place
# We can never be sure but it's very likely that page > 1900 is
# actually a year, so we skip this reference
if el['year'] == '' and re.match(r'(19|20)\d{2}$', el['page']):
el['type'] = 'MISC'
el['misc_txt'] = "%s,%s,%s" \
% (el['title'], el['volume'], el['page'])
el['volume'] = el['year'][-2:] + '%02d' % int(el['volume'])
return citation_elements
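# Illustrative values (added commentary, not from the original code): a
# special-journal citation tagged with volume '1' and year '2003' leaves this
# transformation with volume '0301', matching the JHEP convention described in
# the docstring.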
def format_report_number(citation_elements):
"""Format report numbers that are missing a dash
e.g. CERN-LHCC2003-01 to CERN-LHCC-2003-01
"""
re_report = re.compile(ur'^(?P<name>[A-Z-]+)(?P<nums>[\d-]+)$', re.UNICODE)
for el in citation_elements:
if el['type'] == 'REPORTNUMBER':
m = re_report.match(el['report_num'])
if m:
name = m.group('name')
if not name.endswith('-'):
el['report_num'] = m.group('name') + '-' + m.group('nums')
return citation_elements
def format_hep(citation_elements):
"""Format hep-th report numbers with a dash
e.g. replaces hep-th-9711200 with hep-th/9711200
"""
prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-',
'math-ph-')
for el in citation_elements:
if el['type'] == 'REPORTNUMBER':
for p in prefixes:
if el['report_num'].startswith(p):
el['report_num'] = el['report_num'][:len(p) - 1] + '/' + \
el['report_num'][len(p):]
return citation_elements
def format_author_ed(citation_elements):
"""Standardise to (ed.) and (eds.)
e.g. Remove extra space in (ed. )
"""
for el in citation_elements:
if el['type'] == 'AUTH':
el['auth_txt'] = el['auth_txt'].replace('(ed. )', '(ed.)')
el['auth_txt'] = el['auth_txt'].replace('(eds. )', '(eds.)')
return citation_elements
def look_for_books(citation_elements, kbs):
"""Look for books in our kb
Create book tags by using the authors and the title to find books
in our knowledge base
"""
title = None
for el in citation_elements:
if el['type'] == 'QUOTED':
title = el
break
if title:
normalized_title = title['title'].upper()
if normalized_title in kbs['books']:
line = kbs['books'][normalized_title]
el = {'type': 'BOOK',
'misc_txt': '',
'authors': line[0],
'title': line[1],
'year': line[2].strip(';')}
citation_elements.append(el)
citation_elements.remove(title)
return citation_elements
def split_volume_from_journal(citation_elements):
"""Split volume from journal title
We need this because sometimes the series letter is attached to the journal
title instead of the volume field. In those cases we move it from the title to
the volume
"""
for el in citation_elements:
if el['type'] == 'JOURNAL' and ';' in el['title']:
el['title'], series = el['title'].rsplit(';', 1)
el['volume'] = series + el['volume']
return citation_elements
def remove_b_for_nucl_phys(citation_elements):
"""Removes b from the volume of some journals
Removes the B from the volume for Nucl.Phys.Proc.Suppl. because in INSPIRE
that journal is handled differently.
"""
for el in citation_elements:
if el['type'] == 'JOURNAL' and el['title'] == 'Nucl.Phys.Proc.Suppl.' \
and 'volume' in el \
and (el['volume'].startswith('b') or el['volume'].startswith('B')):
el['volume'] = el['volume'][1:]
return citation_elements
def mangle_volume(citation_elements):
"""Make sure the volume letter is before the volume number
e.g. transforms 100B to B100
"""
volume_re = re.compile(ur"(\d+)([A-Z])", re.U|re.I)
for el in citation_elements:
if el['type'] == 'JOURNAL':
matches = volume_re.match(el['volume'])
if matches:
el['volume'] = matches.group(2) + matches.group(1)
return citation_elements
def balance_authors(splitted_citations, new_elements):
if not splitted_citations:
return
last_citation = splitted_citations[-1]
current_citation = new_elements
if last_citation[-1]['type'] == 'AUTH' \
and sum([1 for cit in last_citation if cit['type'] == 'AUTH']) > 1:
el = last_citation.pop()
current_citation.insert(0, el)
def associate_recids(citation_elements):
for el in citation_elements:
try:
el['recid'] = find_referenced_recid(el).pop()
except (IndexError, KeyError):
el['recid'] = None
return citation_elements
def associate_recids_catchup(splitted_citations):
for citation_elements in splitted_citations:
associate_recids(citation_elements)
def split_citations(citation_elements):
"""Split a citation line in multiple citations
We handle the case where the author has put 2 citations in the same line,
separated by ';' or some other method.
"""
splitted_citations = []
new_elements = []
current_recid = None
current_doi = None
def check_ibid(current_elements, trigger_el):
for el in new_elements:
if el['type'] == 'AUTH':
return
# Check for ibid
if trigger_el.get('is_ibid', False):
if splitted_citations:
els = chain(reversed(current_elements),
reversed(splitted_citations[-1]))
else:
els = reversed(current_elements)
for el in els:
if el['type'] == 'AUTH':
new_elements.append(el.copy())
break
def start_new_citation():
"""Start new citation"""
splitted_citations.append(new_elements[:])
del new_elements[:]
for el in citation_elements:
try:
el_recid = el['recid']
except KeyError:
el_recid = None
if current_recid and el_recid and current_recid == el_recid:
# Do not start a new citation
pass
elif current_recid and el_recid and current_recid != el_recid \
or current_doi and el['type'] == 'DOI' and \
current_doi != el['doi_string']:
start_new_citation()
# Some authors may be found in the previous citation
balance_authors(splitted_citations, new_elements)
elif ';' in el['misc_txt']:
misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
if misc_txt:
new_elements.append({'type': 'MISC',
'misc_txt': misc_txt})
start_new_citation()
# In case el['recid'] is None, we want to reset it
# because we are starting a new reference
current_recid = el_recid
while ';' in el['misc_txt']:
misc_txt, el['misc_txt'] = el['misc_txt'].split(';', 1)
if misc_txt:
new_elements.append({'type': 'MISC',
'misc_txt': misc_txt})
start_new_citation()
current_recid = None
if el_recid:
current_recid = el_recid
if el['type'] == 'DOI':
current_doi = el['doi_string']
check_ibid(new_elements, el)
new_elements.append(el)
splitted_citations.append(new_elements)
return [el for el in splitted_citations if not empty_citation(el)]
def empty_citation(citation):
els_to_remove = ('MISC', )
for el in citation:
if el['type'] not in els_to_remove:
return False
if el['misc_txt']:
return False
return True
def valid_citation(citation):
els_to_remove = ('MISC', )
for el in citation:
if el['type'] not in els_to_remove:
return True
return False
def remove_invalid_references(splitted_citations):
def add_misc(el, txt):
if not el.get('misc_txt'):
el['misc_txt'] = txt
else:
el['misc_txt'] += " " + txt
splitted_citations = [citation for citation in splitted_citations
if citation]
# We merge some elements in here which means it only makes sense when
# we have at least 2 elements to merge together
if len(splitted_citations) > 1:
previous_citation = None
for citation in splitted_citations:
if not valid_citation(citation):
# Merge to previous one misc txt
if previous_citation:
citation_to_merge_into = previous_citation
else:
citation_to_merge_into = splitted_citations[1]
for el in citation:
add_misc(citation_to_merge_into[-1], el['misc_txt'])
previous_citation = citation
return [citation for citation in splitted_citations
if valid_citation(citation)]
def merge_invalid_references(splitted_citations):
def add_misc(el, txt):
if not el.get('misc_txt'):
el['misc_txt'] = txt
else:
el['misc_txt'] += " " + txt
splitted_citations = [citation for citation in splitted_citations
if citation]
# We merge some elements in here which means it only makes sense when
# we have at least 2 elements to merge together
if len(splitted_citations) > 1:
previous_citation = None
previous_citation_valid = True
for citation in splitted_citations:
current_citation_valid = valid_citation(citation)
if not current_citation_valid:
# Merge to previous one misc txt
if not previous_citation_valid and not current_citation_valid:
for el in citation:
add_misc(previous_citation[-1], el['misc_txt'])
previous_citation = citation
previous_citation_valid = current_citation_valid
return [citation for citation in splitted_citations
if valid_citation(citation)]
def add_year_elements(splitted_citations):
for citation in splitted_citations:
for el in citation:
if el['type'] == 'YEAR':
continue
year = None
for el in citation:
if el['type'] in ('JOURNAL', 'BOOK') \
and 'year' in el:
year = el['year']
break
if year:
citation.append({'type': 'YEAR',
'year': year,
'misc_txt': '',
})
return splitted_citations
def look_for_implied_ibids(splitted_citations):
def look_for_journal(els):
for el in els:
if el['type'] == 'JOURNAL':
return True
return False
current_journal = None
for citation in splitted_citations:
if current_journal and not look_for_journal(citation):
for el in citation:
if el['type'] == 'MISC':
numeration = find_numeration(el['misc_txt'])
if numeration:
if not numeration['series']:
numeration['series'] = extract_series_from_volume(current_journal['volume'])
if numeration['series']:
volume = numeration['series'] + numeration['volume']
else:
volume = numeration['volume']
ibid_el = {'type' : 'JOURNAL',
'misc_txt' : '',
'title' : current_journal['title'],
'volume' : volume,
'year' : numeration['year'],
'page' : numeration['page'],
'page_end' : numeration['page_end'],
'is_ibid' : True,
'extra_ibids': []}
citation.append(ibid_el)
el['misc_txt'] = el['misc_txt'][numeration['len']:]
current_journal = None
for el in citation:
if el['type'] == 'JOURNAL':
current_journal = el
return splitted_citations
def remove_duplicated_authors(splitted_citations):
for citation in splitted_citations:
found_author = False
for el in citation:
if el['type'] == 'AUTH':
if found_author:
el['type'] = 'MISC'
el['misc_txt'] = el['misc_txt'] + " " + el['auth_txt']
else:
found_author = True
return splitted_citations
def remove_duplicated_dois(splitted_citations):
for citation in splitted_citations:
found_doi = False
for el in citation[:]:
if el['type'] == 'DOI':
if found_doi:
citation.remove(el)
else:
found_doi = True
return splitted_citations
def add_recid_elements(splitted_citations):
for citation in splitted_citations:
for el in citation:
if el.get('recid', None):
citation.append({'type': 'RECID',
'recid': el['recid'],
'misc_txt': ''})
break
## End of elements transformations
def print_citations(splitted_citations, line_marker):
write_message('* splitted_citations', verbose=9)
write_message(' * line marker %s' % line_marker, verbose=9)
for citation in splitted_citations:
write_message(" * elements", verbose=9)
for el in citation:
write_message(' * %s %s' % (el['type'], repr(el)), verbose=9)
def parse_reference_line(ref_line, kbs, bad_titles_count={}):
"""Parse one reference line
@input a string representing a single reference bullet
@output parsed references (a list of elements objects)
"""
# Strip the 'marker' (e.g. [1]) from this reference line:
line_marker, ref_line = remove_reference_line_marker(ref_line)
# Find DOI sections in citation
ref_line, identified_dois = identify_and_tag_DOI(ref_line)
# Identify and replace URLs in the line:
ref_line, identified_urls = identify_and_tag_URLs(ref_line)
# Tag <cds.JOURNAL>, etc.
tagged_line, bad_titles_count = tag_reference_line(ref_line,
kbs,
bad_titles_count)
# Debug print tagging (authors, titles, volumes, etc.)
write_message('* tags %r' % tagged_line, verbose=9)
# Using the recorded information, create a MARC XML representation
# of the rebuilt line:
# At the same time, get stats of citations found in the reference line
# (titles, urls, etc):
citation_elements, line_marker, counts = \
parse_tagged_reference_line(line_marker,
tagged_line,
identified_dois,
identified_urls)
# Transformations on elements
split_volume_from_journal(citation_elements)
format_volume(citation_elements)
handle_special_journals(citation_elements, kbs)
format_report_number(citation_elements)
format_author_ed(citation_elements)
look_for_books(citation_elements, kbs)
format_hep(citation_elements)
remove_b_for_nucl_phys(citation_elements)
mangle_volume(citation_elements)
associate_recids(citation_elements)
# Split the reference in multiple ones if needed
splitted_citations = split_citations(citation_elements)
# Look for books in misc field
look_for_undetected_books(splitted_citations, kbs)
# Look for implied ibids
look_for_implied_ibids(splitted_citations)
# Associate recids to the newly added ibids/books
associate_recids_catchup(splitted_citations)
# Remove references with only misc text
# splitted_citations = remove_invalid_references(splitted_citations)
# Merge references with only misc text
# splitted_citations = merge_invalid_references(splitted_citations)
# Find year
add_year_elements(splitted_citations)
# Remove duplicate authors
remove_duplicated_authors(splitted_citations)
# Remove duplicate DOIs
remove_duplicated_dois(splitted_citations)
# Add recid elements
add_recid_elements(splitted_citations)
# For debugging purposes
print_citations(splitted_citations, line_marker)
return splitted_citations, line_marker, counts, bad_titles_count
def look_for_undetected_books(splitted_citations, kbs):
for citation in splitted_citations:
if is_unknown_citation(citation):
search_for_book_in_misc(citation, kbs)
def search_for_book_in_misc(citation, kbs):
"""Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc.
"""
for citation_element in citation:
if citation_element['type'] == 'MISC':
write_message('* Unknown citation found. Searching for book title in: %s' % citation_element['misc_txt'], verbose=9)
for title in kbs['books']:
startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title)
if startIndex != -1:
line = kbs['books'][title.upper()]
book_year = line[2].strip(';')
book_authors = line[0]
book_found = False
if citation_element['misc_txt'].find(book_year) != -1:
# For now consider the citation as valid, we are using
# an exact search, so we don't need to check the authors.
# However, the code below will be useful if we decide
# to introduce fuzzy matching.
book_found = True
for author in get_possible_author_names(citation):
if find_substring_ignore_special_chars(book_authors, author) != -1:
book_found = True
for author in re.findall('[a-zA-Z]{4,}', book_authors):
if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1:
book_found = True
if book_found is True:
write_message('* Book found: %s' % title, verbose=9)
book_element = {'type': 'BOOK',
'misc_txt': '',
'authors': book_authors,
'title': line[1],
'year': book_year}
citation.append(book_element)
citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex)
return True
write_message(' * Book not found!', verbose=9)
return False
def get_possible_author_names(citation):
for citation_element in citation:
if citation_element['type'] == 'AUTH':
return re.findall('[a-zA-Z]{4,}', citation_element['auth_txt'])
return []
def find_substring_ignore_special_chars(s, substr):
s = s.upper()
substr = substr.upper()
clean_s, subs_in_s = re.subn('[^A-Z0-9]', '', s)
clean_substr, subs_in_substr = re.subn('[^A-Z0-9]', '', substr)
startIndex = clean_s.find(clean_substr)
if startIndex != -1:
i = 0
re_alphanum = re.compile('[A-Z0-9]')
for real_index, char in enumerate(s):
if re_alphanum.match(char):
i += 1
if i > startIndex:
break
return real_index
else:
return -1
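# Worked example with assumed inputs (added commentary, not in the original):
# find_substring_ignore_special_chars("see A. Name, Gravitation, 1973",
# "gravitation") compares both strings with every non-alphanumeric character
# stripped and returns 13, the index of the leading 'G' in the original
# string; it returns -1 when there is no match.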
def cut_substring_with_special_chars(s, sub, startIndex):
counter = 0
subPosition = 0
s_Upper = s.upper()
sub = sub.upper()
clean_sub = re.sub('[^A-Z0-9]', '', sub)
for char in s_Upper[startIndex:]:
if char == clean_sub[subPosition]:
subPosition += 1
counter += 1
# end of substring reached?
if subPosition >= len(clean_sub):
#include everything till a space, open bracket or a normal character
counter += len(re.split('[ [{(a-zA-Z0-9]',s[startIndex+counter:],1)[0])
return s[0:startIndex].strip()+ ' ' +s[startIndex+counter:].strip()
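# Worked example with assumed inputs (added commentary, not in the original):
# cut_substring_with_special_chars("see Gravitation, Freeman 1973",
# "Gravitation", 4) walks past the matched title plus the trailing comma and
# returns "see Freeman 1973".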
def is_unknown_citation(citation):
"""Checks if the citation got recognized as one of the known types.
"""
knownTypes = ['BOOK', 'JOURNAL', 'DOI', 'ISBN', 'RECID']
for citation_element in citation:
if citation_element['type'] in knownTypes:
return False
return True
def parse_references_elements(ref_sect, kbs):
"""Passed a complete reference section, process each line and attempt to
## identify and standardise individual citations within the line.
@param ref_sect: (list) of strings - each string in the list is a
reference line.
@param preprint_repnum_search_kb: (dictionary) - keyed by a tuple
containing the line-number of the pattern in the KB and the non-standard
category string. E.g.: (3, 'ASTRO PH'). Value is regexp pattern used to
search for that report-number.
@param preprint_repnum_standardised_categs: (dictionary) - keyed by non-
standard version of institutional report number, value is the
standardised version of that report number.
@param periodical_title_search_kb: (dictionary) - keyed by non-standard
title to search for, value is the compiled regexp pattern used to
search for that title.
@param standardised_periodical_titles: (dictionary) - keyed by non-
standard title to search for, value is the standardised version of that
title.
@param periodical_title_search_keys: (list) - ordered list of non-
standard titles to search for.
@return: (tuple) of 6 components:
( list -> of strings, each string is a MARC XML-ized reference
line.
integer -> number of fields of miscellaneous text found for the
record.
integer -> number of title citations found for the record.
integer -> number of institutional report-number citations found
for the record.
integer -> number of URL citations found for the record.
integer -> number of DOI's found
integer -> number of author groups found
dictionary -> The totals for each 'bad title' found in the reference
section.
)
"""
# a list to contain the processed reference lines:
citations = []
# counters for extraction stats:
counts = {
'misc': 0,
'title': 0,
'reportnum': 0,
'url': 0,
'doi': 0,
'auth_group': 0,
}
# A dictionary to contain the total count of each 'bad title' found
# in the entire reference section:
bad_titles_count = {}
# process references line-by-line:
for ref_line in ref_sect:
citation_elements, line_marker, this_counts, bad_titles_count = \
parse_reference_line(ref_line, kbs, bad_titles_count)
# Accumulate stats
counts = sum_2_dictionaries(counts, this_counts)
citations.append({'elements' : citation_elements,
'line_marker': line_marker})
# Return the list of processed reference lines:
return citations, counts, bad_titles_count
def parse_tagged_reference_line(line_marker,
line,
identified_dois,
identified_urls):
""" Given a single tagged reference line, convert it to its MARC-XML representation.
Try to find all tags and extract their contents and their types into corresponding
dictionary elements. Append each dictionary tag representation onto a list, which
is given to 'build_formatted_xml_citation()' where the correct xml output will be generated.
This method is dumb, with very few heuristics. It simply looks for tags, and makes dictionaries
from the data it finds in a tagged reference line.
@param line_marker: (string) The line marker for this single reference line (e.g. [19])
@param line: (string) The tagged reference line.
@param identified_dois: (list) a list of dois which were found in this line. The ordering of
dois corresponds to the ordering of tags in the line, reading from left to right.
@param identified_urls: (list) a list of urls which were found in this line. The ordering of
urls corresponds to the ordering of tags in the line, reading from left to right.
@param which format to use for references,
roughly "<title> <volume> <page>" or "<title>,<volume>,<page>"
@return xml_line: (string) the MARC-XML representation of the tagged reference line
@return count_*: (integer) the number of * (pieces of info) found in the reference line.
"""
count_misc = count_title = count_reportnum = count_url = count_doi = count_auth_group = 0
processed_line = line
cur_misc_txt = u""
tag_match = re_tagged_citation.search(processed_line)
# contains a list of dictionary entries of previously cited items
citation_elements = []
# the last tag element found when working from left-to-right across the line
identified_citation_element = None
while tag_match is not None:
# While there are tags inside this reference line...
tag_match_start = tag_match.start()
tag_match_end = tag_match.end()
tag_type = tag_match.group(1)
cur_misc_txt += processed_line[0:tag_match_start]
# Catches both standard titles, and ibid's
if tag_type.find("JOURNAL") != -1:
# This tag is an identified journal TITLE. It should be followed
# by VOLUME, YEAR and PAGE tags.
# See if the found title has been tagged as an ibid: <cds.JOURNALibid>
if tag_match.group('ibid'):
is_ibid = True
closing_tag_length = len(CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID)
idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_TITLE_IBID,
tag_match_end)
else:
is_ibid = False
closing_tag_length = len(CFG_REFEXTRACT_MARKER_CLOSING_TITLE)
# extract the title from the line:
idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_TITLE,
tag_match_end)
if idx_closing_tag == -1:
# no closing TITLE tag found - get rid of the solitary tag
processed_line = processed_line[tag_match_end:]
identified_citation_element = None
else:
# Closing tag was found:
# The title text to be used in the marked-up citation:
title_text = processed_line[tag_match_end:idx_closing_tag]
# Now trim this matched title and its tags from the start of the line:
processed_line = processed_line[idx_closing_tag+closing_tag_length:]
numeration_match = re_recognised_numeration_for_title_plus_series.search(processed_line)
if numeration_match:
# recognised numeration immediately after the title - extract it:
reference_volume = numeration_match.group('vol')
reference_year = numeration_match.group('yr') or ''
reference_page = numeration_match.group('pg')
# This is used on two accounts:
# 1. To get the series char from the title, if no series was found with the numeration
# 2. To always remove any series character from the title match text
# series_from_title = re_series_from_title.search(title_text)
#
if numeration_match.group('series'):
reference_volume = numeration_match.group('series') + reference_volume
# Skip past the matched numeration in the working line:
processed_line = processed_line[numeration_match.end():]
# 'is_ibid' saves whether THIS TITLE is an ibid or not. (True or False)
# 'extra_ibids' are there to hold ibid's without the word 'ibid', which
# come directly after this title
# i.e., they are recognised using title numeration instead of ibid notation
identified_citation_element = {'type' : "JOURNAL",
'misc_txt' : cur_misc_txt,
'title' : title_text,
'volume' : reference_volume,
'year' : reference_year,
'page' : reference_page,
'is_ibid' : is_ibid,
'extra_ibids': []
}
count_title += 1
cur_misc_txt = u""
# Try to find IBID's after this title, on top of previously found titles that were
# denoted with the word 'IBID'. (i.e. look for IBID's without the word 'IBID' by
# looking at extra numeration after this title)
numeration_match = re_numeration_no_ibid_txt.match(processed_line)
while numeration_match is not None:
reference_volume = numeration_match.group('vol')
reference_year = numeration_match.group('yr')
reference_page = numeration_match.group('pg')
if numeration_match.group('series'):
reference_volume = numeration_match.group('series') + reference_volume
# Skip past the matched numeration in the working line:
processed_line = processed_line[numeration_match.end():]
# Takes the just found title text
identified_citation_element['extra_ibids'].append(
{'type' : "JOURNAL",
'misc_txt' : "",
'title' : title_text,
'volume' : reference_volume,
'year' : reference_year,
'page' : reference_page,
})
# Increment the stats counters:
count_title += 1
title_text = ""
reference_volume = ""
reference_year = ""
reference_page = ""
numeration_match = re_numeration_no_ibid_txt.match(processed_line)
else:
# No numeration was recognised after the title. Add the title into a MISC item instead:
cur_misc_txt += "%s" % title_text
identified_citation_element = None
elif tag_type == "REPORTNUMBER":
# This tag is an identified institutional report number:
# extract the institutional report-number from the line:
idx_closing_tag = processed_line.find(CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM,
tag_match_end)
# Sanity check - did we find a closing report-number tag?
if idx_closing_tag == -1:
# no closing </cds.REPORTNUMBER> tag found - strip the opening tag and move past this
# recognised reportnumber as it is unreliable:
processed_line = processed_line[tag_match_end:]
identified_citation_element = None
else:
# closing tag was found
report_num = processed_line[tag_match_end:idx_closing_tag]
# now trim this matched institutional report-number
# and its tags from the start of the line:
ending_tag_pos = idx_closing_tag \
+ len(CFG_REFEXTRACT_MARKER_CLOSING_REPORT_NUM)
processed_line = processed_line[ending_tag_pos:]
identified_citation_element = {'type' : "REPORTNUMBER",
'misc_txt' : cur_misc_txt,
'report_num' : report_num}
count_reportnum += 1
cur_misc_txt = u""
elif tag_type == "URL":
# This tag is an identified URL:
# From the "identified_urls" list, get this URL and its
# description string:
url_string = identified_urls[0][0]
url_desc = identified_urls[0][1]
# Now move past this "<cds.URL />"tag in the line:
processed_line = processed_line[tag_match_end:]
# Delete the information for this URL from the start of the list
# of identified URLs:
identified_urls[0:1] = []
# Save the current misc text
identified_citation_element = {
'type' : "URL",
'misc_txt' : "%s" % cur_misc_txt,
'url_string' : "%s" % url_string,
'url_desc' : "%s" % url_desc
}
count_url += 1
cur_misc_txt = u""
elif tag_type == "DOI":
# This tag is an identified DOI:
# From the "identified_dois" list, get this DOI and its
# description string:
doi_string = identified_dois[0]
# Now move past this "<cds.CDS />"tag in the line:
processed_line = processed_line[tag_match_end:]
# Remove DOI from the list of DOI strings
identified_dois[0:1] = []
# SAVE the current misc text
identified_citation_element = {
'type' : "DOI",
'misc_txt' : "%s" % cur_misc_txt,
'doi_string' : "%s" % doi_string
}
# Increment the stats counters:
count_doi += 1
cur_misc_txt = u""
elif tag_type.find("AUTH") != -1:
# This tag is an identified Author:
auth_type = ""
# extract the title from the line:
if tag_type.find("stnd") != -1:
auth_type = "stnd"
idx_closing_tag_nearest = processed_line.find(
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_STND, tag_match_end)
elif tag_type.find("etal") != -1:
auth_type = "etal"
idx_closing_tag_nearest = processed_line.find(
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_ETAL, tag_match_end)
elif tag_type.find("incl") != -1:
auth_type = "incl"
idx_closing_tag_nearest = processed_line.find(
CFG_REFEXTRACT_MARKER_CLOSING_AUTHOR_INCL, tag_match_end)
if idx_closing_tag_nearest == -1:
# no closing </cds.AUTH****> tag found - strip the opening tag
# and move past it
processed_line = processed_line[tag_match_end:]
identified_citation_element = None
else:
auth_txt = processed_line[tag_match_end:idx_closing_tag_nearest]
# Now move past the ending tag in the line:
processed_line = processed_line[idx_closing_tag_nearest + len("</cds.AUTHxxxx>"):]
#SAVE the current misc text
identified_citation_element = {
'type' : "AUTH",
'misc_txt' : "%s" % cur_misc_txt,
'auth_txt' : "%s" % auth_txt,
'auth_type' : "%s" % auth_type
}
# Increment the stats counters:
count_auth_group += 1
cur_misc_txt = u""
# These following tags may be found separately;
# They are usually found when a "JOURNAL" tag is hit
# (ONLY immediately afterwards, however)
# Sitting by themselves means they do not have
# an associated TITLE tag, and should be MISC
elif tag_type == "SER":
# This tag is a SERIES tag; Since it was not preceded by a TITLE
# tag, it is useless - strip the tag and put it into miscellaneous:
(cur_misc_txt, processed_line) = \
convert_unusable_tag_to_misc(processed_line, cur_misc_txt,
tag_match_end,
CFG_REFEXTRACT_MARKER_CLOSING_SERIES)
identified_citation_element = None
elif tag_type == "VOL":
# This tag is a VOLUME tag; Since it was not preceded by a TITLE
# tag, it is useless - strip the tag and put it into miscellaneous:
(cur_misc_txt, processed_line) = \
convert_unusable_tag_to_misc(processed_line, cur_misc_txt,
tag_match_end,
CFG_REFEXTRACT_MARKER_CLOSING_VOLUME)
identified_citation_element = None
elif tag_type == "YR":
# This tag is a YEAR tag; Since it's not preceded by TITLE and
# VOLUME tags, it is useless - strip the tag and put the contents
# into miscellaneous:
(cur_misc_txt, processed_line) = \
convert_unusable_tag_to_misc(processed_line, cur_misc_txt,
tag_match_end,
CFG_REFEXTRACT_MARKER_CLOSING_YEAR)
identified_citation_element = None
elif tag_type == "PG":
# This tag is a PAGE tag; Since it's not preceded by TITLE,
# VOLUME and YEAR tags, it is useless - strip the tag and put the
# contents into miscellaneous:
(cur_misc_txt, processed_line) = \
convert_unusable_tag_to_misc(processed_line, cur_misc_txt,
tag_match_end,
CFG_REFEXTRACT_MARKER_CLOSING_PAGE)
identified_citation_element = None
elif tag_type == "QUOTED":
identified_citation_element, processed_line, cur_misc_txt = \
map_tag_to_subfield(tag_type,
processed_line[tag_match_end:],
cur_misc_txt,
'title')
elif tag_type == "ISBN":
identified_citation_element, processed_line, cur_misc_txt = \
map_tag_to_subfield(tag_type,
processed_line[tag_match_end:],
cur_misc_txt,
tag_type)
elif tag_type == "PUBLISHER":
identified_citation_element, processed_line, cur_misc_txt = \
map_tag_to_subfield(tag_type,
processed_line[tag_match_end:],
cur_misc_txt,
'publisher')
elif tag_type == "COLLABORATION":
identified_citation_element, processed_line, cur_misc_txt = \
map_tag_to_subfield(tag_type,
processed_line[tag_match_end:],
cur_misc_txt,
'collaboration')
if identified_citation_element:
# Append the found tagged data and current misc text
citation_elements.append(identified_citation_element)
identified_citation_element = None
# Look for the next tag in the processed line:
tag_match = re_tagged_citation.search(processed_line)
# place any remaining miscellaneous text into the
# appropriate MARC XML fields:
cur_misc_txt += processed_line
# This MISC element will hold the entire citation in the event
# that no tags were found.
if len(cur_misc_txt.strip(" .;,")) > 0:
# Increment the stats counters:
count_misc += 1
identified_citation_element = {
'type' : "MISC",
'misc_txt' : "%s" % cur_misc_txt,
}
citation_elements.append(identified_citation_element)
return (citation_elements, line_marker, {
'misc': count_misc,
'title': count_title,
'reportnum': count_reportnum,
'url': count_url,
'doi': count_doi,
'auth_group': count_auth_group
})
def map_tag_to_subfield(tag_type, line, cur_misc_txt, dest):
"""Create a new reference element"""
closing_tag = '</cds.%s>' % tag_type
# extract the tagged content from the line:
idx_closing_tag = line.find(closing_tag)
# Sanity check - did we find a closing tag?
if idx_closing_tag == -1:
# no closing </cds.TAG> tag found - strip the opening tag and move past this
# recognised item as it is unreliable:
identified_citation_element = None
line = line[len('<cds.%s>' % tag_type):]
else:
tag_content = line[:idx_closing_tag]
identified_citation_element = {'type' : tag_type,
'misc_txt' : cur_misc_txt,
dest : tag_content}
ending_tag_pos = idx_closing_tag + len(closing_tag)
line = line[ending_tag_pos:]
cur_misc_txt = u""
return identified_citation_element, line, cur_misc_txt
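# Sketch with assumed inputs (added commentary, not in the original): calling
# map_tag_to_subfield('ISBN', '0-201-53082-1</cds.ISBN> rest', '', 'ISBN')
# returns ({'type': 'ISBN', 'misc_txt': '', 'ISBN': '0-201-53082-1'},
# ' rest', u'').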
def convert_unusable_tag_to_misc(line,
misc_text,
tag_match_end,
closing_tag):
"""Function to remove an unwanted, tagged, citation item from a reference
line. The tagged item itself is put into the miscellaneous text variable;
the data up to the closing tag is then trimmed from the beginning of the
working line. For example, the following working line:
Example, AN. Testing software; <cds.YR>(2001)</cds.YR>, CERN, Geneva.
...would be trimmed down to:
, CERN, Geneva.
...And the Miscellaneous text taken from the start of the line would be:
Example, AN. Testing software; (2001)
...(assuming that the details of <cds.YR> and </cds.YR> were passed to
the function).
@param line: (string) - the reference line.
@param misc_text: (string) - the variable containing the miscellaneous
text recorded so far.
@param tag_match_end: (integer) - the index of the end of the opening tag
in the line.
@param closing_tag: (string) - the closing tag to look for in the line
(e.g. </cds.YR>).
@return: (tuple) - containing misc_text (string) and line (string)
"""
# extract the tagged information:
idx_closing_tag = line.find(closing_tag, tag_match_end)
# Sanity check - did we find a closing tag?
if idx_closing_tag == -1:
# no closing tag found - strip the opening tag and move past this
# recognised item as it is unusable:
line = line[tag_match_end:]
else:
# closing tag was found
misc_text += line[tag_match_end:idx_closing_tag]
# now trim the matched item and its tags from the start of the line:
line = line[idx_closing_tag+len(closing_tag):]
return (misc_text, line)
# Tasks related to extraction of reference section from full-text:
# ----> 1. Removing page-breaks, headers and footers before
# searching for reference section:
# ----> 2. Finding reference section in full-text:
# ----> 3. Found reference section - now take out lines and rebuild them:
def remove_leading_garbage_lines_from_reference_section(ref_sectn):
"""Sometimes, the first lines of the extracted references are completely
blank or email addresses. These must be removed as they are not
references.
@param ref_sectn: (list) of strings - the reference section lines
@return: (list) of strings - the reference section without leading
blank lines or email addresses.
"""
p_email = re.compile(ur'^\s*e\-?mail', re.UNICODE)
while ref_sectn and (ref_sectn[0].isspace() or p_email.match(ref_sectn[0])):
ref_sectn.pop(0)
return ref_sectn
# ----> Glue - logic for finding and extracting reference section:
# Tasks related to conversion of full-text to plain-text:
def get_plaintext_document_body(fpath, keep_layout=False):
"""Given a file-path to a full-text, return a list of unicode strings
whereby each string is a line of the fulltext.
In the case of a plain-text document, this simply means reading the
contents in from the file. In the case of a PDF/PostScript however,
this means converting the document to plaintext.
@param fpath: (string) - the path to the fulltext file
@return: (list) of strings - each string being a line in the document.
"""
textbody = []
status = 0
if os.access(fpath, os.F_OK|os.R_OK):
# filepath OK - attempt to extract references:
# get file type:
cmd_pdftotext = [CFG_PATH_GFILE, fpath]
pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE)
res_gfile = pipe_pdftotext.stdout.read()
if (res_gfile.lower().find("text") != -1) and \
(res_gfile.lower().find("pdf") == -1):
# plain-text file: don't convert - just read in:
f = open(fpath, "r")
try:
textbody = [line.decode("utf-8") for line in f.readlines()]
finally:
f.close()
elif (res_gfile.lower().find("pdf") != -1) or \
(res_gfile.lower().find("pdfa") != -1):
# convert from PDF
(textbody, status) = convert_PDF_to_plaintext(fpath, keep_layout)
else:
# invalid format
status = 1
else:
# filepath not OK
status = 1
return (textbody, status)
def parse_references(reference_lines, recid=None, kbs_files=None):
"""Parse a list of references
Given a list of raw reference lines (list of strings),
output the MARC-XML content extracted version
"""
# RefExtract knowledge bases
kbs = get_kbs(custom_kbs_files=kbs_files)
# Identify journal titles, report numbers, URLs, DOIs, and authors...
processed_references, counts, dummy_bad_titles_count = \
parse_references_elements(reference_lines, kbs)
# Generate marc xml using the elements list
fields = build_references(processed_references)
# Generate the xml string to be outputted
return build_record(counts, fields, recid=recid)
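# Minimal usage sketch (reference line and recid are assumed for illustration):
#     xml = parse_references(["[1] J. Smith, Phys.Rev. D10 (1974) 2445"],
#                            recid=1234)
# would return the MARC-XML string built from the extracted citation elements.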
|
NCSSM-CS/CSAssess
|
refs/heads/master
|
controller/getAssessment.py
|
1
|
#!/usr/local/bin/python3
"""
created_by: John Fang
created_date: 3/4/2015
last_modified_by: Aninda Manocha
last_modified_date: 3/6/2014
"""
# imports
import json
import utils
from sql.user import User
from sql.assessment import Assessment
from sql.session import Session
from sql.question import Question  # assumed import path; Question is used below but was not imported in the original
#Format of JSON -KP
#requestType: getAssessment
#course: "string"
#name: "string"
#question: integer
#section: "string"
#session: integer
#user: "string"
def iChooseU(form):
thisUser = utils.findUser(form)
# course = form.getlist("course")[0]
name = form.getlist("name")[0]
question = form.getlist("question")[0]
# section = form.getlist("section")[0]
# session = form.getlist("session")[0]
# user = form.getlist("user")[0]
complete = []
count = 0
# if not course == "":
# complete += Assessment.get(0, Course.get(0, course))
# count += 1
if not name == "":
complete += Assessment.get(0, name)
count += 1
if not question == "":
complete += Assessment.get(0, Question.get(0, question))
count += 1
# if not section == "":
# complete += Assessment.get(0, Section.get(0, section))
# count += 1
# if not session == "":
# complete += Assessment.get(0, Session.get(0, session))
# count += 1
# if not user == "":
# complete += Assessment.get(0, User.get(0, None, user))
# count += 1
collect = []
intersect = []
for response in complete:
if collect.count(response) < count:
collect.append(response)
else:
intersect.append(response)
out = {}
out["assessmentList"] = intersect
out["sessionID"] = form.getlist("session")[0]
return json.dumps(out)
|
jylaxp/django
|
refs/heads/master
|
django/contrib/postgres/operations.py
|
374
|
from django.contrib.postgres.signals import register_hstore_handler
from django.db.migrations.operations.base import Operation
class CreateExtension(Operation):
reversible = True
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if schema_editor.connection.vendor != 'postgresql':
return
schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % self.name)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("DROP EXTENSION %s" % self.name)
def describe(self):
return "Creates extension %s" % self.name
class HStoreExtension(CreateExtension):
def __init__(self):
self.name = 'hstore'
def database_forwards(self, app_label, schema_editor, from_state, to_state):
super(HStoreExtension, self).database_forwards(app_label, schema_editor, from_state, to_state)
# Register hstore straight away as it cannot be done before the
# extension is installed, a subsequent data migration would use the
# same connection
register_hstore_handler(schema_editor.connection)
class UnaccentExtension(CreateExtension):
def __init__(self):
self.name = 'unaccent'
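# Usage sketch (hypothetical migration, not part of this module): these
# operations are listed in a migration's `operations` attribute, e.g.
#     operations = [HStoreExtension(), CreateExtension('pg_trgm')]
# so that the extension is created on PostgreSQL backends before any dependent
# fields are added.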
|
mpdevilleres/tbpc_app
|
refs/heads/master
|
tbpc/contract_mgt/tests/test_models.py
|
1
|
from django.test import TestCase
from .factories import ContractFactory, ContractChangeFactory, ContractorFactory, ContractorContactFactory, \
SupportContractFactory
from ..models import Contract, ContractChange, Contractor, ContractorContact, SupportContract
import pytest
@pytest.mark.django_db
class TestContractModel:
def test_model_instance(self):
model = ContractFactory.create()
assert isinstance(model, Contract) == True
@pytest.mark.django_db
class TestContractChangeModel:
def test_model_instance(self):
model = ContractChangeFactory.create()
assert isinstance(model, ContractChange) == True
@pytest.mark.django_db
class TestContractorModel:
def test_model_instance(self):
model = ContractorFactory.create()
assert isinstance(model, Contractor) == True
@pytest.mark.django_db
class TestContractorContactModel:
def test_model_instance(self):
model = ContractorContactFactory.create()
assert isinstance(model, ContractorContact) == True
@pytest.mark.django_db
class TestSupportContractModel:
def test_model_instance(self):
model = SupportContractFactory.create()
assert isinstance(model, SupportContract) == True
|
liang42hao/bokeh
|
refs/heads/master
|
bokeh/query.py
|
43
|
''' The query module provides functions for searching Bokeh object
graphs for objects that match specified criteria.
Queries are specified as selectors similar to MongoDB style query
selectors.
Examples::
# find all objects with type "grid"
find(p, {'type': 'grid'})
# find all objects with type "grid" or "axis"
find(p, {OR: [
{'type': 'grid'}, {'type': 'axis'}
]})
# same query, using IN operator
find(p, {'type': {IN: ['grid', 'axis']}})
# find all plot objects on the 'left' layout of the Plot
list(find(p, {'layout': 'left'}, {'plot': p}))
# find all subplots in column 0
find(p, {'type': 'plot', 'column': 0}, {'gridplot': p})
# find all subplots in the last row
find(p, {'type': 'plot', 'row': -1}, {'gridplot': p})
'''
from __future__ import absolute_import
from six import string_types
class OR(object): pass
class IN(object): pass
class GT(object): pass
class LT(object): pass
class EQ(object): pass
class GEQ(object): pass
class LEQ(object): pass
class NEQ(object): pass
def match(obj, selector, context=None):
''' Test whether a particular object matches a given
selector.
Args:
obj (PlotObject) : object to test
selector (JSON-like) : query selector
See module docs for details
Returns:
bool : True if the object matches, False otherwise
There are two selector keys that are handled specially. The first
is 'type', which will do an isinstance check::
>>> from bokeh.plotting import line
>>> from bokeh.models import Axis
>>> p = line([1,2,3], [4,5,6])
>>> len(list(p.select({'type': Axis})))
2
There is also a 'tags' attribute that `PlotObject` objects have,
that is a list of user-supplied values. The 'tags' selector key can
be used to query against this list of tags. An object matches if
any of the tags in the selector match any of the tags on the
object::
>>> from bokeh.plotting import line
>>> from bokeh.models import Axis
>>> p = line([1,2,3], [4,5,6])
>>> p.tags = ["my plot", 10]
>>> len(list(p.select({'tags': "my plot"})))
1
>>> len(list(p.select({'tags': ["my plot", 10]})))
1
'''
context = context or {}
for key, val in selector.items():
# test attributes
if isinstance(key, string_types):
# special case 'type'
if key == "type":
# type supports IN, check for that first
if isinstance(val, dict) and list(val.keys()) == [IN]:
if not any(isinstance(obj, x) for x in val[IN]): return False
# otherwise just check the type of the object against val
elif not isinstance(obj, val): return False
# special case 'tag'
elif key == 'tags':
if isinstance(val, string_types):
if val not in obj.tags: return False
else:
try:
if not set(val) & set(obj.tags): return False
except TypeError:
if val not in obj.tags: return False
# if the object doesn't have the attr, it doesn't match
elif not hasattr(obj, key): return False
# if the value to check is a dict, recurse
else:
attr = getattr(obj, key)
if callable(attr):
try:
if not attr(val, **context): return False
except:
return False
elif isinstance(val, dict):
if not match(attr, val, context): return False
else:
if attr != val: return False
# test OR conditionals
elif key is OR:
if not _or(obj, val): return False
# test operands
elif key in _operators:
if not _operators[key](obj, val): return False
else:
raise ValueError("malformed query selector")
return True
def find(objs, selector, context=None):
''' Query an object and all of its contained references
and yield objects that match the given selector.
Args:
obj (PlotObject) : object to query
selector (JSON-like) : query selector
See module docs for details
Yields:
PlotObject : objects that match the query
Examples:
'''
return (obj for obj in objs if match(obj, selector, context))
def _or(obj, selectors):
return any(match(obj, selector) for selector in selectors)
_operators = {
IN: lambda x, y: x in y,
GT: lambda x, y: x > y,
LT: lambda x, y: x < y,
EQ: lambda x, y: x == y,
GEQ: lambda x, y: x >= y,
LEQ: lambda x, y: x <= y,
NEQ: lambda x, y: x != y,
}
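# Illustrative selector (attribute name assumed, added commentary): a query such
# as find(objs, {'plot_width': {GT: 400}}) recurses through match() and applies
# the GT operator to each object's plot_width attribute, yielding only objects
# whose plot_width exceeds 400.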
|
GeekTrainer/Flask
|
refs/heads/master
|
Work/Trivia - Module 5/env/Lib/site-packages/flask/testsuite/test_apps/flaskext/__init__.py
|
12133432
| |
PayBas/MediaToBBCode
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
adieu/django-nonrel
|
refs/heads/master
|
django/contrib/localflavor/kw/__init__.py
|
12133432
| |
nontas/antonakoscvpr2015
|
refs/heads/master
|
antonakoscvpr2015/__init__.py
|
12133432
| |
memo/tensorflow
|
refs/heads/master
|
tensorflow/examples/tutorials/word2vec/__init__.py
|
12133432
| |
phoebusliang/parallel-lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/modeltests/proxy_model_inheritance/__init__.py
|
12133432
| |
aliyun/oss-ftp
|
refs/heads/master
|
python36/unix/lib/urllib3/contrib/_securetransport/__init__.py
|
12133432
| |
tusbar/django-staticfiles
|
refs/heads/develop
|
staticfiles/tests/apps/no_label/models.py
|
12133432
| |
fffonion/you-get
|
refs/heads/develop
|
src/you_get/cli_wrapper/transcoder/__init__.py
|
12133432
| |
raphaelmerx/django
|
refs/heads/master
|
django/conf/locale/hr/__init__.py
|
12133432
| |
eduNEXT/edunext-platform
|
refs/heads/master
|
cms/djangoapps/api/v1/serializers/__init__.py
|
12133432
| |
dynamicapp/dynamicapp
|
refs/heads/master
|
lib/iOS/dependencies/cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/pdf.py
|
34
|
"""SCons.Tool.pdf
Common PDF Builder definition for various other Tool modules that use it.
Add an explicit action to run epstopdf to convert .eps files to .pdf
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/pdf.py 5023 2010/06/14 22:05:46 scons"
import SCons.Action
import SCons.Builder
import SCons.Tool
import SCons.Util
PDFBuilder = None
EpsPdfAction = SCons.Action.Action('$EPSTOPDFCOM', '$EPSTOPDFCOMSTR')
def generate(env):
try:
env['BUILDERS']['PDF']
except KeyError:
global PDFBuilder
if PDFBuilder is None:
PDFBuilder = SCons.Builder.Builder(action = {},
source_scanner = SCons.Tool.PDFLaTeXScanner,
prefix = '$PDFPREFIX',
suffix = '$PDFSUFFIX',
emitter = {},
source_ext_match = None,
single_source=True)
env['BUILDERS']['PDF'] = PDFBuilder
env['PDFPREFIX'] = ''
env['PDFSUFFIX'] = '.pdf'
# put the epstopdf builder in this routine so we can add it after
# the pdftex builder so that one is the default for no source suffix
def generate2(env):
bld = env['BUILDERS']['PDF']
#bld.add_action('.ps', EpsPdfAction) # this is covered by direct Ghostscript action in gs.py
bld.add_action('.eps', EpsPdfAction)
env['EPSTOPDF'] = 'epstopdf'
env['EPSTOPDFFLAGS'] = SCons.Util.CLVar('')
env['EPSTOPDFCOM'] = '$EPSTOPDF $EPSTOPDFFLAGS ${SOURCE} --outfile=${TARGET}'
def exists(env):
# This only puts a skeleton Builder in place, so if someone
# references this Tool directly, it's always "available."
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
silly-wacky-3-town-toon/SOURCE-COD
|
refs/heads/master
|
otp/distributed/DistributedDirectoryAI.py
|
5
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedDirectoryAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedDirectoryAI")
def setParentingRules(self, todo0, todo1):
pass
|
Zlash65/erpnext
|
refs/heads/develop
|
erpnext/patches/v12_0/delete_priority_property_setter.py
|
8
|
import frappe
def execute():
frappe.db.sql("""
DELETE FROM `tabProperty Setter`
WHERE `tabProperty Setter`.doc_type='Issue'
AND `tabProperty Setter`.field_name='priority'
AND `tabProperty Setter`.property='options'
""")
|
kamenim/samba
|
refs/heads/master
|
python/samba/tests/xattr.py
|
30
|
# Unix SMB/CIFS implementation. Tests for xattr manipulation
# Copyright (C) Matthieu Patou <mat@matws.net> 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.xattr_native and samba.xattr_tdb."""
import samba.xattr_native, samba.xattr_tdb
from samba.xattr import copytree_with_xattrs
from samba.dcerpc import xattr
from samba.ndr import ndr_pack
from samba.tests import (
SkipTest,
TestCase,
TestCaseInTempDir,
)
import random
import shutil
import os
class XattrTests(TestCase):
def _tmpfilename(self):
random.seed()
path = os.environ['SELFTEST_PREFIX']
return os.path.join(path, "pytests"+str(int(100000*random.random())))
def _eadbpath(self):
return os.path.join(os.environ['SELFTEST_PREFIX'], "eadb.tdb")
def test_set_xattr_native(self):
if not samba.xattr_native.is_xattr_supported():
raise SkipTest()
ntacl = xattr.NTACL()
ntacl.version = 1
tempf = self._tmpfilename()
open(tempf, 'w').write("empty")
try:
samba.xattr_native.wrap_setxattr(tempf, "user.unittests",
ndr_pack(ntacl))
except IOError:
raise SkipTest("the filesystem where the tests are runned do not support XATTR")
os.unlink(tempf)
def test_set_and_get_native(self):
if not samba.xattr_native.is_xattr_supported():
raise SkipTest()
tempf = self._tmpfilename()
reftxt = "this is a test"
open(tempf, 'w').write("empty")
try:
samba.xattr_native.wrap_setxattr(tempf, "user.unittests", reftxt)
text = samba.xattr_native.wrap_getxattr(tempf, "user.unittests")
self.assertEquals(text, reftxt)
except IOError:
raise SkipTest("the filesystem where the tests are runned do not support XATTR")
os.unlink(tempf)
def test_set_xattr_tdb(self):
tempf = self._tmpfilename()
eadb_path = self._eadbpath()
ntacl = xattr.NTACL()
ntacl.version = 1
open(tempf, 'w').write("empty")
try:
samba.xattr_tdb.wrap_setxattr(eadb_path,
tempf, "user.unittests", ndr_pack(ntacl))
finally:
os.unlink(tempf)
os.unlink(eadb_path)
def test_set_tdb_not_open(self):
tempf = self._tmpfilename()
ntacl = xattr.NTACL()
ntacl.version = 1
open(tempf, 'w').write("empty")
try:
self.assertRaises(IOError, samba.xattr_tdb.wrap_setxattr,
os.path.join("nonexistent", "eadb.tdb"), tempf,
"user.unittests", ndr_pack(ntacl))
finally:
os.unlink(tempf)
def test_set_and_get_tdb(self):
tempf = self._tmpfilename()
eadb_path = self._eadbpath()
reftxt = "this is a test"
open(tempf, 'w').write("empty")
try:
samba.xattr_tdb.wrap_setxattr(eadb_path, tempf, "user.unittests",
reftxt)
text = samba.xattr_tdb.wrap_getxattr(eadb_path, tempf,
"user.unittests")
self.assertEquals(text, reftxt)
finally:
os.unlink(tempf)
os.unlink(eadb_path)
class TestCopyTreeWithXattrs(TestCaseInTempDir):
def test_simple(self):
os.chdir(self.tempdir)
os.mkdir("a")
os.mkdir("a/b")
os.mkdir("a/b/c")
f = open('a/b/c/d', 'w')
try:
f.write("foo")
finally:
f.close()
copytree_with_xattrs("a", "b")
shutil.rmtree("a")
shutil.rmtree("b")
|
ashishbaghudana/mthesis-ashish
|
refs/heads/develop
|
resources/tees/Detectors/UnmergingDetector.py
|
2
|
from SingleStageDetector import SingleStageDetector
from ExampleBuilders.UnmergingExampleBuilder import UnmergingExampleBuilder
from ExampleWriters.UnmergingExampleWriter import UnmergingExampleWriter
from Classifiers.SVMMultiClassClassifier import SVMMultiClassClassifier
from Evaluators.AveragingMultiClassEvaluator import AveragingMultiClassEvaluator
import itertools, sys, os
class UnmergingDetector(SingleStageDetector):
"""
Makes valid argument combinations for BioNLP type events.
"""
def __init__(self):
SingleStageDetector.__init__(self)
self.exampleBuilder = UnmergingExampleBuilder
self.exampleWriter = UnmergingExampleWriter()
self.Classifier = SVMMultiClassClassifier
self.evaluator = AveragingMultiClassEvaluator
self.tag = "unmerging-"
def buildExamples(self, model, datas, outputs, golds=[], exampleStyle=None, saveIdsToModel=False, parse=None):
if exampleStyle == None:
exampleStyle = model.getStr(self.tag+"example-style")
if parse == None:
parse = self.getStr(self.tag+"parse", model)
self.structureAnalyzer.load(model)
self.exampleBuilder.structureAnalyzer = self.structureAnalyzer
for data, output, gold in itertools.izip_longest(datas, outputs, golds, fillvalue=[]):
print >> sys.stderr, "Example generation for", output
if not isinstance(data, (list, tuple)): data = [data]
if not isinstance(gold, (list, tuple)): gold = [gold]
append = False
for dataSet, goldSet in itertools.izip_longest(data, gold, fillvalue=None):
if goldSet == None:
goldSet = dataSet
if dataSet != None:
self.exampleBuilder.run(dataSet, output, parse, None, exampleStyle, model.get(self.tag+"ids.classes",
True), model.get(self.tag+"ids.features", True), goldSet, append, saveIdsToModel,
structureAnalyzer=self.structureAnalyzer)
append = True
if saveIdsToModel:
model.save()
|
robbriggs/googletest
|
refs/heads/master
|
test/gtest_color_test.py
|
3259
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
|
kyroskoh/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_color_test.py
|
3259
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def UsesColor(term, color_env_var, color_flag):
"""Runs gtest_color_test_ and returns its exit code."""
SetEnvVar('TERM', term)
SetEnvVar(COLOR_ENV_VAR, color_env_var)
if color_flag is None:
args = []
else:
args = ['--%s=%s' % (COLOR_FLAG, color_flag)]
p = gtest_test_utils.Subprocess([COMMAND] + args)
return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
def testNoEnvVarNoFlag(self):
"""Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', None, None))
self.assert_(not UsesColor('emacs', None, None))
self.assert_(not UsesColor('xterm-mono', None, None))
self.assert_(not UsesColor('unknown', None, None))
self.assert_(not UsesColor(None, None, None))
self.assert_(UsesColor('linux', None, None))
self.assert_(UsesColor('cygwin', None, None))
self.assert_(UsesColor('xterm', None, None))
self.assert_(UsesColor('xterm-color', None, None))
self.assert_(UsesColor('xterm-256color', None, None))
def testFlagOnly(self):
"""Tests the case when there's --gtest_color but not GTEST_COLOR."""
self.assert_(not UsesColor('dumb', None, 'no'))
self.assert_(not UsesColor('xterm-color', None, 'no'))
if not IS_WINDOWS:
self.assert_(not UsesColor('emacs', None, 'auto'))
self.assert_(UsesColor('xterm', None, 'auto'))
self.assert_(UsesColor('dumb', None, 'yes'))
self.assert_(UsesColor('xterm', None, 'yes'))
def testEnvVarOnly(self):
"""Tests the case when there's GTEST_COLOR but not --gtest_color."""
self.assert_(not UsesColor('dumb', 'no', None))
self.assert_(not UsesColor('xterm-color', 'no', None))
if not IS_WINDOWS:
self.assert_(not UsesColor('dumb', 'auto', None))
self.assert_(UsesColor('xterm-color', 'auto', None))
self.assert_(UsesColor('dumb', 'yes', None))
self.assert_(UsesColor('xterm-color', 'yes', None))
def testEnvVarAndFlag(self):
"""Tests the case when there are both GTEST_COLOR and --gtest_color."""
self.assert_(not UsesColor('xterm-color', 'no', 'no'))
self.assert_(UsesColor('dumb', 'no', 'yes'))
self.assert_(UsesColor('xterm-color', 'no', 'auto'))
def testAliasesOfYesAndNo(self):
"""Tests using aliases in specifying --gtest_color."""
self.assert_(UsesColor('dumb', None, 'true'))
self.assert_(UsesColor('dumb', None, 'YES'))
self.assert_(UsesColor('dumb', None, 'T'))
self.assert_(UsesColor('dumb', None, '1'))
self.assert_(not UsesColor('xterm', None, 'f'))
self.assert_(not UsesColor('xterm', None, 'false'))
self.assert_(not UsesColor('xterm', None, '0'))
self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
gtest_test_utils.Main()
|
wfleurant/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/mac/gyptest-postbuild.py
|
345
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that postbuild steps work.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='postbuilds')
test.build('test.gyp', test.ALL, chdir='postbuilds')
# See comment in test/subdirectory/gyptest-subdir-default.py
if test.format == 'xcode':
chdir = 'postbuilds/subdirectory'
else:
chdir = 'postbuilds'
# Created by the postbuild scripts
test.built_file_must_exist('el.a_touch',
type=test.STATIC_LIB,
chdir='postbuilds')
test.built_file_must_exist('el.a_gyp_touch',
type=test.STATIC_LIB,
chdir='postbuilds')
test.built_file_must_exist('nest_el.a_touch',
type=test.STATIC_LIB,
chdir=chdir)
test.built_file_must_exist(
'dyna.framework/Versions/A/dyna_touch',
chdir='postbuilds')
test.built_file_must_exist(
'dyna.framework/Versions/A/dyna_gyp_touch',
chdir='postbuilds')
test.built_file_must_exist(
'nest_dyna.framework/Versions/A/nest_dyna_touch',
chdir=chdir)
test.built_file_must_exist('dyna_standalone.dylib_gyp_touch',
type=test.SHARED_LIB,
chdir='postbuilds')
test.built_file_must_exist('copied_file.txt', chdir='postbuilds')
test.built_file_must_exist('copied_file_2.txt', chdir=chdir)
test.pass_test()
|
rtc-draper/googlemock
|
refs/heads/master
|
scripts/generator/gmock_gen.py
|
1159
|
#!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Driver for starting up Google Mock class generator."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
if __name__ == '__main__':
# Add the directory of this script to the path so we can import gmock_class.
sys.path.append(os.path.dirname(__file__))
from cpp import gmock_class
# Fix the docstring in case they require the usage.
gmock_class.__doc__ = gmock_class.__doc__.replace('gmock_class.py', __file__)
gmock_class.main()
|
LostItem/roundware-server
|
refs/heads/develop
|
roundware/notifications/admin.py
|
9
|
from __future__ import unicode_literals
from django.contrib import admin
from roundware.notifications.models import ModelNotification, ActionNotification
class ActionNotificationInline(admin.TabularInline):
model = ActionNotification
filter_horizontal = ['who']
fieldsets = (
("Notification Options", {'fields': ('active', 'action')}),
("Email Options", {'fields': ('who', 'subject', 'message')})
)
class ModelNotificationAdmin(admin.ModelAdmin):
inlines = [ActionNotificationInline]
list_filter = ('active', 'project')
list_display = ('__unicode__', 'active',)
list_editable = ('active',)
admin.site.register(ModelNotification, admin_class=ModelNotificationAdmin)
|
byterom/android_external_chromium_org
|
refs/heads/12.1
|
tools/perf/page_sets/intl_ja_zh.py
|
34
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class IntlJaZhPage(page_module.Page):
def __init__(self, url, page_set):
super(IntlJaZhPage, self).__init__(url=url, page_set=page_set)
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/intl_ja_zh.json'
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
class IntlJaZhPageSet(page_set_module.PageSet):
""" Popular pages in Japanese and Chinese. """
def __init__(self):
super(IntlJaZhPageSet, self).__init__(
user_agent_type='desktop',
archive_data_file='data/intl_ja_zh.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
# Why: #5 Japanese site
'http://www.amazon.co.jp',
'http://mixi.jp/',
'http://dtiblog.com/',
'http://2ch.net/',
'http://jugem.jp/',
'http://hatena.ne.jp/',
'http://goo.ne.jp/',
# Why: #1 Japanese site
'http://www.yahoo.co.jp/',
# Why: #3 Japanese site
'http://fc2.com/ja/',
'http://kakaku.com/',
'http://zol.com.cn/',
'http://cn.yahoo.com/',
# Why: #1 Chinese site
'http://www.baidu.com/s?wd=%D0%C2%20%CE%C5',
# Why: #2 Chinese site
'http://www.qq.com/',
# Why: #3 Chinese site
'http://www.taobao.com/index_global.php',
# Why: #4 Chinese site
'http://www.sina.com.cn/',
# Why: #5 Chinese site
# pylint: disable=C0301
'http://www.google.com.hk/#q=%E9%82%84%E6%8F%90%E4%BE%9B&fp=c44d333e710cb480',
'http://udn.com/NEWS/mainpage.shtml',
'http://ruten.com.tw/'
]
for url in urls_list:
self.AddPage(IntlJaZhPage(url, self))
|
petecummings/django-blog-zinnia
|
refs/heads/develop
|
docs/extensions/zinnia_docs.py
|
12
|
"""
Extensions for the Sphinx documentation of Zinnia.
Inspired by, borrowed from, and needed for
cross-linking the Django documentation.
"""
import inspect
from django.db import models
from django.utils.html import strip_tags
from django.utils.encoding import force_unicode
def skip_model_member(app, what, name, obj, skip, options):
    # These fields always fail!
if name in ('tags', 'image'):
return True
return skip
def process_model_docstring(app, what, name, obj, options, lines):
if inspect.isclass(obj) and issubclass(obj, models.Model):
for field in obj._meta.fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
if help_text:
lines.append(':param %s: %s' % (field.attname, help_text))
else:
lines.append(':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(':type %s: %s' % (field.attname,
type(field).__name__))
# Return the extended docstring
return lines
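# Illustrative effect (the field below is hypothetical): for a model declaring
#   title = models.CharField(help_text="Post title")
# the hook above appends ':param title: Post title' and ':type title: CharField'
# to the autodoc docstring, so every model field shows up in the rendered docs.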
def setup(app):
app.add_crossref_type(
directivename = 'setting',
rolename = 'setting',
indextemplate = 'pair: %s; setting',
)
app.add_crossref_type(
directivename = 'templatetag',
rolename = 'ttag',
indextemplate = 'pair: %s; template tag'
)
app.add_crossref_type(
directivename = 'templatefilter',
rolename = 'tfilter',
indextemplate = 'pair: %s; template filter'
)
app.connect('autodoc-process-docstring',
process_model_docstring)
app.connect('autodoc-skip-member',
skip_model_member)
|
HKUST-SING/tensorflow
|
refs/heads/master
|
tensorflow/python/framework/graph_io.py
|
62
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for reading/writing graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
def write_graph(graph_or_graph_def, logdir, name, as_text=True):
"""Writes a graph proto to a file.
The graph is written as a binary proto unless `as_text` is `True`.
```python
v = tf.Variable(0, name='my_variable')
sess = tf.Session()
tf.train.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')
```
or
```python
v = tf.Variable(0, name='my_variable')
sess = tf.Session()
tf.train.write_graph(sess.graph, '/tmp/my-model', 'train.pbtxt')
```
Args:
graph_or_graph_def: A `Graph` or a `GraphDef` protocol buffer.
logdir: Directory where to write the graph. This can refer to remote
filesystems, such as Google Cloud Storage (GCS).
name: Filename for the graph.
as_text: If `True`, writes the graph as an ASCII proto.
Returns:
The path of the output proto file.
"""
if isinstance(graph_or_graph_def, ops.Graph):
graph_def = graph_or_graph_def.as_graph_def()
else:
graph_def = graph_or_graph_def
# gcs does not have the concept of directory at the moment.
if not file_io.file_exists(logdir) and not logdir.startswith('gs:'):
file_io.recursive_create_dir(logdir)
path = os.path.join(logdir, name)
if as_text:
file_io.atomic_write_string_to_file(path, str(graph_def))
else:
file_io.atomic_write_string_to_file(path, graph_def.SerializeToString())
return path
|
runt18/mojo
|
refs/heads/master
|
mojo/public/python/mojo_application/application_delegate.py
|
28
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Interface for the delegate of ApplicationImpl."""
import mojo_application.application_impl
import mojo_application.service_provider_impl
import shell_mojom
import mojo_system
# pylint: disable=unused-argument
class ApplicationDelegate:
def Initialize(self, shell, application):
"""
Called from ApplicationImpl's Initialize() method.
"""
pass
def OnAcceptConnection(self,
requestor_url,
resolved_url,
service_provider,
exposed_services):
"""
Called from ApplicationImpl's OnAcceptConnection() method. Returns a bool
indicating whether this connection should be accepted.
"""
return False
|
zoidbergwill/Diamond
|
refs/heads/master
|
src/collectors/nagios/nagios.py
|
31
|
# coding=utf-8
"""
Shells out to get nagios statistics, which may or may not require sudo access
#### Dependencies
* /usr/sbin/nagios3stats
"""
import diamond.collector
import subprocess
import os
from diamond.collector import str_to_bool
class NagiosStatsCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(NagiosStatsCollector,
self).get_default_config_help()
config_help.update({
'bin': 'Path to nagios3stats binary',
'vars': 'What vars to collect',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NagiosStatsCollector, self).get_default_config()
config.update({
'bin': '/usr/sbin/nagios3stats',
'vars': ['AVGACTHSTLAT',
'AVGACTSVCLAT',
'AVGACTHSTEXT',
'AVGACTSVCEXT',
'NUMHSTUP',
'NUMHSTDOWN',
'NUMHSTUNR',
'NUMSVCOK',
'NUMSVCWARN',
'NUMSVCUNKN',
'NUMSVCCRIT',
'NUMHSTACTCHK5M',
'NUMHSTPSVCHK5M',
'NUMSVCACTCHK5M',
'NUMSVCPSVCHK5M',
'NUMACTHSTCHECKS5M',
'NUMOACTHSTCHECKS5M',
'NUMCACHEDHSTCHECKS5M',
'NUMSACTHSTCHECKS5M',
'NUMPARHSTCHECKS5M',
'NUMSERHSTCHECKS5M',
'NUMPSVHSTCHECKS5M',
'NUMACTSVCCHECKS5M',
'NUMOACTSVCCHECKS5M',
'NUMCACHEDSVCCHECKS5M',
'NUMSACTSVCCHECKS5M',
'NUMPSVSVCCHECKS5M'],
'use_sudo': True,
'sudo_cmd': '/usr/bin/sudo',
'path': 'nagiosstats'
})
return config
def collect(self):
if ((not os.access(self.config['bin'], os.X_OK) or
(str_to_bool(self.config['use_sudo']) and
not os.access(self.config['sudo_cmd'], os.X_OK)))):
return
command = [self.config['bin'],
'--data', ",".join(self.config['vars']),
'--mrtg']
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
p = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0][:-1]
for i, v in enumerate(p.split("\n")):
metric_name = self.config['vars'][i]
metric_value = int(v)
self.publish(metric_name, metric_value)
|
time-river/wander
|
refs/heads/master
|
proxy/proxyclaw.py
|
1
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import redis
import claw
from base import redis_info
def main():
info = redis_info()
r = redis.StrictRedis(host=info['host'], port=info['port'], password=info['password'])
clawer = claw.Claw(redis=r, request_key='proxy.request', content_key="proxy.content")
clawer.main()
if __name__ == '__main__':
main()
|
ar4s/django
|
refs/heads/master
|
tests/field_subclassing/tests.py
|
16
|
from __future__ import unicode_literals
import inspect
from django.core import serializers
from django.test import TestCase
from .fields import Small
from .models import DataModel, MyModel, OtherModel
class CustomField(TestCase):
def test_defer(self):
d = DataModel.objects.create(data=[1, 2, 3])
self.assertIsInstance(d.data, list)
d = DataModel.objects.get(pk=d.pk)
self.assertIsInstance(d.data, list)
self.assertEqual(d.data, [1, 2, 3])
d = DataModel.objects.defer("data").get(pk=d.pk)
self.assertIsInstance(d.data, list)
self.assertEqual(d.data, [1, 2, 3])
# Refetch for save
d = DataModel.objects.defer("data").get(pk=d.pk)
d.save()
d = DataModel.objects.get(pk=d.pk)
self.assertIsInstance(d.data, list)
self.assertEqual(d.data, [1, 2, 3])
def test_custom_field(self):
# Creating a model with custom fields is done as per normal.
s = Small(1, 2)
self.assertEqual(str(s), "12")
m = MyModel.objects.create(name="m", data=s)
# Custom fields still have normal field's attributes.
self.assertEqual(m._meta.get_field("data").verbose_name, "small field")
# The m.data attribute has been initialised correctly. It's a Small
# object.
self.assertEqual((m.data.first, m.data.second), (1, 2))
# The data loads back from the database correctly and 'data' has the
# right type.
m1 = MyModel.objects.get(pk=m.pk)
self.assertIsInstance(m1.data, Small)
self.assertEqual(str(m1.data), "12")
# We can do normal filtering on the custom field (and will get an error
# when we use a lookup type that does not make sense).
s1 = Small(1, 3)
s2 = Small("a", "b")
self.assertQuerysetEqual(
MyModel.objects.filter(data__in=[s, s1, s2]), [
"m",
],
lambda m: m.name,
)
self.assertRaises(TypeError, lambda: MyModel.objects.filter(data__lt=s))
# Serialization works, too.
stream = serializers.serialize("json", MyModel.objects.all())
self.assertJSONEqual(stream, [{
"pk": m1.pk,
"model": "field_subclassing.mymodel",
"fields": {"data": "12", "name": "m"}
}])
obj = list(serializers.deserialize("json", stream))[0]
self.assertEqual(obj.object, m)
# Test retrieving custom field data
m.delete()
m1 = MyModel.objects.create(name="1", data=Small(1, 2))
MyModel.objects.create(name="2", data=Small(2, 3))
self.assertQuerysetEqual(
MyModel.objects.all(), [
"12",
"23",
],
lambda m: str(m.data),
ordered=False
)
def test_field_subclassing(self):
o = OtherModel.objects.create(data=Small("a", "b"))
o = OtherModel.objects.get()
self.assertEqual(o.data.first, "a")
self.assertEqual(o.data.second, "b")
def test_subfieldbase_plays_nice_with_module_inspect(self):
"""
Custom fields should play nice with python standard module inspect.
http://users.rcn.com/python/download/Descriptor.htm#properties
"""
        # Even when looking for totally different properties, SubfieldBase's
        # non-property-like behaviour made inspect crash. Refs #12568.
data = dict(inspect.getmembers(MyModel))
self.assertIn('__module__', data)
self.assertEqual(data['__module__'], 'field_subclassing.models')
|
MrCubanfrog/NorDB
|
refs/heads/master
|
nordb/database/sql2response.py
|
1
|
"""
This module contains all the functionality for reading response information out
of the database.
Functions and Classes
---------------------
"""
import datetime
import time
from nordb.core import usernameUtilities
from nordb.nordic.response import FapResponse, PazResponse
SELECT_RESPONSE_ID = (
"SELECT "
" file_name, source, stage, description, "
" format, author, id "
"FROM "
" response "
"WHERE "
" id = %s"
)
SELECT_FAP = (
"SELECT "
" frequency, amplitude, phase, amplitude_error, phase_error "
"FROM "
" fap, fap_response "
"WHERE "
" response_id = %s AND "
" fap.fap_id = fap_response.id "
"ORDER BY "
" frequency "
)
SELECT_PAZ = (
"SELECT "
" scale_factor "
"FROM "
" paz_response, response "
"WHERE "
" paz_response.response_id = response.id "
"AND "
" response.id = %s"
)
SELECT_POLES = (
"SELECT "
" real, imag, real_error, imag_error "
"FROM "
" pole, paz_response, response "
"WHERE "
" response.id = %s AND "
" pole.paz_id = paz_response.id AND "
" paz_response.response_id = response.id "
"ORDER BY "
" real "
)
SELECT_ZEROS = (
"SELECT "
" real, imag, real_error, imag_error "
"FROM "
" zero, paz_response, response "
"WHERE "
" response.id = %s AND "
" zero.paz_id = paz_response.id AND "
" paz_response.response_id = response.id "
"ORDER BY "
" ABS(real) DESC"
)
SELECT_RESPONSE = (
"SELECT "
" response.id "
"FROM "
" response, instrument, sitechan, station, sensor "
"WHERE "
" response.id = instrument.response_id AND "
" instrument.id = sensor.instrument_id AND "
" sensor.sitechan_id = sitechan.id AND "
" sitechan.station_id = station.id AND "
" station_code = %s AND "
" sitechan.channel_code = %s AND "
" ("
" (sensor.time <= %s AND sensor.endtime >= %s) "
" OR "
" (sensor.time <= %s AND sensor.endtime IS NULL) "
" )"
)
SELECT_RESPONSES = (
"SELECT "
" response.file_name, response.source, "
" response.stage, response.description, "
" response.format, response.author, response.id "
"FROM "
" response "
"WHERE "
" response.id IN %(response_ids)s"
)
SELECT_FAPS = (
"SELECT "
" frequency, amplitude, phase, amplitude_error, phase_error, "
" response_id "
"FROM "
" fap, fap_response "
"WHERE "
" response_id IN %(response_ids)s AND "
" fap.fap_id = fap_response.id "
"ORDER BY "
" frequency "
)
SELECT_PAZS = (
"SELECT "
" scale_factor, response_id "
"FROM "
" paz_response, response "
"WHERE "
" paz_response.response_id = response.id "
"AND "
" response.id IN %(response_ids)s"
)
SELECT_ALL_POLES = (
"SELECT "
" real, imag, real_error, imag_error, response_id "
"FROM "
" pole, paz_response, response "
"WHERE "
" response.id IN %(response_ids)s AND "
" pole.paz_id = paz_response.id AND "
" paz_response.response_id = response.id "
"ORDER BY "
" real "
)
SELECT_ALL_ZEROS = (
"SELECT "
" real, imag, real_error, imag_error, response_id "
"FROM "
" zero, paz_response, response "
"WHERE "
" response.id IN %(response_ids)s AND "
" zero.paz_id = paz_response.id AND "
" paz_response.response_id = response.id "
"ORDER BY "
" real"
)
def responses2instruments(instruments, db_conn = None):
"""
Function for attaching responses to instrument information
    :param list instruments: List of instruments to which the responses will be attached
"""
if db_conn is None:
conn = usernameUtilities.log2nordb()
else:
conn = db_conn
response_ids = []
for instrument in instruments:
response_ids.append(instrument.response_id)
response_ids = tuple(response_ids)
if len(response_ids) == 0:
if db_conn is None:
conn.close()
return
cur = conn.cursor()
cur.execute(SELECT_RESPONSES, {'response_ids':response_ids})
ans = cur.fetchall()
responses = []
response_ids = []
for a in ans:
response_ids.append(a[-1])
response_ids = tuple(response_ids)
cur.execute(SELECT_FAPS, {'response_ids':response_ids})
fap_resp = cur.fetchall()
cur.execute(SELECT_PAZS, {'response_ids':response_ids})
paz_resp = cur.fetchall()
cur.execute(SELECT_ALL_POLES, {'response_ids':response_ids})
poles_resp = cur.fetchall()
cur.execute(SELECT_ALL_ZEROS, {'response_ids':response_ids})
zeros_resp = cur.fetchall()
for resp in ans:
for instrument in instruments:
if instrument.response_id == resp[-1]:
if resp[4] == 'fap':
faps = []
for f in fap_resp:
if f[-1] == resp[-1]:
faps.append(f[:-1])
instrument.response = FapResponse(resp, faps)
elif resp[4] == 'paz':
poles = []
zeros = []
for pole in poles_resp:
if pole[-1] == resp[-1]:
poles.append(pole[:-1])
for zero in zeros_resp:
if zero[-1] == resp[-1]:
zeros.append(zero[:-1])
for paz in paz_resp:
if paz[-1] == resp[-1]:
instrument.response = PazResponse(resp,
paz[0],
poles,
zeros)
break
if db_conn is None:
conn.close()
def getResponseFromDB(response_id, db_conn = None):
"""
Function for reading a response from database by id
:param int response_id: id of the Response wanted
:returns: :class:`PazResponse` or :class:`FapResponse` object
"""
if db_conn is None:
conn = usernameUtilities.log2nordb()
else:
conn = db_conn
cur = conn.cursor()
response = None
cur.execute(SELECT_RESPONSE_ID, (response_id, ))
response_data = cur.fetchone()
if response_data is None:
return None
if response_data[FapResponse.RESPONSE_FORMAT] == 'fap':
cur.execute(SELECT_FAP, (response_id,))
fap = cur.fetchall()
response = FapResponse(response_data, fap)
elif response_data[FapResponse.RESPONSE_FORMAT] == 'paz':
cur.execute(SELECT_PAZ, (response_id,))
scale_factor = cur.fetchone()[0]
cur.execute(SELECT_POLES, (response_id,))
poles = cur.fetchall()
cur.execute(SELECT_ZEROS, (response_id,))
zeros = cur.fetchall()
response = PazResponse(response_data, scale_factor, poles, zeros)
if db_conn is None:
conn.close()
return response
def getResponse(station, channel, date=datetime.datetime.now(), db_conn = None):
"""
Function for getting response information from the database.
:param string station: Station code of the station
:param string channel: Channel code of the channel
:param datetime date: date for which you want the response
:returns: Response object
"""
if db_conn is None:
conn = usernameUtilities.log2nordb()
else:
conn = db_conn
cur = conn.cursor()
timestamp = time.mktime(date.timetuple())
cur.execute(SELECT_RESPONSE, (station, channel, timestamp, timestamp,
timestamp))
resp_id = cur.fetchone()
if resp_id is None:
return None
response = getResponseFromDB(resp_id[0], conn)
if db_conn is None:
conn.close()
return response
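# Usage sketch (the station and channel codes below are hypothetical):
#
#   response = getResponse('STA1', 'HHZ', datetime.datetime(2015, 6, 1))
#   if response is not None:
#       ...  # FapResponse or PazResponse, depending on the stored format
#
# As with the other helpers, pass an open db_conn to reuse a connection, or
# omit it and the function opens and closes its own.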
|
yubchen/QSublimeLinter
|
refs/heads/master
|
tests/__init__.py
|
10
|
# coding=utf8
#
# __init__.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Joshua Hagins
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module is a dummy for the root test module."""
|
Thoshh/wapad
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/webdesign/__init__.py
|
264
|
import warnings
from django.utils.deprecation import RemovedInDjango110Warning
default_app_config = 'django.contrib.webdesign.apps.WebDesignConfig'
warnings.warn(
"django.contrib.webdesign will be removed in Django 1.10. The "
"{% lorem %} tag is now included in the built-in tags.",
RemovedInDjango110Warning
)
|
lmtierney/selenium
|
refs/heads/master
|
py/test/selenium/__init__.py
|
2454
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
franosincic/edx-platform
|
refs/heads/master
|
common/djangoapps/util/model_utils.py
|
37
|
"""
Utilities for django models.
"""
import unicodedata
import re
from eventtracking import tracker
from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django_countries.fields import Country
# The setting name used for events when "settings" (account settings, preferences, profile information) change.
USER_SETTINGS_CHANGED_EVENT_NAME = u'edx.user.settings.changed'
def get_changed_fields_dict(instance, model_class):
"""
Helper method for tracking field changes on a model.
Given a model instance and class, return a dict whose keys are that
instance's fields which differ from the last saved ones and whose values
are the old values of those fields. Related fields are not considered.
Args:
instance (Model instance): the model instance with changes that are
being tracked
model_class (Model class): the class of the model instance we are
tracking
Returns:
dict: a mapping of field names to current database values of those
fields, or an empty dict if the model is new
"""
try:
old_model = model_class.objects.get(pk=instance.pk)
except model_class.DoesNotExist:
# Object is new, so fields haven't technically changed. We'll return
# an empty dict as a default value.
return {}
else:
# We want to compare all of the scalar fields on the model, but none of
# the relations.
field_names = [f.name for f in model_class._meta.get_fields() if not f.is_relation] # pylint: disable=protected-access
changed_fields = {
field_name: getattr(old_model, field_name) for field_name in field_names
if getattr(old_model, field_name) != getattr(instance, field_name)
}
return changed_fields
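# Usage sketch (the receiver and sender model are hypothetical, not part of
# this module): a pre_save handler would typically stash the result on the
# instance so emit_field_changed_events() below can read it back after saving.
#
#   @receiver(pre_save, sender=SomeProfileModel)
#   def capture_changes(sender, instance, **kwargs):
#       instance._changed_fields = get_changed_fields_dict(instance, sender)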
def emit_field_changed_events(instance, user, db_table, excluded_fields=None, hidden_fields=None):
"""Emits a settings changed event for each field that has changed.
Note that this function expects that a `_changed_fields` dict has been set
    as an attribute on `instance` (see `get_changed_fields_dict`).
Args:
instance (Model instance): the model instance that is being saved
user (User): the user that this instance is associated with
db_table (str): the name of the table that we're modifying
excluded_fields (list): a list of field names for which events should
not be emitted
hidden_fields (list): a list of field names specifying fields whose
values should not be included in the event (None will be used
instead)
Returns:
None
"""
def clean_field(field_name, value):
"""
Prepare a field to be emitted in a JSON serializable format. If
`field_name` is a hidden field, return None.
"""
if field_name in hidden_fields:
return None
# Country is not JSON serializable. Return the country code.
if isinstance(value, Country):
if value.code:
return value.code
else:
return None
return value
excluded_fields = excluded_fields or []
hidden_fields = hidden_fields or []
changed_fields = getattr(instance, '_changed_fields', {})
for field_name in changed_fields:
if field_name not in excluded_fields:
old_value = clean_field(field_name, changed_fields[field_name])
new_value = clean_field(field_name, getattr(instance, field_name))
emit_setting_changed_event(user, db_table, field_name, old_value, new_value)
# Remove the now inaccurate _changed_fields attribute.
if hasattr(instance, '_changed_fields'):
del instance._changed_fields
def truncate_fields(old_value, new_value):
"""
Truncates old_value and new_value for analytics event emission if necessary.
Args:
old_value(obj): the value before the change
new_value(obj): the new value being saved
Returns:
a dictionary with the following fields:
'old': the truncated old value
'new': the truncated new value
'truncated': the list of fields that have been truncated
"""
# Compute the maximum value length so that two copies can fit into the maximum event size
# in addition to all the other fields recorded.
max_value_length = settings.TRACK_MAX_EVENT / 4
serialized_old_value, old_was_truncated = _get_truncated_setting_value(old_value, max_length=max_value_length)
serialized_new_value, new_was_truncated = _get_truncated_setting_value(new_value, max_length=max_value_length)
truncated_values = []
if old_was_truncated:
truncated_values.append("old")
if new_was_truncated:
truncated_values.append("new")
return {'old': serialized_old_value, 'new': serialized_new_value, 'truncated': truncated_values}
def emit_setting_changed_event(user, db_table, setting_name, old_value, new_value):
"""Emits an event for a change in a setting.
Args:
user (User): the user that this setting is associated with.
db_table (str): the name of the table that we're modifying.
setting_name (str): the name of the setting being changed.
old_value (object): the value before the change.
new_value (object): the new value being saved.
Returns:
None
"""
truncated_fields = truncate_fields(old_value, new_value)
truncated_fields['setting'] = setting_name
truncated_fields['user_id'] = user.id
truncated_fields['table'] = db_table
tracker.emit(
USER_SETTINGS_CHANGED_EVENT_NAME,
truncated_fields
)
def _get_truncated_setting_value(value, max_length=None):
"""
Returns the truncated form of a setting value.
Returns:
truncated_value (object): the possibly truncated version of the value.
was_truncated (bool): returns true if the serialized value was truncated.
"""
if isinstance(value, basestring) and max_length is not None and len(value) > max_length:
return value[0:max_length], True
else:
return value, False
# Taken from Django 1.8 source code because it's not supported in 1.4
def slugify(value):
"""Converts value into a string suitable for readable URLs.
Converts to ASCII. Converts spaces to hyphens. Removes characters that
aren't alphanumerics, underscores, or hyphens. Converts to lowercase.
Also strips leading and trailing whitespace.
Args:
value (string): String to slugify.
"""
value = force_unicode(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub(r'[-\s]+', '-', value))
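# Example (illustrative): slugify(u' Déjà Vu -- Test ') yields u'deja-vu-test':
# accents are stripped by the ASCII fold, and runs of spaces and hyphens
# collapse to single hyphens in lowercase.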
|
gonboy/sl4a
|
refs/heads/master
|
python-build/python-libs/gdata/src/gdata/tlslite/utils/cryptomath.py
|
172
|
"""cryptomath module
This module has basic math/crypto code."""
import os
import math
import base64
import binascii
import sha
from compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
# Try to load cryptlib
try:
import cryptlib_py
try:
cryptlib_py.cryptInit()
except cryptlib_py.CryptException, e:
#If tlslite and cryptoIDlib are both present,
#they might each try to re-initialize this,
#so we're tolerant of that.
if e[0] != cryptlib_py.CRYPT_ERROR_INITED:
raise
cryptlibpyLoaded = True
except ImportError:
cryptlibpyLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Get os.urandom PRNG
try:
os.urandom(1)
def getRandomBytes(howMany):
return stringToBytes(os.urandom(howMany))
prngName = "os.urandom"
except:
# Else get cryptlib PRNG
if cryptlibpyLoaded:
def getRandomBytes(howMany):
randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED,
cryptlib_py.CRYPT_ALGO_AES)
cryptlib_py.cryptSetAttribute(randomKey,
cryptlib_py.CRYPT_CTXINFO_MODE,
cryptlib_py.CRYPT_MODE_OFB)
cryptlib_py.cryptGenerateKey(randomKey)
bytes = createByteArrayZeros(howMany)
cryptlib_py.cryptEncrypt(randomKey, bytes)
return bytes
prngName = "cryptlib"
else:
#Else get UNIX /dev/urandom PRNG
try:
devRandomFile = open("/dev/urandom", "rb")
def getRandomBytes(howMany):
return stringToBytes(devRandomFile.read(howMany))
prngName = "/dev/urandom"
except IOError:
#Else get Win32 CryptoAPI PRNG
try:
import win32prng
def getRandomBytes(howMany):
s = win32prng.getRandomBytes(howMany)
if len(s) != howMany:
raise AssertionError()
return stringToBytes(s)
prngName ="CryptoAPI"
except ImportError:
#Else no PRNG :-(
def getRandomBytes(howMany):
raise NotImplementedError("No Random Number Generator "\
"available.")
prngName = "None"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(bytes):
total = 0L
multiplier = 1L
for count in range(len(bytes)-1, -1, -1):
byte = bytes[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToBytes(n):
howManyBytes = numBytes(n)
bytes = createByteArrayZeros(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
bytes[count] = int(n % 256)
n >>= 8
return bytes
def bytesToBase64(bytes):
s = bytesToString(bytes)
return stringToBase64(s)
def base64ToBytes(s):
s = base64ToString(s)
return stringToBytes(s)
def numberToBase64(n):
bytes = numberToBytes(n)
return bytesToBase64(bytes)
def base64ToNumber(s):
bytes = base64ToBytes(s)
return bytesToNumber(bytes)
def stringToNumber(s):
bytes = stringToBytes(s)
return bytesToNumber(bytes)
def numberToString(s):
bytes = numberToBytes(s)
return bytesToString(bytes)
def base64ToString(s):
try:
return base64.decodestring(s)
except binascii.Error, e:
raise SyntaxError(e)
except binascii.Incomplete, e:
raise SyntaxError(e)
def stringToBase64(s):
return base64.encodestring(s).replace("\n", "")
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
bytes = stringToBytes(mpi[4:])
return bytesToNumber(bytes)
def numberToMPI(n):
bytes = numberToBytes(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
bytes = concatArrays(createByteArrayZeros(4+ext), bytes)
bytes[0] = (length >> 24) & 0xFF
bytes[1] = (length >> 16) & 0xFF
bytes[2] = (length >> 8) & 0xFF
bytes[3] = length & 0xFF
return bytesToString(bytes)
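# Round-trip sketch (illustrative): for any positive n,
# mpiToNumber(numberToMPI(n)) == n; the encoding is a 4-byte big-endian length
# header followed by the magnitude bytes, with a leading zero byte added when
# the top bit of the leading byte would otherwise be set.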
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
def hashAndBase64(s):
return stringToBase64(sha.sha(s).digest())
def getBase64Nonce(numChars=22): # defaults to a 132-bit nonce
bytes = getRandomBytes(numChars)
bytesStr = "".join([chr(b) for b in bytes])
return stringToBase64(bytesStr)[:numChars]
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
#This will break when python division changes, but we can't use // cause
#of Jython
return (a * b) / gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
#This will break when python division changes, but we can't use //
#cause of Jython
q = d / c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
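# Quick illustration: invMod(3, 7) == 5 since (3 * 5) % 7 == 1, while
# invMod(2, 4) == 0 because 2 has no inverse modulo 4.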
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
#Copied from Bryan G. Olson's post to comp.lang.python
#Does left-to-right instead of pow()'s right-to-left,
#thus about 30% faster than the python built-in with small bases
    def powMod(base, power, modulus):
        """Return base**power mod modulus, using multi-bit scanning
        with nBitScan bits at a time."""
        nBitScan = 5
#TREV - Added support for negative exponents
negativeResult = False
if (power < 0):
power *= -1
negativeResult = True
exp2 = 2**nBitScan
mask = exp2 - 1
# Break power into a list of digits of nBitScan bits.
# The list is recursive so easy to read in reverse direction.
nibbles = None
while power:
nibbles = int(power & mask), nibbles
power = power >> nBitScan
# Make a table of powers of base up to 2**nBitScan - 1
lowPowers = [1]
for i in xrange(1, exp2):
lowPowers.append((lowPowers[i-1] * base) % modulus)
# To exponentiate by the first nibble, look it up in the table
nib, nibbles = nibbles
prod = lowPowers[nib]
# For the rest, square nBitScan times, then multiply by
# base^nibble
while nibbles:
nib, nibbles = nibbles
for i in xrange(nBitScan):
prod = (prod * prod) % modulus
if nib: prod = (prod * lowPowers[nib]) % modulus
#TREV - Added support for negative exponents
if negativeResult:
prodInv = invMod(prod, modulus)
#Check to make sure the inverse is correct
if (prod * prodInv) % modulus != 1:
raise AssertionError()
return prodInv
return prod
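# Sanity check sketch: the fallback powMod above agrees with Python's built-in
# pow() for non-negative exponents, e.g. powMod(5, 117, 19) == pow(5, 117, 19) == 1.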
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = range(n)
for count in range(2, int(math.sqrt(n))):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print "*",
s, t = n-1, 0
while s % 2 == 0:
s, t = s/2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
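# Illustrative behaviour: primes below 1000 (e.g. isPrime(97)) are settled by
# trial division against the sieve alone; larger candidates that survive trial
# division fall through to the Rabin-Miller loop above.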
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2L ** (bits-1)) * 3/2
high = 2L ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print ".",
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3/2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print ".",
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
|
Voluntarynet/BitmessageKit
|
refs/heads/master
|
BitmessageKit/Vendor/static-python/Lib/distutils/tests/setuptools_extension.py
|
149
|
from distutils.core import Extension as _Extension
from distutils.core import Distribution as _Distribution
def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls
_Distribution = _get_unpatched(_Distribution)
_Extension = _get_unpatched(_Extension)
try:
from Pyrex.Distutils.build_ext import build_ext
except ImportError:
have_pyrex = False
else:
have_pyrex = True
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
if not have_pyrex:
# convert .pyx extensions to .c
def __init__(self,*args,**kw):
_Extension.__init__(self,*args,**kw)
sources = []
for s in self.sources:
if s.endswith('.pyx'):
sources.append(s[:-3]+'c')
else:
sources.append(s)
self.sources = sources
class Library(Extension):
"""Just like a regular Extension, but built as a library instead"""
import sys, distutils.core, distutils.extension
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = Extension
|
alajara/servo
|
refs/heads/master
|
components/style/counter_style/update_predefined.py
|
47
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/. */
import os.path
import re
import urllib
def main(filename):
names = [
re.search('>([^>]+)(</dfn>|<a class="self-link")', line).group(1)
for line in urllib.urlopen("https://drafts.csswg.org/css-counter-styles/")
if 'data-dfn-for="<counter-style-name>"' in line
or 'data-dfn-for="<counter-style>"' in line
]
with open(filename, "wb") as f:
f.write("""\
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
predefined! {
""")
for name in names:
f.write(' "%s",\n' % name)
f.write('}\n')
if __name__ == "__main__":
main(os.path.join(os.path.dirname(__file__), "predefined.rs"))
|
wkrzemien/DIRAC
|
refs/heads/integration
|
Interfaces/scripts/dirac-admin-get-job-pilots.py
|
4
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-get-job-pilots
# Author : Stuart Paterson
########################################################################
"""
Retrieve info about pilots that have matched a given Job
"""
from __future__ import print_function
__RCSID__ = "$Id$"
# pylint: disable=wrong-import-position
from DIRAC.Core.Base import Script
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [option|cfgfile] ... JobID' % Script.scriptName,
'Arguments:',
' JobID: DIRAC ID of the Job']))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
for job in args:
try:
job = int(job)
except BaseException as x:
errorList.append((job, 'Expected integer for jobID'))
exitCode = 2
continue
result = diracAdmin.getJobPilots(job)
if not result['OK']:
errorList.append((job, result['Message']))
exitCode = 2
for error in errorList:
print("ERROR %s: %s" % error)
DIRACExit(exitCode)
|
apradhn/python_koans
|
refs/heads/master
|
python3/koans/a_package_folder/__init__.py
|
279
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
an_attribute = 1984
|
zerc/django
|
refs/heads/master
|
tests/forms_tests/widget_tests/base.py
|
192
|
from django.test import SimpleTestCase
class WidgetTest(SimpleTestCase):
beatles = (('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))
def check_html(self, widget, name, value, html='', attrs=None, **kwargs):
output = widget.render(name, value, attrs=attrs, **kwargs)
self.assertHTMLEqual(output, html)
|
arnif/CouchPotatoServer
|
refs/heads/master
|
libs/guessit/transfo/guess_release_group.py
|
6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.transfo import SingleNodeGuesser
from guessit.patterns import properties, canonical_form
import re
import logging
log = logging.getLogger(__name__)
CODECS = properties['videoCodec']
FORMATS = properties['format']
def adjust_metadata(md):
codec = canonical_form(md['videoCodec'])
if codec in FORMATS:
md['format'] = codec
del md['videoCodec']
return md
def guess_release_group(string):
group_names = [ r'\.(Xvid)-(?P<releaseGroup>.*?)[ \.]',
r'\.(DivX)-(?P<releaseGroup>.*?)[\. ]',
r'\.(DVDivX)-(?P<releaseGroup>.*?)[\. ]',
]
# first try to see whether we have both a known codec and a known release group
group_names = [ r'\.(?P<videoCodec>' + codec + r')-(?P<releaseGroup>.*?)[ \.]'
for codec in (CODECS + FORMATS) ]
for rexp in group_names:
match = re.search(rexp, string, re.IGNORECASE)
if match:
metadata = match.groupdict()
if canonical_form(metadata['releaseGroup']) in properties['releaseGroup']:
return adjust_metadata(metadata), (match.start(1), match.end(2))
# pick anything as releaseGroup as long as we have a codec in front
# this doesn't include a potential dash ('-') ending the release group
# eg: [...].X264-HiS@SiLUHD-English.[...]
group_names = [ r'\.(?P<videoCodec>' + codec + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]'
for codec in (CODECS + FORMATS) ]
for rexp in group_names:
match = re.search(rexp, string, re.IGNORECASE)
if match:
return adjust_metadata(match.groupdict()), (match.start(1), match.end(2))
return None, None
def process(mtree):
SingleNodeGuesser(guess_release_group, 0.8, log).process(mtree)
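# Usage sketch (hypothetical release name, shown for orientation only):
#   md, span = guess_release_group('Some.Show.S01E01.720p.XviD-GRP.avi')
#   # md is roughly {'videoCodec': 'XviD', 'releaseGroup': 'GRP'}, span is the
#   # (start, end) character range of the match, or (None, None) if nothing matched.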
|
tscohen/chainer
|
refs/heads/master
|
cupy/padding/__init__.py
|
17
|
# flake8: NOQA
# "flake8: NOQA" to suppress warning "H104 File contains nothing but comments"
# TODO(okuta): Implement pad
|
madhuri2k/fantastic-spoon
|
refs/heads/master
|
yay0/lzyf.py
|
1
|
# Compressor for LZYF
import yay0, logging, struct
maxOffsets = [16, 32, 1024]
maxLengths = {16: 513, 32: 4, 1024: 17}
log = logging.getLogger("lzyf")
def create_lzyf(data):
c = compress(data)
out = bytearray(b'LZYF1000')
out.extend((16+len(c)).to_bytes(4, byteorder='big'))
out.extend(len(data).to_bytes(4, byteorder='big'))
out.extend(c)
return out
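# Container layout produced above: 8-byte magic 'LZYF1000', 4-byte big-endian
# total size (16-byte header + compressed stream), 4-byte big-endian
# uncompressed size, then the compressed stream itself.
# Usage sketch (hypothetical data and output path):
#   blob = create_lzyf(b'ABABABABABABABAB')
#   open('out.lzyf', 'wb').write(blob)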
def crl(index, src, maxOffset, maxLength):
"""
  Return (pos, length) for the longest match of the data at ``index`` found at most
  ``maxOffset`` bytes back in ``src``, or (-1, 0) when no match of length > 1 exists.
"""
src_size = len(src)
if index > src_size:
return (-1, 0)
if (index+maxLength) > src_size:
maxLength = src_size - index
startPos = max(0, index-maxOffset)
endPos = index+maxLength-1
l = maxLength
# log.info("Looking from {} - {} ({}, {}) for upto {} bytes match at {}".format(startPos, endPos, index, maxOffset, maxLength, index))
# log.info("String to be matched: {}".format(src[index:index+maxLength]))
  # log.info("Source to be searched: {}".format(src[startPos:endPos]))
while l>1:
# log.info("At l {}".format(l))
if src[index:index+l] in src[startPos:index+l-1]:
p = src.rfind(src[index:index+l], startPos, index+l-1)
# log.info("Match at {} in range {} {}".format(p, startPos, index+l-1))
return (p,l)
l -= 1
return (-1, 0)
def ocrl(index, src, maxOffset, maxLength):
"""
  Like crl(), but scans the search window forward from its oldest position.
  Returns (pos, length) of the longest run found, or (-1, 0) if there is none.
"""
size = len(src)
if index >= size:
    log.warning("Illegal index: {} for source of size {}.".format(index, size))
    return (-1, 0)
startPos = max(0, index-maxOffset) # index - 1
# stopPos = max(0, index-maxOffset)
runs = {}
while startPos < index :
# log.debug("With: {} {} Runs: {}".format(startPos, index, runs))
currRun = 0
pos = startPos
while src[pos] == src[index+currRun]:
currRun += 1
pos += 1
if currRun == maxLength:
log.debug("Found run of length {} == {}. Returning...".format(currRun, maxLength))
return (startPos, maxLength) #found best possible run
if (pos >= size) or ((index+currRun) >= size):
break
if (currRun > 0) and (currRun not in runs.keys()):
runs[currRun] = startPos
# log.debug("Result: {} Runs: {}".format(currRun, runs))
startPos += 1
if not runs:
log.debug("No suitable runs found.")
return (-1, 0)
else:
# Return the index from where the longest run was found
return (runs[max(runs.keys())], max(runs.keys()))
def rcrl(index, src, maxOffset, maxLength):
"""
  Like crl(), but scans the search window backwards from ``index - 1``.
  Returns (pos, length) of the longest run found, or (-1, 0) if there is none.
"""
size = len(src)
if index>=size:
    log.warning("Illegal index: {} for source of size {}.".format(index, size))
    return (-1, 0)
startPos = index - 1 # max(0, index-maxOffset)
stopPos = max(0, index-maxOffset)
runs = {}
while(startPos >= stopPos):
# log.debug("With: {} {} Runs: {}".format(startPos, index, runs))
currRun = 0
pos = startPos
while src[pos] == src[index+currRun]:
currRun += 1
pos += 1
if currRun == maxLength:
log.debug("Found run of length {} == {}. Returning...".format(currRun, maxLength))
return (startPos, maxLength) #found best possible run
if (pos >= size) or ((index+currRun) >= size):
break
if (currRun > 0) and (currRun not in runs.keys()):
runs[currRun] = startPos
# log.debug("Result: {} Runs: {}".format(currRun, runs))
startPos -= 1
if not runs:
log.debug("No suitable runs found.")
return (-1, 0)
else:
# Return the index from where the longest run was found
return (runs[max(runs.keys())], max(runs.keys()))
crl_func = rcrl
def checkRunlength(index, src, maxOffset, maxLength):
return crl_func(index, src, maxOffset, maxLength)
def compress(src):
src_size = len(src)
dst_size = 0
dst = bytearray()
src_pos = 0
rl = 0
ctrl_byte = 0
buf = bytearray()
log.info("Start Encode")
# Start a copy-run
buf.append(src[src_pos])
src_pos += 1
rl += 1
# print("Under Test!")
while src_pos < src_size:
pos0, len0 = checkRunlength(src_pos, src, maxOffsets[0], maxLengths[maxOffsets[0]])
pos1, len1 = checkRunlength(src_pos, src, maxOffsets[1], maxLengths[maxOffsets[1]])
pos2, len2 = checkRunlength(src_pos, src, maxOffsets[2], maxLengths[maxOffsets[2]])
if src_pos+1 < src_size:
pos3, len3 = checkRunlength(src_pos+1, src, maxOffsets[0], maxLengths[maxOffsets[0]])
pos4, len4 = checkRunlength(src_pos+1, src, maxOffsets[1], maxLengths[maxOffsets[1]])
pos5, len5 = checkRunlength(src_pos+1, src, maxOffsets[2], maxLengths[maxOffsets[2]])
# if src_pos+2 < src_size:
# pos6, len6 = checkRunlength(src_pos+2, src, maxOffsets[0], maxLengths[maxOffsets[0]])
# pos7, len7 = checkRunlength(src_pos+2, src, maxOffsets[1], maxLengths[maxOffsets[1]])
# pos8, len8 = checkRunlength(src_pos+2, src, maxOffsets[2], maxLengths[maxOffsets[2]])
# else:
# pos6, len6, pos7, len7, pos8, len8 = (-1, 0, -1, 0, -1, 0)
else:
pos3, len3, pos4, len4, pos5, len5, pos6, len6, pos7, len7, pos8, len8 = (-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0)
# if (max(len0, len1+1, len2) >= 2) and ((max(len0, len1+1, len2)+2) >= max(len3, len4+1, len5, len6-2, len7-1, len8-2)):
# if ((max(len0, len2) >= 2) and ((max(len0, len2)+2) >= max(len3, len5, len6-2, len8-2))):
# if ((rl == 0) and (len1 > 0)) or ((max(len0, len2) >= 2) and ((max(len0, len2)+2) >= max(len3, len5, len6-2, len8-2))):
if ((rl == 0) and (len1 > 0)) or ((max(len0, len2) >= 2) and ((max(len0, len2)+2) >= max(len3, len5))):
# output existing copy run, if any
if rl != 0:
log.info("Copy: C={}, dec[{}:{}] is enc[{}:{}]. Check rl {} vs {}, enc {} vs {}".format(
bin(rl), src_pos-rl, src_pos, dst_size+1, dst_size+1+rl, rl, len(buf), dst_size+1+len(buf), len(dst)+1+len(buf)))
dst.append(rl)
dst.extend(buf)
dst_size += len(buf) + 1
buf = bytearray()
rl = 0
if len0 > max(2*len1, len2):
# encode pos0, len0 using C
v = src_pos-pos0-1
ctrl_byte = 0x2000 | ((v & 0x0F) << 9) | ((len0-2) & 0x1FF)
log.info("0x20: C={}, dec[{}:{}] is dec[{}:{}]. Check off {} len {}({}) bytes {} enc {} vs {}".format(
bin(ctrl_byte), src_pos, src_pos+len0, pos0, pos0+len0, hex(v), hex(len0), hex(len0-2), ctrl_byte.to_bytes(2, byteorder='big'), dst_size+2, len(dst)+2))
dst.extend(ctrl_byte.to_bytes(2, byteorder='big'))
dst_size += 2
src_pos += len0
elif len2 >= (2*len1):
# encode pos2, len2 using B
v = src_pos - pos2 - 1
ctrl_byte = 0x4000 | ((v<<4) & 0x3FF0) | ((len2-2) & 0x0F)
log.info("0x40: C={}, dec[{}:{}] is dec[{}:{}]. Check off {} len {}({}) bytes {} enc {} vs {}".format(
bin(ctrl_byte), src_pos, src_pos+len2, pos2, pos2+len2, hex(v), hex(len2), hex(len2-2), ctrl_byte.to_bytes(2, byteorder='big'), dst_size+2, len(dst)+2))
dst.extend(ctrl_byte.to_bytes(2, byteorder='big'))
dst_size += 2
src_pos += len2
else:
# encode pos1, len1 using A
v = src_pos - pos1 - 1
ctrl_byte = 0x80 | ((v<<2) & 0x7c) | ((len1-1) & 0x03)
log.info("0x80: C={}, dec[{}:{}] is dec[{}:{}]. Check off {} len {}({}) byte {} enc {} vs {}".format(
bin(ctrl_byte), src_pos, src_pos+len1, pos1, pos1+len1, hex(v), hex(len1), hex(len1-1), hex(ctrl_byte), dst_size+1, len(dst)+1))
dst.append(ctrl_byte)
dst_size += 1
src_pos += len1
else:
# No or sub-optimal repeat pattern, add to or create copy run
buf.append(src[src_pos])
rl += 1
src_pos +=1
if rl == 0x1F:
log.info("Copy: C={}, dec[{}:{}] is enc[{}:{}]. Check rl {} vs {}, enc {} vs {}".format(
bin(rl), src_pos-rl, src_pos, dst_size+1, dst_size+1+rl, rl, len(buf), dst_size+1+len(buf), len(dst)+1+len(buf)))
dst.append(rl)
dst.extend(buf)
dst_size += len(buf) + 1
buf = bytearray()
rl = 0
if rl != 0:
log.info("Copy: C={}, dec[{}:{}] is enc[{}:{}]. Check rl {} vs {}, enc {} vs {}".format(
bin(rl), src_pos-rl, src_pos, dst_size+1, dst_size+1+rl, rl, len(buf), dst_size+1+len(buf), len(dst)+1+len(buf)))
dst.append(rl)
dst.extend(buf)
dst_size += len(buf) + 1
buf = bytearray()
rl = 0
dst.append(0)
dst_size += 1
log.info("Zero byte at {}({}). src[0:{}]".format(dst_size, len(dst), src_pos))
log.info("Encoded {} into {} bytes.".format(src_size, dst_size))
return dst
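# Stream format emitted by compress() above, summarised from the code:
#   0x01-0x1F        : copy run -- the byte value N is followed by N literal bytes
#   1vvvvvll  (0x80+): 1-byte back-reference, offset = v+1 (<= 32),   length = l+1 (<= 4)
#   01vvvvvvvvvvllll : 2-byte back-reference, offset = v+1 (<= 1024), length = l+2 (<= 17)
#   001vvvvlllllllll : 2-byte back-reference, offset = v+1 (<= 16),   length = l+2 (<= 513)
#   0x00             : end of stream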
def analyzeRuns(data):
for i in range(len(data)):
p, l = checkRunlength(i, data, 1024, 513)
if l>1:
log.info("{}: Found run of {} at {}".format(i, l, p))
# i += l
|
nesdis/djongo
|
refs/heads/master
|
tests/django_tests/tests/v22/tests/migration_test_data_persistence/migrations/0002_add_book.py
|
133
|
from django.db import migrations
def add_book(apps, schema_editor):
apps.get_model("migration_test_data_persistence", "Book").objects.using(
schema_editor.connection.alias,
).create(
title="I Love Django",
)
class Migration(migrations.Migration):
dependencies = [("migration_test_data_persistence", "0001_initial")]
operations = [
migrations.RunPython(
add_book,
),
]
|
dlopes-samba/dlopes-maps-sambatech
|
refs/heads/master
|
django/contrib/gis/geos/tests/test_io.py
|
321
|
import binascii, ctypes, unittest
from django.contrib.gis.geos import GEOSGeometry, WKTReader, WKTWriter, WKBReader, WKBWriter, geos_version_info
class GEOSIOTest(unittest.TestCase):
def test01_wktreader(self):
# Creating a WKTReader instance
wkt_r = WKTReader()
wkt = 'POINT (5 23)'
# read() should return a GEOSGeometry
ref = GEOSGeometry(wkt)
g1 = wkt_r.read(wkt)
g2 = wkt_r.read(unicode(wkt))
for geom in (g1, g2):
self.assertEqual(ref, geom)
# Should only accept basestring objects.
self.assertRaises(TypeError, wkt_r.read, 1)
self.assertRaises(TypeError, wkt_r.read, buffer('foo'))
def test02_wktwriter(self):
# Creating a WKTWriter instance, testing its ptr property.
wkt_w = WKTWriter()
self.assertRaises(TypeError, wkt_w._set_ptr, WKTReader.ptr_type())
ref = GEOSGeometry('POINT (5 23)')
ref_wkt = 'POINT (5.0000000000000000 23.0000000000000000)'
self.assertEqual(ref_wkt, wkt_w.write(ref))
def test03_wkbreader(self):
# Creating a WKBReader instance
wkb_r = WKBReader()
hex = '000000000140140000000000004037000000000000'
wkb = buffer(binascii.a2b_hex(hex))
ref = GEOSGeometry(hex)
# read() should return a GEOSGeometry on either a hex string or
# a WKB buffer.
g1 = wkb_r.read(wkb)
g2 = wkb_r.read(hex)
for geom in (g1, g2):
self.assertEqual(ref, geom)
bad_input = (1, 5.23, None, False)
for bad_wkb in bad_input:
self.assertRaises(TypeError, wkb_r.read, bad_wkb)
def test04_wkbwriter(self):
wkb_w = WKBWriter()
# Representations of 'POINT (5 23)' in hex -- one normal and
# the other with the byte order changed.
g = GEOSGeometry('POINT (5 23)')
hex1 = '010100000000000000000014400000000000003740'
wkb1 = buffer(binascii.a2b_hex(hex1))
hex2 = '000000000140140000000000004037000000000000'
wkb2 = buffer(binascii.a2b_hex(hex2))
self.assertEqual(hex1, wkb_w.write_hex(g))
self.assertEqual(wkb1, wkb_w.write(g))
# Ensuring bad byteorders are not accepted.
for bad_byteorder in (-1, 2, 523, 'foo', None):
# Equivalent of `wkb_w.byteorder = bad_byteorder`
self.assertRaises(ValueError, wkb_w._set_byteorder, bad_byteorder)
# Setting the byteorder to 0 (for Big Endian)
wkb_w.byteorder = 0
self.assertEqual(hex2, wkb_w.write_hex(g))
self.assertEqual(wkb2, wkb_w.write(g))
# Back to Little Endian
wkb_w.byteorder = 1
# Now, trying out the 3D and SRID flags.
g = GEOSGeometry('POINT (5 23 17)')
g.srid = 4326
hex3d = '0101000080000000000000144000000000000037400000000000003140'
wkb3d = buffer(binascii.a2b_hex(hex3d))
hex3d_srid = '01010000A0E6100000000000000000144000000000000037400000000000003140'
wkb3d_srid = buffer(binascii.a2b_hex(hex3d_srid))
# Ensuring bad output dimensions are not accepted
for bad_outdim in (-1, 0, 1, 4, 423, 'foo', None):
# Equivalent of `wkb_w.outdim = bad_outdim`
self.assertRaises(ValueError, wkb_w._set_outdim, bad_outdim)
# These tests will fail on 3.0.0 because of a bug that was fixed in 3.1:
# http://trac.osgeo.org/geos/ticket/216
if not geos_version_info()['version'].startswith('3.0.'):
# Now setting the output dimensions to be 3
wkb_w.outdim = 3
self.assertEqual(hex3d, wkb_w.write_hex(g))
self.assertEqual(wkb3d, wkb_w.write(g))
        # Telling the WKBWriter to include the srid in the representation.
wkb_w.srid = True
self.assertEqual(hex3d_srid, wkb_w.write_hex(g))
self.assertEqual(wkb3d_srid, wkb_w.write(g))
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GEOSIOTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
|
xneby/tcs-web
|
refs/heads/master
|
tcsweb/wsgi.py
|
1
|
"""
WSGI config for tcsweb project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "tcsweb.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tcsweb.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
simonmulser/bitcoin
|
refs/heads/master
|
test/functional/decodescript.py
|
35
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from io import BytesIO
class DecodeScriptTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
push_public_key_hash = '14' + public_key_hash
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
|
SuperFriendBFG/PyBreakout
|
refs/heads/master
|
Game/Bricks/Brick.py
|
1
|
from Game.Shared import *
class Brick(GameObject):
def __init__(self, position, sprite, game):
self.__game = game
self.__hitPoints = 100
self.__lives = 1
super(Brick, self).__init__(position, GameConstants.BRICK_SIZE, sprite)
def getGame(self):
return self.__game
def isDestroyed(self):
return self.__lives <= 0
def getHitPoints(self):
return self.__hitPoints
def hit(self):
self.__lives -= 1
def getHitSound(self):
return GameConstants.SOUND_HIT_BRICK
|
ionrock/designate
|
refs/heads/master
|
functionaltests/api/v2/models/transfer_accepts_model.py
|
6
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functionaltests.common.models import BaseModel
from functionaltests.common.models import CollectionModel
from functionaltests.common.models import EntityModel
class TransferAcceptsData(BaseModel):
pass
class TransferAcceptsModel(EntityModel):
ENTITY_NAME = 'transfer_accept'
MODEL_TYPE = TransferAcceptsData
class TransferAcceptsListModel(CollectionModel):
COLLECTION_NAME = 'transfer_accepts'
MODEL_TYPE = TransferAcceptsData
|
darith27/wagtail
|
refs/heads/master
|
wagtail/tests/testapp/migrations/0001_initial.py
|
22
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
import modelcluster.contrib.taggit
import wagtail.wagtailimages.models
import wagtail.wagtailadmin.taggable
import modelcluster.fields
import wagtail.wagtailcore.fields
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0013_update_golive_expire_help_text'),
('wagtaildocs', '0002_initial_data'),
('taggit', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wagtailimages', '0005_make_filter_spec_unique'),
]
operations = [
migrations.CreateModel(
name='Advert',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('url', models.URLField(blank=True, null=True)),
('text', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AdvertPlacement',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('colour', models.CharField(max_length=255)),
('advert', models.ForeignKey(to='tests.Advert', related_name='+')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BusinessChild',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessIndex',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessSubIndex',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='CustomImageWithAdminFormFields',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('title', models.CharField(max_length=255, verbose_name='Title')),
('file', models.ImageField(width_field='width', height_field='height', upload_to=wagtail.wagtailimages.models.get_upload_to, verbose_name='File')),
('width', models.IntegerField(editable=False)),
('height', models.IntegerField(editable=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('caption', models.CharField(max_length=255)),
('not_editable_field', models.CharField(max_length=255)),
('tags', taggit.managers.TaggableManager(verbose_name='Tags', to='taggit.Tag', blank=True, through='taggit.TaggedItem', help_text=None)),
('uploaded_by_user', models.ForeignKey(null=True, blank=True, to=settings.AUTH_USER_MODEL, editable=False)),
],
options={
'abstract': False,
},
bases=(models.Model, wagtail.wagtailadmin.taggable.TagSearchable),
),
migrations.CreateModel(
name='CustomImageWithoutAdminFormFields',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('title', models.CharField(max_length=255, verbose_name='Title')),
('file', models.ImageField(width_field='width', height_field='height', upload_to=wagtail.wagtailimages.models.get_upload_to, verbose_name='File')),
('width', models.IntegerField(editable=False)),
('height', models.IntegerField(editable=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('caption', models.CharField(max_length=255)),
('not_editable_field', models.CharField(max_length=255)),
('tags', taggit.managers.TaggableManager(verbose_name='Tags', to='taggit.Tag', blank=True, through='taggit.TaggedItem', help_text=None)),
('uploaded_by_user', models.ForeignKey(null=True, blank=True, to=settings.AUTH_USER_MODEL, editable=False)),
],
options={
'abstract': False,
},
bases=(models.Model, wagtail.wagtailadmin.taggable.TagSearchable),
),
migrations.CreateModel(
name='EventIndex',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('date_from', models.DateField(verbose_name='Start date', null=True)),
('date_to', models.DateField(blank=True, help_text='Not required if event is on a single day', verbose_name='End date', null=True)),
('time_from', models.TimeField(blank=True, verbose_name='Start time', null=True)),
('time_to', models.TimeField(blank=True, verbose_name='End time', null=True)),
('audience', models.CharField(choices=[('public', 'Public'), ('private', 'Private')], max_length=255)),
('location', models.CharField(max_length=255)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('cost', models.CharField(max_length=255)),
('signup_link', models.URLField(blank=True)),
('feed_image', models.ForeignKey(to='wagtailimages.Image', null=True, related_name='+', blank=True, on_delete=django.db.models.deletion.SET_NULL)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPageCarouselItem',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
('caption', models.CharField(blank=True, max_length=255)),
('image', models.ForeignKey(to='wagtailimages.Image', null=True, related_name='+', blank=True, on_delete=django.db.models.deletion.SET_NULL)),
('link_document', models.ForeignKey(to='wagtaildocs.Document', null=True, related_name='+', blank=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageChooserModel',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('page', models.ForeignKey(to='tests.EventPage', help_text='more help text')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageRelatedLink',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('title', models.CharField(help_text='Link title', max_length=255)),
('link_document', models.ForeignKey(to='wagtaildocs.Document', null=True, related_name='+', blank=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageSpeaker',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('first_name', models.CharField(blank=True, max_length=255, verbose_name='Name')),
('last_name', models.CharField(blank=True, max_length=255, verbose_name='Surname')),
('image', models.ForeignKey(to='wagtailimages.Image', null=True, related_name='+', blank=True, on_delete=django.db.models.deletion.SET_NULL)),
('link_document', models.ForeignKey(to='wagtaildocs.Document', null=True, related_name='+', blank=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FormField',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255)),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')], max_length=16)),
('required', models.BooleanField(default=True)),
('choices', models.CharField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', max_length=512)),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255)),
('help_text', models.CharField(blank=True, max_length=255)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FormPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('to_address', models.CharField(blank=True, help_text='Optional - form submissions will be emailed to this address', max_length=255)),
('from_address', models.CharField(blank=True, max_length=255)),
('subject', models.CharField(blank=True, max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PageChooserModel',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PageWithOldStyleRouteMethod',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('content', models.TextField()),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SimplePage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('content', models.TextField()),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SnippetChooserModel',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('advert', models.ForeignKey(to='tests.Advert', help_text='help text')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardChild',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardIndex',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='TaggedPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='TaggedPageTag',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('content_object', modelcluster.fields.ParentalKey(to='tests.TaggedPage', related_name='tagged_items')),
('tag', models.ForeignKey(to='taggit.Tag', related_name='tests_taggedpagetag_items')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AddField(
model_name='taggedpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(verbose_name='Tags', to='taggit.Tag', blank=True, through='tests.TaggedPageTag', help_text='A comma-separated list of tags.'),
preserve_default=True,
),
migrations.AddField(
model_name='pagechoosermodel',
name='page',
field=models.ForeignKey(to='wagtailcore.Page', help_text='help text'),
preserve_default=True,
),
migrations.AddField(
model_name='formfield',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.FormPage', related_name='form_fields'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagespeaker',
name='link_page',
field=models.ForeignKey(to='wagtailcore.Page', null=True, related_name='+', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagespeaker',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.EventPage', related_name='speakers'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='link_page',
field=models.ForeignKey(to='wagtailcore.Page', null=True, related_name='+', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.EventPage', related_name='related_links'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='link_page',
field=models.ForeignKey(to='wagtailcore.Page', null=True, related_name='+', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.EventPage', related_name='carousel_items'),
preserve_default=True,
),
migrations.AddField(
model_name='advertplacement',
name='page',
field=modelcluster.fields.ParentalKey(to='wagtailcore.Page', related_name='advert_placements'),
preserve_default=True,
),
]
|
bratsche/Neutron-Drive
|
refs/heads/master
|
google_appengine/lib/django_1_3/tests/regressiontests/delete_regress/models.py
|
23
|
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
class Award(models.Model):
name = models.CharField(max_length=25)
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType)
content_object = generic.GenericForeignKey()
class AwardNote(models.Model):
award = models.ForeignKey(Award)
note = models.CharField(max_length=100)
class Person(models.Model):
name = models.CharField(max_length=25)
awards = generic.GenericRelation(Award)
class Book(models.Model):
pagecount = models.IntegerField()
class Toy(models.Model):
name = models.CharField(max_length=50)
class Child(models.Model):
name = models.CharField(max_length=50)
toys = models.ManyToManyField(Toy, through='PlayedWith')
class PlayedWith(models.Model):
child = models.ForeignKey(Child)
toy = models.ForeignKey(Toy)
date = models.DateField(db_column='date_col')
class PlayedWithNote(models.Model):
played = models.ForeignKey(PlayedWith)
note = models.TextField()
class Contact(models.Model):
label = models.CharField(max_length=100)
class Email(Contact):
email_address = models.EmailField(max_length=100)
class Researcher(models.Model):
contacts = models.ManyToManyField(Contact, related_name="research_contacts")
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name")
meal = models.CharField(max_length=20)
# Models for #15776
class Policy(models.Model):
policy_number = models.CharField(max_length=10)
class Version(models.Model):
policy = models.ForeignKey(Policy)
class Location(models.Model):
version = models.ForeignKey(Version, blank=True, null=True)
class Item(models.Model):
version = models.ForeignKey(Version)
location = models.ForeignKey(Location, blank=True, null=True)
|
flyher/pymo
|
refs/heads/master
|
android/pgs4a-0.9.6/python-install/lib/python2.7/UserDict.py
|
358
|
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __repr__(self): return repr(self.data)
def __cmp__(self, dict):
if isinstance(dict, UserDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
__hash__ = None # Avoid Py3k warning
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def clear(self): self.data.clear()
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self): return self.data.values()
def has_key(self, key): return key in self.data
def update(self, dict=None, **kwargs):
if dict is None:
pass
elif isinstance(dict, UserDict):
self.data.update(dict.data)
elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
self.data.update(dict)
else:
for k, v in dict.items():
self[k] = v
if len(kwargs):
self.data.update(kwargs)
def get(self, key, failobj=None):
if key not in self:
return failobj
return self[key]
def setdefault(self, key, failobj=None):
if key not in self:
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __contains__(self, key):
return key in self.data
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
class IterableUserDict(UserDict):
def __iter__(self):
return iter(self.data)
import _abcoll
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin:
# Mixin defining all dictionary methods for classes that already have
# a minimum dictionary interface including getitem, setitem, delitem,
# and keys. Without knowledge of the subclass constructor, the mixin
# does not define __init__() or copy(). In addition to the four base
# methods, progressively more efficiency comes with defining
# __contains__(), __iter__(), and iteritems().
# second level definitions support higher levels
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
self[key]
except KeyError:
return False
return True
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def clear(self):
for key in self.keys():
del self[key]
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError, 'container is empty'
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.iteritems()))
def __cmp__(self, other):
if other is None:
return 1
if isinstance(other, DictMixin):
other = dict(other.iteritems())
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys())
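# Illustrative subclass (not part of this module) showing the minimum interface
# DictMixin builds on -- __getitem__, __setitem__, __delitem__ and keys():
#   class LowerDict(DictMixin):
#       "A dict that normalises its keys to lower case."
#       def __init__(self): self._data = {}
#       def __getitem__(self, key): return self._data[key.lower()]
#       def __setitem__(self, key, value): self._data[key.lower()] = value
#       def __delitem__(self, key): del self._data[key.lower()]
#       def keys(self): return self._data.keys()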
|
gfonk/ansible
|
refs/heads/devel
|
lib/ansible/galaxy/api.py
|
43
|
#!/usr/bin/env python
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
import json
from urllib2 import urlopen, quote as urlquote, HTTPError
from urlparse import urlparse
from ansible.errors import AnsibleError, AnsibleOptionsError
class GalaxyAPI(object):
    ''' This class is meant to be used as an API client for an Ansible Galaxy server '''
SUPPORTED_VERSIONS = ['v1']
def __init__(self, galaxy, api_server):
self.galaxy = galaxy
try:
urlparse(api_server, scheme='https')
except:
raise AnsibleError("Invalid server API url passed: %s" % api_server)
server_version = self.get_server_api_version('%s/api/' % (api_server))
if not server_version:
raise AnsibleError("Could not retrieve server API version: %s" % api_server)
if server_version in self.SUPPORTED_VERSIONS:
self.baseurl = '%s/api/%s' % (api_server, server_version)
self.version = server_version # for future use
self.galaxy.display.vvvvv("Base API: %s" % self.baseurl)
else:
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
def get_server_api_version(self, api_server):
"""
Fetches the Galaxy API current version to ensure
the API server is up and reachable.
"""
#TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1)
# also should set baseurl using supported_versions which has path
        # NOTE: hard-coded for now (see TODO above); the request logic below is
        # intentionally unreachable until the server-side issue is resolved.
        return 'v1'
try:
data = json.load(urlopen(api_server))
return data.get("current_version", 'v1')
except Exception as e:
# TODO: report error
return None
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name
"""
role_name = urlquote(role_name)
try:
parts = role_name.split(".")
user_name = ".".join(parts[0:-1])
role_name = parts[-1]
if notify:
self.galaxy.display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except:
raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
self.galaxy.display.vvvv("- %s" % (url))
try:
data = json.load(urlopen(url))
if len(data["results"]) != 0:
return data["results"][0]
except:
# TODO: report on connection/availability errors
pass
return None
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
The url comes from the 'related' field of the role.
"""
try:
url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
data = json.load(urlopen(url))
results = data['results']
done = (data.get('next', None) == None)
while not done:
url = '%s%s' % (self.baseurl, data['next'])
self.galaxy.display.display(url)
data = json.load(urlopen(url))
results += data['results']
done = (data.get('next', None) == None)
return results
except:
return None
def get_list(self, what):
"""
Fetch the list of items specified.
"""
try:
url = '%s/%s/?page_size' % (self.baseurl, what)
data = json.load(urlopen(url))
if "results" in data:
results = data['results']
else:
results = data
done = True
if "next" in data:
done = (data.get('next', None) == None)
while not done:
url = '%s%s' % (self.baseurl, data['next'])
self.galaxy.display.display(url)
data = json.load(urlopen(url))
results += data['results']
done = (data.get('next', None) == None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
def search_roles(self, search, platforms=None, categories=None):
search_url = self.baseurl + '/roles/?page=1'
if search:
search_url += '&search=' + urlquote(search)
if categories is None:
categories = []
elif isinstance(categories, basestring):
categories = categories.split(',')
for cat in categories:
search_url += '&chain__categories__name=' + urlquote(cat)
if platforms is None:
platforms = []
elif isinstance(platforms, basestring):
platforms = platforms.split(',')
for plat in platforms:
search_url += '&chain__platforms__name=' + urlquote(plat)
self.galaxy.display.debug("Executing query: %s" % search_url)
try:
data = json.load(urlopen(search_url))
except HTTPError as e:
raise AnsibleError("Unsuccessful request to server: %s" % str(e))
return data
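# Illustrative only (hypothetical server and search terms): search_roles('nginx',
# platforms='EL', categories='web') performs a single GET against a URL such as
#   <api_server>/api/v1/roles/?page=1&search=nginx&chain__categories__name=web&chain__platforms__name=EL
# and returns the decoded JSON response.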
|
Bashar/django
|
refs/heads/master
|
django/db/backends/postgresql_psycopg2/schema.py
|
63
|
from django.db.backends.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_create_sequence = "CREATE SEQUENCE %(sequence)s"
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s"
def quote_value(self, value):
# Inner import so backend fails nicely if it's not present
import psycopg2
return psycopg2.extensions.adapt(value)
def _alter_column_type_sql(self, table, column, type):
"""
        Make ALTER TYPE to SERIAL make sense: PostgreSQL has no SERIAL type to
        alter to, so switch the column to integer and recreate the sequence and
        its nextval() default by hand.
"""
if type.lower() == "serial":
sequence_name = "%s_%s_seq" % (table, column)
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(column),
"type": "integer",
},
[],
),
[
(
self.sql_delete_sequence % {
"sequence": sequence_name,
},
[],
),
(
self.sql_create_sequence % {
"sequence": sequence_name,
},
[],
),
(
self.sql_alter_column % {
"table": table,
"changes": self.sql_alter_column_default % {
"column": column,
"default": "nextval('%s')" % sequence_name,
}
},
[],
),
(
self.sql_set_sequence_max % {
"table": table,
"column": column,
"sequence": sequence_name,
},
[],
),
],
)
else:
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(table, column, type)
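# Illustrative sketch (hypothetical table and column names): altering column
# "id" on table "app_item" to type "serial" effectively issues statements like:
#   ALTER TABLE app_item ALTER COLUMN "id" TYPE integer;
#   DROP SEQUENCE IF EXISTS app_item_id_seq CASCADE;
#   CREATE SEQUENCE app_item_id_seq;
#   ALTER TABLE app_item ALTER COLUMN id SET DEFAULT nextval('app_item_id_seq');
#   SELECT setval('app_item_id_seq', MAX(id)) FROM app_item;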
|
ronaldahmed/SLAM-for-ugv
|
refs/heads/master
|
neural-navigation-with-lstm/MARCO/nltk/test/chktype.py
|
2
|
# Natural Language Toolkit: Test Code for Type Checking
#
# Copyright (C) 2001 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: chktype.py,v 1.1.1.2 2004/09/29 21:58:13 adastra Exp $
"""
Unit testing for L{nltk.chktype}.
"""
from nltk.chktype import *
from nltk.util import mark_stdout_newlines
import types
##//////////////////////////////////////////////////////
## Test code
##//////////////////////////////////////////////////////
def test_chktype(): r"""
Unit test cases for L{nltk.chktype}.
The amount of type checking performed is controlled by the type safety
level, which is set with L{type_safety_level}:
>>> old_level = type_safety_level(4)
C{chktype} is used by adding calls to C{chktype} at the top of a
function or method, checking the types of the inputs:
>>> def demo(x, f, s):
... assert chktype(1, x, int, long)
... assert chktype(2, f, float)
... assert chktype(3, s, str)
... return 'ok'
Calls with correct argument types proceed normally:
>>> demo(1, 1.0, 'hello')
'ok'
>>> demo(-5, 1.0, '')
'ok'
>>> demo(12L, 1.0, 'hello')
'ok'
Calls with invalid argument types raise exceptions. Define a test
function to capture the exception string and collapse whitespace
(because doctest can't handle multiline exception strings):
>>> def test(func, *args):
... try: func(*args)
... except TypeError, e:
... raise TypeError(' '.join(str(e).split()))
Now call the demo function with bad argument types:
>>> test(demo, 1.0, 1.0, 'hello')
Traceback (most recent call last):
TypeError: Argument 1 to demo() must have type: (int or long) (got a float)
>>> test(demo, 1, 1, 'hello')
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: float (got a int)
>>> test(demo, 1, 1L, 'hello')
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: float (got a long)
>>> test(demo, 1, 'x', 'hello')
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: float (got a str)
>>> test(demo, 'x', 1.0, 'hello')
Traceback (most recent call last):
...
TypeError: Argument 1 to demo() must have type: (int or long) (got a str)
>>> test(demo, 0, 0.0, 12)
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: str (got a int)
>>> test(demo, 0, 1.0, ['h', 'i'])
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: str (got a list)
>>> test(demo, [0], 1.0, 'hi')
Traceback (most recent call last):
...
TypeError: Argument 1 to demo() must have type: (int or long) (got a list)
>>> test(demo, 0, [1.0], 'hi')
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: float (got a list)
List Type Checks
================
>>> def demo(list1, list2, list3):
... assert chktype(1, list1, [])
... assert chktype(2, list2, [int])
... assert chktype(3, list3, [int, [str]])
... return 'ok'
These should be fine:
>>> demo([], [], [])
'ok'
>>> demo(['x'], [1], [1])
'ok'
>>> demo(['a', {}, (), 3], [1,2], [3,4])
'ok'
>>> demo([], [], [1, ['x'], 2, ['y', 'z']])
'ok'
These should raise exceptions:
>>> test(demo, (), [], [])
Traceback (most recent call last):
...
TypeError: Argument 1 to demo() must have type: list (got a tuple)
>>> test(demo, [], (), [])
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (list of int) (got a tuple)
>>> test(demo, [], [], ())
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (list of (int or (list of str))) (got a tuple)
>>> test(demo, {}, [], [])
Traceback (most recent call last):
...
TypeError: Argument 1 to demo() must have type: list (got a dict)
>>> test(demo, [], {}, [])
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (list of int) (got a dict)
>>> test(demo, [], [], {})
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (list of (int or (list of str))) (got a dict)
>>> test(demo, 1, [], [])
Traceback (most recent call last):
...
TypeError: Argument 1 to demo() must have type: list (got a int)
>>> test(demo, [], 1, [])
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (list of int) (got a int)
>>> test(demo, [], [], 1)
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (list of (int or (list of str))) (got a int)
>>> test(demo, [], [2,2,2.0], [])
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (list of int) (got a list)
>>> test(demo, [], [], [2,'x',2.0])
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (list of (int or (list of str))) (got a list)
>>> test(demo, [], [], [3, [3]])
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (list of (int or (list of str))) (got a list)
>>> test(demo, [], [], [3, ['x', ['y']]])
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (list of (int or (list of str))) (got a list)
Tuple Type checks:
==================
>>> def demo(tuple1, tuple2, tuple3):
... assert chktype(1, tuple1, ())
... assert chktype(2, tuple2, (int,))
... assert chktype(3, tuple3, (int, (str,)))
... return 'ok'
These should be fine:
>>> demo((), (), ())
'ok'
>>> demo(('x',), (1,), (1,))
'ok'
>>> demo(('a', {}, (), 3), (1,2), (3,4))
'ok'
>>> demo((), (), (1, ('x',), 2, ('y', 'z')))
'ok'
These should raise exceptions:
>>> test(demo, [], (), ())
Traceback (most recent call last):
...
TypeError: Argument 1 to demo() must have type: tuple (got a list)
>>> test(demo, (), [], ())
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (tuple of int) (got a list)
>>> test(demo, (), (), [])
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (tuple of (int or (tuple of str))) (got a list)
>>> test(demo, {}, (), ())
Traceback (most recent call last):
...
TypeError: Argument 1 to demo() must have type: tuple (got a dict)
>>> test(demo, (), {}, ())
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (tuple of int) (got a dict)
>>> test(demo, (), (), {})
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (tuple of (int or (tuple of str))) (got a dict)
>>> test(demo, 1, (), ())
Traceback (most recent call last):
...
TypeError: Argument 1 to demo() must have type: tuple (got a int)
>>> test(demo, (), 1, ())
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (tuple of int) (got a int)
>>> test(demo, (), (), 1)
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (tuple of (int or (tuple of str))) (got a int)
>>> test(demo, (), (2,2,2.0), ())
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (tuple of int) (got a tuple)
>>> test(demo, (), (), (2,'x',2.0))
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (tuple of (int or (tuple of str))) (got a tuple)
>>> test(demo, (), (), (3, (3,)))
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (tuple of (int or (tuple of str))) (got a tuple)
>>> test(demo, (), (), (3, ('x', ('y',))))
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (tuple of (int or (tuple of str))) (got a tuple)
Dict Type checks:
=================
>>> def demo(dict1, dict2, dict3, dict4):
... assert chktype(1, dict1, {})
... assert chktype(2, dict2, {int: [int]})
... assert chktype(3, dict3, {int: [str, int],
... float: [float]})
... assert chktype(4, dict4, {(int,): [(), []],
... ((),): [(int,)]})
... return 'ok'
These should be fine:
>>> demo({}, {}, {}, {})
'ok'
>>> demo({1:'x', 'x':1}, {}, {}, {})
'ok'
>>> demo({}, {1:2, 3:5}, {}, {})
'ok'
>>> demo({}, {}, {1:'x', 1:3, 1:0, 1.1:2.1, -.2:0.0}, {})
'ok'
>>> demo({}, {}, {}, {(2,3): ('x',2), (2,3): ['x',2], ((3,'x'),): (1,3)})
'ok'
These should raise exceptions:
>>> test(demo, [], {}, {}, {})
Traceback (most recent call last):
...
TypeError: Argument 1 to demo() must have type: dictionary (got a list)
>>> test(demo, {}, [], {}, {})
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (dictionary from int to int) (got a list)
>>> test(demo, {}, {}, [], {})
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (dictionary from float to float or from int to (str or int)) (got a list)
>>> test(demo, {}, {}, {}, [])
Traceback (most recent call last):
...
TypeError: Argument 4 to demo() must have type: (dictionary from (tuple of int) to (tuple or list) or from (tuple of tuple) to (tuple of int)) (got a list)
>>> test(demo, {}, {1:'x'}, {}, {})
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (dictionary from int to int) (got a dict)
>>> test(demo, {}, {'x':1}, {}, {})
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (dictionary from int to int) (got a dict)
>>> test(demo, {}, {'x':'x'}, {}, {})
Traceback (most recent call last):
...
TypeError: Argument 2 to demo() must have type: (dictionary from int to int) (got a dict)
>>> test(demo, {}, {}, {1:1.0}, {})
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (dictionary from float to float or from int to (str or int)) (got a dict)
>>> test(demo, {}, {}, {1.0:1}, {})
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (dictionary from float to float or from int to (str or int)) (got a dict)
>>> test(demo, {}, {}, {1.0:'x'}, {})
Traceback (most recent call last):
...
TypeError: Argument 3 to demo() must have type: (dictionary from float to float or from int to (str or int)) (got a dict)
>>> test(demo, {}, {}, {}, {(): 2})
Traceback (most recent call last):
...
TypeError: Argument 4 to demo() must have type: (dictionary from (tuple of int) to (tuple or list) or from (tuple of tuple) to (tuple of int)) (got a dict)
>>> test(demo, {}, {}, {}, {3: ()})
Traceback (most recent call last):
...
TypeError: Argument 4 to demo() must have type: (dictionary from (tuple of int) to (tuple or list) or from (tuple of tuple) to (tuple of int)) (got a dict)
>>> test(demo, {}, {}, {}, {((),): [33]})
Traceback (most recent call last):
...
TypeError: Argument 4 to demo() must have type: (dictionary from (tuple of int) to (tuple or list) or from (tuple of tuple) to (tuple of int)) (got a dict)
"""
#######################################################################
# Test Runner
#######################################################################
import sys, os, os.path
if __name__ == '__main__': sys.path[0] = None
import unittest, doctest, trace
def testsuite(reload_module=False):
import doctest, nltk.test.chktype
if reload_module: reload(nltk.test.chktype)
return doctest.DocTestSuite(nltk.test.chktype)
def test(verbosity=2, reload_module=False):
runner = unittest.TextTestRunner(verbosity=verbosity)
runner.run(testsuite(reload_module))
if __name__ == '__main__':
test(reload_module=True)
|
grepme/CMPUT410Lab01
|
refs/heads/master
|
virt_env/virt1/lib/python2.7/site-packages/SQLAlchemy-0.9.8-py2.7-linux-x86_64.egg/sqlalchemy/dialects/mysql/base.py
|
22
|
# mysql/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql
:name: MySQL
Supported Versions and Features
-------------------------------
SQLAlchemy supports MySQL starting with version 4.1 through modern releases.
However, no heroic measures are taken to work around major missing
SQL features - if your server version does not support sub-selects, for
example, they won't work in SQLAlchemy either.
See the official MySQL documentation for detailed information about features
supported in any given server release.
.. _mysql_connection_timeouts:
Connection Timeouts
-------------------
MySQL features an automatic connection close behavior, for connections that
have been idle for eight hours or more. To avoid this issue, use
the ``pool_recycle`` option which controls the maximum age of any connection::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
.. _mysql_storage_engines:
CREATE TABLE arguments including Storage Engines
------------------------------------------------
MySQL's CREATE TABLE syntax includes a wide array of special options,
including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
``INSERT_METHOD``, and many more.
To accommodate the rendering of these arguments, specify the form
``mysql_argument_name="value"``. For example, to specify a table with
``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8``, and ``KEY_BLOCK_SIZE``
of ``1024``::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8',
mysql_key_block_size="1024"
)
The MySQL dialect will normally transfer any keyword specified as
``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
``CREATE TABLE`` statement. A handful of these names will render with a space
instead of an underscore; to support this, the MySQL dialect has awareness of
these particular names, which include ``DATA DIRECTORY``
(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g.
``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g.
``mysql_index_directory``).
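As a minimal sketch of one of these space-rendered options (the engine and
path below are purely illustrative, and ``DATA DIRECTORY`` is only honoured
by storage engines that support it), the underscore form of the keyword is
still used on the Python side::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='MyISAM',
mysql_data_directory='/path/to/data'
)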
The most common argument is ``mysql_engine``, which refers to the storage
engine for the table. Historically, MySQL server installations would default
to ``MyISAM`` for this value, although newer versions may be defaulting
to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support
of transactions and foreign keys.
A :class:`.Table` that is created in a MySQL database with a storage engine
of ``MyISAM`` will be essentially non-transactional, meaning any
INSERT/UPDATE/DELETE statement referring to this table will be invoked as
autocommit. It also will have no support for foreign key constraints; while
the ``CREATE TABLE`` statement accepts foreign key options, when using the
``MyISAM`` storage engine these arguments are discarded. Reflecting such a
table will also produce no foreign key constraint information.
For fully atomic transactions as well as support for foreign key
constraints, all participating ``CREATE TABLE`` statements must specify a
transactional engine, which in the vast majority of cases is ``InnoDB``.
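A minimal sketch, using hypothetical table names, declares the engine
explicitly on every table participating in the relationship::
Table('invoice', metadata,
Column('id', Integer, primary_key=True),
mysql_engine='InnoDB')
Table('invoice_item', metadata,
Column('id', Integer, primary_key=True),
Column('invoice_id', Integer, ForeignKey('invoice.id')),
mysql_engine='InnoDB')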
.. seealso::
`The InnoDB Storage Engine
<http://dev.mysql.com/doc/refman/5.0/en/innodb-storage-engine.html>`_ -
on the MySQL website.
Case Sensitivity and Table Reflection
-------------------------------------
MySQL has inconsistent support for case-sensitive identifier
names, basing support on specific details of the underlying
operating system. However, it has been observed that no matter
what case sensitivity behavior is present, the names of tables in
foreign key declarations are *always* received from the database
as all-lower case, making it impossible to accurately reflect a
schema where inter-related tables use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as
all lower case both within SQLAlchemy as well as on the MySQL
database itself, especially if database reflection features are
to be used.
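A minimal sketch of this convention, using hypothetical table names, declares
both sides of a foreign key relationship in lower case, so that the reflected
foreign key information matches the declared names::
parent = Table('parent', metadata,
Column('id', Integer, primary_key=True))
child = Table('child', metadata,
Column('id', Integer, primary_key=True),
Column('parent_id', Integer, ForeignKey('parent.id')))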
.. _mysql_isolation_level:
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level``
parameter which results in the command ``SET SESSION
TRANSACTION ISOLATION LEVEL <level>`` being invoked for
every new connection. Valid values for this parameter are
``READ COMMITTED``, ``READ UNCOMMITTED``,
``REPEATABLE READ``, and ``SERIALIZABLE``::
engine = create_engine(
"mysql://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
.. versionadded:: 0.7.6
AUTO_INCREMENT Behavior
-----------------------
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
the first :class:`.Integer` primary key column which is not marked as a
foreign key::
>>> t = Table('mytable', metadata,
...     Column('id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (id)
)
You can disable this behavior by passing ``False`` to the
:paramref:`~.Column.autoincrement` argument of :class:`.Column`. This flag
can also be used to enable auto-increment on a secondary column in a
multi-column key for some storage engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
Ansi Quoting Style
------------------
MySQL features two varieties of identifier "quoting style", one using
backticks and the other using quotes, e.g. ```some_identifier``` vs.
``"some_identifier"``. All MySQL dialects detect which version
is in use by checking the value of ``sql_mode`` when a connection is first
established with a particular :class:`.Engine`. This quoting style comes
into play when rendering table and column names as well as when reflecting
existing database structures. The detection is entirely automatic and
no special configuration is needed to use either quoting style.
.. versionchanged:: 0.6 detection of ANSI quoting style is entirely automatic,
there's no longer any end-user ``create_engine()`` options in this regard.
MySQL SQL Extensions
--------------------
Many of the MySQL SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid MySQL statement can be executed as a string as well.
Some limited direct support for MySQL extensions to SQL is currently
available.
* SELECT pragma::
select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., mysql_limit=10)
rowcount Support
----------------
SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
flag, or whatever is equivalent for the target dialect, upon connection.
This setting is currently hardcoded.
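As an illustrative sketch (table and column names are hypothetical), the
matched-rows semantics mean that an UPDATE which assigns a column its
existing value still counts those rows::
result = connection.execute(
my_table.update().
where(my_table.c.status == 'new').
values(status='new')
)
print(result.rowcount)   # rows matched, not necessarily rows changed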
.. seealso::
:attr:`.ResultProxy.rowcount`
CAST Support
------------
MySQL documents the CAST operator as available in version 4.0.2. When using
the SQLAlchemy :func:`.cast` function, SQLAlchemy
will not render the CAST token on MySQL before this version, based on server
version detection, instead rendering the internal expression directly.
CAST may still not be desirable on an early MySQL version post-4.0.2, as it
didn't add all datatype support until 4.1.1. If your application falls into
this narrow area, the behavior of CAST can be controlled using the
:ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below::
from sqlalchemy.sql.expression import Cast
from sqlalchemy.ext.compiler import compiles
@compiles(Cast, 'mysql')
def _check_mysql_version(element, compiler, **kw):
if compiler.dialect.server_version_info < (4, 1, 0):
return compiler.process(element.clause, **kw)
else:
return compiler.visit_cast(element, **kw)
The above function, which only needs to be declared once
within an application, overrides the compilation of the
:func:`.cast` construct to check for version 4.1.0 before
fully rendering CAST; else the internal element of the
construct is rendered directly.
.. _mysql_indexes:
MySQL Specific Index Options
----------------------------
MySQL-specific extensions to the :class:`.Index` construct are available.
Index Length
~~~~~~~~~~~~~
MySQL provides an option to create index entries with a certain length, where
"length" refers to the number of characters or bytes in each value which will
become part of the index. SQLAlchemy provides this feature via the
``mysql_length`` parameter::
Index('my_index', my_table.c.data, mysql_length=10)
Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
'b': 9})
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument *must* be
either an integer (and, thus, specify the same prefix length value for all
columns of the index) or a dict in which keys are column names and values are
prefix length values for corresponding columns. MySQL only allows a length for
a column of an index if the column is of type CHAR, VARCHAR, TEXT, BINARY, VARBINARY, or
BLOB.
.. versionadded:: 0.8.2 ``mysql_length`` may now be specified as a dictionary
for use with composite indexes.
Index Types
~~~~~~~~~~~~~
Some MySQL storage engines permit you to specify an index type when creating
an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash')
As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
type for your MySQL storage engine.
More information can be found at:
http://dev.mysql.com/doc/refman/5.0/en/create-index.html
http://dev.mysql.com/doc/refman/5.0/en/create-table.html
.. _mysql_foreign_keys:
MySQL Foreign Keys
------------------
MySQL's behavior regarding foreign keys has some important caveats.
Foreign Key Arguments to Avoid
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY",
or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with
:class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of
these keywords being rendered in a DDL expression, which will then raise an
error on MySQL. In order to use these keywords on a foreign key while having
them ignored on a MySQL backend, use a custom compile rule::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import ForeignKeyConstraint
@compiles(ForeignKeyConstraint, "mysql")
def process(element, compiler, **kw):
element.deferrable = element.initially = None
return compiler.visit_foreign_key_constraint(element, **kw)
.. versionchanged:: 0.9.0 - the MySQL backend no longer silently ignores
the ``deferrable`` or ``initially`` keyword arguments of
:class:`.ForeignKeyConstraint` and :class:`.ForeignKey`.
The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
by SQLAlchemy in conjunction with the MySQL backend. This argument is
silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON
DELETE options also being ignored by the backend. Therefore MATCH should
never be used with the MySQL backend; as is the case with DEFERRABLE and
INITIALLY, custom compilation rules can be used to correct a MySQL
ForeignKeyConstraint at DDL definition time.
.. versionadded:: 0.9.0 - the MySQL backend will raise a
:class:`.CompileError` when the ``match`` keyword is used with
:class:`.ForeignKeyConstraint` or :class:`.ForeignKey`.
Reflection of Foreign Key Constraints
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Not all MySQL storage engines support foreign keys. When using the
very common ``MyISAM`` MySQL storage engine, the information loaded by table
reflection will not include foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
.. seealso::
:ref:`mysql_storage_engines`
.. _mysql_timestamp_null:
TIMESTAMP Columns and NULL
--------------------------
MySQL enforces that a column which specifies the TIMESTAMP datatype implicitly
includes a default value of CURRENT_TIMESTAMP, even though this is not
stated, and additionally sets the column as NOT NULL, the opposite behavior
vs. that of all other datatypes::
mysql> CREATE TABLE ts_test (
-> a INTEGER,
-> b INTEGER NOT NULL,
-> c TIMESTAMP,
-> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-> e TIMESTAMP NULL);
Query OK, 0 rows affected (0.03 sec)
mysql> SHOW CREATE TABLE ts_test;
+---------+-----------------------------------------------------
| Table | Create Table
+---------+-----------------------------------------------------
| ts_test | CREATE TABLE `ts_test` (
`a` int(11) DEFAULT NULL,
`b` int(11) NOT NULL,
`c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
`e` timestamp NULL DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
Above, we see that an INTEGER column defaults to NULL, unless it is specified
with NOT NULL. But when the column is of type TIMESTAMP, an implicit
default of CURRENT_TIMESTAMP is generated which also coerces the column
to be a NOT NULL, even though we did not specify it as such.
Therefore, the usual "NOT NULL" clause *does not apply* to a TIMESTAMP
column; MySQL selects this implicitly. SQLAlchemy therefore does not render
NOT NULL for a TIMESTAMP column on MySQL. However, it *does* render
NULL when we specify nullable=True, or if we leave nullable absent, as it
also defaults to True. This is to accommodate the essentially
reverse behavior of the NULL flag for TIMESTAMP::
from sqlalchemy import MetaData, TIMESTAMP, Integer, Table, Column, text
m = MetaData()
t = Table('ts_test', m,
Column('a', Integer),
Column('b', Integer, nullable=False),
Column('c', TIMESTAMP),
Column('d', TIMESTAMP, nullable=False),
Column('e', TIMESTAMP, nullable=True)
)
from sqlalchemy import create_engine
e = create_engine("mysql://scott:tiger@localhost/test", echo=True)
m.create_all(e)
In the output, we can see that the TIMESTAMP column receives a different
treatment for NULL / NOT NULL vs. that of the INTEGER::
CREATE TABLE ts_test (
a INTEGER,
b INTEGER NOT NULL,
c TIMESTAMP NULL,
d TIMESTAMP,
e TIMESTAMP NULL
)
MySQL above receives the NULL/NOT NULL constraint as is stated in our
original :class:`.Table`::
mysql> SHOW CREATE TABLE ts_test;
+---------+---------------------------
| Table | Create Table
+---------+---------------------------
| ts_test | CREATE TABLE `ts_test` (
`a` int(11) DEFAULT NULL,
`b` int(11) NOT NULL,
`c` timestamp NULL DEFAULT NULL,
`d` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
`e` timestamp NULL DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
Be sure to always favor the ``SHOW CREATE TABLE`` output over the
SQLAlchemy-emitted DDL when checking table definitions, as MySQL's
rules can be hard to predict.
"""
import datetime
import re
import sys
from ... import schema as sa_schema
from ... import exc, log, sql, util
from ...sql import compiler
from array import array as _array
from ...engine import reflection
from ...engine import default
from ... import types as sqltypes
from ...util import topological
from ...types import DATE, BOOLEAN, \
BLOB, BINARY, VARBINARY
RESERVED_WORDS = set(
['accessible', 'add', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check',
'collate', 'column', 'condition', 'constraint', 'continue', 'convert',
'create', 'cross', 'current_date', 'current_time', 'current_timestamp',
'current_user', 'cursor', 'database', 'databases', 'day_hour',
'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal',
'declare', 'default', 'delayed', 'delete', 'desc', 'describe',
'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group',
'having', 'high_priority', 'hour_microsecond', 'hour_minute',
'hour_second', 'if', 'ignore', 'in', 'index', 'infile', 'inner', 'inout',
'insensitive', 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8',
'integer', 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys',
'kill', 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines',
'load', 'localtime', 'localtimestamp', 'lock', 'long', 'longblob',
'longtext', 'loop', 'low_priority', 'master_ssl_verify_server_cert',
'match', 'mediumblob', 'mediumint', 'mediumtext', 'middleint',
'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads',
'read_only', 'read_write', 'real', 'references', 'regexp', 'release',
'rename', 'repeat', 'replace', 'require', 'restrict', 'return',
'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond',
'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial',
'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning',
'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl',
'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob',
'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo',
'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use',
'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary',
'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with',
'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0
'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1
'accessible', 'linear', 'master_ssl_verify_server_cert', 'range',
'read_only', 'read_write', # 5.1
'general', 'ignore_server_ids', 'master_heartbeat_period', 'maxvalue',
'resignal', 'signal', 'slow', # 5.5
'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot',
'partition', 'sql_after_gtids', 'sql_before_gtids', # 5.6
])
AUTOCOMMIT_RE = re.compile(
r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)',
re.I | re.UNICODE)
SET_RE = re.compile(
r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w',
re.I | re.UNICODE)
class _NumericType(object):
"""Base for MySQL numeric types.
This is the base for both NUMERIC and INTEGER, hence
it's a mixin.
"""
def __init__(self, unsigned=False, zerofill=False, **kw):
self.unsigned = unsigned
self.zerofill = zerofill
super(_NumericType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(self,
to_inspect=[_NumericType, sqltypes.Numeric])
class _FloatType(_NumericType, sqltypes.Float):
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
if isinstance(self, (REAL, DOUBLE)) and \
(
(precision is None and scale is not None) or
(precision is not None and scale is None)
):
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether.")
super(_FloatType, self).__init__(
precision=precision, asdecimal=asdecimal, **kw)
self.scale = scale
def __repr__(self):
return util.generic_repr(self, to_inspect=[_FloatType,
_NumericType,
sqltypes.Float])
class _IntegerType(_NumericType, sqltypes.Integer):
def __init__(self, display_width=None, **kw):
self.display_width = display_width
super(_IntegerType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(self, to_inspect=[_IntegerType,
_NumericType,
sqltypes.Integer])
class _StringType(sqltypes.String):
"""Base for MySQL string types."""
def __init__(self, charset=None, collation=None,
ascii=False, binary=False, unicode=False,
national=False, **kw):
self.charset = charset
# allow collate= or collation=
kw.setdefault('collation', kw.pop('collate', collation))
self.ascii = ascii
self.unicode = unicode
self.binary = binary
self.national = national
super(_StringType, self).__init__(**kw)
def __repr__(self):
return util.generic_repr(self,
to_inspect=[_StringType, sqltypes.String])
class NUMERIC(_NumericType, sqltypes.NUMERIC):
"""MySQL NUMERIC type."""
__visit_name__ = 'NUMERIC'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a NUMERIC.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(NUMERIC, self).__init__(precision=precision,
scale=scale, asdecimal=asdecimal, **kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
"""MySQL DECIMAL type."""
__visit_name__ = 'DECIMAL'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DECIMAL.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(DECIMAL, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class DOUBLE(_FloatType):
"""MySQL DOUBLE type."""
__visit_name__ = 'DOUBLE'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a DOUBLE.
.. note::
The :class:`.DOUBLE` type by default converts from float
to Decimal, using a truncation that defaults to 10 digits.
Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
to change this scale, or ``asdecimal=False`` to return values
directly as Python floating points.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(DOUBLE, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class REAL(_FloatType, sqltypes.REAL):
"""MySQL REAL type."""
__visit_name__ = 'REAL'
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
"""Construct a REAL.
.. note::
The :class:`.REAL` type by default converts from float
to Decimal, using a truncation that defaults to 10 digits.
Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
to change this scale, or ``asdecimal=False`` to return values
directly as Python floating points.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(REAL, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class FLOAT(_FloatType, sqltypes.FLOAT):
"""MySQL FLOAT type."""
__visit_name__ = 'FLOAT'
def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
"""Construct a FLOAT.
:param precision: Total digits in this number. If scale and precision
are both None, values are stored to limits allowed by the server.
:param scale: The number of digits after the decimal point.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(FLOAT, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
def bind_processor(self, dialect):
return None
class INTEGER(_IntegerType, sqltypes.INTEGER):
"""MySQL INTEGER type."""
__visit_name__ = 'INTEGER'
def __init__(self, display_width=None, **kw):
"""Construct an INTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(INTEGER, self).__init__(display_width=display_width, **kw)
class BIGINT(_IntegerType, sqltypes.BIGINT):
"""MySQL BIGINTEGER type."""
__visit_name__ = 'BIGINT'
def __init__(self, display_width=None, **kw):
"""Construct a BIGINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(BIGINT, self).__init__(display_width=display_width, **kw)
class MEDIUMINT(_IntegerType):
"""MySQL MEDIUMINTEGER type."""
__visit_name__ = 'MEDIUMINT'
def __init__(self, display_width=None, **kw):
"""Construct a MEDIUMINTEGER
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
class TINYINT(_IntegerType):
"""MySQL TINYINT type."""
__visit_name__ = 'TINYINT'
def __init__(self, display_width=None, **kw):
"""Construct a TINYINT.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(TINYINT, self).__init__(display_width=display_width, **kw)
class SMALLINT(_IntegerType, sqltypes.SMALLINT):
"""MySQL SMALLINTEGER type."""
__visit_name__ = 'SMALLINT'
def __init__(self, display_width=None, **kw):
"""Construct a SMALLINTEGER.
:param display_width: Optional, maximum display width for this number.
:param unsigned: a boolean, optional.
:param zerofill: Optional. If true, values will be stored as strings
left-padded with zeros. Note that this does not affect the values
returned by the underlying database API, which continue to be
numeric.
"""
super(SMALLINT, self).__init__(display_width=display_width, **kw)
class BIT(sqltypes.TypeEngine):
"""MySQL BIT type.
This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a
MSTinyInteger() type.
"""
__visit_name__ = 'BIT'
def __init__(self, length=None):
"""Construct a BIT.
:param length: Optional, number of bits.
"""
self.length = length
def result_processor(self, dialect, coltype):
"""Convert a MySQL's 64 bit, variable length binary string to a long.
TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector
already do this, so this logic should be moved to those dialects.
"""
def process(value):
if value is not None:
v = 0
for i in map(ord, value):
v = v << 8 | i
return v
return value
return process
class TIME(sqltypes.TIME):
"""MySQL TIME type. """
__visit_name__ = 'TIME'
def __init__(self, timezone=False, fsp=None):
"""Construct a MySQL TIME type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the TIME type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
.. versionadded:: 0.8 The MySQL-specific TIME
type as well as fractional seconds support.
"""
super(TIME, self).__init__(timezone=timezone)
self.fsp = fsp
def result_processor(self, dialect, coltype):
time = datetime.time
def process(value):
# convert from a timedelta value
if value is not None:
microseconds = value.microseconds
seconds = value.seconds
minutes = seconds // 60
return time(minutes // 60,
minutes % 60,
seconds - minutes * 60,
microsecond=microseconds)
else:
return None
return process
class TIMESTAMP(sqltypes.TIMESTAMP):
"""MySQL TIMESTAMP type.
"""
__visit_name__ = 'TIMESTAMP'
def __init__(self, timezone=False, fsp=None):
"""Construct a MySQL TIMESTAMP type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6.4 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the TIMESTAMP type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
.. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP`
with fractional seconds support.
"""
super(TIMESTAMP, self).__init__(timezone=timezone)
self.fsp = fsp
class DATETIME(sqltypes.DATETIME):
"""MySQL DATETIME type.
"""
__visit_name__ = 'DATETIME'
def __init__(self, timezone=False, fsp=None):
"""Construct a MySQL DATETIME type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6.4 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the DATETIME type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
.. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME`
with fractional seconds support.
"""
super(DATETIME, self).__init__(timezone=timezone)
self.fsp = fsp
class YEAR(sqltypes.TypeEngine):
"""MySQL YEAR type, for single byte storage of years 1901-2155."""
__visit_name__ = 'YEAR'
def __init__(self, display_width=None):
self.display_width = display_width
class TEXT(_StringType, sqltypes.TEXT):
"""MySQL TEXT type, for text up to 2^16 characters."""
__visit_name__ = 'TEXT'
def __init__(self, length=None, **kw):
"""Construct a TEXT.
:param length: Optional, if provided the server may optimize storage
by substituting the smallest TEXT type sufficient to store
``length`` characters.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(TEXT, self).__init__(length=length, **kw)
class TINYTEXT(_StringType):
"""MySQL TINYTEXT type, for text up to 2^8 characters."""
__visit_name__ = 'TINYTEXT'
def __init__(self, **kwargs):
"""Construct a TINYTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(TINYTEXT, self).__init__(**kwargs)
class MEDIUMTEXT(_StringType):
"""MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
__visit_name__ = 'MEDIUMTEXT'
def __init__(self, **kwargs):
"""Construct a MEDIUMTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(MEDIUMTEXT, self).__init__(**kwargs)
class LONGTEXT(_StringType):
"""MySQL LONGTEXT type, for text up to 2^32 characters."""
__visit_name__ = 'LONGTEXT'
def __init__(self, **kwargs):
"""Construct a LONGTEXT.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(LONGTEXT, self).__init__(**kwargs)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""MySQL VARCHAR type, for variable-length character data."""
__visit_name__ = 'VARCHAR'
def __init__(self, length=None, **kwargs):
"""Construct a VARCHAR.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param national: Optional. If true, use the server's configured
national character set.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
"""
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""MySQL CHAR type, for fixed-length character data."""
__visit_name__ = 'CHAR'
def __init__(self, length=None, **kwargs):
"""Construct a CHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
super(CHAR, self).__init__(length=length, **kwargs)
@classmethod
def _adapt_string_for_cast(self, type_):
# copy the given string type into a CHAR
# for the purposes of rendering a CAST expression
type_ = sqltypes.to_instance(type_)
if isinstance(type_, sqltypes.CHAR):
return type_
elif isinstance(type_, _StringType):
return CHAR(
length=type_.length,
charset=type_.charset,
collation=type_.collation,
ascii=type_.ascii,
binary=type_.binary,
unicode=type_.unicode,
national=False # not supported in CAST
)
else:
return CHAR(length=type_.length)
class NVARCHAR(_StringType, sqltypes.NVARCHAR):
"""MySQL NVARCHAR type.
For variable-length character data in the server's configured national
character set.
"""
__visit_name__ = 'NVARCHAR'
def __init__(self, length=None, **kwargs):
"""Construct an NVARCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs['national'] = True
super(NVARCHAR, self).__init__(length=length, **kwargs)
class NCHAR(_StringType, sqltypes.NCHAR):
"""MySQL NCHAR type.
For fixed-length character data in the server's configured national
character set.
"""
__visit_name__ = 'NCHAR'
def __init__(self, length=None, **kwargs):
"""Construct an NCHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
kwargs['national'] = True
super(NCHAR, self).__init__(length=length, **kwargs)
class TINYBLOB(sqltypes._Binary):
"""MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
__visit_name__ = 'TINYBLOB'
class MEDIUMBLOB(sqltypes._Binary):
"""MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
__visit_name__ = 'MEDIUMBLOB'
class LONGBLOB(sqltypes._Binary):
"""MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
__visit_name__ = 'LONGBLOB'
class _EnumeratedValues(_StringType):
def _init_values(self, values, kw):
self.quoting = kw.pop('quoting', 'auto')
if self.quoting == 'auto' and len(values):
# What quoting character are we using?
q = None
for e in values:
if len(e) == 0:
self.quoting = 'unquoted'
break
elif q is None:
q = e[0]
if len(e) == 1 or e[0] != q or e[-1] != q:
self.quoting = 'unquoted'
break
else:
self.quoting = 'quoted'
if self.quoting == 'quoted':
util.warn_deprecated(
'Manually quoting %s value literals is deprecated. Supply '
'unquoted values and use the quoting= option in cases of '
'ambiguity.' % self.__class__.__name__)
values = self._strip_values(values)
self._enumerated_values = values
length = max([len(v) for v in values] + [0])
return values, length
@classmethod
def _strip_values(cls, values):
strip_values = []
for a in values:
if a[0:1] == '"' or a[0:1] == "'":
# strip enclosing quotes and unquote interior
a = a[1:-1].replace(a[0] * 2, a[0])
strip_values.append(a)
return strip_values
class ENUM(sqltypes.Enum, _EnumeratedValues):
"""MySQL ENUM type."""
__visit_name__ = 'ENUM'
def __init__(self, *enums, **kw):
"""Construct an ENUM.
E.g.::
Column('myenum', ENUM("foo", "bar", "baz"))
:param enums: The range of valid values for this ENUM. Values will be
quoted when generating the schema according to the quoting flag (see
below).
:param strict: Defaults to False: ensure that a given value is in this
ENUM's range of permissible values when inserting or updating rows.
Note that MySQL will not raise a fatal error if you attempt to store
an out of range value; an alternate value will be stored instead.
(See MySQL ENUM documentation.)
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
:param quoting: Defaults to 'auto': automatically determine enum value
quoting. If all enum values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
directly when generating the schema - this usage is deprecated.
'unquoted': values in enums are not quoted, they will be escaped and
surrounded by single quotes when generating the schema.
Previous versions of this type always required manually quoted
values to be supplied; future versions will always quote the string
literals for you. This is a transitional option.
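A minimal, hypothetical sketch of the two modes::
# unquoted (recommended): plain values, quoted for you in the DDL
Column('c1', ENUM('a', 'b', quoting='unquoted'))
# quoted (deprecated): values already carry their own quotes
Column('c2', ENUM("'a'", "'b'", quoting='quoted'))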
"""
values, length = self._init_values(enums, kw)
self.strict = kw.pop('strict', False)
kw.pop('metadata', None)
kw.pop('schema', None)
kw.pop('name', None)
kw.pop('quote', None)
kw.pop('native_enum', None)
kw.pop('inherit_schema', None)
_StringType.__init__(self, length=length, **kw)
sqltypes.Enum.__init__(self, *values)
def __repr__(self):
return util.generic_repr(
self, to_inspect=[ENUM, _StringType, sqltypes.Enum])
def bind_processor(self, dialect):
super_convert = super(ENUM, self).bind_processor(dialect)
def process(value):
if self.strict and value is not None and value not in self.enums:
raise exc.InvalidRequestError('"%s" not a valid value for '
'this enum' % value)
if super_convert:
return super_convert(value)
else:
return value
return process
def adapt(self, cls, **kw):
if issubclass(cls, ENUM):
kw['strict'] = self.strict
return sqltypes.Enum.adapt(self, cls, **kw)
class SET(_EnumeratedValues):
"""MySQL SET type."""
__visit_name__ = 'SET'
def __init__(self, *values, **kw):
"""Construct a SET.
E.g.::
Column('myset', SET("foo", "bar", "baz"))
:param values: The range of valid values for this SET. Values will be
quoted when generating the schema according to the quoting flag (see
below).
.. versionchanged:: 0.9.0 quoting is applied automatically to
:class:`.mysql.SET` in the same way as for :class:`.mysql.ENUM`.
:param charset: Optional, a column-level character set for this string
value. Takes precedence over 'ascii' or 'unicode' short-hand.
:param collation: Optional, a column-level collation for this string
value. Takes precedence over 'binary' short-hand.
:param ascii: Defaults to False: short-hand for the ``latin1``
character set, generates ASCII in schema.
:param unicode: Defaults to False: short-hand for the ``ucs2``
character set, generates UNICODE in schema.
:param binary: Defaults to False: short-hand, pick the binary
collation type that matches the column's character set. Generates
BINARY in schema. This does not affect the type of data stored,
only the collation of character data.
:param quoting: Defaults to 'auto': automatically determine enum value
quoting. If all enum values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
directly when generating the schema - this usage is deprecated.
'unquoted': values in enums are not quoted, they will be escaped and
surrounded by single quotes when generating the schema.
Previous versions of this type always required manually quoted
values to be supplied; future versions will always quote the string
literals for you. This is a transitional option.
.. versionadded:: 0.9.0
"""
values, length = self._init_values(values, kw)
self.values = tuple(values)
kw.setdefault('length', length)
super(SET, self).__init__(**kw)
def result_processor(self, dialect, coltype):
def process(value):
# The good news:
# No ',' quoting issues- commas aren't allowed in SET values
# The bad news:
# Plenty of driver inconsistencies here.
if isinstance(value, set):
# ..some versions convert '' to an empty set
if not value:
value.add('')
return value
# ...and some versions return strings
if value is not None:
return set(value.split(','))
else:
return value
return process
def bind_processor(self, dialect):
super_convert = super(SET, self).bind_processor(dialect)
def process(value):
if value is None or isinstance(
value, util.int_types + util.string_types):
pass
else:
if None in value:
value = set(value)
value.remove(None)
value.add('')
value = ','.join(value)
if super_convert:
return super_convert(value)
else:
return value
return process
# old names
MSTime = TIME
MSSet = SET
MSEnum = ENUM
MSLongBlob = LONGBLOB
MSMediumBlob = MEDIUMBLOB
MSTinyBlob = TINYBLOB
MSBlob = BLOB
MSBinary = BINARY
MSVarBinary = VARBINARY
MSNChar = NCHAR
MSNVarChar = NVARCHAR
MSChar = CHAR
MSString = VARCHAR
MSLongText = LONGTEXT
MSMediumText = MEDIUMTEXT
MSTinyText = TINYTEXT
MSText = TEXT
MSYear = YEAR
MSTimeStamp = TIMESTAMP
MSBit = BIT
MSSmallInteger = SMALLINT
MSTinyInteger = TINYINT
MSMediumInteger = MEDIUMINT
MSBigInteger = BIGINT
MSNumeric = NUMERIC
MSDecimal = DECIMAL
MSDouble = DOUBLE
MSReal = REAL
MSFloat = FLOAT
MSInteger = INTEGER
colspecs = {
_IntegerType: _IntegerType,
_NumericType: _NumericType,
_FloatType: _FloatType,
sqltypes.Numeric: NUMERIC,
sqltypes.Float: FLOAT,
sqltypes.Time: TIME,
sqltypes.Enum: ENUM,
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
ischema_names = {
'bigint': BIGINT,
'binary': BINARY,
'bit': BIT,
'blob': BLOB,
'boolean': BOOLEAN,
'char': CHAR,
'date': DATE,
'datetime': DATETIME,
'decimal': DECIMAL,
'double': DOUBLE,
'enum': ENUM,
'fixed': DECIMAL,
'float': FLOAT,
'int': INTEGER,
'integer': INTEGER,
'longblob': LONGBLOB,
'longtext': LONGTEXT,
'mediumblob': MEDIUMBLOB,
'mediumint': MEDIUMINT,
'mediumtext': MEDIUMTEXT,
'nchar': NCHAR,
'nvarchar': NVARCHAR,
'numeric': NUMERIC,
'set': SET,
'smallint': SMALLINT,
'text': TEXT,
'time': TIME,
'timestamp': TIMESTAMP,
'tinyblob': TINYBLOB,
'tinyint': TINYINT,
'tinytext': TINYTEXT,
'varbinary': VARBINARY,
'varchar': VARCHAR,
'year': YEAR,
}
class MySQLExecutionContext(default.DefaultExecutionContext):
def should_autocommit_text(self, statement):
return AUTOCOMMIT_RE.match(statement)
class MySQLCompiler(compiler.SQLCompiler):
render_table_with_column_in_update_from = True
"""Overridden from base SQLCompiler value"""
extract_map = compiler.SQLCompiler.extract_map.copy()
extract_map.update({'milliseconds': 'millisecond'})
def visit_random_func(self, fn, **kw):
return "rand%s" % self.function_argspec(fn)
def visit_utc_timestamp_func(self, fn, **kw):
return "UTC_TIMESTAMP"
def visit_sysdate_func(self, fn, **kw):
return "SYSDATE()"
def visit_concat_op_binary(self, binary, operator, **kw):
return "concat(%s, %s)" % (self.process(binary.left),
self.process(binary.right))
def visit_match_op_binary(self, binary, operator, **kw):
return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % \
(self.process(binary.left), self.process(binary.right))
def get_from_hint_text(self, table, text):
return text
def visit_typeclause(self, typeclause):
type_ = typeclause.type.dialect_impl(self.dialect)
if isinstance(type_, sqltypes.Integer):
if getattr(type_, 'unsigned', False):
return 'UNSIGNED INTEGER'
else:
return 'SIGNED INTEGER'
elif isinstance(type_, sqltypes.TIMESTAMP):
return 'DATETIME'
elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime,
sqltypes.Date, sqltypes.Time)):
return self.dialect.type_compiler.process(type_)
elif isinstance(type_, sqltypes.String) \
and not isinstance(type_, (ENUM, SET)):
adapted = CHAR._adapt_string_for_cast(type_)
return self.dialect.type_compiler.process(adapted)
elif isinstance(type_, sqltypes._Binary):
return 'BINARY'
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(
type_).replace('NUMERIC', 'DECIMAL')
else:
return None
def visit_cast(self, cast, **kwargs):
# No cast until 4, no decimals until 5.
if not self.dialect._supports_cast:
return self.process(cast.clause.self_group())
type_ = self.process(cast.typeclause)
if type_ is None:
return self.process(cast.clause.self_group())
return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)
def render_literal_value(self, value, type_):
value = super(MySQLCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace('\\', '\\\\')
return value
def get_select_precolumns(self, select):
"""Add special MySQL keywords in place of DISTINCT.
.. note::
this usage is deprecated. :meth:`.Select.prefix_with`
should be used for special keywords at the start
of a SELECT.
"""
if isinstance(select._distinct, util.string_types):
return select._distinct.upper() + " "
elif select._distinct:
return "DISTINCT "
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
return ''.join(
(self.process(join.left, asfrom=True, **kwargs),
(join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "),
self.process(join.right, asfrom=True, **kwargs),
" ON ",
self.process(join.onclause, **kwargs)))
def for_update_clause(self, select):
if select._for_update_arg.read:
return " LOCK IN SHARE MODE"
else:
return " FOR UPDATE"
def limit_clause(self, select):
# MySQL supports:
# LIMIT <limit>
# LIMIT <offset>, <limit>
# and in server versions > 3.3:
# LIMIT <limit> OFFSET <offset>
# The latter is more readable for offsets but we're stuck with the
# former until we can refine dialects by server revision.
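        # Rough illustration of the clauses produced below (values shown inline
        # for readability; the compiler actually emits bind parameters):
        #   .limit(10)            ->  LIMIT 10
        #   .offset(5).limit(10)  ->  LIMIT 5, 10
        #   .offset(5) only       ->  LIMIT 5, 18446744073709551615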
limit, offset = select._limit, select._offset
if (limit, offset) == (None, None):
return ''
elif offset is not None:
# As suggested by the MySQL docs, need to apply an
# artificial limit if one wasn't provided
# http://dev.mysql.com/doc/refman/5.0/en/select.html
if limit is None:
# hardwire the upper limit. Currently
# needed by OurSQL with Python 3
# (https://bugs.launchpad.net/oursql/+bug/686232),
# but also is consistent with the usage of the upper
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return ' \n LIMIT %s, %s' % (
self.process(sql.literal(offset)),
"18446744073709551615")
else:
return ' \n LIMIT %s, %s' % (
self.process(sql.literal(offset)),
self.process(sql.literal(limit)))
else:
# No offset provided, so just use the limit
return ' \n LIMIT %s' % (self.process(sql.literal(limit)),)
def update_limit_clause(self, update_stmt):
limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None)
if limit:
return "LIMIT %s" % limit
else:
return None
def update_tables_clause(self, update_stmt, from_table,
extra_froms, **kw):
return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
for t in [from_table] + list(extra_froms))
def update_from_clause(self, update_stmt, from_table,
extra_froms, from_hints, **kw):
return None
# ug. "InnoDB needs indexes on foreign keys and referenced keys [...].
# Starting with MySQL 4.1.2, these indexes are created automatically.
# In older versions, the indexes must be created explicitly or the
# creation of foreign key constraints fails."
class MySQLDDLCompiler(compiler.DDLCompiler):
def create_table_constraints(self, table):
"""Get table constraints."""
constraint_string = super(
MySQLDDLCompiler, self).create_table_constraints(table)
# why self.dialect.name and not 'mysql'? because of drizzle
is_innodb = 'engine' in table.dialect_options[self.dialect.name] and \
table.dialect_options[self.dialect.name][
'engine'].lower() == 'innodb'
auto_inc_column = table._autoincrement_column
if is_innodb and \
auto_inc_column is not None and \
auto_inc_column is not list(table.primary_key)[0]:
if constraint_string:
constraint_string += ", \n\t"
constraint_string += "KEY %s (%s)" % (
self.preparer.quote(
"idx_autoinc_%s" % auto_inc_column.name
),
self.preparer.format_column(auto_inc_column)
)
return constraint_string
def get_column_specification(self, column, **kw):
"""Builds column DDL."""
colspec = [self.preparer.format_column(column),
self.dialect.type_compiler.process(column.type)
]
default = self.get_column_default_string(column)
if default is not None:
colspec.append('DEFAULT ' + default)
is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP)
if not column.nullable and not is_timestamp:
colspec.append('NOT NULL')
# see: http://docs.sqlalchemy.org/en/latest/dialects/
# mysql.html#mysql_timestamp_null
elif column.nullable and is_timestamp and default is None:
colspec.append('NULL')
if column is column.table._autoincrement_column and \
column.server_default is None:
colspec.append('AUTO_INCREMENT')
return ' '.join(colspec)
def post_create_table(self, table):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
opts = dict(
(
k[len(self.dialect.name) + 1:].upper(),
v
)
for k, v in table.kwargs.items()
if k.startswith('%s_' % self.dialect.name)
)
for opt in topological.sort([
('DEFAULT_CHARSET', 'COLLATE'),
('DEFAULT_CHARACTER_SET', 'COLLATE'),
('PARTITION_BY', 'PARTITIONS'), # only for test consistency
], opts):
arg = opts[opt]
if opt in _options_of_type_string:
arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
'DEFAULT_CHARSET',
'DEFAULT_COLLATE', 'PARTITION_BY'):
opt = opt.replace('_', ' ')
joiner = '='
if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
'CHARACTER SET', 'COLLATE',
'PARTITION BY', 'PARTITIONS'):
joiner = ' '
table_opts.append(joiner.join((opt, arg)))
return ' '.join(table_opts)
def visit_create_index(self, create):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
table = preparer.format_table(index.table)
columns = [self.sql_compiler.process(expr, include_table=False,
literal_binds=True)
for expr in index.expressions]
name = self._prepared_index_name(index)
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s " % (name, table)
length = index.dialect_options['mysql']['length']
if length is not None:
if isinstance(length, dict):
# length value can be a (column_name --> integer value)
# mapping specifying the prefix length for each column of the
# index
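                # e.g. (hypothetical table/columns):
                #   Index('ix_ab', t.c.a, t.c.b, mysql_length={'a': 10, 'b': 20})
                # would render the column list as "a(10), b(20)"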
columns = ', '.join(
'%s(%d)' % (expr, length[col.name]) if col.name in length
else
(
'%s(%d)' % (expr, length[expr]) if expr in length
else '%s' % expr
)
for col, expr in zip(index.expressions, columns)
)
else:
# or can be an integer value specifying the same
# prefix length for all columns of the index
columns = ', '.join(
'%s(%d)' % (col, length)
for col in columns
)
else:
columns = ', '.join(columns)
text += '(%s)' % columns
using = index.dialect_options['mysql']['using']
if using is not None:
text += " USING %s" % (preparer.quote(using))
return text
def visit_primary_key_constraint(self, constraint):
text = super(MySQLDDLCompiler, self).\
visit_primary_key_constraint(constraint)
using = constraint.dialect_options['mysql']['using']
if using:
text += " USING %s" % (self.preparer.quote(using))
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(index,
include_schema=False),
self.preparer.format_table(index.table))
def visit_drop_constraint(self, drop):
constraint = drop.element
if isinstance(constraint, sa_schema.ForeignKeyConstraint):
qual = "FOREIGN KEY "
const = self.preparer.format_constraint(constraint)
elif isinstance(constraint, sa_schema.PrimaryKeyConstraint):
qual = "PRIMARY KEY "
const = ""
elif isinstance(constraint, sa_schema.UniqueConstraint):
qual = "INDEX "
const = self.preparer.format_constraint(constraint)
else:
qual = ""
const = self.preparer.format_constraint(constraint)
return "ALTER TABLE %s DROP %s%s" % \
(self.preparer.format_table(constraint.table),
qual, const)
def define_constraint_match(self, constraint):
if constraint.match is not None:
raise exc.CompileError(
"MySQL ignores the 'MATCH' keyword while at the same time "
"causes ON UPDATE/ON DELETE clauses to be ignored.")
return ""
class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def _extend_numeric(self, type_, spec):
"Extend a numeric-type declaration with MySQL specific extensions."
if not self._mysql_type(type_):
return spec
if type_.unsigned:
spec += ' UNSIGNED'
if type_.zerofill:
spec += ' ZEROFILL'
return spec
def _extend_string(self, type_, defaults, spec):
"""Extend a string-type declaration with standard SQL CHARACTER SET /
COLLATE annotations and MySQL specific extensions.
"""
def attr(name):
return getattr(type_, name, defaults.get(name))
if attr('charset'):
charset = 'CHARACTER SET %s' % attr('charset')
elif attr('ascii'):
charset = 'ASCII'
elif attr('unicode'):
charset = 'UNICODE'
else:
charset = None
if attr('collation'):
collation = 'COLLATE %s' % type_.collation
elif attr('binary'):
collation = 'BINARY'
else:
collation = None
if attr('national'):
# NATIONAL (aka NCHAR/NVARCHAR) trumps charsets.
return ' '.join([c for c in ('NATIONAL', spec, collation)
if c is not None])
return ' '.join([c for c in (spec, charset, collation)
if c is not None])
def _mysql_type(self, type_):
return isinstance(type_, (_StringType, _NumericType))
def visit_NUMERIC(self, type_):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
return self._extend_numeric(type_,
"NUMERIC(%(precision)s)" %
{'precision': type_.precision})
else:
return self._extend_numeric(type_,
"NUMERIC(%(precision)s, %(scale)s)" %
{'precision': type_.precision,
'scale': type_.scale})
def visit_DECIMAL(self, type_):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
return self._extend_numeric(type_,
"DECIMAL(%(precision)s)" %
{'precision': type_.precision})
else:
return self._extend_numeric(type_,
"DECIMAL(%(precision)s, %(scale)s)" %
{'precision': type_.precision,
'scale': type_.scale})
def visit_DOUBLE(self, type_):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(type_,
"DOUBLE(%(precision)s, %(scale)s)" %
{'precision': type_.precision,
'scale': type_.scale})
else:
return self._extend_numeric(type_, 'DOUBLE')
def visit_REAL(self, type_):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(type_,
"REAL(%(precision)s, %(scale)s)" %
{'precision': type_.precision,
'scale': type_.scale})
else:
return self._extend_numeric(type_, 'REAL')
def visit_FLOAT(self, type_):
if self._mysql_type(type_) and \
type_.scale is not None and \
type_.precision is not None:
return self._extend_numeric(
type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale))
elif type_.precision is not None:
return self._extend_numeric(type_,
"FLOAT(%s)" % (type_.precision,))
else:
return self._extend_numeric(type_, "FLOAT")
def visit_INTEGER(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "INTEGER(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "BIGINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "MEDIUMINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "MEDIUMINT")
def visit_TINYINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"TINYINT(%s)" % type_.display_width)
else:
return self._extend_numeric(type_, "TINYINT")
def visit_SMALLINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"SMALLINT(%(display_width)s)" %
{'display_width': type_.display_width}
)
else:
return self._extend_numeric(type_, "SMALLINT")
def visit_BIT(self, type_):
if type_.length is not None:
return "BIT(%s)" % type_.length
else:
return "BIT"
def visit_DATETIME(self, type_):
if getattr(type_, 'fsp', None):
return "DATETIME(%d)" % type_.fsp
else:
return "DATETIME"
def visit_DATE(self, type_):
return "DATE"
def visit_TIME(self, type_):
if getattr(type_, 'fsp', None):
return "TIME(%d)" % type_.fsp
else:
return "TIME"
def visit_TIMESTAMP(self, type_):
if getattr(type_, 'fsp', None):
return "TIMESTAMP(%d)" % type_.fsp
else:
return "TIMESTAMP"
def visit_YEAR(self, type_):
if type_.display_width is None:
return "YEAR"
else:
return "YEAR(%s)" % type_.display_width
def visit_TEXT(self, type_):
if type_.length:
return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
else:
return self._extend_string(type_, {}, "TEXT")
def visit_TINYTEXT(self, type_):
return self._extend_string(type_, {}, "TINYTEXT")
def visit_MEDIUMTEXT(self, type_):
return self._extend_string(type_, {}, "MEDIUMTEXT")
def visit_LONGTEXT(self, type_):
return self._extend_string(type_, {}, "LONGTEXT")
def visit_VARCHAR(self, type_):
if type_.length:
return self._extend_string(
type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_CHAR(self, type_):
if type_.length:
return self._extend_string(type_, {}, "CHAR(%(length)s)" %
{'length': type_.length})
else:
return self._extend_string(type_, {}, "CHAR")
def visit_NVARCHAR(self, type_):
# We'll actually generate the equiv. "NATIONAL VARCHAR" instead
# of "NVARCHAR".
if type_.length:
return self._extend_string(
type_, {'national': True},
"VARCHAR(%(length)s)" % {'length': type_.length})
else:
raise exc.CompileError(
"NVARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_NCHAR(self, type_):
# We'll actually generate the equiv.
# "NATIONAL CHAR" instead of "NCHAR".
if type_.length:
return self._extend_string(
type_, {'national': True},
"CHAR(%(length)s)" % {'length': type_.length})
else:
return self._extend_string(type_, {'national': True}, "CHAR")
def visit_VARBINARY(self, type_):
return "VARBINARY(%d)" % type_.length
def visit_large_binary(self, type_):
return self.visit_BLOB(type_)
def visit_enum(self, type_):
if not type_.native_enum:
return super(MySQLTypeCompiler, self).visit_enum(type_)
else:
return self._visit_enumerated_values("ENUM", type_, type_.enums)
def visit_BLOB(self, type_):
if type_.length:
return "BLOB(%d)" % type_.length
else:
return "BLOB"
def visit_TINYBLOB(self, type_):
return "TINYBLOB"
def visit_MEDIUMBLOB(self, type_):
return "MEDIUMBLOB"
def visit_LONGBLOB(self, type_):
return "LONGBLOB"
def _visit_enumerated_values(self, name, type_, enumerated_values):
quoted_enums = []
for e in enumerated_values:
quoted_enums.append("'%s'" % e.replace("'", "''"))
return self._extend_string(type_, {}, "%s(%s)" % (
name, ",".join(quoted_enums))
)
def visit_ENUM(self, type_):
return self._visit_enumerated_values("ENUM", type_,
type_._enumerated_values)
def visit_SET(self, type_):
return self._visit_enumerated_values("SET", type_,
type_._enumerated_values)
    def visit_BOOLEAN(self, type_):
return "BOOL"
class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect, server_ansiquotes=False, **kw):
if not server_ansiquotes:
quote = "`"
else:
quote = '"'
super(MySQLIdentifierPreparer, self).__init__(
dialect,
initial_quote=quote,
escape_quote=quote)
def _quote_free_identifiers(self, *ids):
"""Unilaterally identifier-quote any number of strings."""
return tuple([self.quote_identifier(i) for i in ids if i is not None])
@log.class_logger
class MySQLDialect(default.DefaultDialect):
"""Details of the MySQL dialect.
Not used directly in application code.
"""
name = 'mysql'
supports_alter = True
# identifiers are 64, however aliases can be 255...
max_identifier_length = 255
max_index_name_length = 64
supports_native_enum = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_multivalues_insert = True
default_paramstyle = 'format'
colspecs = colspecs
statement_compiler = MySQLCompiler
ddl_compiler = MySQLDDLCompiler
type_compiler = MySQLTypeCompiler
ischema_names = ischema_names
preparer = MySQLIdentifierPreparer
# default SQL compilation settings -
# these are modified upon initialize(),
# i.e. first connect
_backslash_escapes = True
_server_ansiquotes = False
construct_arguments = [
(sa_schema.Table, {
"*": None
}),
(sql.Update, {
"limit": None
}),
(sa_schema.PrimaryKeyConstraint, {
"using": None
}),
(sa_schema.Index, {
"using": None,
"length": None,
})
]
def __init__(self, isolation_level=None, **kwargs):
kwargs.pop('use_ansiquotes', None) # legacy
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
'READ COMMITTED', 'REPEATABLE READ'])
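    # The level is normally supplied up front; a minimal sketch, assuming a
    # hypothetical connection URL:
    #   create_engine("mysql://scott:tiger@localhost/test",
    #                 isolation_level="READ COMMITTED")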
def set_isolation_level(self, connection, level):
level = level.replace('_', ' ')
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute('SELECT @@tx_isolation')
val = cursor.fetchone()[0]
cursor.close()
if util.py3k and isinstance(val, bytes):
val = val.decode()
return val.upper().replace("-", " ")
def do_commit(self, dbapi_connection):
"""Execute a COMMIT."""
# COMMIT/ROLLBACK were introduced in 3.23.15.
# Yes, we have at least one user who has to talk to these old
# versions!
#
# Ignore commit/rollback if support isn't present, otherwise even
# basic operations via autocommit fail.
try:
dbapi_connection.commit()
except:
if self.server_version_info < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_rollback(self, dbapi_connection):
"""Execute a ROLLBACK."""
try:
dbapi_connection.rollback()
except:
if self.server_version_info < (3, 23, 15):
args = sys.exc_info()[1].args
if args and args[0] == 1064:
return
raise
def do_begin_twophase(self, connection, xid):
connection.execute(sql.text("XA BEGIN :xid"), xid=xid)
def do_prepare_twophase(self, connection, xid):
connection.execute(sql.text("XA END :xid"), xid=xid)
connection.execute(sql.text("XA PREPARE :xid"), xid=xid)
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
connection.execute(sql.text("XA END :xid"), xid=xid)
connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
connection.execute(sql.text("XA COMMIT :xid"), xid=xid)
def do_recover_twophase(self, connection):
resultset = connection.execute("XA RECOVER")
return [row['data'][0:row['gtrid_length']] for row in resultset]
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,
self.dbapi.ProgrammingError)):
return self._extract_error_code(e) in \
(2006, 2013, 2014, 2045, 2055)
elif isinstance(e, self.dbapi.InterfaceError):
# if underlying connection is closed,
# this is the error you get
return "(0, '')" in str(e)
else:
return False
def _compat_fetchall(self, rp, charset=None):
"""Proxy result rows to smooth over MySQL-Python driver
inconsistencies."""
return [_DecodingRowProxy(row, charset) for row in rp.fetchall()]
def _compat_fetchone(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
return _DecodingRowProxy(rp.fetchone(), charset)
def _compat_first(self, rp, charset=None):
"""Proxy a result row to smooth over MySQL-Python driver
inconsistencies."""
return _DecodingRowProxy(rp.first(), charset)
def _extract_error_code(self, exception):
raise NotImplementedError()
def _get_default_schema_name(self, connection):
return connection.execute('SELECT DATABASE()').scalar()
def has_table(self, connection, table_name, schema=None):
# SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly
# on macosx (and maybe win?) with multibyte table names.
#
# TODO: if this is not a problem on win, make the strategy swappable
# based on platform. DESCRIBE is slower.
# [ticket:726]
# full_name = self.identifier_preparer.format_table(table,
# use_schema=True)
full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
schema, table_name))
st = "DESCRIBE %s" % full_name
rs = None
try:
try:
rs = connection.execute(st)
have = rs.fetchone() is not None
rs.close()
return have
except exc.DBAPIError as e:
if self._extract_error_code(e.orig) == 1146:
return False
raise
finally:
if rs:
rs.close()
def initialize(self, connection):
self._connection_charset = self._detect_charset(connection)
self._detect_ansiquotes(connection)
if self._server_ansiquotes:
# if ansiquotes == True, build a new IdentifierPreparer
# with the new setting
self.identifier_preparer = self.preparer(
self, server_ansiquotes=self._server_ansiquotes)
default.DefaultDialect.initialize(self, connection)
@property
def _supports_cast(self):
return self.server_version_info is None or \
self.server_version_info >= (4, 0, 2)
@reflection.cache
def get_schema_names(self, connection, **kw):
rp = connection.execute("SHOW schemas")
return [r[0] for r in rp]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
"""Return a Unicode SHOW TABLES from a given schema."""
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
charset = self._connection_charset
if self.server_version_info < (5, 0, 2):
rp = connection.execute(
"SHOW TABLES FROM %s" %
self.identifier_preparer.quote_identifier(current_schema))
return [row[0] for
row in self._compat_fetchall(rp, charset=charset)]
else:
rp = connection.execute(
"SHOW FULL TABLES FROM %s" %
self.identifier_preparer.quote_identifier(current_schema))
return [row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] == 'BASE TABLE']
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if self.server_version_info < (5, 0, 2):
raise NotImplementedError
if schema is None:
schema = self.default_schema_name
charset = self._connection_charset
rp = connection.execute(
"SHOW FULL TABLES FROM %s" %
self.identifier_preparer.quote_identifier(schema))
return [row[0]
for row in self._compat_fetchall(rp, charset=charset)
if row[1] in ('VIEW', 'SYSTEM VIEW')]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw)
return parsed_state.table_options
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw)
return parsed_state.columns
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw)
for key in parsed_state.keys:
if key['type'] == 'PRIMARY':
# There can be only one.
cols = [s[0] for s in key['columns']]
return {'constrained_columns': cols, 'name': None}
return {'constrained_columns': [], 'name': None}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw)
default_schema = None
fkeys = []
for spec in parsed_state.constraints:
# only FOREIGN KEYs
ref_name = spec['table'][-1]
ref_schema = len(spec['table']) > 1 and \
spec['table'][-2] or schema
if not ref_schema:
if default_schema is None:
default_schema = \
connection.dialect.default_schema_name
if schema == default_schema:
ref_schema = schema
loc_names = spec['local']
ref_names = spec['foreign']
con_kw = {}
for opt in ('onupdate', 'ondelete'):
if spec.get(opt, False):
con_kw[opt] = spec[opt]
fkey_d = {
'name': spec['name'],
'constrained_columns': loc_names,
'referred_schema': ref_schema,
'referred_table': ref_name,
'referred_columns': ref_names,
'options': con_kw
}
fkeys.append(fkey_d)
return fkeys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw)
indexes = []
for spec in parsed_state.keys:
unique = False
flavor = spec['type']
if flavor == 'PRIMARY':
continue
if flavor == 'UNIQUE':
unique = True
elif flavor in (None, 'FULLTEXT', 'SPATIAL'):
pass
else:
self.logger.info(
"Converting unknown KEY type %s to a plain KEY" % flavor)
pass
index_d = {}
index_d['name'] = spec['name']
index_d['column_names'] = [s[0] for s in spec['columns']]
index_d['unique'] = unique
index_d['type'] = flavor
indexes.append(index_d)
return indexes
@reflection.cache
def get_unique_constraints(self, connection, table_name,
schema=None, **kw):
parsed_state = self._parsed_state_or_create(
connection, table_name, schema, **kw)
return [
{
'name': key['name'],
'column_names': [col[0] for col in key['columns']]
}
for key in parsed_state.keys
if key['type'] == 'UNIQUE'
]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
charset = self._connection_charset
full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
schema, view_name))
sql = self._show_create_table(connection, None, charset,
full_name=full_name)
return sql
def _parsed_state_or_create(self, connection, table_name,
schema=None, **kw):
return self._setup_parser(
connection,
table_name,
schema,
info_cache=kw.get('info_cache', None)
)
@util.memoized_property
def _tabledef_parser(self):
"""return the MySQLTableDefinitionParser, generate if needed.
The deferred creation ensures that the dialect has
retrieved server version information first.
"""
if (self.server_version_info < (4, 1) and self._server_ansiquotes):
# ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1
preparer = self.preparer(self, server_ansiquotes=False)
else:
preparer = self.identifier_preparer
return MySQLTableDefinitionParser(self, preparer)
@reflection.cache
def _setup_parser(self, connection, table_name, schema=None, **kw):
charset = self._connection_charset
parser = self._tabledef_parser
full_name = '.'.join(self.identifier_preparer._quote_free_identifiers(
schema, table_name))
sql = self._show_create_table(connection, None, charset,
full_name=full_name)
if sql.startswith('CREATE ALGORITHM'):
# Adapt views to something table-like.
columns = self._describe_table(connection, None, charset,
full_name=full_name)
sql = parser._describe_to_create(table_name, columns)
return parser.parse(sql, charset)
def _detect_charset(self, connection):
raise NotImplementedError()
def _detect_casing(self, connection):
"""Sniff out identifier case sensitivity.
Cached per-connection. This value can not change without a server
restart.
"""
# http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html
charset = self._connection_charset
row = self._compat_first(connection.execute(
"SHOW VARIABLES LIKE 'lower_case_table_names'"),
charset=charset)
if not row:
cs = 0
else:
# 4.0.15 returns OFF or ON according to [ticket:489]
# 3.23 doesn't, 4.0.27 doesn't..
if row[1] == 'OFF':
cs = 0
elif row[1] == 'ON':
cs = 1
else:
cs = int(row[1])
return cs
def _detect_collations(self, connection):
"""Pull the active COLLATIONS list from the server.
Cached per-connection.
"""
collations = {}
if self.server_version_info < (4, 1, 0):
pass
else:
charset = self._connection_charset
rs = connection.execute('SHOW COLLATION')
for row in self._compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
def _detect_ansiquotes(self, connection):
"""Detect and adjust for the ANSI_QUOTES sql mode."""
row = self._compat_first(
connection.execute("SHOW VARIABLES LIKE 'sql_mode'"),
charset=self._connection_charset)
if not row:
mode = ''
else:
mode = row[1] or ''
# 4.0
if mode.isdigit():
mode_no = int(mode)
mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or ''
self._server_ansiquotes = 'ANSI_QUOTES' in mode
# as of MySQL 5.0.1
self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode
def _show_create_table(self, connection, table, charset=None,
full_name=None):
"""Run SHOW CREATE TABLE for a ``Table``."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "SHOW CREATE TABLE %s" % full_name
rp = None
try:
rp = connection.execute(st)
except exc.DBAPIError as e:
if self._extract_error_code(e.orig) == 1146:
raise exc.NoSuchTableError(full_name)
else:
raise
row = self._compat_first(rp, charset=charset)
if not row:
raise exc.NoSuchTableError(full_name)
return row[1].strip()
def _describe_table(self, connection, table, charset=None,
full_name=None):
"""Run DESCRIBE for a ``Table`` and return processed rows."""
if full_name is None:
full_name = self.identifier_preparer.format_table(table)
st = "DESCRIBE %s" % full_name
rp, rows = None, None
try:
try:
rp = connection.execute(st)
except exc.DBAPIError as e:
if self._extract_error_code(e.orig) == 1146:
raise exc.NoSuchTableError(full_name)
else:
raise
rows = self._compat_fetchall(rp, charset=charset)
finally:
if rp:
rp.close()
return rows
class ReflectedState(object):
"""Stores raw information about a SHOW CREATE TABLE statement."""
def __init__(self):
self.columns = []
self.table_options = {}
self.table_name = None
self.keys = []
self.constraints = []
@log.class_logger
class MySQLTableDefinitionParser(object):
"""Parses the results of a SHOW CREATE TABLE statement."""
def __init__(self, dialect, preparer):
self.dialect = dialect
self.preparer = preparer
self._prep_regexes()
def parse(self, show_create, charset):
state = ReflectedState()
state.charset = charset
for line in re.split(r'\r?\n', show_create):
if line.startswith(' ' + self.preparer.initial_quote):
self._parse_column(line, state)
# a regular table options line
elif line.startswith(') '):
self._parse_table_options(line, state)
# an ANSI-mode table options line
elif line == ')':
pass
elif line.startswith('CREATE '):
self._parse_table_name(line, state)
# Not present in real reflection, but may be if
# loading from a file.
elif not line:
pass
else:
type_, spec = self._parse_constraints(line)
if type_ is None:
util.warn("Unknown schema content: %r" % line)
elif type_ == 'key':
state.keys.append(spec)
elif type_ == 'constraint':
state.constraints.append(spec)
else:
pass
return state
def _parse_constraints(self, line):
"""Parse a KEY or CONSTRAINT line.
:param line: A line of SHOW CREATE TABLE output
"""
# KEY
m = self._re_key.match(line)
if m:
spec = m.groupdict()
# convert columns into name, length pairs
spec['columns'] = self._parse_keyexprs(spec['columns'])
return 'key', spec
# CONSTRAINT
m = self._re_constraint.match(line)
if m:
spec = m.groupdict()
spec['table'] = \
self.preparer.unformat_identifiers(spec['table'])
spec['local'] = [c[0]
for c in self._parse_keyexprs(spec['local'])]
spec['foreign'] = [c[0]
for c in self._parse_keyexprs(spec['foreign'])]
return 'constraint', spec
# PARTITION and SUBPARTITION
m = self._re_partition.match(line)
if m:
# Punt!
return 'partition', line
# No match.
return (None, line)
def _parse_table_name(self, line, state):
"""Extract the table name.
:param line: The first line of SHOW CREATE TABLE
"""
regex, cleanup = self._pr_name
m = regex.match(line)
if m:
state.table_name = cleanup(m.group('name'))
def _parse_table_options(self, line, state):
"""Build a dictionary of all reflected table-level options.
:param line: The final line of SHOW CREATE TABLE output.
"""
options = {}
if not line or line == ')':
pass
else:
rest_of_line = line[:]
for regex, cleanup in self._pr_options:
m = regex.search(rest_of_line)
if not m:
continue
directive, value = m.group('directive'), m.group('val')
if cleanup:
value = cleanup(value)
options[directive.lower()] = value
rest_of_line = regex.sub('', rest_of_line)
for nope in ('auto_increment', 'data directory', 'index directory'):
options.pop(nope, None)
for opt, val in options.items():
state.table_options['%s_%s' % (self.dialect.name, opt)] = val
def _parse_column(self, line, state):
"""Extract column details.
Falls back to a 'minimal support' variant if full parse fails.
:param line: Any column-bearing line from SHOW CREATE TABLE
"""
spec = None
m = self._re_column.match(line)
if m:
spec = m.groupdict()
spec['full'] = True
else:
m = self._re_column_loose.match(line)
if m:
spec = m.groupdict()
spec['full'] = False
if not spec:
util.warn("Unknown column definition %r" % line)
return
if not spec['full']:
util.warn("Incomplete reflection of column definition %r" % line)
name, type_, args, notnull = \
spec['name'], spec['coltype'], spec['arg'], spec['notnull']
try:
col_type = self.dialect.ischema_names[type_]
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
col_type = sqltypes.NullType
# Column type positional arguments eg. varchar(32)
if args is None or args == '':
type_args = []
elif args[0] == "'" and args[-1] == "'":
type_args = self._re_csv_str.findall(args)
else:
type_args = [int(v) for v in self._re_csv_int.findall(args)]
# Column type keyword options
type_kw = {}
for kw in ('unsigned', 'zerofill'):
if spec.get(kw, False):
type_kw[kw] = True
for kw in ('charset', 'collate'):
if spec.get(kw, False):
type_kw[kw] = spec[kw]
if issubclass(col_type, _EnumeratedValues):
type_args = _EnumeratedValues._strip_values(type_args)
type_instance = col_type(*type_args, **type_kw)
col_args, col_kw = [], {}
# NOT NULL
col_kw['nullable'] = True
if spec.get('notnull', False):
col_kw['nullable'] = False
# AUTO_INCREMENT
if spec.get('autoincr', False):
col_kw['autoincrement'] = True
elif issubclass(col_type, sqltypes.Integer):
col_kw['autoincrement'] = False
# DEFAULT
default = spec.get('default', None)
if default == 'NULL':
# eliminates the need to deal with this later.
default = None
col_d = dict(name=name, type=type_instance, default=default)
col_d.update(col_kw)
state.columns.append(col_d)
def _describe_to_create(self, table_name, columns):
"""Re-format DESCRIBE output as a SHOW CREATE TABLE string.
DESCRIBE is a much simpler reflection and is sufficient for
reflecting views for runtime use. This method formats DDL
for columns only- keys are omitted.
:param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
SHOW FULL COLUMNS FROM rows must be rearranged for use with
this function.
"""
buffer = []
for row in columns:
(name, col_type, nullable, default, extra) = \
[row[i] for i in (0, 1, 2, 4, 5)]
line = [' ']
line.append(self.preparer.quote_identifier(name))
line.append(col_type)
if not nullable:
line.append('NOT NULL')
if default:
if 'auto_increment' in default:
pass
elif (col_type.startswith('timestamp') and
default.startswith('C')):
line.append('DEFAULT')
line.append(default)
elif default == 'NULL':
line.append('DEFAULT')
line.append(default)
else:
line.append('DEFAULT')
line.append("'%s'" % default.replace("'", "''"))
if extra:
line.append(extra)
buffer.append(' '.join(line))
return ''.join([('CREATE TABLE %s (\n' %
self.preparer.quote_identifier(table_name)),
',\n'.join(buffer),
'\n) '])
def _parse_keyexprs(self, identifiers):
"""Unpack '"col"(2),"col" ASC'-ish strings into components."""
return self._re_keyexprs.findall(identifiers)
def _prep_regexes(self):
"""Pre-compile regular expressions."""
self._re_columns = []
self._pr_options = []
_final = self.preparer.final_quote
quotes = dict(zip(('iq', 'fq', 'esc_fq'),
[re.escape(s) for s in
(self.preparer.initial_quote,
_final,
self.preparer._escape_identifier(_final))]))
self._pr_name = _pr_compile(
r'^CREATE (?:\w+ +)?TABLE +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
self.preparer._unescape_identifier)
# `col`,`col2`(32),`col3`(15) DESC
#
# Note: ASC and DESC aren't reflected, so we'll punt...
self._re_keyexprs = _re_compile(
r'(?:'
r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
# 123 or 123,456
self._re_csv_int = _re_compile(r'\d+')
# `colname` <type> [type opts]
# (NOT NULL | NULL)
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
# COMMENT 'comment'
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
# STORAGE (DISK|MEMORY)
self._re_column = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
r'(?: +(?P<unsigned>UNSIGNED))?'
r'(?: +(?P<zerofill>ZEROFILL))?'
r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
r'(?: +COLLATE +(?P<collate>[\w_]+))?'
r'(?: +(?P<notnull>NOT NULL))?'
r'(?: +DEFAULT +(?P<default>'
r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
r'(?: +ON UPDATE \w+)?)'
r'))?'
r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
            r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
r'(?: +STORAGE +(?P<storage>\w+))?'
r'(?: +(?P<extra>.*))?'
r',?$'
% quotes
)
# Fallback, try to parse as little as possible
self._re_column_loose = _re_compile(
r' '
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
r'.*?(?P<notnull>NOT NULL)?'
% quotes
)
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
# KEY_BLOCK_SIZE size | WITH PARSER name
self._re_key = _re_compile(
r' '
r'(?:(?P<type>\S+) )?KEY'
r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
r'(?: +USING +(?P<using_pre>\S+))?'
r' +\((?P<columns>.+?)\)'
r'(?: +USING +(?P<using_post>\S+))?'
r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?'
r'(?: +WITH PARSER +(?P<parser>\S+))?'
r',?$'
% quotes
)
# CONSTRAINT `name` FOREIGN KEY (`local_col`)
# REFERENCES `remote` (`remote_col`)
# MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
# ON DELETE CASCADE ON UPDATE RESTRICT
#
# unique constraints come back as KEYs
kw = quotes.copy()
kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
self._re_constraint = _re_compile(
r' '
r'CONSTRAINT +'
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'FOREIGN KEY +'
r'\((?P<local>[^\)]+?)\) REFERENCES +'
r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
r'\((?P<foreign>[^\)]+?)\)'
r'(?: +(?P<match>MATCH \w+))?'
r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
% kw
)
# PARTITION
#
# punt!
self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
# Table-level options (COLLATE, ENGINE, etc.)
# Do the string options first, since they have quoted
# strings we need to get rid of.
for option in _options_of_type_string:
self._add_option_string(option)
for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
'AVG_ROW_LENGTH', 'CHARACTER SET',
'DEFAULT CHARSET', 'CHECKSUM',
'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
'KEY_BLOCK_SIZE'):
self._add_option_word(option)
self._add_option_regex('UNION', r'\([^\)]+\)')
self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
self._add_option_regex(
'RAID_TYPE',
r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
_optional_equals = r'(?:\s*(?:=\s*)|\s+)'
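    # With the separator above, a directive added via _add_option_word('ENGINE')
    # matches both "ENGINE=InnoDB" and "ENGINE InnoDB" (illustrative values).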
def _add_option_string(self, directive):
regex = (r'(?P<directive>%s)%s'
r"'(?P<val>(?:[^']|'')*?)'(?!')" %
(re.escape(directive), self._optional_equals))
self._pr_options.append(_pr_compile(
regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
))
def _add_option_word(self, directive):
regex = (r'(?P<directive>%s)%s'
r'(?P<val>\w+)' %
(re.escape(directive), self._optional_equals))
self._pr_options.append(_pr_compile(regex))
def _add_option_regex(self, directive, regex):
regex = (r'(?P<directive>%s)%s'
r'(?P<val>%s)' %
(re.escape(directive), self._optional_equals, regex))
self._pr_options.append(_pr_compile(regex))
_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
'PASSWORD', 'CONNECTION')
class _DecodingRowProxy(object):
"""Return unicode-decoded values based on type inspection.
Smooth over data type issues (esp. with alpha driver versions) and
normalize strings as Unicode regardless of user-configured driver
encoding settings.
"""
# Some MySQL-python versions can return some columns as
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
def __init__(self, rowproxy, charset):
self.rowproxy = rowproxy
self.charset = charset
def __getitem__(self, index):
item = self.rowproxy[index]
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
def __getattr__(self, attr):
item = getattr(self.rowproxy, attr)
if isinstance(item, _array):
item = item.tostring()
if self.charset and isinstance(item, util.binary_type):
return item.decode(self.charset)
else:
return item
def _pr_compile(regex, cleanup=None):
"""Prepare a 2-tuple of compiled regex and callable."""
return (_re_compile(regex), cleanup)
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE)
|
rickerc/neutron_audit
|
refs/heads/cis-havana-staging
|
neutron/plugins/hyperv/model.py
|
21
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Alessandro Pilotti, Cloudbase Solutions Srl
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from neutron.db.models_v2 import model_base
class VlanAllocation(model_base.BASEV2):
"""Represents allocation state of vlan_id on physical network."""
__tablename__ = 'hyperv_vlan_allocations'
physical_network = Column(String(64), nullable=False, primary_key=True)
vlan_id = Column(Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = Column(Boolean, nullable=False)
def __init__(self, physical_network, vlan_id):
self.physical_network = physical_network
self.vlan_id = vlan_id
self.allocated = False
class NetworkBinding(model_base.BASEV2):
"""Represents binding of virtual network to physical realization."""
__tablename__ = 'hyperv_network_bindings'
network_id = Column(String(36),
ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = Column(String(32), nullable=False)
physical_network = Column(String(64))
segmentation_id = Column(Integer)
def __init__(self, network_id, network_type, physical_network,
segmentation_id):
self.network_id = network_id
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
|
ademmers/ansible
|
refs/heads/devel
|
test/units/module_utils/urls/test_urls.py
|
74
|
# -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils import urls
from ansible.module_utils._text import to_native
import pytest
def test_build_ssl_validation_error(mocker):
mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=False)
mocker.patch.object(urls, 'HAS_URLLIB3_PYOPENSSLCONTEXT', new=False)
mocker.patch.object(urls, 'HAS_URLLIB3_SSL_WRAP_SOCKET', new=False)
with pytest.raises(urls.SSLValidationError) as excinfo:
urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
assert 'python >= 2.7.9' in to_native(excinfo.value)
assert 'the python executable used' in to_native(excinfo.value)
assert 'urllib3' in to_native(excinfo.value)
assert 'python >= 2.6' in to_native(excinfo.value)
assert 'validate_certs=False' in to_native(excinfo.value)
mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=True)
with pytest.raises(urls.SSLValidationError) as excinfo:
urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
assert 'validate_certs=False' in to_native(excinfo.value)
mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=False)
mocker.patch.object(urls, 'HAS_URLLIB3_PYOPENSSLCONTEXT', new=True)
mocker.patch.object(urls, 'HAS_URLLIB3_SSL_WRAP_SOCKET', new=True)
mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=True)
with pytest.raises(urls.SSLValidationError) as excinfo:
urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
assert 'urllib3' not in to_native(excinfo.value)
with pytest.raises(urls.SSLValidationError) as excinfo:
urls.build_ssl_validation_error('hostname', 'port', 'paths', exc='BOOM')
assert 'BOOM' in to_native(excinfo.value)
def test_maybe_add_ssl_handler(mocker):
mocker.patch.object(urls, 'HAS_SSL', new=False)
with pytest.raises(urls.NoSSLError):
urls.maybe_add_ssl_handler('https://ansible.com/', True)
mocker.patch.object(urls, 'HAS_SSL', new=True)
url = 'https://user:passwd@ansible.com/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == 'ansible.com'
assert handler.port == 443
url = 'https://ansible.com:4433/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == 'ansible.com'
assert handler.port == 4433
url = 'https://user:passwd@ansible.com:4433/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == 'ansible.com'
assert handler.port == 4433
url = 'https://ansible.com/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == 'ansible.com'
assert handler.port == 443
url = 'http://ansible.com/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler is None
url = 'https://[2a00:16d8:0:7::205]:4443/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == '2a00:16d8:0:7::205'
assert handler.port == 4443
url = 'https://[2a00:16d8:0:7::205]/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == '2a00:16d8:0:7::205'
assert handler.port == 443
def test_basic_auth_header():
header = urls.basic_auth_header('user', 'passwd')
assert header == b'Basic dXNlcjpwYXNzd2Q='
def test_ParseResultDottedDict():
url = 'https://ansible.com/blog'
parts = urls.urlparse(url)
dotted_parts = urls.ParseResultDottedDict(parts._asdict())
assert parts[0] == dotted_parts.scheme
assert dotted_parts.as_list() == list(parts)
def test_unix_socket_patch_httpconnection_connect(mocker):
unix_conn = mocker.patch.object(urls.UnixHTTPConnection, 'connect')
conn = urls.httplib.HTTPConnection('ansible.com')
with urls.unix_socket_patch_httpconnection_connect():
conn.connect()
assert unix_conn.call_count == 1
|
oxfordinternetinstitute/scriptingcourse
|
refs/heads/master
|
DSR-week 4/EX_wk4_regex_exampleCode.py
|
1
|
#!/usr/bin/env python
# encoding: utf-8
# Wikilinks downloader and parser
# The purpose of this script is to take a page in Wikipedia, parse the links within
# and store them in a database.
#
# It demonstrates the joint use of:
# BeautifulStoneSoup - an xml parser
# sqlalchemy - an interface to the lightweight sqlite
# regular expressions
# Object-oriented classes
#
# Author: Bernie Hogan
# Version: 1.1
# February 11, 2014
from BeautifulSoup import BeautifulStoneSoup  # drop BeautifulSoup.py into your directory or "pip install BeautifulSoup"
from sqlalchemy import * # pip install sqlalchemy
import urllib2,urllib
import re, os
import sys
def getWikiPage(page):
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', "OIItestWikibot/1.0")]
url = "http://en.wikipedia.org/wiki/Special:Export/%s" % urllib.quote(page)
return opener.open(url).read()
def getTextFromWikiPage(fullpage):
# x = page.decode('utf-8','ignore')
# x = x.encode('ascii','ignore')
soup = BeautifulStoneSoup(fullpage)
print soup.mediawiki.page.revision.id
return soup.mediawiki.page.text
def getWikiLinks(cookedtext):
# Here is the regular expression. Note, it is not robust.
# SIMPLE
wikileeks = re.compile(r'\[\[.*?]\]')
# print wikileeks.findall(cookedtext)
# LESS SIMPLE - http links
# wikileeks = re.compile("http://[\w\./?&=%]*")
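    # Illustrative behaviour of the simple pattern on made-up wikitext:
    #   "born in [[Ottawa|the capital]] of [[Canada]]"
    #   -> ['[[Ottawa|the capital]]', '[[Canada]]']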
return wikileeks.findall(cookedtext)
def getLinksFromPage(wikipage):
page = getWikiPage(wikipage)
text = getTextFromWikiPage(page)
return getWikiLinks(text)
class newdb:
def __init__ (self,dbname):
dbexists = False
if os.path.exists(os.getcwd() + os.sep + dbname + ".db"):
dbexists = True
self.db = create_engine('sqlite:///%s.db' % dbname)
self.connection = self.db.connect()
self.metadata = MetaData(self.db)
self.link_table = Table('page',
self.metadata,
Column('ArticleName', String(256)),
Column('Wikilink', String(256)),
Column('LinkKey', String(256), unique=True,
primary_key=True),keep_existing=True,)
if not dbexists:
self.link_table.create()
def insertLink(self,articlename,wikilink):
ins = self.link_table.insert(prefixes=['OR IGNORE']).values(ArticleName=articlename,
Wikilink=wikilink,LinkKey = articlename + "::" + wikilink)
self.connection.execute(ins)
def getLinksFromDB(self,getthisrow):
sel = select([self.link_table.c.Wikilink,self.link_table.c.ArticleName],
self.link_table.c.ArticleName == getthisrow)
result = self.connection.execute(sel)
for row in result:
print row
db = newdb("wikilinks")
page = "Canada"
# Here we are getting the list of links
links = getLinksFromPage(page)
# Now we clean them up.
# First, get rid of the brackets.
links = [x[2:-2] for x in links]
# Second, get rid of the text after the pipe
links = [x.split("|")[0] for x in links]
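# Worked example (made-up link): '[[Ottawa|the capital]]' -> 'Ottawa|the capital' -> 'Ottawa'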
for c,i in enumerate(links):
db.insertLink(page,i)
db.getLinksFromDB(page)
|
jerroydmoore/YARB
|
refs/heads/master
|
nlu/__init__.py
|
12133432
| |
reinout/django
|
refs/heads/master
|
tests/forms_tests/tests/__init__.py
|
12133432
| |
shownomercy/django
|
refs/heads/master
|
tests/fixtures/__init__.py
|
12133432
| |
blstream/ut-arena
|
refs/heads/master
|
ut_arena_py_api/apps/__init__.py
|
12133432
| |
mugurrus/ally-py-common
|
refs/heads/master
|
patch-praha/acl/core_patch/__init__.py
|
12133432
| |
zhangjunli177/sahara
|
refs/heads/master
|
sahara/plugins/cdh/client/__init__.py
|
12133432
| |
google-research/understanding-transfer-learning
|
refs/heads/main
|
imagenet_train.py
|
1
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.tensorboard
from tqdm import tqdm
import gin
import libutil
import libdata
import libmodel
import libtrain
import configs
def main():
parser = libutil.flags.get_argparser()
args = parser.parse_args()
my_cfg = configs.Registry.get_config(args.config_key)
libutil.setup.setup(args, my_cfg)
run_train(args)
@gin.configurable('data')
def get_data(name=gin.REQUIRED, configs=None):
return libdata.torchvision_get_data(name, configs)
@gin.configurable('train', blacklist=['args'])
def run_train(args, batch_size=gin.REQUIRED, epochs=gin.REQUIRED, finetune_from=None, mixup_alpha=0.7, data_workers=32):
loaded_data, data_meta = get_data()
model = libmodel.build_model(num_classes=data_meta['num_classes'])
if finetune_from:
libtrain.load_finetune_init(model, finetune_from)
else:
logging.info('No finetune init weights specified.')
cel = nn.CrossEntropyLoss()
def criterion(pred, target, lam):
"""Criterion suitable for mixup training."""
        return (
            -F.log_softmax(pred, dim=1)
            * torch.zeros(pred.size()).cuda().scatter_(
                1, target.data.view(-1, 1), lam.view(-1, 1))
        ).sum(dim=1).mean()
optimizer = libtrain.make_optimizer(model)
lr_scheduler = libtrain.make_lr_scheduler(optimizer)
weight_regularizers = libtrain.make_weight_regularizers(model)
train_loader = torch.utils.data.DataLoader(
loaded_data['train'], batch_size=batch_size, shuffle=True,
num_workers=data_workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
loaded_data['test'], batch_size=batch_size, shuffle=False,
num_workers=data_workers, pin_memory=False)
data_to_eval = [('test', val_loader)]
if loaded_data['eval_mode_train'] is not None:
eval_mode_train_loader = torch.utils.data.DataLoader(
loaded_data['eval_mode_train'], batch_size=batch_size, shuffle=False,
num_workers=data_workers, pin_memory=False)
data_to_eval.append(('train', eval_mode_train_loader))
tb_writer = torch.utils.tensorboard.SummaryWriter(os.path.join(args.work_dir, 'train_tensorboard'))
def eval_and_save_ckpt(epoch, best_acc1):
for name, loader in data_to_eval:
logging.info(f'E{epoch:03d} Evaluating {name}...')
results = libtrain.imagenet_test_epoch(loader, model, cel)
logging.info(f'E{epoch:03d} eval-{name}: Acc@1 {results["top1"]:.3f} Loss {results["loss"]:.4f}')
tb_writer.add_scalar(f'eval/{name}_acc', results['top1'], epoch)
if name == 'test':
is_best = results['top1'] > best_acc1
best_acc1 = max(results['top1'], best_acc1)
libtrain.save_checkpoint({
                    'epoch': epoch, 'state_dict': model.state_dict(),
                    'best_acc1': best_acc1, 'optimizer': optimizer.state_dict(),
}, is_best=is_best, ckpt_dir=os.path.join(args.work_dir, 'checkpoints'))
return best_acc1
best_acc1 = eval_and_save_ckpt(epoch=0, best_acc1=-float('inf'))
for epoch in range(epochs):
for i_grp, param_group in enumerate(optimizer.param_groups):
tb_writer.add_scalar(f'learning_rate/group{i_grp}', param_group['lr'], epoch + 1)
# train for one epoch
train_epoch(train_loader, model, criterion, optimizer, epoch, mixup_alpha, weight_regularizers, tb_writer)
best_acc1 = eval_and_save_ckpt(epoch=epoch+1, best_acc1=best_acc1)
if lr_scheduler is not None:
lr_scheduler.step()
tb_writer.flush()
tb_writer.close()
def train_epoch(train_loader, model, criterion, optimizer, epoch, mixup_alpha, weight_regularizers, tb_writer=None):
batch_time = libtrain.AverageMeter()
data_time = libtrain.AverageMeter()
losses = libtrain.AverageMeter()
top1 = libtrain.AverageMeter()
# switch to train mode
model.train()
end = time.time()
the_tqdm = tqdm(train_loader, disable=None, desc=f'Train E{epoch+1:03d}')
for i, (inputs, targets) in enumerate(the_tqdm):
# measure data loading time
data_time.update(time.time() - end)
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
inputs, targets_a, targets_b, lam = libutil.mixup.mixup_data(inputs, targets, mixup_alpha, use_cuda=True)
# compute output
output = model(inputs)
loss_func = libutil.mixup.mixup_criterion(targets_a, targets_b, lam)
loss = loss_func(criterion, output)
loss += libtrain.eval_weight_regularizers(weight_regularizers)
# measure accuracy and record loss
acc1 = libtrain.calc_accuracy(output, targets, topk=(1,))[0]
losses.update(loss.item(), inputs.size(0))
top1.update(acc1.item(), inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
the_tqdm.set_description(f'Train E{epoch+1:03d} Acc={top1.avg:.3f} Loss={losses.avg:.4f}')
    logging.info(f'E{epoch + 1:03d} train: Acc@1 {top1.avg:.3f} Loss {losses.avg:.4f}')
if tb_writer is not None:
tb_writer.add_scalar('train/loss', losses.avg, epoch + 1)
tb_writer.add_scalar('train/acc', top1.avg, epoch + 1)
if __name__ == '__main__':
main()
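# Illustrative sketch only, not the project's code: libutil.mixup is not shown in this file, so
# _mixup_data_sketch below is an assumption of what mixup_data() conventionally does (a convex
# combination of pairs of examples). It is included purely to clarify how the custom `criterion`
# above consumes a per-sample mixing weight `lam` (note the `lam.view(-1, 1)` call); the real
# mixup_criterion() presumably pairs it with targets_a/targets_b, but its implementation is also
# not visible here. The underscore prefix marks this as a hypothetical helper, not the real one.
def _mixup_data_sketch(inputs, targets, alpha):
    """Mix each image with a random partner; return (mixed_inputs, targets_a, targets_b, lam)."""
    # Per-sample mixing coefficients drawn from Beta(alpha, alpha); assumes 4-D image batches (N, C, H, W).
    lam = torch.distributions.Beta(alpha, alpha).sample((inputs.size(0),)).to(inputs.device)
    index = torch.randperm(inputs.size(0), device=inputs.device)
    mixed_inputs = lam.view(-1, 1, 1, 1) * inputs + (1.0 - lam).view(-1, 1, 1, 1) * inputs[index]
    return mixed_inputs, targets, targets[index], lam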
|
zdszxp/gamesrc
|
refs/heads/master
|
Trdlib/src/boost_1_60_0/libs/python/test/staticmethod.py
|
46
|
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from staticmethod_ext import *
>>> class X1(X):
... pass
>>> x = X(16)
>>> x1 = X1(17)
>>> x1.count()
2
>>> x.count()
2
>>> X1.count()
2
>>> X.count()
2
>>> x1.magic()
7654321
>>> x.magic()
7654321
>>> X1.magic()
7654321
>>> X.magic()
7654321
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
|
anantk17/flask-python
|
refs/heads/master
|
db_repository/versions/002_migration.py
|
134
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
post = Table('post', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('body', String(length=140)),
Column('timestamp', DateTime),
Column('user_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['post'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['post'].drop()
|
tmpgit/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyCompatibilityInspection/raiseStatement.py
|
45
|
try:
a
except :
<warning descr="Python version 3.0, 3.1, 3.2, 3.3, 3.4, 3.5 do not support this syntax.">raise ImportError, ImportWarning</warning>
<warning descr="Python version 3.0, 3.1, 3.2, 3.3, 3.4, 3.5 do not support this syntax. Raise with no arguments can only be used in an except block">raise</warning>
|
pawelmhm/scrapy
|
refs/heads/master
|
tests/CrawlerProcess/default_name_resolver.py
|
5
|
import scrapy
from scrapy.crawler import CrawlerProcess
class IPv6Spider(scrapy.Spider):
"""
Raises a twisted.internet.error.DNSLookupError:
the default name resolver does not handle IPv6 addresses.
"""
name = "ipv6_spider"
start_urls = ["http://[::1]"]
if __name__ == "__main__":
process = CrawlerProcess(settings={"RETRY_ENABLED": False})
process.crawl(IPv6Spider)
process.start()
|
dcolligan/server
|
refs/heads/master
|
oidc-provider/simple_op/src/provider/authn/user_pass.py
|
4
|
import json
from oic.utils.http_util import Response
from provider.authn import make_cls_from_name, AuthnModule
class UserPass(AuthnModule):
url_endpoint = "/user_pass/verify"
def __init__(
self, db, template_env, template="user_pass.jinja2", **kwargs):
super(UserPass, self).__init__(None)
self.template_env = template_env
self.template = template
cls = make_cls_from_name(db["class"])
self.user_db = cls(**db["kwargs"])
self.kwargs = kwargs
self.kwargs.setdefault("page_header", "Log in")
self.kwargs.setdefault("user_label", "Username")
self.kwargs.setdefault("passwd_label", "Password")
self.kwargs.setdefault("submit_btn", "Log in")
def __call__(self, *args, **kwargs):
template = self.template_env.get_template(self.template)
return Response(template.render(action=self.url_endpoint,
state=json.dumps(kwargs),
**self.kwargs))
def verify(self, *args, **kwargs):
username = kwargs["username"]
if username in self.user_db and self.user_db[username] == kwargs[
"password"]:
return username, True
else:
return self.FAILED_AUTHN
|
wbbradley/django-webtest
|
refs/heads/master
|
django_webtest_tests/testapp_tests/middleware.py
|
15
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
class UserMiddleware(object):
def process_request(self, request):
request.user.processed = True
|
wwitzel3/awx
|
refs/heads/devel
|
awx/main/models/credential/__init__.py
|
1
|
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
from collections import OrderedDict
import functools
import logging
import os
import re
import stat
import tempfile
import six
# Jinja2
from jinja2 import Template
# Django
from django.db import models
from django.utils.translation import ugettext_lazy as _, ugettext_noop
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
# AWX
from awx.api.versioning import reverse
from awx.main.fields import (ImplicitRoleField, CredentialInputField,
CredentialTypeInputField,
CredentialTypeInjectorField)
from awx.main.utils import decrypt_field
from awx.main.utils.safe_yaml import safe_dump
from awx.main.validators import validate_ssh_private_key
from awx.main.models.base import * # noqa
from awx.main.models.mixins import ResourceMixin
from awx.main.models.rbac import (
ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
ROLE_SINGLETON_SYSTEM_AUDITOR,
)
from awx.main.utils import encrypt_field
from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS
from . import injectors as builtin_injectors
__all__ = ['Credential', 'CredentialType', 'V1Credential', 'build_safe_env']
logger = logging.getLogger('awx.main.models.credential')
HIDDEN_PASSWORD = '**********'
def build_safe_env(env):
'''
Build environment dictionary, hiding potentially sensitive information
such as passwords or keys.
'''
hidden_re = re.compile(r'API|TOKEN|KEY|SECRET|PASS', re.I)
urlpass_re = re.compile(r'^.*?://[^:]+:(.*?)@.*?$')
safe_env = dict(env)
for k, v in safe_env.items():
if k == 'AWS_ACCESS_KEY_ID':
continue
elif k.startswith('ANSIBLE_') and not k.startswith('ANSIBLE_NET'):
continue
elif hidden_re.search(k):
safe_env[k] = HIDDEN_PASSWORD
elif type(v) == str and urlpass_re.match(v):
safe_env[k] = urlpass_re.sub(HIDDEN_PASSWORD, v)
return safe_env
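# Illustrative example (not part of the original module): given the regexes above, a call such as
#     build_safe_env({'MY_API_TOKEN': 'abc123', 'ANSIBLE_HOST_KEY_CHECKING': 'False'})
# would return {'MY_API_TOKEN': '**********', 'ANSIBLE_HOST_KEY_CHECKING': 'False'}, since keys
# matching API|TOKEN|KEY|SECRET|PASS are masked, while ANSIBLE_* keys (other than ANSIBLE_NET*)
# and AWS_ACCESS_KEY_ID are passed through unchanged.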
class V1Credential(object):
#
# API v1 backwards compat; as long as we continue to support the
# /api/v1/credentials/ endpoint, we'll keep these definitions around.
# The credential serializers are smart enough to detect the request
# version and use *these* fields for constructing the serializer if the URL
# starts with /api/v1/
#
PASSWORD_FIELDS = ('password', 'security_token', 'ssh_key_data',
'ssh_key_unlock', 'become_password',
'vault_password', 'secret', 'authorize_password')
KIND_CHOICES = [
('ssh', 'Machine'),
('net', 'Network'),
('scm', 'Source Control'),
('aws', 'Amazon Web Services'),
('vmware', 'VMware vCenter'),
('satellite6', 'Red Hat Satellite 6'),
('cloudforms', 'Red Hat CloudForms'),
('gce', 'Google Compute Engine'),
('azure_rm', 'Microsoft Azure Resource Manager'),
('openstack', 'OpenStack'),
('rhv', 'Red Hat Virtualization'),
('insights', 'Insights'),
('tower', 'Ansible Tower'),
]
FIELDS = {
'kind': models.CharField(
max_length=32,
choices=[
(kind[0], _(kind[1]))
for kind in KIND_CHOICES
],
default='ssh',
),
'cloud': models.BooleanField(
default=False,
editable=False,
),
'host': models.CharField(
blank=True,
default='',
max_length=1024,
verbose_name=_('Host'),
help_text=_('The hostname or IP address to use.'),
),
'username': models.CharField(
blank=True,
default='',
max_length=1024,
verbose_name=_('Username'),
help_text=_('Username for this credential.'),
),
'password': models.CharField(
blank=True,
default='',
max_length=1024,
verbose_name=_('Password'),
help_text=_('Password for this credential (or "ASK" to prompt the '
'user for machine credentials).'),
),
'security_token': models.CharField(
blank=True,
default='',
max_length=1024,
verbose_name=_('Security Token'),
help_text=_('Security Token for this credential'),
),
'project': models.CharField(
blank=True,
default='',
max_length=100,
verbose_name=_('Project'),
help_text=_('The identifier for the project.'),
),
'domain': models.CharField(
blank=True,
default='',
max_length=100,
verbose_name=_('Domain'),
help_text=_('The identifier for the domain.'),
),
'ssh_key_data': models.TextField(
blank=True,
default='',
verbose_name=_('SSH private key'),
help_text=_('RSA or DSA private key to be used instead of password.'),
),
'ssh_key_unlock': models.CharField(
max_length=1024,
blank=True,
default='',
verbose_name=_('SSH key unlock'),
help_text=_('Passphrase to unlock SSH private key if encrypted (or '
'"ASK" to prompt the user for machine credentials).'),
),
'become_method': models.CharField(
max_length=32,
blank=True,
default='',
choices=CHOICES_PRIVILEGE_ESCALATION_METHODS,
help_text=_('Privilege escalation method.')
),
'become_username': models.CharField(
max_length=1024,
blank=True,
default='',
help_text=_('Privilege escalation username.'),
),
'become_password': models.CharField(
max_length=1024,
blank=True,
default='',
help_text=_('Password for privilege escalation method.')
),
'vault_password': models.CharField(
max_length=1024,
blank=True,
default='',
help_text=_('Vault password (or "ASK" to prompt the user).'),
),
'authorize': models.BooleanField(
default=False,
help_text=_('Whether to use the authorize mechanism.'),
),
'authorize_password': models.CharField(
max_length=1024,
blank=True,
default='',
help_text=_('Password used by the authorize mechanism.'),
),
'client': models.CharField(
max_length=128,
blank=True,
default='',
help_text=_('Client Id or Application Id for the credential'),
),
'secret': models.CharField(
max_length=1024,
blank=True,
default='',
help_text=_('Secret Token for this credential'),
),
'subscription': models.CharField(
max_length=1024,
blank=True,
default='',
help_text=_('Subscription identifier for this credential'),
),
'tenant': models.CharField(
max_length=1024,
blank=True,
default='',
help_text=_('Tenant identifier for this credential'),
)
}
class Credential(PasswordFieldsModel, CommonModelNameNotUnique, ResourceMixin):
'''
A credential contains information about how to talk to a remote resource
Usually this is a SSH key location, and possibly an unlock password.
If used with sudo, a sudo password should be set if required.
'''
class Meta:
app_label = 'main'
ordering = ('name',)
unique_together = (('organization', 'name', 'credential_type'))
PASSWORD_FIELDS = ['inputs']
credential_type = models.ForeignKey(
'CredentialType',
related_name='credentials',
null=False,
help_text=_('Specify the type of credential you want to create. Refer '
'to the Ansible Tower documentation for details on each type.')
)
organization = models.ForeignKey(
'Organization',
null=True,
default=None,
blank=True,
on_delete=models.CASCADE,
related_name='credentials',
)
inputs = CredentialInputField(
blank=True,
default={},
help_text=_('Enter inputs using either JSON or YAML syntax. Use the '
'radio button to toggle between the two. Refer to the '
'Ansible Tower documentation for example syntax.')
)
admin_role = ImplicitRoleField(
parent_role=[
'singleton:' + ROLE_SINGLETON_SYSTEM_ADMINISTRATOR,
'organization.credential_admin_role',
],
)
use_role = ImplicitRoleField(
parent_role=[
'admin_role',
]
)
read_role = ImplicitRoleField(parent_role=[
'singleton:' + ROLE_SINGLETON_SYSTEM_AUDITOR,
'organization.auditor_role',
'use_role',
'admin_role',
])
def __getattr__(self, item):
if item != 'inputs':
if item in V1Credential.FIELDS:
return self.inputs.get(item, V1Credential.FIELDS[item].default)
elif item in self.inputs:
return self.inputs[item]
raise AttributeError(item)
def __setattr__(self, item, value):
if item in V1Credential.FIELDS and item in self.credential_type.defined_fields:
if value:
self.inputs[item] = value
elif item in self.inputs:
del self.inputs[item]
return
super(Credential, self).__setattr__(item, value)
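    # Illustrative note (not part of the original module): because of __getattr__/__setattr__ above,
    # legacy API-v1 style attribute access is transparently mapped onto the `inputs` dict. For a
    # hypothetical machine credential `cred`, `cred.username = 'admin'` stores
    # inputs['username'] = 'admin' (when 'username' is a defined field of its credential type), and
    # reading `cred.password` falls back to the V1Credential field default when no value is stored.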
@property
def kind(self):
# TODO 3.3: remove the need for this helper property by removing its
# usage throughout the codebase
type_ = self.credential_type
if type_.kind != 'cloud':
return type_.kind
for field in V1Credential.KIND_CHOICES:
kind, name = field
if name == type_.name:
return kind
@property
def cloud(self):
return self.credential_type.kind == 'cloud'
def get_absolute_url(self, request=None):
return reverse('api:credential_detail', kwargs={'pk': self.pk}, request=request)
#
# TODO: the SSH-related properties below are largely used for validation
# and for determining passwords necessary for job/ad-hoc launch
#
# These are SSH-specific; should we move them elsewhere?
#
@property
def needs_ssh_password(self):
return self.credential_type.kind == 'ssh' and self.password == 'ASK'
@property
def has_encrypted_ssh_key_data(self):
if self.pk:
ssh_key_data = decrypt_field(self, 'ssh_key_data')
else:
ssh_key_data = self.ssh_key_data
try:
pem_objects = validate_ssh_private_key(ssh_key_data)
for pem_object in pem_objects:
if pem_object.get('key_enc', False):
return True
except ValidationError:
pass
return False
@property
def needs_ssh_key_unlock(self):
if self.credential_type.kind == 'ssh' and self.ssh_key_unlock in ('ASK', ''):
return self.has_encrypted_ssh_key_data
return False
@property
def needs_become_password(self):
return self.credential_type.kind == 'ssh' and self.become_password == 'ASK'
@property
def needs_vault_password(self):
return self.credential_type.kind == 'vault' and self.vault_password == 'ASK'
@property
def passwords_needed(self):
needed = []
for field in ('ssh_password', 'become_password', 'ssh_key_unlock'):
if getattr(self, 'needs_%s' % field):
needed.append(field)
if self.needs_vault_password:
if self.inputs.get('vault_id'):
needed.append('vault_password.{}'.format(self.inputs.get('vault_id')))
else:
needed.append('vault_password')
return needed
def _password_field_allows_ask(self, field):
return field in self.credential_type.askable_fields
def save(self, *args, **kwargs):
self.PASSWORD_FIELDS = self.credential_type.secret_fields
if self.pk:
cred_before = Credential.objects.get(pk=self.pk)
inputs_before = cred_before.inputs
# Look up the currently persisted value so that we can replace
# $encrypted$ with the actual DB-backed value
for field in self.PASSWORD_FIELDS:
if self.inputs.get(field) == '$encrypted$':
self.inputs[field] = inputs_before[field]
super(Credential, self).save(*args, **kwargs)
def encrypt_field(self, field, ask):
if not hasattr(self, field):
return None
encrypted = encrypt_field(self, field, ask=ask)
if encrypted:
self.inputs[field] = encrypted
elif field in self.inputs:
del self.inputs[field]
def mark_field_for_save(self, update_fields, field):
if field in self.credential_type.secret_fields:
# If we've encrypted a v1 field, we actually want to persist
# self.inputs
field = 'inputs'
super(Credential, self).mark_field_for_save(update_fields, field)
def display_inputs(self):
field_val = self.inputs.copy()
for k, v in field_val.items():
if force_text(v).startswith('$encrypted$'):
field_val[k] = '$encrypted$'
return field_val
def unique_hash(self, display=False):
'''
Credential exclusivity is not defined solely by the related
credential type (due to vault), so this produces a hash
that can be used to evaluate exclusivity
'''
if display:
type_alias = self.credential_type.name
else:
type_alias = self.credential_type_id
if self.kind == 'vault' and self.inputs.get('vault_id', None):
if display:
fmt_str = six.text_type('{} (id={})')
else:
fmt_str = six.text_type('{}_{}')
return fmt_str.format(type_alias, self.inputs.get('vault_id'))
return six.text_type(type_alias)
@staticmethod
def unique_dict(cred_qs):
ret = {}
for cred in cred_qs:
ret[cred.unique_hash()] = cred
return ret
class CredentialType(CommonModelNameNotUnique):
'''
A reusable schema for a credential.
Used to define a named credential type with fields (e.g., an API key) and
output injectors (i.e., an environment variable that uses the API key).
'''
defaults = OrderedDict()
class Meta:
app_label = 'main'
ordering = ('kind', 'name')
unique_together = (('name', 'kind'),)
KIND_CHOICES = (
('ssh', _('Machine')),
('vault', _('Vault')),
('net', _('Network')),
('scm', _('Source Control')),
('cloud', _('Cloud')),
('insights', _('Insights')),
)
kind = models.CharField(
max_length=32,
choices=KIND_CHOICES
)
managed_by_tower = models.BooleanField(
default=False,
editable=False
)
inputs = CredentialTypeInputField(
blank=True,
default={},
help_text=_('Enter inputs using either JSON or YAML syntax. Use the '
'radio button to toggle between the two. Refer to the '
'Ansible Tower documentation for example syntax.')
)
injectors = CredentialTypeInjectorField(
blank=True,
default={},
help_text=_('Enter injectors using either JSON or YAML syntax. Use the '
'radio button to toggle between the two. Refer to the '
'Ansible Tower documentation for example syntax.')
)
def get_absolute_url(self, request=None):
# Page does not exist in API v1
if request.version == 'v1':
return reverse('api:credential_type_detail', kwargs={'pk': self.pk})
return reverse('api:credential_type_detail', kwargs={'pk': self.pk}, request=request)
@property
def unique_by_kind(self):
return self.kind != 'cloud'
@property
def defined_fields(self):
return [field.get('id') for field in self.inputs.get('fields', [])]
@property
def secret_fields(self):
return [
field['id'] for field in self.inputs.get('fields', [])
if field.get('secret', False) is True
]
@property
def askable_fields(self):
return [
field['id'] for field in self.inputs.get('fields', [])
if field.get('ask_at_runtime', False) is True
]
def default_for_field(self, field_id):
for field in self.inputs.get('fields', []):
if field['id'] == field_id:
if 'choices' in field:
return field['choices'][0]
return {'string': '', 'boolean': False, 'become_method': ''}[field['type']]
@classmethod
def default(cls, f):
func = functools.partial(f, cls)
cls.defaults[f.__name__] = func
return func
@classmethod
def setup_tower_managed_defaults(cls, persisted=True):
for default in cls.defaults.values():
default_ = default()
if persisted:
if CredentialType.objects.filter(name=default_.name, kind=default_.kind).count():
continue
logger.debug(_(
"adding %s credential type" % default_.name
))
default_.save()
@classmethod
def from_v1_kind(cls, kind, data={}):
match = None
kind = kind or 'ssh'
kind_choices = dict(V1Credential.KIND_CHOICES)
requirements = {}
if kind == 'ssh':
if data.get('vault_password'):
requirements['kind'] = 'vault'
else:
requirements['kind'] = 'ssh'
elif kind in ('net', 'scm', 'insights'):
requirements['kind'] = kind
elif kind in kind_choices:
requirements.update(dict(
kind='cloud',
name=kind_choices[kind]
))
if requirements:
requirements['managed_by_tower'] = True
match = cls.objects.filter(**requirements)[:1].get()
return match
def inject_credential(self, credential, env, safe_env, args, safe_args, private_data_dir):
"""
Inject credential data into the environment variables and arguments
passed to `ansible-playbook`
:param credential: a :class:`awx.main.models.Credential` instance
:param env: a dictionary of environment variables used in
the `ansible-playbook` call. This method adds
additional environment variables based on
custom `env` injectors defined on this
CredentialType.
:param safe_env: a dictionary of environment variables stored
in the database for the job run
(`UnifiedJob.job_env`); secret values should
be stripped
:param args: a list of arguments passed to
`ansible-playbook` in the style of
`subprocess.call(args)`. This method appends
additional arguments based on custom
`extra_vars` injectors defined on this
CredentialType.
:param safe_args: a list of arguments stored in the database for
the job run (`UnifiedJob.job_args`); secret
values should be stripped
:param private_data_dir: a temporary directory to store files generated
by `file` injectors (like config files or key
files)
"""
if not self.injectors:
if self.managed_by_tower and credential.kind in dir(builtin_injectors):
injected_env = {}
getattr(builtin_injectors, credential.kind)(credential, injected_env, private_data_dir)
env.update(injected_env)
safe_env.update(build_safe_env(injected_env))
return
class TowerNamespace:
pass
tower_namespace = TowerNamespace()
# maintain a normal namespace for building the ansible-playbook arguments (env and args)
namespace = {'tower': tower_namespace}
# maintain a sanitized namespace for building the DB-stored arguments (safe_env and safe_args)
safe_namespace = {'tower': tower_namespace}
# build a normal namespace with secret values decrypted (for
# ansible-playbook) and a safe namespace with secret values hidden (for
# DB storage)
for field_name, value in credential.inputs.items():
if type(value) is bool:
# boolean values can't be secret/encrypted
safe_namespace[field_name] = namespace[field_name] = value
continue
if field_name in self.secret_fields:
value = decrypt_field(credential, field_name)
safe_namespace[field_name] = '**********'
elif len(value):
safe_namespace[field_name] = value
if len(value):
namespace[field_name] = value
# default missing boolean fields to False
for field in self.inputs.get('fields', []):
if field['type'] == 'boolean' and field['id'] not in credential.inputs.keys():
namespace[field['id']] = safe_namespace[field['id']] = False
file_tmpls = self.injectors.get('file', {})
# If any file templates are provided, render the files and update the
# special `tower` template namespace so the filename can be
# referenced in other injectors
for file_label, file_tmpl in file_tmpls.items():
data = Template(file_tmpl).render(**namespace)
_, path = tempfile.mkstemp(dir=private_data_dir)
with open(path, 'w') as f:
f.write(data.encode('utf-8'))
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
# determine if filename indicates single file or many
if file_label.find('.') == -1:
tower_namespace.filename = path
else:
if not hasattr(tower_namespace, 'filename'):
tower_namespace.filename = TowerNamespace()
file_label = file_label.split('.')[1]
setattr(tower_namespace.filename, file_label, path)
injector_field = self._meta.get_field('injectors')
for env_var, tmpl in self.injectors.get('env', {}).items():
try:
injector_field.validate_env_var_allowed(env_var)
except ValidationError as e:
logger.error(six.text_type(
'Ignoring prohibited env var {}, reason: {}'
).format(env_var, e))
continue
env[env_var] = Template(tmpl).render(**namespace)
safe_env[env_var] = Template(tmpl).render(**safe_namespace)
if 'INVENTORY_UPDATE_ID' not in env:
# awx-manage inventory_update does not support extra_vars via -e
extra_vars = {}
for var_name, tmpl in self.injectors.get('extra_vars', {}).items():
extra_vars[var_name] = Template(tmpl).render(**namespace)
def build_extra_vars_file(vars, private_dir):
handle, path = tempfile.mkstemp(dir = private_dir)
f = os.fdopen(handle, 'w')
f.write(safe_dump(vars))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
path = build_extra_vars_file(extra_vars, private_data_dir)
if extra_vars:
args.extend(['-e', '@%s' % path])
safe_args.extend(['-e', '@%s' % path])
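# Illustrative sketch (not part of the original module): for a hypothetical custom CredentialType with
#     inputs    = {'fields': [{'id': 'api_token', 'type': 'string', 'secret': True}]}
#     injectors = {'env': {'MY_SERVICE_TOKEN': '{{ api_token }}'}}
# inject_credential() above renders the Jinja2 template against the decrypted inputs, so
# env['MY_SERVICE_TOKEN'] receives the real token while safe_env['MY_SERVICE_TOKEN'] receives the
# masked value, because 'api_token' is a secret field and only '**********' is placed in safe_namespace.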
@CredentialType.default
def ssh(cls):
return cls(
kind='ssh',
name=ugettext_noop('Machine'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True,
'ask_at_runtime': True
}, {
'id': 'ssh_key_data',
'label': ugettext_noop('SSH Private Key'),
'type': 'string',
'format': 'ssh_private_key',
'secret': True,
'multiline': True
}, {
'id': 'ssh_key_unlock',
'label': ugettext_noop('Private Key Passphrase'),
'type': 'string',
'secret': True,
'ask_at_runtime': True
}, {
'id': 'become_method',
'label': ugettext_noop('Privilege Escalation Method'),
'type': 'become_method',
'help_text': ugettext_noop('Specify a method for "become" operations. This is '
'equivalent to specifying the --become-method '
'Ansible parameter.')
}, {
'id': 'become_username',
'label': ugettext_noop('Privilege Escalation Username'),
'type': 'string',
}, {
'id': 'become_password',
'label': ugettext_noop('Privilege Escalation Password'),
'type': 'string',
'secret': True,
'ask_at_runtime': True
}],
'dependencies': {
'ssh_key_unlock': ['ssh_key_data'],
}
}
)
@CredentialType.default
def scm(cls):
return cls(
kind='scm',
name=ugettext_noop('Source Control'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True
}, {
'id': 'ssh_key_data',
'label': ugettext_noop('SCM Private Key'),
'type': 'string',
'format': 'ssh_private_key',
'secret': True,
'multiline': True
}, {
'id': 'ssh_key_unlock',
'label': ugettext_noop('Private Key Passphrase'),
'type': 'string',
'secret': True
}],
'dependencies': {
'ssh_key_unlock': ['ssh_key_data'],
}
}
)
@CredentialType.default
def vault(cls):
return cls(
kind='vault',
name=ugettext_noop('Vault'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'vault_password',
'label': ugettext_noop('Vault Password'),
'type': 'string',
'secret': True,
'ask_at_runtime': True
}, {
'id': 'vault_id',
'label': ugettext_noop('Vault Identifier'),
'type': 'string',
'format': 'vault_id',
'help_text': ugettext_noop('Specify an (optional) Vault ID. This is '
'equivalent to specifying the --vault-id '
'Ansible parameter for providing multiple Vault '
'passwords. Note: this feature only works in '
'Ansible 2.4+.')
}],
'required': ['vault_password'],
}
)
@CredentialType.default
def net(cls):
return cls(
kind='net',
name=ugettext_noop('Network'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True,
}, {
'id': 'ssh_key_data',
'label': ugettext_noop('SSH Private Key'),
'type': 'string',
'format': 'ssh_private_key',
'secret': True,
'multiline': True
}, {
'id': 'ssh_key_unlock',
'label': ugettext_noop('Private Key Passphrase'),
'type': 'string',
'secret': True,
}, {
'id': 'authorize',
'label': ugettext_noop('Authorize'),
'type': 'boolean',
}, {
'id': 'authorize_password',
'label': ugettext_noop('Authorize Password'),
'type': 'string',
'secret': True,
}],
'dependencies': {
'ssh_key_unlock': ['ssh_key_data'],
'authorize_password': ['authorize'],
},
'required': ['username'],
}
)
@CredentialType.default
def aws(cls):
return cls(
kind='cloud',
name=ugettext_noop('Amazon Web Services'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'username',
'label': ugettext_noop('Access Key'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Secret Key'),
'type': 'string',
'secret': True,
}, {
'id': 'security_token',
'label': ugettext_noop('STS Token'),
'type': 'string',
'secret': True,
'help_text': ugettext_noop('Security Token Service (STS) is a web service '
'that enables you to request temporary, '
'limited-privilege credentials for AWS Identity '
'and Access Management (IAM) users.'),
}],
'required': ['username', 'password']
}
)
@CredentialType.default
def openstack(cls):
return cls(
kind='cloud',
name=ugettext_noop('OpenStack'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password (API Key)'),
'type': 'string',
'secret': True,
}, {
'id': 'host',
'label': ugettext_noop('Host (Authentication URL)'),
'type': 'string',
'help_text': ugettext_noop('The host to authenticate with. For example, '
'https://openstack.business.com/v2.0/')
}, {
'id': 'project',
'label': ugettext_noop('Project (Tenant Name)'),
'type': 'string',
}, {
'id': 'domain',
'label': ugettext_noop('Domain Name'),
'type': 'string',
'help_text': ugettext_noop('OpenStack domains define administrative boundaries. '
'It is only needed for Keystone v3 authentication '
'URLs. Refer to Ansible Tower documentation for '
'common scenarios.')
}],
'required': ['username', 'password', 'host', 'project']
}
)
@CredentialType.default
def vmware(cls):
return cls(
kind='cloud',
name=ugettext_noop('VMware vCenter'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'host',
'label': ugettext_noop('VCenter Host'),
'type': 'string',
'help_text': ugettext_noop('Enter the hostname or IP address that corresponds '
'to your VMware vCenter.')
}, {
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True,
}],
'required': ['host', 'username', 'password']
}
)
@CredentialType.default
def satellite6(cls):
return cls(
kind='cloud',
name=ugettext_noop('Red Hat Satellite 6'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'host',
'label': ugettext_noop('Satellite 6 URL'),
'type': 'string',
'help_text': ugettext_noop('Enter the URL that corresponds to your Red Hat '
'Satellite 6 server. For example, https://satellite.example.org')
}, {
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True,
}],
'required': ['host', 'username', 'password'],
}
)
@CredentialType.default
def cloudforms(cls):
return cls(
kind='cloud',
name=ugettext_noop('Red Hat CloudForms'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'host',
'label': ugettext_noop('CloudForms URL'),
'type': 'string',
'help_text': ugettext_noop('Enter the URL for the virtual machine that '
'corresponds to your CloudForms instance. '
'For example, https://cloudforms.example.org')
}, {
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True,
}],
'required': ['host', 'username', 'password'],
}
)
@CredentialType.default
def gce(cls):
return cls(
kind='cloud',
name=ugettext_noop('Google Compute Engine'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'username',
'label': ugettext_noop('Service Account Email Address'),
'type': 'string',
'help_text': ugettext_noop('The email address assigned to the Google Compute '
'Engine service account.')
}, {
'id': 'project',
'label': 'Project',
'type': 'string',
'help_text': ugettext_noop('The Project ID is the GCE assigned identification. '
'It is often constructed as three words or two words '
'followed by a three-digit number. Examples: project-id-000 '
'and another-project-id')
}, {
'id': 'ssh_key_data',
'label': ugettext_noop('RSA Private Key'),
'type': 'string',
'format': 'ssh_private_key',
'secret': True,
'multiline': True,
'help_text': ugettext_noop('Paste the contents of the PEM file associated '
'with the service account email.')
}],
'required': ['username', 'ssh_key_data'],
}
)
@CredentialType.default
def azure_rm(cls):
return cls(
kind='cloud',
name=ugettext_noop('Microsoft Azure Resource Manager'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'subscription',
'label': ugettext_noop('Subscription ID'),
'type': 'string',
'help_text': ugettext_noop('Subscription ID is an Azure construct, which is '
'mapped to a username.')
}, {
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True,
}, {
'id': 'client',
'label': ugettext_noop('Client ID'),
'type': 'string'
}, {
'id': 'secret',
'label': ugettext_noop('Client Secret'),
'type': 'string',
'secret': True,
}, {
'id': 'tenant',
'label': ugettext_noop('Tenant ID'),
'type': 'string'
}, {
'id': 'cloud_environment',
'label': ugettext_noop('Azure Cloud Environment'),
'type': 'string',
'help_text': ugettext_noop('Environment variable AZURE_CLOUD_ENVIRONMENT when'
' using Azure GovCloud or Azure stack.')
}],
'required': ['subscription'],
}
)
@CredentialType.default
def insights(cls):
return cls(
kind='insights',
name=ugettext_noop('Insights'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True
}],
'required': ['username', 'password'],
},
injectors={
'extra_vars': {
"scm_username": "{{username}}",
"scm_password": "{{password}}",
},
},
)
@CredentialType.default
def rhv(cls):
return cls(
kind='cloud',
name=ugettext_noop('Red Hat Virtualization'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'host',
'label': ugettext_noop('Host (Authentication URL)'),
'type': 'string',
'help_text': ugettext_noop('The host to authenticate with.')
}, {
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True,
}, {
'id': 'ca_file',
'label': ugettext_noop('CA File'),
'type': 'string',
'help_text': ugettext_noop('Absolute file path to the CA file to use (optional)')
}],
'required': ['host', 'username', 'password'],
},
injectors={
# The duplication here is intentional; the ovirt4 inventory plugin
# writes a .ini file for authentication, while the ansible modules for
# ovirt4 use a separate authentication process that support
# environment variables; by injecting both, we support both
'file': {
'template': '\n'.join([
'[ovirt]',
'ovirt_url={{host}}',
'ovirt_username={{username}}',
'ovirt_password={{password}}',
'{% if ca_file %}ovirt_ca_file={{ca_file}}{% endif %}'])
},
'env': {
'OVIRT_INI_PATH': '{{tower.filename}}',
'OVIRT_URL': '{{host}}',
'OVIRT_USERNAME': '{{username}}',
'OVIRT_PASSWORD': '{{password}}'
}
},
)
@CredentialType.default
def tower(cls):
return cls(
kind='cloud',
name=ugettext_noop('Ansible Tower'),
managed_by_tower=True,
inputs={
'fields': [{
'id': 'host',
'label': ugettext_noop('Ansible Tower Hostname'),
'type': 'string',
'help_text': ugettext_noop('The Ansible Tower base URL to authenticate with.')
}, {
'id': 'username',
'label': ugettext_noop('Username'),
'type': 'string'
}, {
'id': 'password',
'label': ugettext_noop('Password'),
'type': 'string',
'secret': True,
}, {
'id': 'verify_ssl',
'label': ugettext_noop('Verify SSL'),
'type': 'boolean',
'secret': False
}],
'required': ['host', 'username', 'password'],
},
injectors={
'env': {
'TOWER_HOST': '{{host}}',
'TOWER_USERNAME': '{{username}}',
'TOWER_PASSWORD': '{{password}}',
'TOWER_VERIFY_SSL': '{{verify_ssl}}'
}
},
)
|
ClearCorp-dev/odoo
|
refs/heads/8.0
|
addons/hr_recruitment/wizard/__init__.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_recruitment_create_partner_job
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hainm/elyxer
|
refs/heads/master
|
src/elyxer/maths/symbol.py
|
2
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# eLyXer -- convert LyX source files to HTML output.
#
# Copyright (C) 2009 Alex Fernández
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --end--
# Alex 20101218
# eLyXer big symbol generation.
from elyxer.util.trace import Trace
from elyxer.util.docparams import *
from elyxer.conf.config import *
from elyxer.maths.bits import *
class BigSymbol(object):
"A big symbol generator."
symbols = FormulaConfig.bigsymbols
def __init__(self, symbol):
"Create the big symbol."
self.symbol = symbol
def getpieces(self):
"Get an array with all pieces."
if not self.symbol in self.symbols:
return [self.symbol]
if self.smalllimit():
return [self.symbol]
return self.symbols[self.symbol]
def smalllimit(self):
"Decide if the limit should be a small, one-line symbol."
if not DocumentParameters.displaymode:
return True
if len(self.symbols[self.symbol]) == 1:
return True
return Options.simplemath
class BigBracket(BigSymbol):
"A big bracket generator."
def __init__(self, size, bracket, alignment='l'):
"Set the size and symbol for the bracket."
self.size = size
self.original = bracket
self.alignment = alignment
self.pieces = None
if bracket in FormulaConfig.bigbrackets:
self.pieces = FormulaConfig.bigbrackets[bracket]
def getpiece(self, index):
"Return the nth piece for the bracket."
function = getattr(self, 'getpiece' + unicode(len(self.pieces)))
return function(index)
def getpiece1(self, index):
"Return the only piece for a single-piece bracket."
return self.pieces[0]
def getpiece3(self, index):
"Get the nth piece for a 3-piece bracket: parenthesis or square bracket."
if index == 0:
return self.pieces[0]
if index == self.size - 1:
return self.pieces[-1]
return self.pieces[1]
def getpiece4(self, index):
"Get the nth piece for a 4-piece bracket: curly bracket."
if index == 0:
return self.pieces[0]
if index == self.size - 1:
return self.pieces[3]
if index == (self.size - 1)/2:
return self.pieces[2]
return self.pieces[1]
def getcell(self, index):
"Get the bracket piece as an array cell."
piece = self.getpiece(index)
span = 'span class="bracket align-' + self.alignment + '"'
return TaggedBit().constant(piece, span)
def getcontents(self):
"Get the bracket as an array or as a single bracket."
if self.size == 1 or not self.pieces:
return self.getsinglebracket()
rows = []
for index in range(self.size):
cell = self.getcell(index)
rows.append(TaggedBit().complete([cell], 'span class="arrayrow"'))
return [TaggedBit().complete(rows, 'span class="array"')]
def getsinglebracket(self):
"Return the bracket as a single sign."
if self.original == '.':
return [TaggedBit().constant('', 'span class="emptydot"')]
return [TaggedBit().constant(self.original, 'span class="symbol"')]
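# Illustrative note (not part of the original module): for a bracket whose configuration defines
# three pieces (top, extender, bottom), getpiece3() above yields, for size 4, the sequence
# pieces[0], pieces[1], pieces[1], pieces[-1]; a four-piece curly bracket of size 5 yields
# pieces[0], pieces[1], pieces[2] (the middle hook), pieces[1], pieces[3] via getpiece4().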
|
bowang/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/sparse_serialization_ops_test.py
|
99
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SerializeSparse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SerializeSparseTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def testSerializeDeserializeMany(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = sparse_ops.serialize_sparse(sp_input1)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testFeedSerializeDeserializeMany(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = sparse_ops.serialize_sparse(sp_input1)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized, {sp_input0: input0_val,
sp_input1: input1_val})
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testSerializeManyDeserializeManyRoundTrip(self):
with self.test_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
serialized = sparse_ops.serialize_many_sparse(sparse_tensor)
deserialized = sparse_ops.deserialize_many_sparse(
serialized, dtype=dtypes.string)
serialized_value, deserialized_value = sess.run(
[serialized, deserialized],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(serialized_value.shape, (4, 3))
self.assertAllEqual(deserialized_value.indices, indices_value)
self.assertAllEqual(deserialized_value.values, values_value)
self.assertAllEqual(deserialized_value.dense_shape, shape_value)
def testDeserializeFailsWrongType(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = sparse_ops.serialize_sparse(sp_input1)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int64)
with self.assertRaisesOpError(
r"Requested SparseTensor of type int64 but "
r"SparseTensor\[0\].values.dtype\(\) == int32"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
def testDeserializeFailsInconsistentRank(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = sparse_ops.serialize_sparse(sp_input1)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
def testDeserializeFailsInvalidProto(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = ["a", "b", "c"]
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(
r"Could not parse serialized_sparse\[1, 0\]"):
sess.run(sp_deserialized, {sp_input0: input0_val})
if __name__ == "__main__":
test.main()
|
CSCI-462-01-2017/bedrock
|
refs/heads/master
|
bedrock/thunderbird/tests/__init__.py
|
12133432
| |
tgalal/yowsup
|
refs/heads/master
|
yowsup/config/transforms/__init__.py
|
12133432
| |
TridevGuha/django
|
refs/heads/master
|
django/contrib/sitemaps/management/commands/__init__.py
|
12133432
| |
gangadharkadam/tailorfrappe
|
refs/heads/master
|
frappe/tests/__init__.py
|
39
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
def insert_test_data(doctype, sort_fn=None):
import frappe.model
data = get_test_doclist(doctype)
if sort_fn:
data = sorted(data, key=sort_fn)
for doclist in data:
frappe.insert(doclist)
def get_test_doclist(doctype, name=None):
"""get test doclist, collection of doclists"""
import os, frappe
from frappe import conf
from frappe.modules.utils import peval_doclist
from frappe.modules import scrub
doctype = scrub(doctype)
doctype_path = os.path.join(os.path.dirname(os.path.abspath(conf.__file__)),
conf.test_data_path, doctype)
if name:
with open(os.path.join(doctype_path, scrub(name) + ".json"), 'r') as txtfile:
doclist = peval_doclist(txtfile.read())
return doclist
else:
all_doclists = []
for fname in filter(lambda n: n.endswith(".json"), os.listdir(doctype_path)):
with open(os.path.join(doctype_path, scrub(fname)), 'r') as txtfile:
all_doclists.append(peval_doclist(txtfile.read()))
return all_doclists
|
mflu/openvstorage_centos
|
refs/heads/master
|
ovs/extensions/hypervisor/hypervisors/kvm.py
|
2
|
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for the KVM hypervisor client
"""
from ovs.extensions.hypervisor.apis.kvm.sdk import Sdk
class KVM(object):
"""
Represents the hypervisor client for KVM
"""
def __init__(self, ip, username, password):
"""
Initializes the object with credentials and connection information
"""
_ = password
self.sdk = Sdk(ip, username)
def get_state(self, vmid):
"""
Dummy method
"""
return self.sdk.get_power_state(vmid)
def create_vm_from_template(self, name, source_vm, disks, storage_ip, mountpoint, wait=True):
"""
create vm from template
TODO:
storage_ip and mountpoint refer to target Storage Driver
but on kvm storagedriver.storage_ip is 127.0.0.1
"""
_ = storage_ip, wait # For compatibility purposes only
return self.sdk.create_vm_from_template(name, source_vm, disks, mountpoint)
def delete_vm(self, vmid, storagedriver_mountpoint=None, storagedriver_storage_ip=None, devicename=None, disks_info=None, wait=True):
"""
Deletes a given VM and its disks
"""
_ = wait # For compatibility purposes only
_ = storagedriver_mountpoint # No vpool mountpoint on kvm, use different logic
_ = storagedriver_storage_ip # 127.0.0.1 always
if disks_info is None:
disks_info = []
return self.sdk.delete_vm(vmid, devicename, disks_info)
def get_vm_agnostic_object(self, vmid):
"""
Loads a VM and returns a hypervisor agnostic representation
"""
return self.sdk.make_agnostic_config(self.sdk.get_vm_object(vmid))
def get_vms_by_nfs_mountinfo(self, ip, mountpoint):
"""
Gets a list of agnostic vm objects for a given ip and mountpoint
"""
_ = ip
vms = []
for vm in self.sdk.get_vms():
config = self.sdk.make_agnostic_config(vm)
if mountpoint in config['datastores']:
vms.append(config)
return vms
def test_connection(self):
"""
Tests the connection
"""
return self.sdk.test_connection()
def is_datastore_available(self, ip, mountpoint):
"""
Check whether a given datastore is in use on the hypervisor
"""
_ = ip
return self.sdk.ssh_run("[ -d {0} ] && echo 'yes' || echo 'no'".format(mountpoint)) == 'yes'
def clone_vm(self, vmid, name, disks, wait=False):
"""
create a clone at vmachine level
#disks are cloned by VDiskController
"""
_ = wait # For compatibility purposes only
return self.sdk.clone_vm(vmid, name, disks)
def set_as_template(self, vmid, disks, wait=False):
"""
Dummy method
TODO: Not yet implemented, setting an existing kvm guest as template
"""
_ = vmid, disks, wait # For compatibility purposes only
raise NotImplementedError()
def get_vm_object(self, vmid):
"""
Dummy method
"""
return self.sdk.get_vm_object(vmid)
def get_vm_object_by_devicename(self, devicename, ip, mountpoint):
"""
devicename = vmachines/template/template.xml # relative to mountpoint
"""
_ = ip, mountpoint
return self.sdk.make_agnostic_config(self.sdk.get_vm_object_by_filename(devicename))
def mount_nfs_datastore(self, name, remote_host, remote_path):
"""
Dummy method
"""
raise NotImplementedError()
def clean_backing_disk_filename(self, path):
"""
Cleans a backing disk filename to the corresponding disk filename
"""
_ = self
return path.strip('/')
def get_backing_disk_path(self, machinename, devicename):
"""
Builds the path for the file backing a given device/disk
"""
return self.get_disk_path(machinename.replace(' ', '_'), devicename)
def get_disk_path(self, machinename, devicename):
"""
Builds the path for the file backing a given device/disk
"""
_ = self
if machinename:
return '/{}_{}.raw'.format(machinename.replace(' ', '_'), devicename)
return '/{}.raw'.format(devicename)
def clean_vmachine_filename(self, path):
"""
Cleans a VM filename
"""
_ = self
return path.strip('/')
def get_vmachine_path(self, machinename, storagerouter_machineid):
"""
Builds the path for the file representing a given vmachine
"""
_ = self
machinename = machinename.replace(' ', '_')
return '/{}/{}.xml'.format(storagerouter_machineid, machinename)
def get_rename_scenario(self, old_name, new_name):
"""
Gets the rename scenario based on the old and new name
"""
_ = self
if old_name.endswith('.xml') and new_name.endswith('.xml'):
return 'RENAME'
return 'UNSUPPORTED'
def should_process(self, devicename, machine_ids=None):
"""
Checks whether a given device should be processed
"""
_ = self
valid = devicename.strip('/') not in ['vmcasts/rss.xml']
if not valid:
return False
if machine_ids is not None:
return any(machine_id for machine_id in machine_ids if devicename.strip('/').startswith(machine_id))
return True
def file_exists(self, vpool, devicename):
"""
Check if devicename exists
"""
_ = vpool
matches = self.sdk.find_devicename(devicename)
return matches is not None
|
Gerapy/Gerapy
|
refs/heads/master
|
gerapy/server/core/utils.py
|
1
|
import fnmatch
import re
from copy import deepcopy
from furl import furl
from subprocess import Popen, PIPE, STDOUT
from os.path import abspath
from shutil import ignore_patterns, copy2, copystat
from jinja2 import Template
from scrapyd_api import ScrapydAPI
from bs4 import BeautifulSoup
import traceback
import json, os, string
from shutil import move, copy, rmtree
from os.path import join, exists, dirname
from django.utils import timezone
from gerapy import get_logger
from gerapy.settings import PROJECTS_FOLDER
logger = get_logger(__name__)
IGNORES = ['.git/', '*.pyc', '.DS_Store', '.idea/', '*.egg', '*.egg-info/', '*.egg-info', 'build/']
TEMPLATES_DIR = join(dirname(dirname(dirname(abspath(__file__)))), 'templates')
TEMPLATES_TO_RENDER = (
('scrapy.cfg',),
('${project_name}', 'settings.py.tmpl'),
('${project_name}', 'items.py.tmpl'),
('${project_name}', 'pipelines.py.tmpl'),
('${project_name}', 'middlewares.py.tmpl'),
)
NO_REFERRER = '<meta name="referrer" content="never">'
BASE = '<base href="{href}">'
def get_scrapyd(client):
if not client.auth:
return ScrapydAPI(scrapyd_url(client.ip, client.port))
return ScrapydAPI(scrapyd_url(client.ip, client.port), auth=(client.username, client.password))
def scrapyd_url(ip, port):
"""
get scrapyd url
:param ip: host
:param port: port
:return: string
"""
url = 'http://{ip}:{port}'.format(ip=ip, port=port)
return url
def log_url(ip, port, project, spider, job):
"""
get log url
:param ip: host
:param port: port
:param project: project
:param spider: spider
:param job: job
:return: string
"""
url = 'http://{ip}:{port}/logs/{project}/{spider}/{job}.log'.format(ip=ip, port=port, project=project,
spider=spider, job=job)
return url
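# Illustrative values only, derived directly from the format string above:
#     log_url('127.0.0.1', 6800, 'quotes', 'toscrape', 'job1')
# returns 'http://127.0.0.1:6800/logs/quotes/toscrape/job1.log'.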
def ignored(ignores, path, file):
"""
judge if the file is ignored
:param ignores: ignored list
:param path: file path
:param file: file name
:return: bool
"""
file_name = join(path, file)
for ignore in ignores:
if '/' in ignore and ignore.rstrip('/') in file_name:
return True
if fnmatch.fnmatch(file_name, ignore):
return True
if file == ignore:
return True
return False
def is_valid_name(project_name):
"""
judge name is valid
:param project_name:
:return:
"""
if not re.search(r'^[_a-zA-Z]\w*$', project_name):
logger.error('project name %s must begin with a letter and contain only letters, numbers and underscores',
project_name)
return False
return True
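# Illustrative examples based on the regex above: is_valid_name('quotes_spider') -> True, while
# is_valid_name('1quotes') or is_valid_name('my-project') -> False (and an error is logged),
# since names must start with a letter or underscore and contain only word characters.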
def copy_tree(src, dst):
"""
copy tree
:param src:
:param dst:
:return:
"""
ignore = ignore_patterns(*IGNORES)
names = os.listdir(src)
ignored_names = ignore(src, names)
if not os.path.exists(dst):
os.makedirs(dst)
for name in names:
if name in ignored_names:
continue
src_name = os.path.join(src, name)
dst_name = os.path.join(dst, name)
if os.path.isdir(src_name):
copy_tree(src_name, dst_name)
else:
copy2(src_name, dst_name)
copystat(src, dst)
def get_tree(path, ignores=IGNORES):
"""
get tree structure
:param path: Folder path
:param ignores: Ignore files
:return: Json
"""
result = []
for file in os.listdir(path):
if os.path.isdir(join(path, file)):
if not ignored(ignores, path, file):
children = get_tree(join(path, file), ignores)
if children:
result.append({
'label': file,
'children': children,
'path': path
})
else:
if not ignored(ignores, path, file):
result.append({'label': file, 'path': path})
return result
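# Illustrative sketch (not part of the original module): for a hypothetical folder
# 'demo/' containing 'spiders/parse.py' and 'settings.py', get_tree('demo') would
# return a nested structure along these lines (order follows os.listdir):
# [
#     {'label': 'spiders', 'path': 'demo',
#      'children': [{'label': 'parse.py', 'path': 'demo/spiders'}]},
#     {'label': 'settings.py', 'path': 'demo'},
# ]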
def render_template(tpl_file, dst_file, *args, **kwargs):
"""
render template
:param tpl_file: Template file name
:param dst_file: Destination file name
:param args: args
:param kwargs: kwargs
:return: None
"""
vars = dict(*args, **kwargs)
template = Template(open(tpl_file, encoding='utf-8').read())
os.remove(tpl_file)
result = template.render(vars)
open(dst_file, 'w', encoding='utf-8').write(result)
def get_traceback():
"""
get last line of error
:return: String
"""
info = traceback.format_exc(limit=1)
if info:
info = info.splitlines()
info = list(filter(lambda x: x, info))
if len(info):
return info[-1]
return None
return info
def process_request(request):
"""
process request
:param request:
:return:
"""
result = {
'url': request.url,
'method': request.method,
'meta': request.meta,
'callback': request.callback,
'cookies': request.cookies,
'headers': request.headers,
'priority': request.priority,
'dont_filter': request.dont_filter,
}
# set body
if request.method.lower() != 'get':
result['body'] = request.body
if isinstance(result['body'], bytes):
result['body'] = result['body'].decode('utf-8')
result['body'] = str2body(result['body'])
return result
def process_response(response):
"""
process response to dict
:param response:
:return:
"""
return {
'html': process_html(response.text, furl(response.url).origin),
'url': response.url,
'status': response.status
}
def process_item(item):
return dict(item)
def process_html(html, base_url):
"""
process html, add some tricks such as no referrer
    :param html: source html
    :param base_url: base url used in the inserted <base> tag
    :return: processed html
"""
dom = BeautifulSoup(html, 'lxml')
dom.find('head').insert(0, BeautifulSoup(NO_REFERRER, 'lxml'))
dom.find('head').insert(0, BeautifulSoup(BASE.format(href=base_url), 'lxml'))
html = str(dom)
# html = unescape(html)
return html
def get_output_error(project_name, spider_name):
"""
get scrapy runtime error
:param project_name: project name
:param spider_name: spider name
    :return: combined stdout and stderr output
"""
work_cwd = os.getcwd()
project_path = join(PROJECTS_FOLDER, project_name)
try:
os.chdir(project_path)
cmd = ' '.join(['scrapy', 'crawl', spider_name])
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output = p.stdout.read()
if isinstance(output, bytes):
output = output.decode('utf-8')
return output
finally:
os.chdir(work_cwd)
def get_items_configuration(configuration):
"""
get items configuration including allowed_spiders and tables or collections
:param configuration: configuration data
:return: items
"""
configuration = deepcopy(configuration)
items = configuration.get('items')
spiders = configuration.get('spiders')
for spider in spiders:
# MongoDB
mongodb_collection_map = spider.get('storage').get('mongodb').get('collections')
for mongodb_collection_map_item in mongodb_collection_map:
collection = mongodb_collection_map_item.get('collection')
item_name = mongodb_collection_map_item.get('item')
for item in items:
if item.get('name') == item_name:
allowed_spiders = item.get('mongodb_spiders', set())
allowed_spiders.add(spider.get('name'))
mongodb_collections = item.get('mongodb_collections', set())
mongodb_collections.add(collection)
item['mongodb_spiders'], item['mongodb_collections'] = allowed_spiders, mongodb_collections
# MySQL
mysql_table_map = spider.get('storage').get('mysql').get('tables')
for mysql_table_map_item in mysql_table_map:
collection = mysql_table_map_item.get('table')
item_name = mysql_table_map_item.get('item')
for item in items:
if item.get('name') == item_name:
allowed_spiders = item.get('mysql_spiders', set())
allowed_spiders.add(spider.get('name'))
mysql_tables = item.get('mysql_tables', set())
mysql_tables.add(collection)
item['mysql_spiders'], item['mysql_tables'] = allowed_spiders, mysql_tables
# transfer attr
attrs = ['mongodb_spiders', 'mongodb_collections', 'mysql_spiders', 'mysql_tables']
for item in items:
for attr in attrs:
if item.get(attr):
item[attr] = list(item[attr])
return items
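# Minimal sketch of the transformation above (hypothetical data, not taken from the project):
# given a configuration such as
#     {'items': [{'name': 'MovieItem'}],
#      'spiders': [{'name': 'movie',
#                   'storage': {'mongodb': {'collections': [{'collection': 'movies', 'item': 'MovieItem'}]},
#                               'mysql': {'tables': [{'table': 'movies', 'item': 'MovieItem'}]}}}]}
# the returned items carry the aggregated routing info, e.g.
#     [{'name': 'MovieItem',
#       'mongodb_spiders': ['movie'], 'mongodb_collections': ['movies'],
#       'mysql_spiders': ['movie'], 'mysql_tables': ['movies']}]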
def process_custom_settings(spider):
"""
process custom settings of some config items
:param spider:
:return:
"""
custom_settings = spider.get('custom_settings')
def add_dict_to_custom_settings(custom_settings, keys):
"""
if config doesn't exist, add default value
:param custom_settings:
:param keys:
:return:
"""
for key in keys:
for item in custom_settings:
if item['key'] == key:
break
else:
custom_settings.append({
'key': key,
'value': '{}'
})
return custom_settings
keys = ['DOWNLOADER_MIDDLEWARES', 'SPIDER_MIDDLEWARES', 'ITEM_PIPELINES']
custom_settings = add_dict_to_custom_settings(custom_settings, keys)
for item in custom_settings:
if item['key'] == 'DOWNLOADER_MIDDLEWARES':
item_data = json.loads(item['value'])
            if spider.get('cookies', {}).get('enable', {}):
                item_data['gerapy.downloadermiddlewares.cookies.CookiesMiddleware'] = 554
            if spider.get('proxy', {}).get('enable', {}):
                item_data['gerapy.downloadermiddlewares.proxy.ProxyMiddleware'] = 555
item_data['gerapy.downloadermiddlewares.pyppeteer.PyppeteerMiddleware'] = 601
item_data['scrapy_splash.SplashCookiesMiddleware'] = 723
item_data['scrapy_splash.SplashMiddleware'] = 725
item_data['scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware'] = 810
item['value'] = json.dumps(item_data)
if item['key'] == 'SPIDER_MIDDLEWARES':
item_data = json.loads(item['value'])
item_data['scrapy_splash.SplashDeduplicateArgsMiddleware'] = 100
item['value'] = json.dumps(item_data)
if item['key'] == 'ITEM_PIPELINES':
item_data = json.loads(item['value'])
            if spider.get('storage', {}).get('mysql', {}).get('enable', {}):
                item_data['gerapy.pipelines.MySQLPipeline'] = 300
            if spider.get('storage', {}).get('mongodb', {}).get('enable', {}):
                item_data['gerapy.pipelines.MongoDBPipeline'] = 301
item['value'] = json.dumps(item_data)
return spider
def generate_project(project_name):
"""
generate project code
:param project_name: project name
:return: project data
"""
# get configuration
from gerapy.server.core.models import Project
configuration = Project.objects.get(name=project_name).configuration
configuration = json.loads(configuration)
# remove original project dir
project_dir = join(PROJECTS_FOLDER, project_name)
if exists(project_dir):
rmtree(project_dir)
# generate project
copy_tree(join(TEMPLATES_DIR, 'project'), project_dir)
move(join(PROJECTS_FOLDER, project_name, 'module'), join(project_dir, project_name))
for paths in TEMPLATES_TO_RENDER:
path = join(*paths)
tplfile = join(project_dir,
string.Template(path).substitute(project_name=project_name))
items = get_items_configuration(configuration)
vars = {
'project_name': project_name,
'items': items,
}
        # strip the '.tmpl' suffix explicitly: str.rstrip() removes characters, not a suffix
        dst_file = tplfile[:-len('.tmpl')] if tplfile.endswith('.tmpl') else tplfile
        render_template(tplfile, dst_file, **vars)
# generate spider
spiders = configuration.get('spiders')
for spider in spiders:
spider = process_custom_settings(spider)
source_tpl_file = join(TEMPLATES_DIR, 'spiders', 'crawl.tmpl')
new_tpl_file = join(PROJECTS_FOLDER, project_name, project_name, 'spiders', 'crawl.tmpl')
spider_file = "%s.py" % join(PROJECTS_FOLDER, project_name, project_name, 'spiders', spider.get('name'))
copy(source_tpl_file, new_tpl_file)
render_template(new_tpl_file, spider_file, spider=spider, project_name=project_name)
# save generated_at attr
model = Project.objects.get(name=project_name)
model.generated_at = timezone.now()
# clear built_at attr
model.built_at = None
model.save()
def bytes2str(data):
"""
bytes2str
:param data: origin data
:return: str
"""
if isinstance(data, bytes):
data = data.decode('utf-8')
data = data.strip()
return data
def clients_of_task(task):
"""
get valid clients of task
:param task: task object
:return:
"""
from gerapy.server.core.models import Client
client_ids = json.loads(task.clients)
for client_id in client_ids:
client = Client.objects.get(id=client_id)
if client:
yield client
def get_job_id(client, task):
"""
construct job id
:param client: client object
:param task: task object
:return: job id
"""
return '%s-%s-%s' % (client.name, task.project, task.spider)
def load_dict(x, transformer=None):
"""
convert to dict
:param x:
:return:
"""
if x is None or isinstance(x, dict):
return x
try:
data = json.loads(x)
if not transformer:
transformer = lambda x: x
data = {k: transformer(v) for k, v in data.items()}
return data
except:
return {}
def str2list(x, transformer=None):
"""
convert to list
:param x:
:return:
"""
if x is None or isinstance(x, list):
return x
try:
data = json.loads(x)
if not transformer:
transformer = lambda x: x
data = list(map(lambda x: transformer(x), data))
return data
except:
return []
def str2bool(v):
"""
convert string to bool
:param v:
:return:
"""
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
return True
def str2json(v):
"""
convert str to json data
:param v:
:return:
"""
try:
return json.loads(v)
except:
return None
def str2dict(v):
"""
convert str to dict data
:param v:
:return:
"""
try:
return json.loads(v)
except:
return {}
def str2body(v):
"""
convert str to json data or keep original string
:param v:
:return:
"""
try:
return json.loads(v)
except:
return v
def str2str(v):
"""
convert str to str, process for 'None', 'null', '',
:param v:
:return:
"""
if v.lower() in ('none', 'null', 'undefined', 'nil', 'false'):
return None
return str(v)
|
kevclarx/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/dellos6.py
|
51
|
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine, ignore_line, DEFAULT_COMMENT_TOKENS
_DEVICE_CONFIGS = {}
WARNING_PROMPTS_RE = [
r"[\r\n]?\[confirm yes/no\]:\s?$",
r"[\r\n]?\[y/n\]:\s?$",
r"[\r\n]?\[yes/no\]:\s?$"
]
dellos6_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
}
def check_args(module, warnings):
provider = module.params['provider'] or {}
for key in dellos6_argument_spec:
if key != 'provider' and module.params[key]:
warnings.append('argument %s has been deprecated and will be '
'removed in a future version' % key)
if provider:
for param in ('auth_pass', 'password'):
if provider.get(param):
module.no_log_values.update(return_values(provider[param]))
def get_config(module, flags=[]):
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
cfg = to_text(out, errors='surrogate_or_strict').strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
responses.append(to_text(out, errors='surrogate_or_strict'))
return responses
def load_config(module, commands):
rc, out, err = exec_command(module, 'configure terminal')
if rc != 0:
module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict'))
for command in to_list(commands):
if command == 'end':
continue
cmd = {'command': command, 'prompt': WARNING_PROMPTS_RE, 'answer': 'yes'}
rc, out, err = exec_command(module, module.jsonify(cmd))
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc)
exec_command(module, 'end')
def get_sublevel_config(running_config, module):
contents = list()
current_config_contents = list()
sublevel_config = Dellos6NetworkConfig(indent=0)
obj = running_config.get_object(module.params['parents'])
if obj:
contents = obj._children
for c in contents:
if isinstance(c, ConfigLine):
current_config_contents.append(c.raw)
sublevel_config.add(current_config_contents, module.params['parents'])
return sublevel_config
def os6_parse(lines, indent=None, comment_tokens=None):
sublevel_cmds = [
re.compile(r'^vlan.*$'),
re.compile(r'^stack.*$'),
re.compile(r'^interface.*$'),
re.compile(r'datacenter-bridging.*$'),
re.compile(r'line (console|telnet|ssh).*$'),
re.compile(r'ip ssh !(server).*$'),
re.compile(r'ip (dhcp|vrf).*$'),
re.compile(r'(ip|mac|management|arp) access-list.*$'),
re.compile(r'ipv6 (dhcp|router).*$'),
re.compile(r'mail-server.*$'),
re.compile(r'vpc domain.*$'),
re.compile(r'router.*$'),
re.compile(r'route-map.*$'),
re.compile(r'policy-map.*$'),
re.compile(r'class-map match-all.*$'),
re.compile(r'captive-portal.*$'),
re.compile(r'admin-profile.*$'),
re.compile(r'link-dependency group.*$'),
re.compile(r'banner motd.*$'),
re.compile(r'openflow.*$'),
re.compile(r'support-assist.*$'),
re.compile(r'template.*$'),
re.compile(r'address-family.*$'),
re.compile(r'spanning-tree mst.*$'),
re.compile(r'logging.*$'),
re.compile(r'(radius-server|tacacs-server) host.*$')]
childline = re.compile(r'^exit$')
config = list()
parent = list()
children = []
parent_match = False
for line in str(lines).split('\n'):
text = str(re.sub(r'([{};])', '', line)).strip()
cfg = ConfigLine(text)
cfg.raw = line
if not text or ignore_line(text, comment_tokens):
parent = list()
children = []
continue
else:
parent_match = False
# handle sublevel parent
for pr in sublevel_cmds:
if pr.match(line):
if len(parent) != 0:
cfg._parents.extend(parent)
parent.append(cfg)
config.append(cfg)
if children:
children.insert(len(parent) - 1, [])
children[len(parent) - 2].append(cfg)
parent_match = True
continue
# handle exit
if childline.match(line):
if children:
parent[len(children) - 1]._children.extend(children[len(children) - 1])
if len(children) > 1:
parent[len(children) - 2]._children.extend(parent[len(children) - 1]._children)
cfg._parents.extend(parent)
children.pop()
parent.pop()
if not children:
children = list()
if parent:
cfg._parents.extend(parent)
parent = list()
config.append(cfg)
# handle sublevel children
elif parent_match is False and len(parent) > 0:
if not children:
cfglist = [cfg]
children.append(cfglist)
else:
children[len(parent) - 1].append(cfg)
cfg._parents.extend(parent)
config.append(cfg)
# handle global commands
elif not parent:
config.append(cfg)
return config
class Dellos6NetworkConfig(NetworkConfig):
def load(self, contents):
self._items = os6_parse(contents, self._indent, DEFAULT_COMMENT_TOKENS)
def _diff_line(self, other, path=None):
diff = list()
for item in self.items:
if str(item) == "exit":
for diff_item in diff:
if item._parents == diff_item._parents:
diff.append(item)
break
elif item not in other:
diff.append(item)
return diff
|
sheafferusa/erpnext
|
refs/heads/develop
|
erpnext/patches/v6_0/multi_currency.py
|
3
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
# Reload doctype
for dt in ("Account", "GL Entry", "Journal Entry",
"Journal Entry Account", "Sales Invoice", "Purchase Invoice", "Customer", "Supplier"):
frappe.reload_doctype(dt)
for company in frappe.get_all("Company", fields=["name", "default_currency", "default_receivable_account"]):
# update currency in account and gl entry as per company currency
frappe.db.sql("""update `tabAccount` set account_currency = %s
where ifnull(account_currency, '') = '' and company=%s""", (company.default_currency, company.name))
# update newly introduced field's value in sales / purchase invoice
frappe.db.sql("""
update
`tabSales Invoice`
set
base_paid_amount=paid_amount,
base_write_off_amount=write_off_amount,
party_account_currency=%s
where company=%s
""", (company.default_currency, company.name))
frappe.db.sql("""
update
`tabPurchase Invoice`
set
base_write_off_amount=write_off_amount,
party_account_currency=%s
where company=%s
""", (company.default_currency, company.name))
# update exchange rate, debit/credit in account currency in Journal Entry
frappe.db.sql("""
update `tabJournal Entry Account` jea
set exchange_rate=1,
debit_in_account_currency=debit,
credit_in_account_currency=credit,
account_type=(select account_type from `tabAccount` where name=jea.account)
""")
frappe.db.sql("""
update `tabJournal Entry Account` jea, `tabJournal Entry` je
set account_currency=%s
where jea.parent = je.name and je.company=%s
""", (company.default_currency, company.name))
# update debit/credit in account currency in GL Entry
frappe.db.sql("""
update
`tabGL Entry`
set
debit_in_account_currency=debit,
credit_in_account_currency=credit,
account_currency=%s
where
company=%s
""", (company.default_currency, company.name))
# Set party account if default currency of party other than company's default currency
for dt in ("Customer", "Supplier"):
parties = frappe.get_all(dt)
for p in parties:
# Get party GL Entries
party_gle = frappe.db.get_value("GL Entry", {"party_type": dt, "party": p.name,
"company": company.name}, ["account", "account_currency"], as_dict=True)
party = frappe.get_doc(dt, p.name)
# set party account currency
if party_gle or not party.party_account_currency:
party.party_account_currency = company.default_currency
# Add default receivable /payable account if not exists
# and currency is other than company currency
if party.party_account_currency and party.party_account_currency != company.default_currency:
party_account_exists = False
for d in party.get("accounts"):
if d.company == company.name:
party_account_exists = True
break
if not party_account_exists:
party_account = None
if party_gle:
party_account = party_gle.account
else:
default_receivable_account_currency = frappe.db.get_value("Account",
company.default_receivable_account, "account_currency")
if default_receivable_account_currency != company.default_currency:
party_account = company.default_receivable_account
if party_account:
party.append("accounts", {
"company": company.name,
"account": party_account
})
party.flags.ignore_mandatory = True
party.save()
|
xrmx/django
|
refs/heads/master
|
tests/model_package/tests.py
|
380
|
from __future__ import unicode_literals
from django.db import connection, models
from django.db.backends.utils import truncate_name
from django.test import TestCase
from .models.article import Article, Site
from .models.publication import Publication
class Advertisement(models.Model):
customer = models.CharField(max_length=100)
publications = models.ManyToManyField("model_package.Publication", blank=True)
class ModelPackageTests(TestCase):
def test_m2m_tables_in_subpackage_models(self):
"""
Regression for #12168: models split into subpackages still get M2M
tables.
"""
p = Publication.objects.create(title="FooBar")
site = Site.objects.create(name="example.com")
a = Article.objects.create(headline="a foo headline")
a.publications.add(p)
a.sites.add(site)
a = Article.objects.get(id=a.pk)
self.assertEqual(a.id, a.pk)
self.assertEqual(a.sites.count(), 1)
def test_models_in_the_test_package(self):
"""
Regression for #12245 - Models can exist in the test package, too.
"""
p = Publication.objects.create(title="FooBar")
ad = Advertisement.objects.create(customer="Lawrence Journal-World")
ad.publications.add(p)
ad = Advertisement.objects.get(id=ad.pk)
self.assertEqual(ad.publications.count(), 1)
def test_automatic_m2m_column_names(self):
"""
Regression for #12386 - field names on the autogenerated intermediate
class that are specified as dotted strings don't retain any path
component for the field or column name.
"""
self.assertEqual(
Article.publications.through._meta.fields[1].name, 'article'
)
self.assertEqual(
Article.publications.through._meta.fields[1].get_attname_column(),
('article_id', 'article_id')
)
self.assertEqual(
Article.publications.through._meta.fields[2].name, 'publication'
)
self.assertEqual(
Article.publications.through._meta.fields[2].get_attname_column(),
('publication_id', 'publication_id')
)
self.assertEqual(
Article._meta.get_field('publications').m2m_db_table(),
truncate_name('model_package_article_publications', connection.ops.max_name_length()),
)
self.assertEqual(
Article._meta.get_field('publications').m2m_column_name(), 'article_id'
)
self.assertEqual(
Article._meta.get_field('publications').m2m_reverse_name(),
'publication_id'
)
|
barnone/EigenD
|
refs/heads/2.0
|
tools/pip_cmd/lex.py
|
3
|
#-----------------------------------------------------------------------------
# ply: lex.py
#
# Author: David M. Beazley (beazley@cs.uchicago.edu)
# Department of Computer Science
# University of Chicago
# Chicago, IL 60637
#
# Copyright (C) 2001, David M. Beazley
#
# $Header: /cvs/projects/PLY/lex.py,v 1.1.1.1 2004/05/21 15:34:10 beazley Exp $
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file COPYING for a complete copy of the LGPL.
#
#
# This module automatically constructs a lexical analysis module from regular
# expression rules defined in a user-defined module. The idea is essentially the same
# as that used in John Aycock's Spark framework, but the implementation works
# at the module level rather than requiring the use of classes.
#
# This module tries to provide an interface that is closely modeled after
# the traditional lex interface in Unix. It also differs from Spark
# in that:
#
# - It provides more extensive error checking and reporting if
# the user supplies a set of regular expressions that can't
# be compiled or if there is any other kind of a problem in
# the specification.
#
# - The interface is geared towards LALR(1) and LR(1) parser
# generators. That is tokens are generated one at a time
# rather than being generated in advance all in one step.
#
# There are a few limitations of this module
#
# - The module interface makes it somewhat awkward to support more
# than one lexer at a time. Although somewhat inelegant from a
# design perspective, this is rarely a practical concern for
# most compiler projects.
#
# - The lexer requires that the entire input text be read into
# a string before scanning. I suppose that most machines have
# enough memory to make this a minor issue, but it makes
# the lexer somewhat difficult to use in interactive sessions
# or with streaming data.
#
#-----------------------------------------------------------------------------
r"""
lex.py
This module builds lex-like scanners based on regular expression rules.
To use the module, simply write a collection of regular expression rules
and actions like this:
# lexer.py
import lex
# Define a list of valid tokens
tokens = (
'IDENTIFIER', 'NUMBER', 'PLUS', 'MINUS'
)
# Define tokens as functions
def t_IDENTIFIER(t):
        r' [a-zA-Z_](\w|_)* '
return t
def t_NUMBER(t):
r' \d+ '
return t
# Some simple tokens with no actions
t_PLUS = r'\+'
t_MINUS = r'-'
# Initialize the lexer
lex.lex()
The tokens list is required and contains a complete list of all valid
token types that the lexer is allowed to produce. Token types are
restricted to be valid identifiers. This means that 'MINUS' is a valid
token type whereas '-' is not.
Rules are defined by writing a function with a name of the form
t_rulename. Each rule must accept a single argument which is
a token object generated by the lexer. This token has the following
attributes:
t.type = type string of the token. This is initially set to the
name of the rule without the leading t_
t.value = The value of the lexeme.
t.lineno = The value of the line number where the token was encountered
For example, the t_NUMBER() rule above might be called with the following:
t.type = 'NUMBER'
t.value = '42'
t.lineno = 3
Each rule returns the token object it would like to supply to the
parser. In most cases, the token t is returned with few, if any
modifications. To discard a token for things like whitespace or
comments, simply return nothing. For instance:
def t_whitespace(t):
r' \s+ '
pass
For faster lexing, you can also define this in terms of the ignore set like this:
t_ignore = ' \t'
The characters in this string are ignored by the lexer. Use of this feature can speed
up parsing significantly since scanning will immediately proceed to the next token.
lex requires that the token returned by each rule has an attribute
t.type. Other than this, rules are free to return any kind of token
object that they wish and may construct a new type of token object
from the attributes of t (provided the new object has the required
type attribute).
If illegal characters are encountered, the scanner executes the
function t_error(t) where t is a token representing the rest of the
string that hasn't been matched. If this function isn't defined, a
LexError exception is raised. The .text attribute of this exception
object contains the part of the string that wasn't matched.
The t.skip(n) method can be used to skip ahead n characters in the
input stream. This is usually only used in the error handling rule.
For instance, the following rule would print an error message and
continue:
def t_error(t):
print "Illegal character in input %s" % t.value[0]
t.skip(1)
Of course, a nice scanner might wish to skip more than one character
if the input looks very corrupted.
The lex module defines a t.lineno attribute on each token that can be used
to track the current line number in the input. The value of this
variable is not modified by lex so it is up to your lexer module
to correctly update its value depending on the lexical properties
of the input language. To do this, you might write rules such as
the following:
def t_newline(t):
r' \n+ '
t.lineno += t.value.count("\n")
To initialize your lexer so that it can be used, simply call the lex.lex()
function in your rule file. If there are any errors in your
specification, warning messages or an exception will be generated to
alert you to the problem.
(dave: this needs to be rewritten)
To use the newly constructed lexer from another module, simply do
this:
import lex
import lexer
plex.input("position = initial + rate*60")
while 1:
token = plex.token() # Get a token
if not token: break # No more tokens
... do whatever ...
Assuming that the module 'lexer' has initialized plex as shown
above, parsing modules can safely import 'plex' without having
to import the rule file or any additional information about the
scanner you have defined.
"""
# -----------------------------------------------------------------------------
__version__ = "1.4"
import re, types, sys, copy
# Exception thrown when invalid token encountered and no default
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class
class LexToken:
def __str__(self):
return "LexToken(%s,%r,%d)" % (self.type,self.value,self.lineno)
def __repr__(self):
return str(self)
def skip(self,n):
try:
self._skipn += n
except AttributeError:
self._skipn = n
# -----------------------------------------------------------------------------
# Lexer class
#
# input() - Store a new string in the lexer
# token() - Get the next token
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexindexfunc = [ ] # Reverse mapping of groups to functions and types
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = None # Ignored characters
self.lineno = 1 # Current line number
self.debug = 0 # Debugging mode
self.optimize = 0 # Optimized mode
self.token = self.errtoken
def __copy__(self):
c = Lexer()
c.lexre = self.lexre
c.lexdata = self.lexdata
c.lexpos = self.lexpos
c.lexlen = self.lexlen
        c.lexindexfunc = self.lexindexfunc
c.lexerrorf = self.lexerrorf
c.lextokens = self.lextokens
c.lexignore = self.lexignore
c.lineno = self.lineno
c.optimize = self.optimize
        c.token = c.realtoken
        return c
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
if not isinstance(s,types.StringType):
raise ValueError, "Expected a string"
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
self.token = self.realtoken
# Change the token routine to point to realtoken()
global token
if token == self.errtoken:
token = self.token
# ------------------------------------------------------------
# errtoken() - Return error if token is called with no data
# ------------------------------------------------------------
def errtoken(self):
raise RuntimeError, "No input string given with input()"
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def realtoken(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
m = self.lexre.match(lexdata,lexpos)
if m:
i = m.lastindex
lexpos = m.end()
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexer = self
func,tok.type = self.lexindexfunc[i]
if not func:
self.lexpos = lexpos
return tok
# If token is processed by a function, call it
self.lexpos = lexpos
newtok = func(tok)
self.lineno = tok.lineno # Update line number
# Every function must return a token, if nothing, we just move to next token
if not newtok: continue
# Verify type of the token. If not in the token map, raise an error
if not self.optimize:
if not self.lextokens.has_key(newtok.type):
raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func.func_code.co_filename, func.func_code.co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
oldpos = lexpos
newtok = self.lexerrorf(tok)
lexpos += getattr(tok,"_skipn",0)
if oldpos == lexpos:
# Error method didn't change text position at all. This is an error.
self.lexpos = lexpos
raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
if not newtok: continue
self.lexpos = lexpos
return newtok
self.lexpos = lexpos
raise LexError, ("No match found", lexdata[lexpos:])
# No more input data
self.lexpos = lexpos + 1
return None
# -----------------------------------------------------------------------------
# validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the filename.
# -----------------------------------------------------------------------------
def validate_file(filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return 1 # Oh well
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
noerror = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
noerror = 0
linen += 1
return noerror
# -----------------------------------------------------------------------------
# _read_lextab(module)
#
# Reads lexer table from a lextab file instead of using introspection.
# -----------------------------------------------------------------------------
def _read_lextab(lexer, fdict, module):
exec "import %s as lextab" % module
lexer.lexre = re.compile(lextab._lexre, re.VERBOSE)
lexer.lexindexfunc = lextab._lextab
for i in range(len(lextab._lextab)):
t = lexer.lexindexfunc[i]
if t:
if t[0]:
lexer.lexindexfunc[i] = (fdict[t[0]],t[1])
lexer.lextokens = lextab._lextokens
lexer.lexignore = lextab._lexignore
if lextab._lexerrorf:
lexer.lexerrorf = fdict[lextab._lexerrorf]
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,debug=0,optimize=0,lextab="lextab"):
ldict = None
regex = ""
error = 0
files = { }
lexer = Lexer()
lexer.debug = debug
lexer.optimize = optimize
global token,input
if module:
# User supplied a module object.
if isinstance(module, types.ModuleType):
ldict = module.__dict__
elif isinstance(module, types.InstanceType):
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = { }
for (i,v) in _items:
ldict[i] = v
else:
raise ValueError,"Expected a module or instance"
else:
# No module given. We might be able to get information from the caller.
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
f = f.f_back # Walk out to our calling function
ldict = f.f_globals # Grab its globals dictionary
if optimize and lextab:
try:
_read_lextab(lexer,ldict, lextab)
if not lexer.lexignore: lexer.lexignore = ""
token = lexer.token
input = lexer.input
return lexer
except ImportError:
pass
# Get the tokens map
if (module and isinstance(module,types.InstanceType)):
tokens = getattr(module,"tokens",None)
else:
try:
tokens = ldict["tokens"]
except KeyError:
tokens = None
if not tokens:
raise SyntaxError,"lex: module does not define 'tokens'"
if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
raise SyntaxError,"lex: tokens must be a list or tuple."
# Build a dictionary of valid token names
lexer.lextokens = { }
if not optimize:
# Utility function for verifying tokens
def is_identifier(s):
for c in s:
if not (c.isalnum() or c == '_'): return 0
return 1
for n in tokens:
if not is_identifier(n):
print "lex: Bad token name '%s'" % n
error = 1
if lexer.lextokens.has_key(n):
print "lex: Warning. Token '%s' multiply defined." % n
lexer.lextokens[n] = None
else:
for n in tokens: lexer.lextokens[n] = None
if debug:
print "lex: tokens = '%s'" % lexer.lextokens.keys()
# Get a list of symbols with the t_ prefix
tsymbols = [f for f in ldict.keys() if f[:2] == 't_']
# Now build up a list of functions and a list of strings
fsymbols = [ ]
ssymbols = [ ]
for f in tsymbols:
if callable(ldict[f]):
fsymbols.append(ldict[f])
elif isinstance(ldict[f], types.StringType):
ssymbols.append((f,ldict[f]))
else:
print "lex: %s not defined as a function or string" % f
error = 1
# Sort the functions by line number
fsymbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
# Sort the strings by regular expression length
ssymbols.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
# Check for non-empty symbols
if len(fsymbols) == 0 and len(ssymbols) == 0:
raise SyntaxError,"lex: no rules of the form t_rulename are defined."
# Add all of the rules defined with actions first
for f in fsymbols:
line = f.func_code.co_firstlineno
file = f.func_code.co_filename
files[file] = None
ismethod = isinstance(f, types.MethodType)
if not optimize:
nargs = f.func_code.co_argcount
if ismethod:
reqargs = 2
else:
reqargs = 1
if nargs > reqargs:
print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
error = 1
continue
if nargs < reqargs:
print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
error = 1
continue
if f.__name__ == 't_ignore':
print "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
error = 1
continue
if f.__name__ == 't_error':
lexer.lexerrorf = f
continue
if f.__doc__:
if not optimize:
try:
c = re.compile(f.__doc__, re.VERBOSE)
except re.error,e:
print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
error = 1
continue
if debug:
print "lex: Adding rule %s -> '%s'" % (f.__name__,f.__doc__)
# Okay. The regular expression seemed okay. Let's append it to the master regular
# expression we're building
if (regex): regex += "|"
regex += "(?P<%s>%s)" % (f.__name__,f.__doc__)
else:
print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
# Now add all of the simple rules
for name,r in ssymbols:
if name == 't_ignore':
lexer.lexignore = r
continue
if not optimize:
if name == 't_error':
raise SyntaxError,"lex: Rule 't_error' must be defined as a function"
error = 1
continue
if not lexer.lextokens.has_key(name[2:]):
print "lex: Rule '%s' defined for an unspecified token %s." % (name,name[2:])
error = 1
continue
try:
c = re.compile(r,re.VERBOSE)
except re.error,e:
print "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
error = 1
continue
if debug:
print "lex: Adding rule %s -> '%s'" % (name,r)
if regex: regex += "|"
regex += "(?P<%s>%s)" % (name,r)
if not optimize:
for f in files.keys():
if not validate_file(f):
error = 1
try:
if debug:
print "lex: regex = '%s'" % regex
lexer.lexre = re.compile(regex, re.VERBOSE)
# Build the index to function map for the matching engine
lexer.lexindexfunc = [ None ] * (max(lexer.lexre.groupindex.values())+1)
for f,i in lexer.lexre.groupindex.items():
handle = ldict[f]
if type(handle) in (types.FunctionType, types.MethodType):
lexer.lexindexfunc[i] = (handle,handle.__name__[2:])
else:
# If rule was specified as a string, we build an anonymous
# callback function to carry out the action
lexer.lexindexfunc[i] = (None,f[2:])
# If a lextab was specified, we create a file containing the precomputed
# regular expression and index table
if lextab and optimize:
lt = open(lextab+".py","w")
lt.write("# %s.py. This file automatically created by PLY. Don't edit.\n" % lextab)
lt.write("_lexre = %s\n" % repr(regex))
lt.write("_lextab = [\n");
for i in range(0,len(lexer.lexindexfunc)):
t = lexer.lexindexfunc[i]
if t:
if t[0]:
lt.write(" ('%s',%s),\n"% (t[0].__name__, repr(t[1])))
else:
lt.write(" (None,%s),\n" % repr(t[1]))
else:
lt.write(" None,\n")
lt.write("]\n");
lt.write("_lextokens = %s\n" % repr(lexer.lextokens))
lt.write("_lexignore = %s\n" % repr(lexer.lexignore))
if (lexer.lexerrorf):
lt.write("_lexerrorf = %s\n" % repr(lexer.lexerrorf.__name__))
else:
lt.write("_lexerrorf = None\n")
lt.close()
except re.error,e:
print "lex: Fatal error. Unable to compile regular expression rules. %s" % e
error = 1
if error:
raise SyntaxError,"lex: Unable to build lexer."
if not lexer.lexerrorf:
print "lex: Warning. no t_error rule is defined."
if not lexer.lexignore: lexer.lexignore = ""
# Create global versions of the token() and input() functions
token = lexer.token
input = lexer.input
return lexer
# -----------------------------------------------------------------------------
# run()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
print "Reading from standard input (type EOF to end):"
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
print "(%s,'%s',%d)" % (tok.type, tok.value, tok.lineno)
|
MartinHjelmare/home-assistant
|
refs/heads/dev
|
homeassistant/components/huawei_lte/device_tracker.py
|
5
|
"""Support for device tracking of Huawei LTE routers."""
from typing import Any, Dict, List, Optional
import attr
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
PLATFORM_SCHEMA, DeviceScanner,
)
from homeassistant.const import CONF_URL
from ..huawei_lte import DATA_KEY, RouterData
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_URL): cv.url,
})
HOSTS_PATH = "wlan_host_list.Hosts"
def get_scanner(hass, config):
"""Get a Huawei LTE router scanner."""
data = hass.data[DATA_KEY].get_data(config)
data.subscribe(HOSTS_PATH)
return HuaweiLteScanner(data)
@attr.s
class HuaweiLteScanner(DeviceScanner):
"""Huawei LTE router scanner."""
data = attr.ib(type=RouterData)
_hosts = attr.ib(init=False, factory=dict)
def scan_devices(self) -> List[str]:
"""Scan for devices."""
self.data.update()
self._hosts = {
x["MacAddress"]: x
for x in self.data[HOSTS_PATH + ".Host"]
if x.get("MacAddress")
}
return list(self._hosts)
def get_device_name(self, device: str) -> Optional[str]:
"""Get name for a device."""
host = self._hosts.get(device)
return host.get("HostName") or None if host else None
def get_extra_attributes(self, device: str) -> Dict[str, Any]:
"""
Get extra attributes of a device.
Some known extra attributes that may be returned in the dict
include MacAddress (MAC address), ID (client ID), IpAddress
(IP address), AssociatedSsid (associated SSID), AssociatedTime
(associated time in seconds), and HostName (host name).
"""
return self._hosts.get(device) or {}
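    # Hedged example (values are illustrative, not from the integration docs): for a
    # tracked MAC address the returned dict typically mirrors the router's host entry, e.g.
    #     {'MacAddress': 'AA:BB:CC:DD:EE:FF', 'ID': '1', 'IpAddress': '192.168.8.100',
    #      'AssociatedSsid': 'MyWifi', 'AssociatedTime': '3600', 'HostName': 'my-laptop'}
    # Unknown devices fall through to an empty dict.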
|
Sweetgrassbuffalo/ReactionSweeGrass-v2
|
refs/heads/master
|
.meteor/local/dev_bundle/python/Lib/test/testall.py
|
196
|
# Backward compatibility -- you should use regrtest instead of this module.
from warnings import warnpy3k
warnpy3k("the test.testall module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import sys, regrtest
sys.argv[1:] = ["-vv"]
regrtest.main()
|
zws0932/oms
|
refs/heads/master
|
deploy/views.py
|
12
|
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from deploy.saltapi import SaltAPI
from oms import settings
from oms.mysql import db_operate
from asset.models import HostList
from oms.models import *
from deploy.code import Code_Work
from deploy.json_data import BuildJson
import time
def salt_key_list(request):
"""
list all key
"""
user = request.user
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
minions,minions_pre = sapi.list_all_key()
return render_to_response('salt_key_list.html', {'all_minions': minions, 'all_minions_pre': minions_pre})
def salt_accept_key(request):
"""
accept salt minions key
"""
node_name = request.GET.get('node_name')
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
ret = sapi.accept_key(node_name)
Message.objects.create(type='salt', action='key', action_ip=node_name, content='saltstack accept node key')
return HttpResponseRedirect(reverse('key_list'))
def salt_delete_key(request):
"""
delete salt minions key
"""
node_name = request.GET.get('node_name')
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
ret = sapi.delete_key(node_name)
Message.objects.create(type='salt', action='key', action_ip=node_name, content='saltstack delete node key')
return HttpResponseRedirect(reverse('key_list'))
def module_deploy(request):
"""
deploy (nginx/php/mysql..etc) module
"""
ret = []
jid = []
user = request.user
if request.method == 'POST':
action = request.get_full_path().split('=')[1]
if action == 'deploy':
tgt = request.POST.get('tgt')
arg = request.POST.getlist('module')
tgtcheck = HostList.objects.filter(hostname=tgt)
if tgtcheck:
Message.objects.create(type='salt', action='deploy', action_ip=tgt, content='saltstack %s module depoy' % (arg))
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
if 'sysinit' in arg:
                    obj = sapi.async_deploy(tgt,arg[-1])  # run the sysinit module first; the rest can run in any order
jid.append(obj)
arg.remove('sysinit')
if arg:
for i in arg:
obj = sapi.async_deploy(tgt,i)
jid.append(obj)
else:
for i in arg:
obj = sapi.async_deploy(tgt,i)
jid.append(obj)
db = db_operate()
for i in jid:
time.sleep(10)
sql = 'select returns from salt_returns where jid=%s'
                    result=db.select_table(settings.RETURNS_MYSQL,sql,str(i))  # fetch the execution result by jid
ret.append(result)
                #sapi.async_deploy('test-01','zabbix.api')  # call zabbix.api to apply the monitoring module
else:
                ret = 'The target host is invalid, please re-enter it'
return render_to_response('salt_module_deploy.html',
{'ret': ret},context_instance=RequestContext(request))
def remote_execution(request):
"""
remote command execution
"""
ret = ''
tgtcheck = ''
danger = ('rm','reboot','init ','shutdown')
user = request.user
if request.method == 'POST':
action = request.get_full_path().split('=')[1]
if action == 'exec':
tgt = request.POST.get('tgt')
arg = request.POST.get('arg')
tgtcheck = HostList.objects.filter(hostname=tgt)
argcheck = arg not in danger
if tgtcheck and argcheck:
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
ret = sapi.remote_execution(tgt,'cmd.run',arg)
elif not tgtcheck:
                ret = 'The target host is invalid, please re-enter it'
elif not argcheck:
                ret = 'That command is dangerous; the boss will not be happy'
Message.objects.create(type='salt', action='execution', action_ip=tgt, content='saltstack execution command: %s ' % (arg))
return render_to_response('salt_remote_execution.html',
{'ret': ret},context_instance=RequestContext(request))
def code_deploy(request):
"""
Pull code for building, pushed to the server
"""
ret = ''
host = {'ga': 'test-01', 'beta': 'localhost.localdomain'}
user = request.user
if request.method == 'POST':
action = request.get_full_path().split('=')[1]
if action == 'push':
pro = request.POST.get('project')
url = request.POST.get('url')
ver = request.POST.get('version')
env = request.POST.get('env')
capi = Code_Work(pro=pro,url=url,ver=ver)
data = {pro:{'ver':ver}}
            obj = capi.work()  # build the rpm package
if obj['comment'][0]['result'] and obj['comment'][1]['result'] and obj['comment'][2]['result']:
json_api = BuildJson()
                json_api.build_data(host[env],data)  # refresh pillar data, then push the deploy SLS to release the code
sapi = SaltAPI(url=settings.SALT_API['url'],username=settings.SALT_API['user'],password=settings.SALT_API['password'])
if env == 'beta':
jid = sapi.target_deploy('beta','deploy.'+pro)
elif env == 'ga':
jid = sapi.target_deploy('tg','deploy.'+pro)
else:
jid = sapi.target_deploy('beta','deploy.'+pro)
time.sleep(8)
db = db_operate()
sql = 'select returns from salt_returns where jid=%s'
                ret=db.select_table(settings.RETURNS_MYSQL,sql,str(jid))  # fetch the execution result by jid
return render_to_response('code_deploy.html',
{'ret': ret},context_instance=RequestContext(request))
|
mw44118/pitz
|
refs/heads/master
|
tests/__init__.py
|
2
|
# vim: set expandtab ts=4 sw=4 filetype=python:
|
gtzilla/fastFrag-utils
|
refs/heads/master
|
app_engine/fastfrag-converter/tornado/autoreload.py
|
4
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A module to automatically restart the server when a module is modified.
This module depends on IOLoop, so it will not work in WSGI applications
and Google AppEngine.
"""
import functools
import logging
import os
import sys
import types
from tornado import ioloop
try:
import signal
except ImportError:
signal = None
def start(io_loop=None, check_time=500):
"""Restarts the process automatically when a module is modified.
We run on the I/O loop, and restarting is a destructive operation,
so will terminate any pending requests.
"""
io_loop = io_loop or ioloop.IOLoop.instance()
modify_times = {}
callback = functools.partial(_reload_on_update, io_loop, modify_times)
scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop)
scheduler.start()
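# Usage sketch (assumes a typical tornado.web application; MainHandler and the port
# below are illustrative, not part of this module):
#
#     from tornado import autoreload, ioloop, web
#
#     application = web.Application([(r"/", MainHandler)])
#     application.listen(8888)
#     autoreload.start()                  # watch imported modules every 500 ms
#     ioloop.IOLoop.instance().start()    # restart happens on the I/O loop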
_reload_attempted = False
def _reload_on_update(io_loop, modify_times):
global _reload_attempted
if _reload_attempted:
# We already tried to reload and it didn't work, so don't try again.
return
for module in sys.modules.values():
# Some modules play games with sys.modules (e.g. email/__init__.py
# in the standard library), and occasionally this can cause strange
# failures in getattr. Just ignore anything that's not an ordinary
# module.
if not isinstance(module, types.ModuleType): continue
path = getattr(module, "__file__", None)
if not path: continue
if path.endswith(".pyc") or path.endswith(".pyo"):
path = path[:-1]
try:
modified = os.stat(path).st_mtime
except:
continue
if path not in modify_times:
modify_times[path] = modified
continue
if modify_times[path] != modified:
logging.info("%s modified; restarting server", path)
_reload_attempted = True
for fd in io_loop._handlers.keys():
try:
os.close(fd)
except:
pass
if hasattr(signal, "setitimer"):
# Clear the alarm signal set by
# ioloop.set_blocking_log_threshold so it doesn't fire
# after the exec.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
os.execv(sys.executable, [sys.executable] + sys.argv)
except OSError:
# Mac OS X versions prior to 10.6 do not support execv in
# a process that contains multiple threads. Instead of
# re-executing in the current process, start a new one
# and cause the current process to exit. This isn't
# ideal since the new process is detached from the parent
# terminal and thus cannot easily be killed with ctrl-C,
# but it's better than not being able to autoreload at
# all.
# Unfortunately the errno returned in this case does not
# appear to be consistent, so we can't easily check for
# this error specifically.
os.spawnv(os.P_NOWAIT, sys.executable,
[sys.executable] + sys.argv)
sys.exit(0)
|
rfosterslo/django-dockets
|
refs/heads/master
|
dockets/__init__.py
|
52
|
__version__ = '0.0.1'
|
ecreall/lagendacommun
|
refs/heads/master
|
lac/content/interface.py
|
1
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from dace.interfaces import Attribute, IUser, IEntity as IEntityO, IApplication
from dace.interfaces import IGroup as OrigineIGroup
from pontus.interfaces import IVisualisableElement, IImage as SourceIImage
from lac.utilities.data_manager import (
interface_config,
interface,
# OBJECTTYPE,
IMAGETYPE,
FILETYPE,
ICALTTYPE,
file_deserializer,
cultural_event_deserializer,
review_deserializer,
schedule_deserializer,
smartfolder_deserializer,
cinema_review_deserializer,
interview_review_deserializer,
sub_object_serialize)
def get_subinterfaces(interface):
result = list(getattr(interface, '__sub_interfaces__', []))
for sub_interface in list(result):
if getattr(sub_interface, 'is_abstract', False):
result.extend(get_subinterfaces(sub_interface))
result.append(interface)
return list(set(result))
@interface(True)
class IEntity(IEntityO):
pass
@interface()
@interface_config(type_id='lac_image',
deserializer=file_deserializer,
serializer=sub_object_serialize)
class IImage(SourceIImage):
pass
@interface(True)
class ISearchableEntity(IEntity):
name = Attribute('name')
title = Attribute('title')
description = Attribute('description')
visibility_dates = Attribute('visibility_dates')
keywords = Attribute('keywords')
object_id = Attribute('object_id')
@interface()
class IFile(IVisualisableElement, ISearchableEntity):
pass
@interface(True)
class IStructureBase(IVisualisableElement, IEntity):
structure_name = Attribute('structure_name')
domains = Attribute('domains')
address = Attribute('address')
contact = Attribute('contact')
picture = Attribute('picture', type=IMAGETYPE)
@interface()
@interface_config(type_id='structure')
class IStructure(IStructureBase):
structure_type = Attribute('structure_type')
@interface()
@interface_config(type_id='company')
class ICompany(IStructureBase):
pass
@interface(True)
class IBaseUser(IEntity):
first_name = Attribute('first_name')
last_name = Attribute('last_name')
user_title = Attribute('user_title')
is_cultural_animator = Attribute('is_cultural_animator')
structure = Attribute('structure', type='structure')
company = Attribute('company', type='company')
@interface()
@interface_config(type_id='person')
class IPerson(IVisualisableElement,
ISearchableEntity,
IBaseUser,
IUser):
picture = Attribute('picture', type=IMAGETYPE)
signature = Attribute('signature')
@interface()
class IPreregistration(IBaseUser):
pass
@interface()
@interface_config(type_id='group')
class IGroup(IVisualisableElement,
ISearchableEntity,
OrigineIGroup):
pass
@interface(True)
class IDuplicableEntity(IEntity):
pass
# original = Attribute('original', type=OBJECTTYPE)
#branches = Attribute('branches', type=OBJECTTYPE, multiplicity='*')
@interface(True)
class IParticipativeEntity(IEntity):
pass
#contributors = Attribute('contributors', type=OBJECTTYPE, multiplicity='*')
@interface()
@interface_config(type_id='schedule',
deserializer=schedule_deserializer,
serializer=sub_object_serialize)
class ISchedule(IVisualisableElement, IEntity):
dates = Attribute('dates', type=ICALTTYPE)
ticket_type = Attribute('ticket_type')
ticketing_url = Attribute('ticketing_url')
price = Attribute('price')
venue = Attribute('venue', type='venue')
@interface()
@interface_config(type_id='cultural_event',
deserializer=cultural_event_deserializer)
class ICulturalEvent(IVisualisableElement,
ISearchableEntity,
IDuplicableEntity):
# original = Attribute('original', type='cultural_event')
#branches = Attribute('branches', type='cultural_event', multiplicity='*')
details = Attribute('details')
artists = Attribute('artists', type='artist', multiplicity='*')
contacts = Attribute('contacts')
picture = Attribute('picture', type='lac_image')
schedules = Attribute('schedules', type='schedule', multiplicity='*')
selling_tickets = Attribute('selling_tickets')
ticketing_url = Attribute('ticketing_url')
accept_conditions = Attribute('accept_conditions')
@interface(True)
class IBaseReview(IVisualisableElement,
ISearchableEntity):
surtitle = Attribute('surtitle')
article = Attribute('article')
picture = Attribute('picture', type='lac_image')
artists = Attribute('artists', type='artist', multiplicity='*')
signature = Attribute('signature')
informations = Attribute('informations')
@interface()
@interface_config(type_id='brief')
class IBrief(IVisualisableElement, ISearchableEntity):
picture = Attribute('picture', type='lac_image')
details = Attribute('details')
informations = Attribute('informations')
publication_number = Attribute('publication_number')
@interface()
@interface_config(type_id='film_schedule')
class IFilmSchedule(ISearchableEntity, ISchedule):
pass
@interface()
@interface_config(type_id='review',
deserializer=review_deserializer)
class IReview(IBaseReview):
pass
@interface()
@interface_config(type_id='cinema_review',
deserializer=cinema_review_deserializer)
class ICinemaReview(IBaseReview):
nationality = Attribute('nationality')
directors = Attribute('directors', type='artist', multiplicity='*')
duration = Attribute('duration')
appreciation = Attribute('appreciation')
opinion = Attribute('opinion')
@interface()
@interface_config(type_id='interview',
deserializer=interview_review_deserializer)
class IInterview(IBaseReview):
review = Attribute('review')
@interface()
@interface_config(type_id='film_synopses')
class IFilmSynopses(IVisualisableElement, ISearchableEntity):
picture = Attribute('picture', type='lac_image')
abstract = Attribute('abstract')
informations = Attribute('informations')
@interface(True)
class IAdvertising(IVisualisableElement, ISearchableEntity):
dates = Attribute('dates', type=ICALTTYPE)
request_quotation = Attribute('request_quotation')
@interface()
@interface_config(type_id='web_advertising')
class IWebAdvertising(IAdvertising):
picture = Attribute('picture', type=FILETYPE)
html_content = Attribute('html_content')
advertisting_url = Attribute('advertisting_url')
@interface()
@interface_config(type_id='periodic_advertising')
class IPeriodicAdvertising(IAdvertising):
picture = Attribute('picture', type=FILETYPE)
@interface()
@interface_config(type_id='game')
class IGame(IVisualisableElement, ISearchableEntity):
pass
@interface()
class ICreationCulturelleApplication(IVisualisableElement, IApplication):
pass
@interface()
class IKeyword(IVisualisableElement, IEntity):
pass
@interface()
class INewsletter(IVisualisableElement, IEntity):
pass
@interface()
@interface_config(type_id='smartfolder',
deserializer=smartfolder_deserializer)
class ISmartFolder(IVisualisableElement, IEntity):
add_as_a_block = Attribute('add_as_a_block')
view_type = Attribute('view_type')
children = Attribute('children', type='smartfolder', multiplicity='*')
style = Attribute('style')
classifications = Attribute('classifications', multiplicity='*')
@interface()
class ISiteFolder(IVisualisableElement, IEntity):
pass
@interface()
class IOrganization(IVisualisableElement, IGroup):
pass
@interface(True)
class IServiceDefinition(IVisualisableElement, IEntity):
pass
@interface()
class IModerationServiceDefinition(IServiceDefinition):
pass
@interface()
class ISellingTicketsServiceDefinition(IServiceDefinition):
pass
@interface()
class IImportServiceDefinition(IServiceDefinition):
pass
@interface()
class IExtractionServiceDefinition(IServiceDefinition):
pass
@interface()
class IPromotionServiceDefinition(IServiceDefinition):
pass
@interface()
class INewsletterServiceDefinition(IServiceDefinition):
pass
@interface(True)
class IUnitServiceDefinition(IServiceDefinition):
pass
@interface()
class IModerationServiceUnitDefinition(IUnitServiceDefinition):
pass
@interface()
class IService(IVisualisableElement, IEntity):
pass
@interface()
class IModerationService(IService):
pass
@interface()
class ISellingTicketsService(IService):
pass
@interface()
class IImportService(IService):
pass
@interface()
class IExtractionService(IService):
pass
@interface()
class IPromotionService(IService):
pass
@interface()
class INewsletterService(IService):
pass
@interface(True)
class IUnitService(IService):
pass
@interface()
class IModerationServiceUnit(IUnitService, IModerationService):
pass
@interface()
class ICustomerAccount(IEntity):
pass
@interface()
class IOrder(IVisualisableElement, IEntity):
pass
@interface()
@interface_config(type_id='artist')
class IArtistInformationSheet(IVisualisableElement, ISearchableEntity):
picture = Attribute('picture', type=FILETYPE)
biography = Attribute('biography')
@interface()
@interface_config(type_id='venue')
class IVenue(IVisualisableElement, ISearchableEntity):
kind = Attribute('kind')
capacity = Attribute('capacity')
addresses = Attribute('addresses')
contact = Attribute('contact')
@interface()
class IAlert(IVisualisableElement,
IEntity):
pass
@interface()
class ILabel(IVisualisableElement, IEntity):
price = Attribute('price')
@interface()
class ISocialApplication(IVisualisableElement, IEntity):
pass
|
7kbird/chrome
|
refs/heads/master
|
remoting/webapp/build-html.py
|
8
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Builds the complete main.html file from the basic components.
"""
from HTMLParser import HTMLParser
import argparse
import os
import re
import sys
def error(msg):
print 'Error: %s' % msg
sys.exit(1)
class HtmlChecker(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.ids = set()
def handle_starttag(self, tag, attrs):
for (name, value) in attrs:
if name == 'id':
if value in self.ids:
error('Duplicate id: %s' % value)
self.ids.add(value)
class GenerateWebappHtml:
def __init__(self, template_files, js_files, instrumented_js_files):
self.js_files = js_files
self.instrumented_js_files = instrumented_js_files
self.templates_expected = set()
for template in template_files:
self.templates_expected.add(os.path.basename(template))
self.templates_found = set()
def includeJavascript(self, output):
for js_path in sorted(self.js_files):
js_file = os.path.basename(js_path)
output.write(' <script src="' + js_file + '"></script>\n')
for js_path in sorted(self.instrumented_js_files):
js_file = os.path.basename(js_path)
output.write(' <script src="' + js_file + '" data-cover></script>\n')
def verifyTemplateList(self):
"""Verify that all the expected templates were found."""
if self.templates_expected > self.templates_found:
extra = self.templates_expected - self.templates_found
print 'Extra templates specified:', extra
return False
return True
def validateTemplate(self, template_path):
template = os.path.basename(template_path)
if template in self.templates_expected:
self.templates_found.add(template)
return True
return False
def processTemplate(self, output, template_file, indent):
with open(template_file, 'r') as input_template:
first_line = True
skip_header_comment = False
for line in input_template:
# If the first line is the start of a copyright notice, then
# skip over the entire comment.
# This will remove the copyright info from the included files,
# but leave the one on the main template.
if first_line and re.match(r'<!--', line):
skip_header_comment = True
first_line = False
if skip_header_comment:
if re.search(r'-->', line):
skip_header_comment = False
continue
m = re.match(
r'^(\s*)<meta-include src="(.+)"\s*/>\s*$',
line)
if m:
prefix = m.group(1)
template_name = m.group(2)
if not self.validateTemplate(template_name):
error('Found template not in list of expected templates: %s' %
template_name)
self.processTemplate(output, template_name, indent + len(prefix))
continue
m = re.match(r'^\s*<meta-include type="javascript"\s*/>\s*$', line)
if m:
self.includeJavascript(output)
continue
if line.strip() == '':
output.write('\n')
else:
output.write((' ' * indent) + line)
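# A minimal sketch (not part of the original script) of the two directives that
# processTemplate() recognises, using hypothetical template names; each
# directive must sit alone on its own line to match the regexes above:
#
#   <html>
#     <head>
#       <meta-include type="javascript"/>
#     </head>
#     <body>
#       <meta-include src="toolbar.html"/>
#     </body>
#   </html>
#
# The javascript directive is replaced by <script> tags built from the --js and
# --instrument-js files; the src directive recursively inlines toolbar.html at
# the matching indentation, strips its leading copyright comment, and requires
# that toolbar.html also be listed via --templates.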
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument(
'--js', nargs='+', help='The Javascript files to include in HTML <head>')
parser.add_argument(
'--templates',
nargs='*',
default=[],
help='The html template files used by input-template')
parser.add_argument(
'--exclude-js',
nargs='*',
default=[],
help='The Javascript files to exclude from <--js> and <--instrumentedjs>')
parser.add_argument(
'--instrument-js',
nargs='*',
default=[],
help='Javascript to include and instrument for code coverage')
parser.add_argument('output_file')
parser.add_argument('input_template')
return parser.parse_args(sys.argv[1:])
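# Example invocation (hypothetical file names), matching the arguments defined
# in parseArgs(); the positionals are given first so the variable-length flag
# lists do not swallow them:
#
#   python build-html.py out/main.html main_template.html \
#       --js client.js host.js --templates toolbar.html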
def main():
args = parseArgs()
out_file = args.output_file
js_files = set(args.js) - set(args.exclude_js)
# Create the output directory if it does not exist.
out_directory = os.path.dirname(out_file)
if not os.path.exists(out_directory):
os.makedirs(out_directory)
# Generate the main HTML file from the templates.
with open(out_file, 'w') as output:
gen = GenerateWebappHtml(args.templates, js_files, args.instrument_js)
gen.processTemplate(output, args.input_template, 0)
# Verify that all the expected templates were found.
if not gen.verifyTemplateList():
error('Extra templates specified')
# Verify that the generated HTML file is valid.
with open(out_file, 'r') as input_html:
parser = HtmlChecker()
parser.feed(input_html.read())
if __name__ == '__main__':
sys.exit(main())
|
makki-io/pitrot
|
refs/heads/master
|
dbcon.py
|
1
|
"""
Module description
__author__ : Çağatay Tengiz
__date__ : 15.11.2013
"""
import datetime
from sqlalchemy import *
from sqlalchemy import func
from sqlalchemy.ext.declarative import \
declared_attr,\
declarative_base, \
DeferredReflection
from sqlalchemy.orm import \
sessionmaker, \
scoped_session, \
column_property, \
relationship, \
deferred, \
events, \
object_session, \
backref
from bottle import request
from i18n import _
Base = declarative_base()
engine = None
plugin_sqlalchemy = None
revision = '$Revision$'
def init_sa(conf):
global engine
global plugin_sqlalchemy
engine = create_engine('firebird+fdb://%s:%s@%s/%s/%s?charset=UTF8' %
(
conf['database.db_user'],
conf['database.db_pass'],
conf['database.db_server'],
conf['database.db_path'],
conf['database.db_name']
),
echo=conf['database.echo_sqlalchemy'] == '1',
retaining=True,
enable_rowcount=False
)
"""
engine = create_engine('mysql+mysqldb://root:@127.0.0.1:3306/pitrot?charset=utf8',
echo=conf['database.echo_sqlalchemy'] == '1')
"""
Base.metadata.bind = engine
from bottle.ext import sqlalchemy
plugin_sqlalchemy = sqlalchemy.Plugin(
engine, # SQLAlchemy engine created with create_engine function.
Base.metadata, # SQLAlchemy metadata, required only if create=True.
keyword='db', # Keyword used to inject session database in a route (default 'db').
create=False, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False).
commit=True, # If it is true, plugin commit changes after route is executed (default True).
use_kwargs=False # If it is true and keyword is not defined, plugin uses **kwargs argument to inject session database (default False).
)
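# A minimal usage sketch, not part of this module and only valid after init_sa()
# has created the engine and plugin above: routes that declare the configured
# keyword ('db') receive a SQLAlchemy session, which the plugin commits after
# the route returns.  The app and route below are hypothetical.
#
#   from bottle import Bottle
#   app = Bottle()
#   app.install(plugin_sqlalchemy)
#
#   @app.get('/usr/<usr_id:int>')
#   def show_usr(usr_id, db):
#       usr = db.query(Usr).get(usr_id)
#       return {'code': usr.code if usr else None}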
class MkMixin(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
zlins_dttm = Column(DateTime, default=func.now())
zlupd_dttm = Column(DateTime, onupdate=func.now())
zlins_usr = Column(BigInteger)
zlupd_usr = Column(BigInteger)
zlins_dttm._creation_order = 9990
zlins_usr._creation_order = 9991
zlupd_dttm._creation_order = 9992
zlupd_usr._creation_order = 9993
@staticmethod
def create_usr(mapper, connection, target):
if 'beaker.session' in request.environ:
target.zlins_usr = request.environ['beaker.session']['usr_id']
@staticmethod
def update_usr(mapper, connection, target):
if 'beaker.session' in request.environ:
target.zlupd_usr = request.environ['beaker.session']['usr_id']
@classmethod
def __declare_last__(cls):
# get called after mappings are completed
# http://docs.sqlalchemy.org/en/rel_0_7/orm/extensions/declarative.html#declare-last
events.event.listen(cls, 'before_insert', cls.create_usr)
events.event.listen(cls, 'before_update', cls.update_usr)
__table_args__ = {'autoload': False, # If this is set to True, SQLAlchemy tries to autoload the table from the database...
'extend_existing': True}
#__mapper_args__= {'exclude_properties': ['cnt_bin', 'cnt_thb']} #'always_refresh': True}
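# Note (added for clarity, not in the original module): every model below mixes
# in MkMixin, so each one gets a lowercase __tablename__, the four audit columns
# (zlins_dttm/zlins_usr set on insert, zlupd_dttm/zlupd_usr on update, ordered
# last via _creation_order), and the before_insert/before_update listeners that
# copy 'usr_id' from the beaker session of the current request when present.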
class Config(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_config'), primary_key=True)
code = Column(String(30))
defi = Column(String(200))
cval = Column(String(200))
class Usr(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_usr'), primary_key=True)
code = Column(String(30), nullable=False, unique=True)
upass = Column(String(30))
email = Column(String(50), nullable=False, unique=True)
full_name = Column(String(50))
is_admin = Column(Boolean, default=False, nullable=False)
#authentication method
# 0: Password
# 1: Auth through active directory
auth_method = Column(SmallInteger, default=0)
dc_name = Column(String(20))
client_id = Column(BigInteger, ForeignKey("client.id",
onupdate="CASCADE",
ondelete="CASCADE",
name="fk_usr_client"))
client_code = column_property(select([literal_column('Client.code')],
from_obj=text('Client')).where(text('Client.id=Usr.client_id')))
projects = relationship('Project_Usr',
backref='b_projects_usr',
primaryjoin=('Usr.id==Project_Usr.usr_id'))
worklog = relationship('WorkLog',
backref='b_usr_worklog',
order_by=[desc('WorkLog.dt')],
primaryjoin='Usr.id==WorkLog.usr_id',
cascade="all, delete-orphan")
statu = Column(SmallInteger, default=1)
ck_notification_self = Column(Boolean, default=False)
ck_notification_watcher = Column(Boolean, default=False)
ck_notification_project = Column(Boolean, default=False)
ck_notification_public_project = Column(Boolean, default=False)
usrrole_id = Column(BigInteger, ForeignKey("usrrole.id",
onupdate="CASCADE",
ondelete="NO ACTION",
name="fk_usr_usrrole"))
usrrole_code = column_property(select([literal_column('UsrRole.code')],
from_obj=text('UsrRole')).where(text('UsrRole.id=Usr.usrrole_id')))
hourly_wage = Column(Numeric)
hourly_wage_crn = Column(String(3))
@property
def statu_def(self):
if self.statu == 1:
return _('Active')
elif self.statu == 2:
return _('Waiting Email Conf.')
elif self.statu == 3:
return _('Waiting Admin Conf.')
elif self.statu == 99:
return _('Disabled')
class UsrRole(Base, MkMixin):
id = Column(BigInteger, Sequence("gn_usrrole"), primary_key=True)
code = Column(String(30), unique=True, nullable=False)
class UsrGrp(Base, MkMixin):
id = Column(BigInteger, Sequence("gn_usrgrp"), primary_key=True)
code = Column(String(30), unique=True, nullable=False)
members = relationship('UsrGrp_Usr',
backref="b_usrgrp",
primaryjoin="UsrGrp_Usr.usrgrp_id==UsrGrp.id",
cascade="all, delete-orphan")
class UsrGrp_Usr(Base, MkMixin):
id = Column(BigInteger, Sequence("gn_usrgrp_usr"), primary_key=True)
usr_id = Column(BigInteger, ForeignKey("usr.id",
onupdate="CASCADE",
ondelete="CASCADE",
name="fk_usrgrp_usr_usr"),
nullable=False)
usr_code = column_property(select([literal_column('Usr.code')],
from_obj=text('Usr')).where(text('Usr.id=UsrGrp_Usr.usr_id')))
usrgrp_id = Column(BigInteger, ForeignKey("usrgrp.id",
onupdate="CASCADE",
ondelete="CASCADE",
name="fk_usrgrp_usr_usrgrp"),
nullable=False)
usrgrp_code = column_property(select([literal_column('UsrGrp.code')],
from_obj=text('UsrGrp')).where(text('UsrGrp.id=UsrGrp_Usr.usrgrp_id')))
__table_args__ = (UniqueConstraint('usr_id', 'usrgrp_id', name='uq_usrgrp_usr_1'), MkMixin.__table_args__)
class DfLang(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_dflang'), primary_key=True)
code = Column(String(2), unique=True, nullable=False)
defi = Column(String(50))
class DfTag(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_dftag'), primary_key=True)
code = Column(String(30))
class DfIssueCategory(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_dfissuecategory'), primary_key=True)
code = Column(String(30), unique=True)
class DfIssuePriority(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_dfissuepriority'), primary_key=True)
code = Column(String(30), unique=True)
nro = Column(Integer, nullable=False)
class DfIssueStatus(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_dfissuestatus'), primary_key=True)
code = Column(String(30), unique=True)
nro = Column(Integer, nullable=False, default=0)
issue_closed = Column(Boolean, default=False, nullable=False)
class DfIssueSeverity(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_dfissueseverity'), primary_key=True)
code = Column(String(30), unique=True)
class Client(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_client'), primary_key=True)
code = Column(String(30), unique=True, nullable=False)
name = Column(String(200))
users = relationship('Usr',
backref='b_client_usr',
order_by=[asc('Usr.code')],
primaryjoin='Client.id==Usr.client_id',
cascade="all, delete-orphan"
)
projects = relationship('Project',
backref='b_client_project',
order_by=[asc('Project.code')],
primaryjoin='Client.id==Project.client_id',
cascade="all, delete-orphan"
)
class Project(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_project'), primary_key=True)
parent_id = Column(BigInteger,ForeignKey("project.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_project_parent"))
parent_code = column_property(select([literal_column('p.code')],
from_obj=text('Project p')).where(text('p.id=Project.parent_id')))
code = Column(String(30), unique=True, nullable=False)
name = Column(String(200))
is_public = Column(Boolean, default=False, nullable=False)
is_active = Column(Boolean, default=True, nullable=False)
client_id = Column(BigInteger, ForeignKey("client.id",
onupdate="CASCADE",
ondelete="CASCADE",
name="fk_project_client"))
client_code = column_property(select([literal_column('Client.code')],
from_obj=text('Client')).where(text('Client.id=Project.client_id')))
client_name = column_property(select([literal_column('Client.name')],
from_obj=text('Client')).where(text('Client.id=Project.client_id')))
dt_start = Column(Date)
dt_finish = Column(Date)
notes = Column(TEXT)
#todo: make this adjustable per consultant
hourly_rate = Column(Numeric)
hourly_rate_crn = Column(String(3))
issues = relationship('Issue',
backref='b_issue_project',
primaryjoin=('Project.id==Issue.project_id'),
order_by=[desc('Issue.id')],
cascade="all, delete-orphan"
)
users = relationship('Project_Usr',
backref='b_project_usr_project',
order_by=[asc('Project_Usr.usr_id')],
primaryjoin='Project.id==Project_Usr.project_id',
cascade="all, delete-orphan"
)
uploads = relationship('Project_Upload',
backref='b_upload_project',
primaryjoin='Project.id==Project_Upload.project_id',
cascade="all, delete-orphan")
children = relationship("Project",
# cascade deletions
#cascade="all, delete-orphan",
backref=backref("parent", remote_side=id))
worklog = relationship('WorkLog',
backref='b_project_worklog',
order_by=[desc('WorkLog.dt')],
primaryjoin='Project.id==WorkLog.project_id',
cascade="all, delete-orphan"
)
category = relationship('Project_Category',
backref='b_project_category',
order_by=[asc('Project_Category.code')],
primaryjoin='Project.id==Project_Category.project_id',
cascade="all, delete-orphan"
)
status = relationship('Project_Status',
backref='b_project_status',
order_by=[asc('Project_Status.nro')],
primaryjoin='Project.id==Project_Status.project_id',
cascade="all, delete-orphan"
)
milestone = relationship('Project_Milestone',
backref='b_project_milestone',
order_by=[asc('Project_Milestone.code')],
primaryjoin='Project.id==Project_Milestone.project_id',
cascade="all, delete-orphan"
)
class Project_Usr(Base, MkMixin):
id = Column(BigInteger, Sequence("gn_project_usr"), primary_key=True)
usr_id = Column(BigInteger, ForeignKey("usr.id",
onupdate="CASCADE",
ondelete="CASCADE",
name="fk_project_usr_usr"),
nullable=False)
usr_code = column_property(select([literal_column('Usr.code')],
from_obj=text('Usr')).where(text('Usr.id=Project_Usr.usr_id')))
project_id = Column(BigInteger, ForeignKey("project.id",
onupdate="CASCADE",
ondelete="CASCADE",
name="fk_project_usr_project"),
nullable=False)
project_code = column_property(select([literal_column('Project.code')],
from_obj=text('Project')).where(text('Project.id=Project_Usr.project_id')))
usrrole_id = Column(BigInteger, ForeignKey("usrrole.id",
onupdate="CASCADE",
ondelete="NO ACTION",
name="fk_project_usr_usrrole"))
usrrole_code = column_property(select([literal_column('UsrRole.code')],
from_obj=text('UsrRole')).where(text('UsrRole.id=Project_Usr.usrrole_id')))
__table_args__ = (UniqueConstraint('usr_id', 'project_id', name='uq_project_usr_1'), MkMixin.__table_args__)
class Project_Upload(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_project_upload'), primary_key=True)
project_id = Column(BigInteger, ForeignKey("project.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_project_upload_project"),
nullable=False)
upload_id = Column(BigInteger, ForeignKey("upload.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_project_upload_upload"),
nullable=False)
defi = Column(String(200))
uuid = column_property(select([literal_column('Upload.uuid')],
from_obj=text('Upload')).where(text('Project_Upload.upload_id=Upload.id')))
file_name = column_property(select([literal_column('Upload.file_name')],
from_obj=text('Upload')).where(text('Project_Upload.upload_id=Upload.id')))
class Project_Milestone(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_project_milestone'), primary_key=True)
project_id = Column(BigInteger, ForeignKey("project.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_project_milestone_project"),
nullable=False)
code = Column(String(30), nullable=False)
name = Column(String(200))
dt_plan = Column(Date, nullable=False)
is_active = Column(Boolean, default=True)
is_released = Column(Boolean, default=False)
active_count = column_property(select([func.count(literal_column('Issue.id'))], from_obj=text("Issue")).
where(and_(text("Issue.milestone_id=Project_Milestone.id"),
text("(select issue_closed from project_status where project_status.id=issue.status_id) = 0")
))
)
closed_count = column_property(select([func.count(literal_column('Issue.id'))], from_obj=text("Issue")).
where(and_(text("Issue.milestone_id=Project_Milestone.id"),
text("(select issue_closed from project_status where project_status.id=issue.status_id) = 1")
))
)
@property
def total_count(self):
return self.active_count + self.closed_count
@property
def percentage(self):
if self.total_count > 0:
return (self.closed_count / self.total_count) * 100
else:
return 0
__table_args__ = (UniqueConstraint('project_id', 'code', name='uq_project_milesone_1'), MkMixin.__table_args__)
class Project_Status(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_project_status'), primary_key=True)
project_id = Column(BigInteger, ForeignKey("project.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_project_status_project"),
nullable=False)
code = Column(String(30), nullable=False)
nro = Column(Integer, nullable=False, default=0)
issue_closed = Column(Boolean, default=False, nullable=False)
__table_args__ = (UniqueConstraint('project_id', 'code', name='uq_project_status_1'), MkMixin.__table_args__)
class Project_Category(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_project_category'), primary_key=True)
project_id = Column(BigInteger, ForeignKey("project.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_project_category_project"),
nullable=False)
code = Column(String(30), nullable=False)
__table_args__ = (UniqueConstraint('project_id', 'code', name='uq_project_category_1'), MkMixin.__table_args__)
class Upload(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_upload'), primary_key=True)
uuid = Column(String(40), unique=True)
file_name = Column(String(200))
ext = Column(String(20))
cnt = deferred(Column(BLOB))
cnt_th = deferred(Column(BLOB))
notes = Column(TEXT)
mimetype = Column(String(100))
class Issue(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_issue'), primary_key=True)
parent_id = Column(BigInteger, ForeignKey("issue.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_issue_parent"))
project_id = Column(BigInteger, ForeignKey("project.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_issue_project"),
nullable=False)
project_code = column_property(select([literal_column('project.code')],
from_obj=text('Project')).where(text('Project.id=Issue.project_id')))
usr_id_from = Column(BigInteger, ForeignKey("usr.id",
ondelete="NO ACTION",
onupdate="CASCADE",
name="fk_issue_usr_from"),
nullable=False)
usr_code_from = column_property(select([literal_column('usr.code')],
from_obj=text('Usr')).where(text('Usr.id=Issue.usr_id_from')))
usr_id_assigned = Column(BigInteger, ForeignKey("usr.id",
ondelete="NO ACTION",
onupdate="CASCADE",
name="fk_issue_usr_assigned"))
usr_code_assigned = column_property(select([literal_column('usr.code')],
from_obj=text('Usr')).where(text('Usr.id=Issue.usr_id_assigned')))
category_id = Column(BigInteger, ForeignKey(Project_Category.id,
ondelete="NO ACTION",
onupdate="CASCADE",
name="fk_issue_category"),
nullable=False)
category = column_property(select([literal_column('Project_Category.code')],
from_obj=text('Project_Category')).
where(text('Project_Category.id=Issue.category_id')))
status_id = Column(BigInteger, ForeignKey(Project_Status.id,
ondelete="NO ACTION",
onupdate="CASCADE",
name="fk_issue_status"),
nullable=False)
status = column_property(select([literal_column('Project_Status.code')],
from_obj=text('Project_Status')).
where(text('Project_Status.id=Issue.status_id')))
status_nro = column_property(select([literal_column('Project_Status.nro')],
from_obj=text('Project_Status')).
where(text('Project_Status.id=Issue.status_id')))
issue_closed = column_property(select([literal_column('Project_Status.issue_closed')],
from_obj=text('Project_Status')).
where(text('Project_Status.id=Issue.status_id')))
priority_id = Column(BigInteger, ForeignKey("dfissuepriority.id",
ondelete="NO ACTION",
onupdate="CASCADE",
name="fk_issue_dfissuepriority"),
nullable=False)
priority = column_property(select([literal_column('DfIssuePriority.code')],
from_obj=text('DfIssuePriority')).
where(text('DfIssuePriority.id=Issue.priority_id')))
priority_nro = column_property(select([literal_column('DfIssuePriority.nro')],
from_obj=text('DfIssuePriority')).
where(text('DfIssuePriority.id=Issue.priority_id')))
dt_open = Column(Date)
dt_due = Column(Date)
dt_plan = Column(Date)
dt_plan_fn = Column(Date)
estimated_hours = Column(Numeric(9, 2))
reference = Column(String(30)) #The counterpart consulting firm's reference, if the ticket is opened with them
milestone_id = Column(BigInteger, ForeignKey("project_milestone.id",
ondelete="SET NULL",
onupdate="CASCADE",
name="fk_issue_milestone"))
milestone = column_property(select([literal_column('Project_Milestone.code')],
from_obj=text('Project_Milestone')).
where(text('Project_Milestone.id=Issue.milestone_id')))
milestone_name = column_property(select([literal_column('Project_Milestone.name')],
from_obj=text('Project_Milestone')).
where(text('Project_Milestone.id=Issue.milestone_id')))
last_update = column_property(select([literal_column('Issue_Changeset.zlins_dttm')],
from_obj=text('Issue_Changeset')).
where(text('Issue_Changeset.issue_id=Issue.id')).
order_by(desc('Issue_Changeset.zlins_dttm')).limit(1))
last_updated_by_usr_id = column_property(select([literal_column('Issue_Changeset.usr_id')],
from_obj=text('Issue_Changeset')).
where(text('Issue_Changeset.issue_id=Issue.id')).
order_by(desc('Issue_Changeset.zlins_dttm')).limit(1))
last_updated_by_usr_code = column_property(select([Usr.code], from_obj=Usr).
where(Usr.id == select([literal_column('Issue_Changeset.usr_id')],
from_obj=text('Issue_Changeset')).
where(text('Issue_Changeset.issue_id=Issue.id')).
order_by(desc('Issue_Changeset.zlins_dttm')).limit(1)))
"""
spent_hours = column_property(select(
[literal_column()]
))
"""
@property
def age(self):
if type(self.dt_open) != str:
_delta = datetime.date.today() - self.dt_open
return _delta.days
else:
return 0
@property
def overdue(self):
if self.dt_due and type(self.dt_due) != str:
_delta = datetime.date.today() - self.dt_due
return _delta.days
else:
return 0
@property
def overplan(self):
if self.dt_plan and type(self.dt_plan) != str:
_delta = datetime.date.today() - self.dt_plan
return _delta.days
else:
return 0
title = Column(String(200))
description = Column(TEXT)
is_private = Column(Boolean, nullable=False, default=False)
done_ratio = Column(Integer, default=0)
uploads = relationship('Issue_Upload',
backref='b_upload_issue',
primaryjoin='Issue.id==Issue_Upload.issue_id',
cascade="all, delete-orphan")
watchers = relationship('Issue_Usr',
backref="b_usr_issue",
primaryjoin="Issue.id==Issue_Usr.issue_id",
cascade="all, delete-orphan")
logs = relationship('Issue_Log',
backref="b_log_issue",
primaryjoin="Issue.id==Issue_Log.issue_id",
cascade="all, delete-orphan")
comments = relationship('Issue_Comment',
backref="b_comment_issue",
primaryjoin="Issue.id==Issue_Comment.issue_id",
cascade="all, delete-orphan")
changes = relationship('Issue_Changeset',
backref="b_changeset_issue",
primaryjoin="Issue.id==Issue_Changeset.issue_id",
cascade="all, delete-orphan",
order_by="Issue_Changeset.id")
rels = relationship('Issue_Rel',
backref="b_rel_issue",
primaryjoin="Issue.id==Issue_Rel.issue_id_src",
cascade="all, delete-orphan",
order_by="Issue_Rel.id")
#referenced as dst
refs = relationship('Issue_Rel',
backref="b_ref_issue",
primaryjoin="Issue.id==Issue_Rel.issue_id_dst",
cascade="all, delete-orphan",
order_by="Issue_Rel.id")
class Issue_Upload(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_issue_upload'), primary_key=True)
changeset = Column(BigInteger, ForeignKey("issue_changeset.id",
ondelete="SET NULL",
onupdate="CASCADE",
name="fk_issue_upload_changeset"))
issue_id = Column(BigInteger, ForeignKey("issue.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_issue_upload_issue"),
nullable=False)
upload_id = Column(BigInteger, ForeignKey("upload.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_issue_upload_upload"),
nullable=False)
defi = Column(String(200))
uuid = column_property(select([literal_column('Upload.uuid')],
from_obj=text('Upload')).
where(text('Issue_Upload.upload_id=Upload.id')))
file_name = column_property(select([literal_column('Upload.file_name')],
from_obj=text('Upload')).
where(text('Issue_Upload.upload_id=Upload.id')))
class Issue_Changeset(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_issue_changeset'), primary_key=True)
issue_id = Column(BigInteger, ForeignKey("issue.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_issue_changeset_issue"),
nullable=False)
usr_id = Column(BigInteger, ForeignKey("usr.id",
ondelete="SET NULL",
onupdate="CASCADE",
name="fk_issue_changeset_usr"))
usr_code = column_property(select([literal_column('Usr.code')],
from_obj=text('Usr')).
where(text('Usr.id=Issue_Changeset.usr_id')))
uploads = relationship('Issue_Upload',
backref='b_upload_changeset',
primaryjoin='Issue_Changeset.id==Issue_Upload.changeset')
logs = relationship('Issue_Log',
backref="b_log_changeset",
primaryjoin="Issue_Changeset.id==Issue_Log.changeset")
comments = relationship('Issue_Comment',
backref="b_comment_changeset",
primaryjoin="Issue_Changeset.id==Issue_Comment.changeset")
class Issue_Log(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_issue_log'), primary_key=True)
changeset = Column(BigInteger, ForeignKey("issue_changeset.id",
ondelete="SET NULL",
onupdate="CASCADE",
name="fk_issue_log_changeset"))
issue_id = Column(BigInteger, ForeignKey("issue.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_issue_log_issue"),
nullable=False)
field_name = Column(String(50))
old_val = Column(TEXT)
new_val = Column(TEXT)
caption = Column(String(50))
old_val_text = Column(TEXT)
new_val_text = Column(TEXT)
@events.event.listens_for(Issue_Log, 'before_insert')
@events.event.listens_for(Issue_Log, 'before_update')
def issue_log_biu(mapper, connection, target):
target.old_val_text = None
target.new_val_text = None
if target.field_name == 'parent_id':
target.caption = _('Parent Issue')
elif target.field_name == 'project_id':
target.caption = _('Project')
if target.old_val:
target.old_val_text = connection.scalar('select code from project where id = %d' %(int(target.old_val)))
target.new_val_text = connection.scalar('select code from project where id = %d' %(int(target.new_val)))
elif target.field_name == 'usr_id_from':
target.caption = _('Reporter')
if target.old_val:
target.old_val_text = connection.scalar('select code from usr where id = %d' %(int(target.old_val)))
target.new_val_text = connection.scalar('select code from usr where id = %d' %(int(target.new_val)))
elif target.field_name == 'usr_id_assigned':
target.caption = _('Assignee')
if target.old_val:
target.old_val_text = connection.scalar('select code from usr where id = %d' %(int(target.old_val)))
target.new_val_text = connection.scalar('select code from usr where id = %d' %(int(target.new_val)))
elif target.field_name == 'category_id':
target.caption = _('Category')
if target.old_val:
target.old_val_text = connection.scalar('select code from project_category where id = %d' %(int(target.old_val)))
target.new_val_text = connection.scalar('select code from project_category where id = %d' %(int(target.new_val)))
elif target.field_name == 'status_id':
target.caption = _('Status')
if target.old_val:
target.old_val_text = connection.scalar('select code from project_status where id = %d' %(int(target.old_val)))
target.new_val_text = connection.scalar('select code from project_status where id = %d' %(int(target.new_val)))
elif target.field_name == 'priority_id':
target.caption = _('Priority')
if target.old_val:
target.old_val_text = connection.scalar('select code from dfissuepriority where id = %d' %(int(target.old_val)))
target.new_val_text = connection.scalar('select code from dfissuepriority where id = %d' %(int(target.new_val)))
elif target.field_name == 'milestone_id':
target.caption = _('Milestone')
if target.old_val:
target.old_val_text = connection.scalar('select code from project_milestone where id = %d' %(int(target.old_val)))
target.new_val_text = connection.scalar('select code from project_milestone where id = %d' %(int(target.new_val)))
elif target.field_name == 'dt_open':
target.caption = _('Date Opened')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
elif target.field_name == 'dt_due':
target.caption = _('Due Date')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
elif target.field_name == 'dt_plan':
target.caption = _('Plan Start Date')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
elif target.field_name == 'dt_plan_fn':
target.caption = _('Plan Finish Date')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
elif target.field_name == 'estimated_hours':
target.caption = _('Estimated Hours')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
elif target.field_name == 'reference':
target.caption = _('Reference Ticket')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
elif target.field_name == 'title':
target.caption = _('Title')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
elif target.field_name == 'description':
target.caption = _('Description')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
elif target.field_name == 'is_private':
target.caption = _('Private')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
elif target.field_name == 'done_ratio':
target.caption = _('% Done')
target.old_val_text = target.old_val
target.new_val_text = target.new_val
class Issue_Rel(Base, MkMixin):
id = Column(BigInteger, Sequence("gn_issue_rel"), primary_key=True)
issue_id_src = Column(BigInteger, ForeignKey("issue.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_issue_issue_rel_src"),
nullable=False)
issue_src = relationship('Issue',
primaryjoin="Issue.id==Issue_Rel.issue_id_src")
issue_id_dst = Column(BigInteger, ForeignKey("issue.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_issue_issue_rel_dst"),
nullable=False)
issue_dst = relationship('Issue',
primaryjoin="Issue.id==Issue_Rel.issue_id_dst")
#REL Types Are :
#REL : Related To
#DUP : Duplicates
#PRE : Precedes
#FLW : Follows
#BLK : Blocks
#STW : Starts With
#ENW : Ends With
rel_type = Column(String(10))
@property
def rel_type_def(self):
if self.rel_type == 'REL':
return _('Related To')
elif self.rel_type == 'DUP':
return _('Duplicates')
elif self.rel_type == 'PRE':
return _('Precedes')
elif self.rel_type == 'FLW':
return _('Follows')
elif self.rel_type == 'BLK':
return _('Blocks')
elif self.rel_type == 'STW':
return _('Starts With')
elif self.rel_type == 'ENW':
return _('Ends With')
class Issue_Usr(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_issue_usr'), primary_key=True)
usr_id = Column(BigInteger, ForeignKey("usr.id", ondelete="CASCADE", onupdate="CASCADE", name="fk_issue_usr_usr"))
usr_code = column_property(select([literal_column('Usr.code')],
from_obj=text('Usr')).
where(text('Usr.id=Issue_Usr.usr_id')))
issue_id = Column(BigInteger, ForeignKey("issue.id", ondelete="CASCADE", onupdate="CASCADE", name="fk_issue_usr_issue"))
class Issue_Comment(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_issue_comment'), primary_key=True)
changeset = Column(BigInteger, ForeignKey("issue_changeset.id",
ondelete="SET NULL",
onupdate="CASCADE",
name="fk_issue_comment_changeset"))
issue_id = Column(BigInteger, ForeignKey("issue.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_issue_comment_issue"),
nullable=False)
usr_id = Column(BigInteger, ForeignKey("usr.id", ondelete="CASCADE", onupdate="CASCADE", name="fk_issue_comment_usr"))
usr_code = column_property(select([literal_column('Usr.code')],
from_obj=text('Usr')).where(text('Usr.id=Issue_Comment.usr_id')))
comment = Column(TEXT)
class Wiki(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_wiki'), primary_key=True)
title = Column(String(200))
link = Column(String(200))
parent_id = Column(BigInteger, ForeignKey("wiki.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_wiki_parent"))
project_id = Column(BigInteger, ForeignKey("project.id",
ondelete="SET NULL",
onupdate="CASCADE",
name="fk_wiki_project"))
project_code = column_property(select([literal_column('Project.code')],
from_obj=text('Project')).
where(text('Project.id=Wiki.project_id')))
client_code = column_property(select([literal_column('Client.code')],
from_obj=text('Client')).
where(text('Client.id=(select client_id from project where project.id=wiki.project_id)')))
text = Column(TEXT)
__table_args__ = (UniqueConstraint('project_id', 'link', name='uq_wiki_1'), MkMixin.__table_args__)
class WorkLog(Base, MkMixin):
id = Column(BigInteger, Sequence('gn_worklog'), primary_key=True)
issue_id = Column(BigInteger, ForeignKey("issue.id",
ondelete="SET NULL",
onupdate="CASCADE",
name="fk_worklog_issue"))
project_id = Column(BigInteger, ForeignKey("project.id",
ondelete="SET NULL",
onupdate="CASCADE",
name="fk_worklog_project"))
project_code = column_property(select([literal_column('Project.code')],
from_obj=text('Project')).
where(text('Project.id=Worklog.project_id')))
client_code = column_property(select([literal_column('Client.code')],
from_obj=text('Client')).
where(text('Client.id=(select client_id from project where project.id=worklog.project_id)')))
usr_id = Column(BigInteger, ForeignKey("usr.id",
ondelete="CASCADE",
onupdate="CASCADE",
name="fk_worklog_usr"),
nullable=False)
usr_code = column_property(select([literal_column('Usr.code')],
from_obj=text('Usr')).where(text('Usr.id=Worklog.usr_id')))
description = Column(Text)
location = Column(String(200))
dt = Column(Date, nullable=False)
tm_st = Column(Time)
tm_fn = Column(Time)
bill_to_client = Column(Boolean, default=False)
is_billed = Column(Boolean, default=False)
@property
def dttm_st(self):
if self.dt and self.tm_st:
return datetime.datetime.combine(self.dt, self.tm_st)
else:
return None
@property
def dttm_fn(self):
if self.dt and self.tm_fn:
return datetime.datetime.combine(self.dt, self.tm_fn)
else:
return None
@property
def duration(self):
if self.dttm_st and self.dttm_fn:
_delta = self.dttm_fn - self.dttm_st
return _delta
else:
return None
def db_default_vals():
dbsession = scoped_session(sessionmaker(bind=engine)) # http://docs.sqlalchemy.org/en/rel_0_8/orm/session.html?highlight=scoped_session#sqlalchemy.orm.scoping.scoped_session
#Clients
u = dbsession.query(Client)
u.delete()
dbsession.commit()
for k in [[1, 'Makki'],
[2, 'Makliftsan'],
[3, 'Astaş'],
[4, 'Aktifsped']]:
rw = Client()
rw.id = k[0]
rw.code = k[1]
dbsession.add(rw)
dbsession.commit()
#Users
u = dbsession.query(Usr)
u.delete()
dbsession.commit()
for k in [[1, 'admin', 'admin', 'info@makki.com.tr', True, 1],
[2, 'guest', 'guest', 'guest@xyz.com', False, 1],
[3, 'zuhtu', '123', 'xx@yy.com', False, 2]
]:
rw = Usr()
rw.id = k[0]
rw.code = k[1]
rw.upass = k[2]
rw.email = k[3]
rw.is_admin = k[4]
rw.client_id = k[5]
dbsession.add(rw)
dbsession.commit()
#Projects
u = dbsession.query(Project)
u.delete()
dbsession.commit()
for k in [[1, 'PM', None, 1, False]]:
rw = Project()
rw.id = k[0]
rw.code = k[1]
rw.parent_id = k[2]
rw.client_id = k[3]
rw.is_public = k[4]
dbsession.add(rw)
dbsession.commit()
#User Roles
u = dbsession.query(UsrRole)
u.delete()
dbsession.commit()
for k in [['1', 'Project Admin'],
['2', 'Developer'],
['3', 'Key User'],
['4', 'User'],
['5', 'Client Rep.']]:
rw = UsrRole()
rw.id = k[0]
rw.code = k[1]
dbsession.add(rw)
dbsession.commit()
#Priorities
u = dbsession.query(DfIssuePriority)
u.delete()
dbsession.commit()
for k in [['1', 'Very Low'],
['2', 'Low'],
['3', 'Medium'],
['4', 'High'],
['5', 'Show Breaker']]:
rw = DfIssuePriority()
rw.id = k[0]
rw.code = k[1]
rw.nro = k[0]
dbsession.add(rw)
dbsession.commit()
#Categories
u = dbsession.query(DfIssueCategory)
u.delete()
for k in [['1', 'Bug'],
['2', 'Feature Request'],
['3', 'Enhancement']
]:
rw = DfIssueCategory()
rw.id = k[0]
rw.code = k[1]
dbsession.add(rw)
dbsession.commit()
dbsession.commit()
#Status
u = dbsession.query(DfIssueStatus)
u.delete()
for k in [['1', 'New', 1, 0],
['2', 'On Progress', 2, 0],
['3', 'Waiting For Deployment', 3, 0],
['4', 'In Customer Test', 4, 0],
['5', 'Closed', 5, 1],
['6', 'Rejected', 6, 0],
]:
rw = DfIssueStatus()
rw.id = k[0]
rw.code = k[1]
rw.nro = k[2]
rw.issue_closed = k[3]
dbsession.add(rw)
dbsession.commit()
dbsession.commit()
#Default site specific configurations
u = dbsession.query(Config)
u.delete()
dbsession.commit()
xid = 0
for k in [['home_page', 'Home Page', 1],
['site_title', 'Site Title', 'Makki Issue Management System'],
['default_language', 'Default Language', '1']]:
xid += 1
cconf = Config()
cconf.code = k[0]
cconf.defi = k[1]
cconf.cval = k[2]
cconf.id = xid
dbsession.add(cconf)
dbsession.commit()
#Default Languages
u = dbsession.query(DfLang)
u.delete()
dbsession.commit()
for k in [[1, 'en', 'English'], [2, 'tr', 'Türkçe'], [3, 'de', 'Deutsch']]:
clang = DfLang()
clang.id = k[0]
clang.code = k[1]
clang.defi = k[2]
dbsession.add(clang)
dbsession.commit()
#Default Projects Pages
if __name__ == "__main__":
import app
init_sa(app.baseApp.config)
Base.metadata.create_all()
#db_default_vals()
|
petrhosek/rubber
|
refs/heads/master
|
rubber/cmd_info.py
|
1
|
# This file is part of Rubber and thus covered by the GPL
# (c) Emmanuel Beffara, 2002--2006
"""
This is the command line interface for the information extractor.
"""
import sys
from getopt import *
import string
from os.path import *
from rubber import _, msg
from rubber.environment import Environment
from rubber.version import *
import rubber.cmdline
# parse_line() is used by prepare() below; it is assumed to live in rubber.util,
# as in the other rubber command-line front-ends.
from rubber.util import parse_line
class Main (rubber.cmdline.Main):
def __init__ (self):
rubber.cmdline.Main.__init__(self)
msg.write = self.stdout_write
def stdout_write (self, text, level=0):
sys.stdout.write(text + "\n")
def short_help (self):
sys.stderr.write(_("""\
usage: rubber-info [options] source
For more information, try `rubber-info --help'.
"""))
def help (self):
print _("""\
This is Rubber's information extractor version %s.
usage: rubber-info [options] source
available options:
all options accepted by rubber(1)
actions:
--boxes report overfull and underfull boxes
--check report errors or warnings (default action)
--deps show the target file's dependencies
--errors show all errors that occurred during compilation
--help display this help
--refs show the list of undefined references
--rules print the dependency rules including intermediate results
--version print the program's version and exit
--warnings show all LaTeX warnings\
""") % version
def parse_opts (self, cmdline):
try:
long = [ "module=", "readopts=", "short", "verbose", "boxes",
"check", "deps", "errors", "help", "refs", "rules", "version",
"warnings" ]
args = rubber.cmdline.Main.parse_opts(self, cmdline, long=long)
opts, args = getopt(args, "", long)
self.max_errors = -1
except GetoptError, e:
msg.error(e)
sys.exit(1)
for (opt,arg) in opts:
if opt in ("-h", "--help"):
self.help()
sys.exit(0)
elif opt in ("-m", "--module"):
self.modules.append(arg)
elif opt in ("-o" ,"--readopts"):
file = open(arg)
opts2 = file.read().split()
file.close()
args = self.parse_opts(opts2) + args
elif opt in ("-s", "--short"):
msg.short = 1
elif opt in ("-v", "--verbose"):
msg.level = msg.level + 1
elif opt == "--version":
msg(0, version)
sys.exit(0)
else:
if self.act:
sys.stderr.write(_("You must specify only one action.\n"))
sys.exit(1)
self.act = opt[2:]
return args
def main (self, cmdline):
self.env = Environment()
self.prologue = []
self.epilogue = []
self.act = None
args = self.parse_opts(cmdline)
if not self.act: self.act = "check"
msg.log(_(
"This is Rubber's information extractor version %s.") % version)
if len(args) != 1:
sys.stderr.write(_("You must specify one source file.\n"))
sys.exit(1)
src = args[0]
if self.env.set_source(src):
sys.stderr.write(_("I cannot find %s.\n") % src)
sys.exit(1)
if self.act == "deps":
self.prepare(src)
deps = {}
for dep in self.env.main.source_nodes():
for file in dep.leaves():
deps[file] = None
print string.join(deps.keys())
elif self.act == "rules":
self.prepare(src)
seen = {}
next = [self.env.final]
while len(next) > 0:
node = next[0]
next = next[1:]
if seen.has_key(node):
continue
seen[node] = None
if len(node.sources) == 0:
continue
print "\n%s:" % string.join(node.products),
print string.join(node.sources)
next.extend(node.source_nodes())
else:
self.prepare(src, parse=0)
return self.info_log(self.act)
return 0
def prepare (self, src, parse=1):
"""
Check for the source file and prepare it for processing.
"""
env = self.env
if env.make_source():
sys.exit(1)
if not parse:
return
for dir in self.path:
env.main.do_path(dir)
for cmd in self.prologue:
cmd = parse_line(cmd, {})
env.main.command(cmd[0], cmd[1:], {'file': 'command line'})
self.env.main.parse()
for cmd in self.epilogue:
cmd = parse_line(cmd, {})
env.main.command(cmd[0], cmd[1:], {'file': 'command line'})
def info_log (self, act):
"""
Check for a log file and extract information from it if it exists,
according to the argument's value.
"""
log = self.env.main.log
ret = log.read(self.env.main.target + ".log")
if ret == 1:
msg.error(_("The log file is invalid."))
return 1
elif ret == 2:
msg.error(_("There is no log file"))
return 1
if act == "boxes":
if not msg.display_all(log.get_boxes()):
msg.info(_("There is no bad box."))
elif act == "check":
if msg.display_all(log.get_errors()): return 0
msg.info(_("There was no error."))
if msg.display_all(log.get_references()): return 0
msg.info(_("There is no undefined reference."))
if not msg.display_all(log.get_warnings()):
msg.info(_("There is no warning."))
if not msg.display_all(log.get_boxes()):
msg.info(_("There is no bad box."))
elif act == "errors":
if not msg.display_all(log.get_errors()):
msg.info(_("There was no error."))
elif act == "refs":
if not msg.display_all(log.get_references()):
msg.info(_("There is no undefined reference."))
elif act == "warnings":
if not msg.display_all(log.get_warnings()):
msg.info(_("There is no warning."))
else:
sys.stderr.write(_("\
I don't know the action `%s'. This should not happen.\n") % act)
return 1
return 0
def __call__ (self, cmdline):
if cmdline == []:
self.short_help()
return 1
try:
self.main(cmdline)
except KeyboardInterrupt:
msg(0, _("*** interrupted"))
return 2
def script_entry_point():
"""
Entry point for setuptools generated console scripts
"""
import sys
return Main()(sys.argv[1:])
|