repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
edgarRd/incubator-airflow | refs/heads/master | tests/dags/test_mark_success.py | 15 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
# Fixed execution date so test runs are deterministic.
DEFAULT_DATE = datetime(2016, 1, 1)
# Default arguments applied to every task in this DAG.
args = {
    'owner': 'airflow',
    'start_date': DEFAULT_DATE,
}
# Single-task DAG; the task sleeps for 10 minutes, presumably so the test
# harness has time to mark it as "success" while it is still running --
# confirm against the tests that load this DAG.
dag = DAG(dag_id='test_mark_success', default_args=args)
task = BashOperator(
    task_id='task1',
    bash_command='sleep 600',
    dag=dag)
|
archf/ansible | refs/heads/devel | test/integration/targets/module_utils/module_utils/bar0/__init__.py | 12133432 | |
caotianwei/django | refs/heads/master | tests/utils_tests/__init__.py | 12133432 | |
uw-it-aca/canvas-event-consumer | refs/heads/master | events/migrations/__init__.py | 12133432 | |
molecular-toolkit/chemistry-docker-images | refs/heads/master | makefiles/buildfiles/pyscf/getresuts.py | 12133432 | |
peterbraden/tensorflow | refs/heads/master | tensorflow/contrib/learn/python/learn/preprocessing/text.py | 7 | """Implements a number of text preprocessing utilities."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import six
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
from tensorflow.python.platform import gfile
from .categorical_vocabulary import CategoricalVocabulary
# Splits on word characters, keeping ALL-CAPS runs and CamelCase humps as
# separate tokens.
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+",
                          re.UNICODE)


def tokenizer(iterator):
    """Tokenizer generator.

    Args:
        iterator: Input iterator with strings.

    Yields:
        list of tokens for each value in the input.
    """
    return (TOKENIZER_RE.findall(text) for text in iterator)
class ByteProcessor(object):
    """Maps documents into sequences of byte ids.

    Each document is encoded as UTF-8 and truncated or zero-padded to
    exactly `max_document_length` byte ids.
    """

    def __init__(self, max_document_length):
        # Fixed output width for every transformed document.
        self.max_document_length = max_document_length

    def fit(self, X):
        """Does nothing. No fitting required."""
        pass

    def fit_transform(self, X):
        """Calls transform. Present to match the estimator API."""
        return self.transform(X)

    # pylint: disable=no-self-use
    def reverse(self, X):
        """Reverses output of transform back to text.

        Args:
            X: iterator or matrix of integers.
               Document representation in bytes.

        Yields:
            Each document decoded as a utf-8 string, or '' when the bytes
            are not valid utf-8.
        """
        for data in X:
            # Strip the trailing zero padding, then reinterpret the ids as
            # raw bytes.  uint8 keeps byte values exact, and tobytes()
            # replaces the deprecated tostring() alias (removed in
            # NumPy >= 2.0), which crashed on modern NumPy.
            document = np.trim_zeros(data.astype(np.uint8), trim='b').tobytes()
            try:
                yield document.decode('utf-8')
            except UnicodeDecodeError:
                yield ''

    def transform(self, X):
        """Transforms input documents into sequences of ids.

        Args:
            X: iterator or list of input documents.
               Documents can be bytes or unicode strings, which will be
               encoded as utf-8 to map to bytes. Note, in Python2 str and
               bytes is the same type.

        Yields:
            One uint8 array of byte ids per document, zero-padded to
            max_document_length.
        """
        try:
            # Python 2: buffer() gives zero-copy slicing over str.
            buffer_or_memoryview = buffer  # pylint: disable=undefined-variable
        except NameError:
            # Python 3: memoryview plays the same role.  This replaces the
            # previous dependency on six.PY3.
            buffer_or_memoryview = memoryview
        for document in X:
            if not isinstance(document, bytes):
                # Text input (unicode on py2, str on py3) becomes utf-8
                # bytes; equivalent to the old six.text_type check.
                document = document.encode('utf-8')
            document_mv = buffer_or_memoryview(document)
            buff = np.frombuffer(document_mv[:self.max_document_length],
                                 dtype=np.uint8)
            yield np.pad(buff, (0, self.max_document_length - len(buff)),
                         'constant')
class VocabularyProcessor(object):
    """Maps documents to sequences of word ids.

    Parameters:
        max_document_length: Maximum length of documents.
            if documents are longer, they will be trimmed, if shorter - padded.
        min_frequency: Minimum frequency of words in the vocabulary.
        vocabulary: CategoricalVocabulary object.

    Attributes:
        vocabulary_: CategoricalVocabulary object.
    """

    def __init__(self,
                 max_document_length,
                 min_frequency=0,
                 vocabulary=None,
                 tokenizer_fn=None):
        self.max_document_length = max_document_length
        self.min_frequency = min_frequency
        # Reuse a caller-supplied vocabulary (e.g. to share ids across
        # processors); otherwise start from an empty one.
        if vocabulary:
            self.vocabulary_ = vocabulary
        else:
            self.vocabulary_ = CategoricalVocabulary()
        # Default tokenizer is the module-level regex tokenizer.
        if tokenizer_fn:
            self._tokenizer = tokenizer_fn
        else:
            self._tokenizer = tokenizer

    def fit(self, raw_documents, unused_y=None):
        """Learn a vocabulary dictionary of all tokens in the raw documents.

        Args:
            raw_documents: An iterable which yields either str or unicode.
            unused_y: to match fit format signature of estimators.

        Returns:
            self
        """
        for tokens in self._tokenizer(raw_documents):
            for token in tokens:
                self.vocabulary_.add(token)
        # Optionally drop rare tokens, then freeze the vocabulary so
        # transform() no longer grows it.
        if self.min_frequency > 0:
            self.vocabulary_.trim(self.min_frequency)
        self.vocabulary_.freeze()
        return self

    def fit_transform(self, raw_documents, unused_y=None):
        """Learn the vocabulary dictionary and return word-id indexes.

        Args:
            raw_documents: An iterable which yields either str or unicode.
            unused_y: to match fit_transform signature of estimators.

        Returns:
            X: iterable, [n_samples, max_document_length]
                Word-id matrix.
        """
        self.fit(raw_documents)
        return self.transform(raw_documents)

    def transform(self, raw_documents):
        """Transform documents to word-id matrix.

        Convert words to ids with vocabulary fitted with fit or the one
        provided in the constructor.

        Args:
            raw_documents: An iterable which yields either str or unicode.

        Yields:
            X: iterable, [n_samples, max_document_length]
                Word-id matrix.
        """
        for tokens in self._tokenizer(raw_documents):
            # Fixed-width row: zero-padded; tokens beyond
            # max_document_length are dropped.
            word_ids = np.zeros(self.max_document_length, np.int64)
            for idx, token in enumerate(tokens):
                if idx >= self.max_document_length:
                    break
                word_ids[idx] = self.vocabulary_.get(token)
            yield word_ids

    def reverse(self, documents):
        """Reverses output of vocabulary mapping to words.

        Args:
            documents: iterable, list of class ids.

        Yields:
            Each document mapped back into a space-joined string of words.
        """
        for item in documents:
            output = []
            for class_id in item:
                output.append(self.vocabulary_.reverse(class_id))
            yield ' '.join(output)

    def save(self, filename):
        """Saves vocabulary processor into given file.

        Args:
            filename: Path to output file.
        """
        # Pickles the whole processor (vocabulary included), so restore()
        # round-trips it; attribute names are therefore part of the format.
        with gfile.Open(filename, 'wb') as f:
            f.write(pickle.dumps(self))

    @classmethod
    def restore(cls, filename):
        """Restores vocabulary processor from given file.

        Args:
            filename: Path to file to load from.

        Returns:
            VocabularyProcessor object.
        """
        with gfile.Open(filename, 'rb') as f:
            return pickle.loads(f.read())
|
jarn0ld/gnuradio | refs/heads/master | gr-wxgui/python/wxgui/histo_window.py | 47 | #
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms
##################################################
# Constants
##################################################
DEFAULT_WIN_SIZE = (600, 300)
##################################################
# histo window control panel
##################################################
class control_panel(wx.Panel):
    """
    A control panel with wx widgets to control the plotter and histo sink.
    """

    def __init__(self, parent):
        """
        Create a new control panel.

        Args:
            parent: the wx parent window (also the pubsub holding the keys)
        """
        self.parent = parent
        wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
        parent[SHOW_CONTROL_PANEL_KEY] = True
        parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
        # Removed dead code: an unused wx.BoxSizer and an unused SIZE
        # constant were created here and immediately discarded.
        control_box = forms.static_box_sizer(
            parent=self, label='Options',
            bold=True, orient=wx.VERTICAL,
        )
        #num bins
        control_box.AddStretchSpacer()
        forms.text_box(
            sizer=control_box, parent=self, label='Num Bins',
            converter=forms.int_converter(),
            ps=parent, key=NUM_BINS_KEY,
        )
        #frame size
        control_box.AddStretchSpacer()
        forms.text_box(
            sizer=control_box, parent=self, label='Frame Size',
            converter=forms.int_converter(),
            ps=parent, key=FRAME_SIZE_KEY,
        )
        #run/stop
        control_box.AddStretchSpacer()
        forms.toggle_button(
            sizer=control_box, parent=self,
            true_label='Stop', false_label='Run',
            ps=parent, key=RUNNING_KEY,
        )
        #set sizer
        self.SetSizerAndFit(control_box)
##################################################
# histo window with plotter and control panel
##################################################
class histo_window(wx.Panel, pubsub.pubsub):
    # Panel that owns a bar plotter plus the control_panel above, and keeps
    # the plot in sync with the sink via proxied pubsub keys.
    def __init__(
        self,
        parent,
        controller,
        size,
        title,
        maximum_key,
        minimum_key,
        num_bins_key,
        frame_size_key,
        msg_key,
    ):
        pubsub.pubsub.__init__(self)
        #setup
        self.samples = list()
        # Proxy the controller's keys under local names so the rest of the
        # class can use the shared *_KEY constants.
        self.proxy(MAXIMUM_KEY, controller, maximum_key)
        self.proxy(MINIMUM_KEY, controller, minimum_key)
        self.proxy(NUM_BINS_KEY, controller, num_bins_key)
        self.proxy(FRAME_SIZE_KEY, controller, frame_size_key)
        self.proxy(MSG_KEY, controller, msg_key)
        #initialize values
        self[RUNNING_KEY] = True
        self[X_DIVS_KEY] = 8
        self[Y_DIVS_KEY] = 4
        #init panel and plot
        wx.Panel.__init__(self, parent, style=wx.SIMPLE_BORDER)
        self.plotter = plotter.bar_plotter(self)
        self.plotter.SetSize(wx.Size(*size))
        self.plotter.SetSizeHints(*size)
        self.plotter.set_title(title)
        self.plotter.enable_point_label(True)
        self.plotter.enable_grid_lines(False)
        #setup the box with plot and controls
        self.control_panel = control_panel(self)
        main_box = wx.BoxSizer(wx.HORIZONTAL)
        main_box.Add(self.plotter, 1, wx.EXPAND)
        main_box.Add(self.control_panel, 0, wx.EXPAND)
        self.SetSizerAndFit(main_box)
        #register events
        self.subscribe(MSG_KEY, self.handle_msg)
        self.subscribe(X_DIVS_KEY, self.update_grid)
        self.subscribe(Y_DIVS_KEY, self.update_grid)

    def handle_msg(self, msg):
        """
        Handle the message from the fft sink message queue.

        Args:
            msg: the frame as a character array
        """
        if not self[RUNNING_KEY]: return
        #convert to floating point numbers
        # NOTE(review): numpy.fromstring on text is deprecated in modern
        # NumPy (frombuffer expects bytes) -- confirm the NumPy version
        # this ships with before upgrading.
        self.samples = 100*numpy.fromstring(msg, numpy.float32)[:self[NUM_BINS_KEY]] #only take first frame
        self.plotter.set_bars(
            bars=self.samples,
            bar_width=0.6,
            color_spec=(0, 0, 1),
        )
        self.update_grid()

    def update_grid(self):
        # Rescale both axes; no-op until the first frame arrives.
        if not len(self.samples): return
        #calculate the maximum y value
        y_off = math.ceil(numpy.max(self.samples))
        y_off = min(max(y_off, 1.0), 100.0) #between 1% and 100%
        #update the x grid
        self.plotter.set_x_grid(
            self[MINIMUM_KEY], self[MAXIMUM_KEY],
            common.get_clean_num((self[MAXIMUM_KEY] - self[MINIMUM_KEY])/self[X_DIVS_KEY]),
        )
        self.plotter.set_x_label('Counts')
        #update the y grid
        self.plotter.set_y_grid(0, y_off, y_off/self[Y_DIVS_KEY])
        self.plotter.set_y_label('Frequency', '%')
        self.plotter.update()
|
RNAer/qiita | refs/heads/master | qiita_pet/handlers/stats.py | 1 | from __future__ import division
from random import choice
from moi import r_client
from tornado.gen import coroutine, Task
from qiita_db.util import get_count
from qiita_db.study import Study
from qiita_db.util import get_lat_longs
from .base_handlers import BaseHandler
class StatsHandler(BaseHandler):
    """Renders the stats page: study/sample/user counts plus a sample map."""

    def _get_stats(self, callback):
        """Collect the stats, caching sample coordinates in redis for 24h.

        Args:
            callback: tornado-style callback, invoked with
                [num_studies, num_samples, num_users, lat_longs].
        """
        # check if the keys exist in redis
        lats = r_client.lrange('stats:sample_lats', 0, -1)
        longs = r_client.lrange('stats:sample_longs', 0, -1)
        if not (lats or longs):
            # if we don't have them, then fetch from disk and add to the
            # redis server with a 24-hour expiration
            lat_longs = get_lat_longs()
            with r_client.pipeline() as pipe:
                for latitude, longitude in lat_longs:
                    # storing as a simple data structure, hopefully this
                    # doesn't burn us later
                    pipe.rpush('stats:sample_lats', latitude)
                    pipe.rpush('stats:sample_longs', longitude)
                # Queue the EXPIREs on the pipeline, after the pushes.
                # Bug fix: r_client.expire() was previously called directly,
                # which executed *before* the buffered RPUSHes ran -- so the
                # keys were created without a TTL and the cache never
                # refreshed.
                pipe.expire('stats:sample_lats', 86400)
                pipe.expire('stats:sample_longs', 86400)
                pipe.execute()
        else:
            # If we do have them, put the redis results into the same
            # structure that would come back from the database
            longs = [float(x) for x in longs]
            lats = [float(x) for x in lats]
            lat_longs = list(zip(lats, longs))
        # Get the number of studies
        num_studies = get_count('qiita.study')
        # Number of samples comes from lat_longs, which is populated on both
        # the cache-hit and cache-miss paths.  Bug fix: len(lats) reported 0
        # on a cache miss even though the samples had just been fetched.
        num_samples = len(lat_longs)
        # Get the number of users
        num_users = get_count('qiita.qiita_user')
        callback([num_studies, num_samples, num_users, lat_longs])

    @coroutine
    def get(self):
        num_studies, num_samples, num_users, lat_longs = \
            yield Task(self._get_stats)
        # Pull a random public study from the database
        public_studies = Study.get_by_status('public')
        study = Study(choice(list(public_studies))) if public_studies else None
        if study is None:
            random_study_info = None
            random_study_title = None
            random_study_id = None
        else:
            random_study_info = study.info
            random_study_title = study.title
            random_study_id = study.id
        self.render('stats.html',
                    num_studies=num_studies, num_samples=num_samples,
                    num_users=num_users, lat_longs=lat_longs,
                    random_study_info=random_study_info,
                    random_study_title=random_study_title,
                    random_study_id=random_study_id)
|
nuncjo/odoo | refs/heads/8.0 | addons/sale_stock/__openerp__.py | 214 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the sale <-> stock glue module.
{
    'name': 'Sales and Warehouse Management',
    'version': '1.0',
    # 'Hidden': not offered for manual install; see auto_install below.
    'category': 'Hidden',
    'summary': 'Quotation, Sale Orders, Delivery & Invoicing Control',
    'description': """
Manage sales quotations and orders
==================================
This module makes the link between the sales and warehouses management applications.
Preferences
-----------
* Shipping: Choice of delivery at once or partial delivery
* Invoicing: choose how invoices will be paid
* Incoterms: International Commercial terms
You can choose flexible invoicing methods:
* *On Demand*: Invoices are created manually from Sales Orders when needed
* *On Delivery Order*: Invoices are generated from picking (delivery)
* *Before Delivery*: A Draft invoice is created and must be paid before delivery
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/warehouse',
    'depends': ['sale', 'stock_account'],
    # Views, security rules and workflow data loaded at install time.
    'data': [
        'security/sale_stock_security.xml',
        'security/ir.model.access.csv',
        'company_view.xml',
        'sale_stock_view.xml',
        'sale_stock_workflow.xml',
        'stock_view.xml',
        'res_config_view.xml',
        'report/sale_report_view.xml',
    ],
    'demo': ['sale_stock_demo.xml'],
    'test': [
        'test/sale_stock_users.yml',
        'test/cancel_order_sale_stock.yml',
        'test/picking_order_policy.yml',
        'test/prepaid_order_policy.yml',
        'test/sale_order_onchange.yml',
        'test/sale_order_canceled_line.yml',
    ],
    'installable': True,
    # Installed automatically as soon as both dependencies are present.
    'auto_install': True,
}
|
jrk/llvm-py | refs/heads/llvm-2.9 | test/typehandle.py | 5 | #!/usr/bin/env python
from llvm.core import *
# create a type handle object
# create a type handle object (placeholder for a not-yet-defined type)
th = TypeHandle.new(Type.opaque())
# create the struct with an opaque* instead of self*
ts = Type.struct([ Type.int(), Type.pointer(th.type) ])
# unify the types: refine() replaces the opaque placeholder with the struct,
# yielding a self-referential (linked-list node style) type
th.type.refine(ts)
# create a module, and add a "typedef"
m = Module.new('mod1')
m.add_type_name("struct.node", th.type)
# show what we created (Python 2 syntax, matching llvm-py's era)
print m
|
sonya/eea | refs/heads/master | py/parsers/shp.py | 1 | #
# Copyright 2012 Sonya Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import parsers.base as base, parsers.dbf as dbf
from parsers.base import Parser
from parsers.dbf import DbfParser
import json
class ShpParser(Parser):
    # Parses an ESRI shapefile (.shp) together with its .dbf attribute
    # table into GeoJSON-style feature dicts.
    def __init__(self, filename):
        # Accept either a basename (extensions are appended) or a full
        # .shp path (extension swapped for the companion .dbf).
        if not filename.endswith("shp"):
            dbffilename = filename + ".dbf"
            shpfilename = filename + ".shp"
        else:
            dbffilename = filename.split(".")[0] + ".dbf"
            shpfilename = filename
        # The main .shp file is little-endian except for a few header words.
        Parser.__init__(self, shpfilename, base.LITTLE_ENDIAN)
        self.bbox = []             # file-level bounding box (from header)
        self.features = []         # accumulated GeoJSON feature dicts
        self.ignore_fields = []    # dbf attribute names to drop from output
        self.dbf_parser = DbfParser(dbffilename)
        self.shape_type = None       # numeric shape-type code from header
        self.last_read_type = None   # name of the last geometry read
        self.bbox_cache = None       # per-record bbox (set by Polygon)
        # Shapefile shape-type code -> reader method for that geometry.
        self.shape_types = {
            0: self.NullShape,
            1: self.Point,
            3: self.PolyLine,
            5: self.Polygon,
            8: self.MultiPoint,
            11: self.PointZ,
            13: self.PolyLineZ,
            15: self.PolygonZ,
            18: self.MultiPointZ,
            21: self.PointM,
            23: self.PolyLineM,
            25: self.PolygonM,
            28: self.MultiPointM,
            31: self.MultiPatch,
        }
### read methods from spec ###
def read_point(self):
    """Read one coordinate pair from the stream and return it as [x, y]."""
    point = [self.read_double(), self.read_double()]
    self.last_read_type = "Point"
    return point

def NullShape(self):
    """Null shape: carries no geometry, only records what was read."""
    self.last_read_type = "Null"

def Point(self):
    """Read a Point record as a GeoJSON Point geometry dict."""
    coords = self.read_point()
    return {"type": "Point", "coordinates": coords}
def PolyLine(self):
    """Read a PolyLine record: GeoJSON LineString for a single part,
    MultiLineString when the record has several parts."""
    feature = {"coordinates": []}
    bbox = self.read_bbox()  # read to advance the stream; value unused
    numparts = self.read_word(4)
    numpoints = self.read_word(4)
    if numparts == 1:
        feature["type"] = "LineString"
        while numpoints > 0:
            feature["coordinates"].append(self.read_point())
            numpoints -= 1
    else:
        feature["type"] = "MultiLineString"
        # parts[i] is the index of the first point of line part i.
        parts = []
        while numparts > 0:
            parts.append(self.read_word(4))
            numparts -= 1
        arrindex = 0
        points = []
        while numpoints > 0:
            if arrindex in parts:
                # Start a new part; keep a live reference so subsequent
                # appends land inside feature["coordinates"].
                points = []
                feature["coordinates"].append(points) # pointer magic
            points.append(self.read_point())
            numpoints -= 1
            arrindex += 1
    return feature
# from spec:
# "The neighborhood to the right of an observer walking along
# the ring in vertex order is the neighborhood inside the polygon."
def Polygon(self):
    """Read a Polygon record as a GeoJSON Polygon geometry (one point
    list per ring)."""
    feature = {
        "type": "Polygon",
        "coordinates": [],
    }
    # Cache the record bbox; read_record() attaches it to the feature.
    self.bbox_cache = self.read_bbox()
    numrings = self.read_word(4)
    numpoints = self.read_word(4)
    # rings[i] is the index of the first point of ring i.
    rings = []
    while numrings > 0:
        rings.append(self.read_word(4))
        numrings -= 1
    arrindex = 0
    points = []
    while numpoints > 0:
        if arrindex in rings:
            # Start a new ring; the live reference means appends below
            # accumulate inside feature["coordinates"].
            points = []
            feature["coordinates"].append(points)
        points.append(self.read_point())
        numpoints -= 1
        arrindex += 1
    return feature
def MultiPoint(self):
    """Read a MultiPoint record as a GeoJSON MultiPoint geometry."""
    self.read_bbox()  # advance past the per-record bounding box
    count = self.read_word(4)
    coords = [self.read_point() for _ in range(count)]
    return {
        "type": "MultiPoint",
        "coordinates": coords,
    }
# Shape variants carrying Z (elevation) or M (measure) values are not
# implemented; these readers are no-op placeholders.
def PointZ(self): return
def PolyLineZ(self): return
def PolygonZ(self): return
def MultiPointZ(self): return
def PointM(self): return
def PolyLineM(self): return
def PolygonM(self): return
def MultiPointM(self): return
def MultiPatch(self): return
def read_bbox(self):
    """Read four doubles (Xmin, Ymin, Xmax, Ymax) as a bounding box."""
    return [self.read_double() for _ in range(4)]
############### main (.shp) file header description ############
# Byte  Field         Value        Type     Order
# 0     File Code     9994         Integer  Big
# 4     Unused        0            Integer  Big
# 8     Unused        0            Integer  Big
# 12    Unused        0            Integer  Big
# 16    Unused        0            Integer  Big
# 20    Unused        0            Integer  Big
# 24    File Length   File Length  Integer  Big
# 28    Version       1000         Integer  Little
# 32    Shape Type    Shape Type   Integer  Little
# 36    Bounding Box  Xmin         Double   Little
# 44    Bounding Box  Ymin         Double   Little
# 52    Bounding Box  Xmax         Double   Little
# 60    Bounding Box  Ymax         Double   Little
# 68*   Bounding Box  Zmin         Double   Little
# 76*   Bounding Box  Zmax         Double   Little
# 84*   Bounding Box  Mmin         Double   Little
# 92*   Bounding Box  Mmax         Double   Little
#################################################################
def read_header(self):
    """Read and validate the 100-byte .shp header, then the .dbf header."""
    if self.position > 0:
        raise Exception("header already read")
    # Magic file code 9994 (big-endian) identifies a shapefile.
    if self.read_word(4, base.BIG_ENDIAN) != 9994:
        raise Exception("incorrect header values")
    self.skipto(24)
    # File length is stored in words; WORDSIZE (from the Parser base,
    # presumably 2 bytes per the spec -- confirm) converts it to bytes.
    self.file_length = self.read_word(4, base.BIG_ENDIAN) * self.WORDSIZE
    self.skipto(32)
    self.shape_type = self.read_word(4)
    self.bbox = self.read_bbox()
    # Records start at byte 100; skip the optional Z/M bbox fields.
    self.skipto(100)
    self.dbf_parser.read_header()
def add_ignore_fields(self, fieldlist):
    # Attribute names in fieldlist are stripped from each record's
    # "properties" by read_record().  Replaces any previous list.
    self.ignore_fields = fieldlist
############### main (.shp) file record headers #################
# Byte  Field           Value           Type     Order
# 0     Record Number   Record Number   Integer  Big
# 4     Content Length  Content Length  Integer  Big
#################################################################
def read_record(self):
    """Read one record (geometry + matching dbf attributes) and return
    it as a GeoJSON Feature dict."""
    rec_no = self.read_word(4, base.BIG_ENDIAN)
    rec_length = self.read_word(4, base.BIG_ENDIAN)
    # The shape-type word selects the geometry reader for this record.
    read_shape = self.shape_types[self.read_word(4)]
    # Attributes come from the parallel .dbf record stream.
    properties = self.dbf_parser.read_record()
    for field in self.ignore_fields:
        del properties[field]
    feature = {
        "type": "Feature",
        "geometry": read_shape(),
        "properties": properties
    }
    # Polygon readers stash their per-record bbox here; attach and clear.
    if self.bbox_cache is not None:
        feature["bbox"] = self.bbox_cache
        self.bbox_cache = None
    return feature
def read_all_records(self):
    # Consume records until the byte count from the header is reached,
    # accumulating features on self.features.
    while self.position < self.file_length:
        self.features.append(self.read_record())
def write_json(self, filename):
    """Write all parsed features to `filename` as a GeoJSON
    FeatureCollection.

    Args:
        filename: path of the output file.
    """
    # `with` guarantees the handle is closed even if serialization fails
    # (the original left the file open on a json.dumps error).
    with open(filename, 'w') as outfile:
        outfile.write(json.dumps({
            "type": "FeatureCollection",
            "bbox": self.bbox,
            "features": self.features,
        }))
    print("new json file written to", filename)
def join_data(self, join_field, iterable_data):
    """Merge external attribute rows into the parsed features.

    Args:
        join_field: name of the field used to match rows to features.
        iterable_data: iterable of dict rows, each containing join_field.
    """
    # Bug fix: this method was declared without `self` yet used
    # `self.features`, so any instance call misbound its arguments and
    # then failed on the missing name.  Added the `self` parameter.
    data = {}
    for row in iterable_data:
        row_id = row[join_field]
        del row[join_field]
        data[row_id] = row
    for feature in self.features:
        # NOTE(review): features keep their attributes under "properties";
        # `feature[join_field]` looks like it should be
        # `feature["properties"][join_field]` -- confirm with a caller
        # before relying on this lookup.
        attribs = data[feature[join_field]]
        for (attrib_name, attrib_value) in attribs.items():
            feature["properties"][attrib_name] = attrib_value
|
jondo/shogun | refs/heads/develop | examples/undocumented/python_modular/regression_linear_ridge_modular.py | 16 | #!/usr/bin/env python
###########################################################################
# linear ridge regression
###########################################################################
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
# Example fixtures: real-valued train/test matrices plus two-class labels.
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
# Two parameterizations: small vs. large ridge (tau) regularization.
parameter_list = [[traindat,testdat,label_traindat,1e-6],[traindat,testdat,label_traindat,100]]
def regression_linear_ridge_modular (fm_train=traindat,fm_test=testdat,label_train=label_traindat,tau=1e-6):
    """Train linear ridge regression with regularizer `tau` on
    (fm_train, label_train) and return (predictions on fm_test, model)."""
    from modshogun import RegressionLabels, RealFeatures
    from modshogun import LinearRidgeRegression

    # Bug fix: the model previously trained on the module-level `traindat`
    # instead of the `fm_train` argument, so the parameter was ignored.
    rr=LinearRidgeRegression(tau, RealFeatures(fm_train), RegressionLabels(label_train))
    rr.train()
    out = rr.apply(RealFeatures(fm_test)).get_labels()
    return out,rr

if __name__=='__main__':
    print('LinearRidgeRegression')
    regression_linear_ridge_modular(*parameter_list[0])
|
citrix-openstack-build/neutron | refs/heads/master | neutron/plugins/ml2/drivers/mech_arista/mechanism_arista.py | 3 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import jsonrpclib
from oslo.config import cfg
from neutron.openstack.common import log as logging
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import driver_api
from neutron.plugins.ml2.drivers.mech_arista import config # noqa
from neutron.plugins.ml2.drivers.mech_arista import db
from neutron.plugins.ml2.drivers.mech_arista import exceptions as arista_exc
LOG = logging.getLogger(__name__)
EOS_UNREACHABLE_MSG = _('Unable to reach EOS')
class AristaRPCWrapper(object):
    """Wraps Arista JSON RPC.

    All communications between Neutron and EOS are over JSON RPC.
    EOS - operating system used on Arista hardware
    Command API - JSON RPC API provided by Arista EOS
    """
    # Config options that must be set for the eAPI URL to be built.
    required_options = ['eapi_username',
                        'eapi_password',
                        'eapi_host']

    def __init__(self):
        self._server = jsonrpclib.Server(self._eapi_host_url())
        self.keystone_conf = cfg.CONF.keystone_authtoken
        self.region = cfg.CONF.ml2_arista.region_name

    def _keystone_url(self):
        # Build the Keystone v2.0 endpoint that gets registered with EOS.
        keystone_auth_url = ('%s://%s:%s/v2.0/' %
                             (self.keystone_conf.auth_protocol,
                              self.keystone_conf.auth_host,
                              self.keystone_conf.auth_port))
        return keystone_auth_url

    def get_tenants(self):
        """Returns dict of all tenants known by EOS.

        :returns: dictionary containing the networks per tenant
                  and VMs allocated per tenant
        """
        cmds = ['show openstack config region %s' % self.region]
        command_output = self._run_openstack_cmds(cmds)
        tenants = command_output[0]['tenants']
        return tenants

    def plug_host_into_network(self, vm_id, host, port_id,
                               network_id, tenant_id, port_name):
        """Creates VLAN between TOR and compute host.

        :param vm_id: globally unique identifier for VM instance
        :param host: ID of the host where the VM is placed
        :param port_id: globally unique port ID that connects VM to network
        :param network_id: globally unique neutron network identifier
        :param tenant_id: globally unique neutron tenant identifier
        :param port_name: Name of the port - for display purposes
        """
        cmds = ['tenant %s' % tenant_id,
                'vm id %s hostid %s' % (vm_id, host)]
        if port_name:
            cmds.append('port id %s name %s network-id %s' %
                        (port_id, port_name, network_id))
        else:
            cmds.append('port id %s network-id %s' %
                        (port_id, network_id))
        cmds.append('exit')
        cmds.append('exit')
        self._run_openstack_cmds(cmds)

    def unplug_host_from_network(self, vm_id, host, port_id,
                                 network_id, tenant_id):
        """Removes previously configured VLAN between TOR and a host.

        :param vm_id: globally unique identifier for VM instance
        :param host: ID of the host where the VM is placed
        :param port_id: globally unique port ID that connects VM to network
        :param network_id: globally unique neutron network identifier
        :param tenant_id: globally unique neutron tenant identifier
        """
        # NOTE(review): this uses 'vm id %s host %s' whereas
        # plug_host_into_network uses 'hostid' -- confirm against the EOS
        # CLI grammar that both spellings are intentional.
        cmds = ['tenant %s' % tenant_id,
                'vm id %s host %s' % (vm_id, host),
                'no port id %s network-id %s' % (port_id, network_id),
                'exit',
                'exit']
        self._run_openstack_cmds(cmds)

    def create_network(self, tenant_id, network_id, network_name, seg_id):
        """Creates a network on Arista Hardware

        :param tenant_id: globally unique neutron tenant identifier
        :param network_id: globally unique neutron network identifier
        :param network_name: Network name - for display purposes
        :param seg_id: Segment ID of the network
        """
        cmds = ['tenant %s' % tenant_id]
        if network_name:
            cmds.append('network id %s name %s' % (network_id, network_name))
        else:
            cmds.append('network id %s' % network_id)
        cmds.append('segment 1 type vlan id %d' % seg_id)
        cmds.append('exit')
        cmds.append('exit')
        cmds.append('exit')
        self._run_openstack_cmds(cmds)

    def create_network_segments(self, tenant_id, network_id,
                                network_name, segments):
        """Creates a network on Arista Hardware

        Note: This method is not used at the moment. create_network()
        is used instead. This will be used once the support for
        multiple segments is added in Neutron.

        :param tenant_id: globally unique neutron tenant identifier
        :param network_id: globally unique neutron network identifier
        :param network_name: Network name - for display purposes
        :param segments: List of segments in a given network
        """
        if segments:
            cmds = ['tenant %s' % tenant_id,
                    'network id %s name %s' % (network_id, network_name)]
            seg_num = 1
            for seg in segments:
                cmds.append('segment %d type %s id %d' % (seg_num,
                            seg['network_type'], seg['segmentation_id']))
                seg_num += 1
            cmds.append('exit')  # exit for segment mode
            cmds.append('exit')  # exit for network mode
            cmds.append('exit')  # exit for tenant mode
            self._run_openstack_cmds(cmds)

    def delete_network(self, tenant_id, network_id):
        """Deletes a specified network for a given tenant

        :param tenant_id: globally unique neutron tenant identifier
        :param network_id: globally unique neutron network identifier
        """
        cmds = ['tenant %s' % tenant_id,
                'no network id %s' % network_id,
                'exit',
                'exit']
        self._run_openstack_cmds(cmds)

    def delete_vm(self, tenant_id, vm_id):
        """Deletes a VM from EOS for a given tenant

        :param tenant_id : globally unique neutron tenant identifier
        :param vm_id : id of a VM that needs to be deleted.
        """
        cmds = ['tenant %s' % tenant_id,
                'no vm id %s' % vm_id,
                'exit',
                'exit']
        self._run_openstack_cmds(cmds)

    def delete_tenant(self, tenant_id):
        """Deletes a given tenant and all its networks and VMs from EOS.

        :param tenant_id: globally unique neutron tenant identifier
        """
        cmds = ['no tenant %s' % tenant_id, 'exit']
        self._run_openstack_cmds(cmds)

    def delete_this_region(self):
        """Deletes this entire region from EOS.

        This is equivalent of unregistering this Neutron stack from EOS.
        All networks for all tenants are removed.
        """
        # No extra commands needed: deleteRegion makes _run_openstack_cmds
        # emit 'no region <region>' itself.
        cmds = []
        self._run_openstack_cmds(cmds, deleteRegion=True)

    def _register_with_eos(self):
        """This is the registration request with EOS.

        This is the initial handshake between Neutron and EOS.
        Critical end-point information is registered with EOS.
        """
        cmds = ['auth url %s user %s password %s' %
                (self._keystone_url(),
                 self.keystone_conf.admin_user,
                 self.keystone_conf.admin_password)]
        self._run_openstack_cmds(cmds)

    def _run_openstack_cmds(self, commands, deleteRegion=None):
        """Execute/sends a CAPI (Command API) command to EOS.

        In this method, the list of commands is wrapped with prefix and
        postfix commands - to make it understandable by EOS.

        :param commands : List of command to be executed on EOS.
        :param deleteRegion : True/False - to delete entire region from EOS
        """
        command_start = ['enable', 'configure', 'management openstack']
        if deleteRegion:
            command_start.append('no region %s' % self.region)
        else:
            command_start.append('region %s' % self.region)
        command_end = ['exit', 'exit']
        full_command = command_start + commands + command_end
        LOG.info(_('Executing command on Arista EOS: %s'), full_command)
        try:
            # this returns array of return values for every command in
            # full_command list
            ret = self._server.runCmds(version=1, cmds=full_command)
            # Remove return values for 'configure terminal',
            # 'management openstack' and 'exit' commands
            ret = ret[len(command_start):-len(command_end)]
        except Exception as error:
            host = cfg.CONF.ml2_arista.eapi_host
            msg = (_('Error %(err)s while trying to execute '
                     'commands %(cmd)s on EOS %(host)s') %
                   {'err': error, 'cmd': full_command, 'host': host})
            LOG.exception(msg)
            raise arista_exc.AristaRpcError(msg=msg)
        return ret

    def _eapi_host_url(self):
        # Embed credentials in the https URL used by jsonrpclib.
        self._validate_config()
        user = cfg.CONF.ml2_arista.eapi_username
        pwd = cfg.CONF.ml2_arista.eapi_password
        host = cfg.CONF.ml2_arista.eapi_host
        eapi_server_url = ('https://%s:%s@%s/command-api' %
                           (user, pwd, host))
        return eapi_server_url

    def _validate_config(self):
        # Fail fast with a config error if any required eAPI option is unset.
        for option in self.required_options:
            if cfg.CONF.ml2_arista.get(option) is None:
                msg = _('Required option %s is not set') % option
                LOG.error(msg)
                raise arista_exc.AristaConfigError(msg=msg)
class SyncService(object):
    """Synchronization of information between Neutron and EOS.

    Periodically (driven by the sync_interval configuration option) this
    service ensures that networks and VMs configured on EOS/Arista HW
    are always in sync with the Neutron DB.
    """
    def __init__(self, rpc_wrapper, neutron_db):
        # rpc_wrapper: AristaRPCWrapper used to talk to EOS.
        # neutron_db:  NeutronNets helper for Neutron-side lookups.
        self._rpc = rpc_wrapper
        self._ndb = neutron_db
    def synchronize(self):
        """Sends data to EOS which differs from the Neutron DB."""
        LOG.info(_('Syncing Neutron <-> EOS'))
        try:
            eos_tenants = self._rpc.get_tenants()
        except arista_exc.AristaRpcError:
            msg = _('EOS is not available, will try sync later')
            LOG.warning(msg)
            return
        db_tenants = db.get_tenants()
        if not db_tenants and eos_tenants:
            # No tenants configured in Neutron. Clear all EOS state.
            try:
                self._rpc.delete_this_region()
                msg = _('No Tenants configured in Neutron DB. But %d '
                        'tenants discovered in EOS during synchronization.'
                        'Entire EOS region is cleared') % len(eos_tenants)
                # Bug fix: this message was previously built but never
                # logged.
                LOG.info(msg)
            except arista_exc.AristaRpcError:
                msg = _('EOS is not available, failed to delete this region')
                LOG.warning(msg)
            return
        if len(eos_tenants) > len(db_tenants):
            # EOS has extra tenants configured which should not be there.
            # NOTE(review): stale EOS tenants are only purged when EOS has
            # strictly more tenants than Neutron; confirm that differing
            # tenant sets of equal size cannot occur here.
            for tenant in eos_tenants:
                if tenant not in db_tenants:
                    try:
                        self._rpc.delete_tenant(tenant)
                    except arista_exc.AristaRpcError:
                        msg = _('EOS is not available,'
                                'failed to delete tenant %s') % tenant
                        LOG.warning(msg)
                        return
        # EOS and Neutron have a matching set of tenants. Now check
        # to ensure that networks and VMs match on both sides for
        # each tenant.
        for tenant in db_tenants:
            db_nets = db.get_networks(tenant)
            db_vms = db.get_vms(tenant)
            eos_nets = self._get_eos_networks(eos_tenants, tenant)
            eos_vms = self._get_eos_vms(eos_tenants, tenant)
            # Check for the case if everything is already in sync.
            if eos_nets == db_nets and eos_vms == db_vms:
                # Everything is in sync for this tenant; move on to the
                # next one. (Bug fix: this was 'break', which wrongly
                # skipped synchronization of all remaining tenants.)
                continue
            # Neutron DB and EOS require synchronization.
            # First delete anything which should not be on EOS.
            # Delete VMs from EOS that are not present in the Neutron DB.
            for vm_id in eos_vms:
                if vm_id not in db_vms:
                    try:
                        self._rpc.delete_vm(tenant, vm_id)
                    except arista_exc.AristaRpcError:
                        msg = _('EOS is not available,'
                                'failed to delete vm %s') % vm_id
                        LOG.warning(msg)
                        return
            # Delete networks from EOS that are not in the Neutron DB.
            for net_id in eos_nets:
                if net_id not in db_nets:
                    try:
                        self._rpc.delete_network(tenant, net_id)
                    except arista_exc.AristaRpcError:
                        msg = _('EOS is not available,'
                                'failed to delete network %s') % net_id
                        LOG.warning(msg)
                        return
            # Create on EOS the networks that exist only in the Neutron DB.
            for net_id in db_nets:
                if net_id not in eos_nets:
                    vlan_id = db_nets[net_id]['segmentationTypeId']
                    net_name = self._ndb.get_network_name(tenant, net_id)
                    try:
                        self._rpc.create_network(tenant, net_id,
                                                 net_name,
                                                 vlan_id)
                    except arista_exc.AristaRpcError:
                        msg = _('EOS is not available, failed to create'
                                'network id %s') % net_id
                        LOG.warning(msg)
                        return
            # Create on EOS the VMs that exist only in the Neutron DB.
            for vm_id in db_vms:
                if vm_id not in eos_vms:
                    vm = db_vms[vm_id]
                    ports = self._ndb.get_all_ports_for_vm(tenant, vm_id)
                    for port in ports:
                        port_id = port['id']
                        network_id = port['network_id']
                        port_name = port['name']
                        try:
                            self._rpc.plug_host_into_network(vm['vmId'],
                                                             vm['host'],
                                                             port_id,
                                                             network_id,
                                                             tenant,
                                                             port_name)
                        except arista_exc.AristaRpcError:
                            msg = _('EOS is not available, failed to create'
                                    'vm id %s') % vm['vmId']
                            LOG.warning(msg)
                            # NOTE(review): unlike the failure paths above
                            # this does not abort the sync run -- confirm
                            # whether a 'return' is intended here.
    def _get_eos_networks(self, eos_tenants, tenant):
        """Return the tenant's networks known to EOS ({} when none)."""
        networks = {}
        if eos_tenants:
            networks = eos_tenants[tenant]['tenantNetworks']
        return networks
    def _get_eos_vms(self, eos_tenants, tenant):
        """Return the tenant's VM instances known to EOS ({} when none)."""
        vms = {}
        if eos_tenants:
            vms = eos_tenants[tenant]['tenantVmInstances']
        return vms
class AristaDriver(driver_api.MechanismDriver):
    """Ml2 Mechanism driver for Arista networking hardware.

    Remembers all networks and VMs that are provisioned on Arista Hardware.
    Does not send network provisioning request if the network has already been
    provisioned before for the given port.
    """
    def __init__(self, rpc=None):
        # 'rpc' may be injected for testing; defaults to the real wrapper.
        self.rpc = rpc or AristaRPCWrapper()
        self.ndb = db.NeutronNets()
        confg = cfg.CONF.ml2_arista
        self.segmentation_type = db.VLAN_SEGMENTATION
        self.timer = None
        self.eos = SyncService(self.rpc, self.ndb)
        self.sync_timeout = confg['sync_interval']
        # Serializes driver callbacks against the periodic sync thread.
        self.eos_sync_lock = threading.Lock()
        # Runs a first sync immediately and schedules periodic re-runs.
        self._synchronization_thread()
    def initialize(self):
        # Handshake with EOS, then drop stale network entries from our DB.
        self.rpc._register_with_eos()
        self._cleanupDb()
    def create_network_precommit(self, context):
        """Remember the tenant, and network information."""
        network = context.current
        segments = context.network_segments
        network_id = network['id']
        tenant_id = network['tenant_id']
        # Only the first segment's VLAN id is recorded.
        segmentation_id = segments[0]['segmentation_id']
        with self.eos_sync_lock:
            db.remember_tenant(tenant_id)
            db.remember_network(tenant_id,
                                network_id,
                                segmentation_id)
    def create_network_postcommit(self, context):
        """Provision the network on the Arista Hardware."""
        network = context.current
        network_id = network['id']
        network_name = network['name']
        tenant_id = network['tenant_id']
        segments = context.network_segments
        vlan_id = segments[0]['segmentation_id']
        with self.eos_sync_lock:
            # Only push to EOS what precommit recorded in our DB.
            if db.is_network_provisioned(tenant_id, network_id):
                try:
                    self.rpc.create_network(tenant_id,
                                            network_id,
                                            network_name,
                                            vlan_id)
                except arista_exc.AristaRpcError:
                    LOG.info(EOS_UNREACHABLE_MSG)
                    raise ml2_exc.MechanismDriverError()
            else:
                msg = _('Network %s is not created as it is not found in'
                        'Arista DB') % network_id
                LOG.info(msg)
    def update_network_precommit(self, context):
        """At the moment we only support network name change.

        Any other change in network is not supported at this time.
        We do not store the network names, therefore, no DB store
        action is performed here.
        """
        new_network = context.current
        orig_network = context.original
        if new_network['name'] != orig_network['name']:
            msg = _('Network name changed to %s') % new_network['name']
            LOG.info(msg)
    def update_network_postcommit(self, context):
        """At the moment we only support network name change.

        If network name is changed, a new network create request is
        sent to the Arista Hardware.
        """
        new_network = context.current
        orig_network = context.original
        if new_network['name'] != orig_network['name']:
            network_id = new_network['id']
            network_name = new_network['name']
            tenant_id = new_network['tenant_id']
            vlan_id = new_network['provider:segmentation_id']
            with self.eos_sync_lock:
                if db.is_network_provisioned(tenant_id, network_id):
                    try:
                        # EOS treats a re-create with the new name as an
                        # update of the existing network.
                        self.rpc.create_network(tenant_id,
                                                network_id,
                                                network_name,
                                                vlan_id)
                    except arista_exc.AristaRpcError:
                        LOG.info(EOS_UNREACHABLE_MSG)
                        raise ml2_exc.MechanismDriverError()
                else:
                    msg = _('Network %s is not updated as it is not found in'
                            'Arista DB') % network_id
                    LOG.info(msg)
    def delete_network_precommit(self, context):
        """Delete the network information from the DB."""
        network = context.current
        network_id = network['id']
        tenant_id = network['tenant_id']
        with self.eos_sync_lock:
            if db.is_network_provisioned(tenant_id, network_id):
                db.forget_network(tenant_id, network_id)
            # if necessary, delete tenant as well.
            self.delete_tenant(tenant_id)
    def delete_network_postcommit(self, context):
        """Send network delete request to Arista HW."""
        network = context.current
        network_id = network['id']
        tenant_id = network['tenant_id']
        with self.eos_sync_lock:
            # Succeed deleting network in case EOS is not accessible.
            # EOS state will be updated by sync thread once EOS gets
            # alive.
            # NOTE(review): the code below actually raises
            # MechanismDriverError when EOS is unreachable, which
            # contradicts the comment above -- confirm intended behavior.
            try:
                self.rpc.delete_network(tenant_id, network_id)
            except arista_exc.AristaRpcError:
                LOG.info(EOS_UNREACHABLE_MSG)
                raise ml2_exc.MechanismDriverError()
    def create_port_precommit(self, context):
        """Remember the information about a VM and its ports.

        A VM information, along with the physical host information
        is saved.
        """
        port = context.current
        device_id = port['device_id']
        device_owner = port['device_owner']
        # TODO(sukhdev) revisit this once port binding support is implemented
        host = port['binding:host_id']
        # device_id and device_owner are set on VM boot
        is_vm_boot = device_id and device_owner
        if host and is_vm_boot:
            port_id = port['id']
            network_id = port['network_id']
            tenant_id = port['tenant_id']
            with self.eos_sync_lock:
                db.remember_vm(device_id, host, port_id,
                               network_id, tenant_id)
    def create_port_postcommit(self, context):
        """Plug a physical host into a network.

        Send provisioning request to Arista Hardware to plug a host
        into appropriate network.
        """
        port = context.current
        device_id = port['device_id']
        device_owner = port['device_owner']
        # TODO(sukhdev) revisit this once port binding support is implemented
        host = port['binding:host_id']
        # device_id and device_owner are set on VM boot
        is_vm_boot = device_id and device_owner
        if host and is_vm_boot:
            port_id = port['id']
            port_name = port['name']
            network_id = port['network_id']
            tenant_id = port['tenant_id']
            with self.eos_sync_lock:
                hostname = self._host_name(host)
                segmentation_id = db.get_segmentation_id(tenant_id,
                                                         network_id)
                vm_provisioned = db.is_vm_provisioned(device_id,
                                                      host,
                                                      port_id,
                                                      network_id,
                                                      tenant_id)
                net_provisioned = db.is_network_provisioned(tenant_id,
                                                            network_id,
                                                            segmentation_id)
                # Only plug the host when both the VM and the network were
                # recorded by the corresponding precommit calls.
                if vm_provisioned and net_provisioned:
                    try:
                        self.rpc.plug_host_into_network(device_id,
                                                        hostname,
                                                        port_id,
                                                        network_id,
                                                        tenant_id,
                                                        port_name)
                    except arista_exc.AristaRpcError:
                        LOG.info(EOS_UNREACHABLE_MSG)
                        raise ml2_exc.MechanismDriverError()
                else:
                    msg = _('VM %s is not created as it is not found in'
                            'Arista DB') % device_id
                    LOG.info(msg)
    def update_port_precommit(self, context):
        # TODO(sukhdev) revisit once the port binding support is implemented
        return
    def update_port_postcommit(self, context):
        # TODO(sukhdev) revisit once the port binding support is implemented
        return
    def delete_port_precommit(self, context):
        """Delete information about a VM and host from the DB."""
        port = context.current
        # TODO(sukhdev) revisit this once port binding support is implemented
        host_id = port['binding:host_id']
        device_id = port['device_id']
        tenant_id = port['tenant_id']
        network_id = port['network_id']
        port_id = port['id']
        with self.eos_sync_lock:
            if db.is_vm_provisioned(device_id, host_id, port_id,
                                    network_id, tenant_id):
                db.forget_vm(device_id, host_id, port_id,
                             network_id, tenant_id)
            # if necessary, delete tenant as well.
            self.delete_tenant(tenant_id)
    def delete_port_postcommit(self, context):
        """Unplug a physical host from a network.

        Send provisioning request to Arista Hardware to unplug a host
        from appropriate network.
        """
        port = context.current
        device_id = port['device_id']
        # TODO(sukhdev) revisit this once port binding support is implemented
        host = port['binding:host_id']
        port_id = port['id']
        network_id = port['network_id']
        tenant_id = port['tenant_id']
        try:
            with self.eos_sync_lock:
                hostname = self._host_name(host)
                self.rpc.unplug_host_from_network(device_id,
                                                  hostname,
                                                  port_id,
                                                  network_id,
                                                  tenant_id)
        except arista_exc.AristaRpcError:
            LOG.info(EOS_UNREACHABLE_MSG)
            raise ml2_exc.MechanismDriverError()
    def delete_tenant(self, tenant_id):
        """Delete a tenant from the DB.

        A tenant is deleted only if there is no network or VM
        configured for this tenant.
        """
        objects_for_tenant = (db.num_nets_provisioned(tenant_id) +
                              db.num_vms_provisioned(tenant_id))
        if not objects_for_tenant:
            db.forget_tenant(tenant_id)
    def _host_name(self, hostname):
        # When FQDNs are disabled, only the short host name is sent to EOS.
        fqdns_used = cfg.CONF.ml2_arista['use_fqdn']
        return hostname if fqdns_used else hostname.split('.')[0]
    def _synchronization_thread(self):
        # Sync under the lock, then re-arm the timer for the next run.
        with self.eos_sync_lock:
            self.eos.synchronize()
        self.timer = threading.Timer(self.sync_timeout,
                                     self._synchronization_thread)
        self.timer.start()
    def stop_synchronization_thread(self):
        # Cancel the pending timer (a currently running sync completes).
        if self.timer:
            self.timer.cancel()
            self.timer = None
    def _cleanupDb(self):
        """Clean up any unnecessary entries in our DB."""
        db_tenants = db.get_tenants()
        for tenant in db_tenants:
            neutron_nets = self.ndb.get_all_networks_for_tenant(tenant)
            neutron_nets_id = []
            for net in neutron_nets:
                neutron_nets_id.append(net['id'])
            db_nets = db.get_networks(tenant)
            # Forget networks we remember but Neutron no longer has.
            for net_id in db_nets.keys():
                if net_id not in neutron_nets_id:
                    db.forget_network(tenant, net_id)
|
xujun10110/Hammer | refs/heads/master | lib/theHarvester/discovery/pgpsearch.py | 16 | import string
import httplib, sys
import myparser
class search_pgp:
    """Query a PGP key server index and scrape e-mails / hostnames."""

    def __init__(self, word):
        self.word = word
        self.results = ""
        self.server = "pgp.rediris.es:11371"
        self.hostname = "pgp.rediris.es"
        self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"

    def process(self):
        """Fetch the key-server index page for the search word."""
        conn = httplib.HTTP(self.server)
        conn.putrequest('GET', "/pks/lookup?search=" + self.word + "&op=index")
        conn.putheader('Host', self.hostname)
        conn.putheader('User-agent', self.userAgent)
        conn.endheaders()
        returncode, returnmsg, headers = conn.getreply()
        self.results = conn.getfile().read()

    def _parser(self):
        # Both extractor methods share the same parser construction.
        return myparser.parser(self.results, self.word)

    def get_emails(self):
        """Return the e-mail addresses found in the fetched page."""
        return self._parser().emails()

    def get_hostnames(self):
        """Return the hostnames found in the fetched page."""
        return self._parser().hostnames()
|
dnsserver/datahub | refs/heads/master | src/examples/python/gen_py/datahub/account/constants.py | 162 | #
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
|
WikiWatershed/gwlf-e | refs/heads/develop | CreateVariableFiles.py | 1 | import os
# Interactive scaffolding script: creates a gwlfe variable module stub and a
# matching unit-test stub.  NOTE(review): raw_input() makes this Python 2
# only, and the name 'file' below shadows the builtin -- consider renaming.
variable_name = raw_input("Enter the name of the variable:")
# Write the variable implementation stub (refuses to overwrite).
var_filename = "%s.py"%(variable_name)
if os.path.exists("gwlfe/"+var_filename):
    raise IOError("File already exists")
else:
    with open("gwlfe/"+var_filename,"w") as file:
        file.write("from numpy import zeros\n"
                   "from Memoization import memoize\n\n\n"
                   "def %s():\n"
                   "    pass\n"
                   "\n\n"
                   "def %s_f():\n"
                   "    pass\n"%(variable_name,variable_name)
                   )
# Write the unit-test stub (refuses to overwrite).
test_filename = "test_%s.py"%(variable_name)
if os.path.exists("test/"+test_filename):
    raise IOError("File already exists")
else:
    with open("test/"+test_filename,"w") as file:
        file.write("import numpy as np\n"
                   "from VariableUnittest import VariableUnitTest\n"
                   "from unittest import skip\n"
                   "from gwlfe import {variable}\n"
                   "\n\n"
                   "class Test{variable}(VariableUnitTest):\n"
                   "    @skip('Not Ready Yet.')\n"
                   "    def test_{variable}(self):\n"
                   "        z = self.z\n"
                   "        np.testing.assert_array_almost_equal(\n"
                   "            {variable}.{variable}_f(),\n"
                   "            {variable}.{variable}(), decimal=7)".format(variable=variable_name)) |
kiwitcms/Kiwi | refs/heads/master | kiwi_lint/bulk_create.py | 2 | # Copyright (c) 2018 Alexander Todorov <atodorov@MrSenko.com>
# Licensed under the GPL 2.0: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
from pylint import checkers, interfaces
from pylint.checkers import utils
class BulkCreateChecker(checkers.BaseChecker):
    """Pylint checker that forbids plain bulk_create() in this code base."""
    __implements__ = (interfaces.IAstroidChecker,)
    name = "bulk-create-checker"
    # msg-id -> (displayed message, symbolic name, long description).
    msgs = {
        "E4451": (
            "Use bulk_create_with_history() instead of bulk_create()",
            "bulk-create-used",
            "bulk_create() will not save model history. "
            "Use bulk_create_with_history() instead!",
        )
    }
    @utils.check_messages("bulk-create-used")
    def visit_attribute(self, node):
        """Flag every attribute access named 'bulk_create'."""
        if node.attrname == "bulk_create":
            self.add_message("bulk-create-used", node=node)
|
2013Commons/HUE-SHARK | refs/heads/master | build/env/lib/python2.7/site-packages/Django-1.2.3-py2.7.egg/django/core/__init__.py | 12133432 | |
Clemson-DPA/dpa-pipe-backend | refs/heads/master | dpa/_site/__init__.py | 12133432 | |
BehavioralInsightsTeam/edx-platform | refs/heads/release-bit | lms/djangoapps/course_wiki/__init__.py | 12133432 | |
ahb0327/intellij-community | refs/heads/master | python/testData/inspections/PyUnresolvedReferencesInspection/FromPackageImportBuiltin/importSource/__init__.py | 12133432 | |
rtucker-mozilla/mozilla_inventory | refs/heads/master | vendor-local/src/django-extensions/build/lib/django_extensions/management/__init__.py | 12133432 | |
flochaz/horizon | refs/heads/stable/juno | openstack_dashboard/dashboards/project/volumes/__init__.py | 12133432 | |
yokose-ks/edx-platform | refs/heads/gacco3/master | lms/djangoapps/django_comment_client/tests/__init__.py | 12133432 | |
bcl/pykickstart | refs/heads/master | pykickstart/errors.py | 2 | #
# errors.py: Kickstart error handling.
#
# Chris Lumens <clumens@redhat.com>
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Error and warning handling classes and functions.
This module exports several exception classes:
KickstartError - A generic exception class.
KickstartParseError - An exception for errors occurring during parsing.
KickstartValueError - No longer raised by pykickstart, but kept around for
backwards compatibility.
KickstartVersionError - An exception for errors relating to unsupported
syntax versions.
And some warning classes:
KickstartWarning - A generic warning class.
KickstartParseWarning - A class for warnings occurring during parsing.
KickstartDeprecationWarning - A class for warnings occurring during parsing
related to deprecated commands and options.
"""
import warnings
from pykickstart.i18n import _
def formatErrorMsg(lineno, msg=""):
    """Deprecated helper kept for backwards compatibility.

    KickstartError formats error messages itself now, so this simply
    returns a ``(lineno, msg)`` tuple that KickstartError knows how to
    unpack.  Prefer calling::

        KickstartError(message, lineno=lineno)

    over the deprecated::

        KickstartError(formatErrorMsg(message, lineno=lineno))
    """
    warnings.warn("Function formatErrorMsg is deprecated. The error messages "
                  "are formatted by KickstartError now.", DeprecationWarning)
    return (lineno, msg)
def _format_error_message(lineno, msg=""):
    """Return *msg* decorated with the kickstart line number *lineno*.

    Intended to be called only from exception classes when building the
    text they display.
    """
    if not msg:
        return _("There was a problem reading from line %s of the kickstart file") % lineno
    return _("The following problem occurred on line %(lineno)s of the kickstart file:"
             "\n\n%(msg)s\n") % {"lineno": lineno, "msg": msg}
class KickstartError(Exception):
    """A generic exception class for unspecific error conditions."""

    def __init__(self, msg="", lineno=None, formatting=True):
        """Create a new KickstartError with the descriptive message *msg*.

        The message is formatted with the line number *lineno* when
        *formatting* is allowed and a line number is known.  A two-element
        ``(lineno, msg)`` tuple (as produced by the deprecated
        formatErrorMsg) is also accepted as *msg*.
        """
        super(KickstartError, self).__init__()
        # Accept tuples from formatErrorMsg for backwards compatibility.
        if isinstance(msg, tuple) and len(msg) == 2:
            lineno, msg = msg
        self.message = msg
        self.lineno = lineno
        # 'value' is what str() shows; kept for backwards compatibility.
        if formatting and lineno is not None:
            self.value = _format_error_message(lineno, msg)
        else:
            self.value = self.message

    def __str__(self):
        return self.value
class KickstartParseError(KickstartError):
    """An exception class for errors when processing the input file, such as
    unknown options, commands, or sections.
    """
class KickstartValueError(KickstartError):
    """This exception class is no longer raised by pykickstart but is kept
    for backwards compatibility with callers that still catch it.
    """
class KickstartVersionError(KickstartError):
    """An exception class for errors related to using an incorrect version of
    kickstart syntax.
    """
class KickstartWarning(Warning):
    """A generic warning class for unspecific conditions; base class for all
    pykickstart warnings.
    """
class KickstartParseWarning(KickstartWarning, UserWarning):
    """A class for warnings occurring during parsing an input file, such as
    defining duplicate entries and setting removed keywords.
    """
class KickstartDeprecationWarning(KickstartParseWarning, DeprecationWarning):
    """A class for warnings occurring during parsing related to using
    deprecated commands and options.
    """
|
pepetreshere/odoo | refs/heads/patch-2 | addons/sms/models/res_partner.py | 11 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models
class ResPartner(models.Model):
    """Extend res.partner with the SMS mixin hooks from mail.thread.phone."""
    _name = 'res.partner'
    _inherit = ['res.partner', 'mail.thread.phone']
    def _sms_get_default_partners(self):
        """ Override of mail.thread method.
        SMS recipients on partners are the partners themselves.
        """
        return self
    def _sms_get_number_fields(self):
        """ This method returns the fields to use to find the number to use to
        send an SMS on a record, in priority order. """
        return ['mobile', 'phone']
|
Ssawa/Diamond | refs/heads/master | src/collectors/snmpraw/test/testsnmpraw.py | 29 | #!/usr/bin/python
# coding=utf-8
###############################################################################
import time
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from snmpraw import SNMPRawCollector
from diamond.collector import Collector
###############################################################################
class TestSNMPRawCollector(CollectorTestCase):
    """Unit tests for SNMPRawCollector metric publishing."""
    def setUp(self):
        # Build the collector with an empty config override.
        config = get_collector_config('SNMPRawCollector', {
        })
        self.collector = SNMPRawCollector(config, None)
    def test_import(self):
        # Smoke test: the collector class could be imported.
        self.assertTrue(SNMPRawCollector)
    @patch.object(Collector, 'publish_metric')
    @patch.object(time, 'time', Mock(return_value=1000))
    @patch.object(SNMPRawCollector, '_get_value', Mock(return_value=5))
    def test_metric(self, collect_mock):
        """Check all fields of the metric published for one OID."""
        test_config = {'devices': {'test': {'oids': {'1.1.1.1': 'test'}}}}
        self.collector.config.update(test_config)
        # Expected metric path: <prefix>.test.<suffix>.test
        path = '.'.join([self.collector.config['path_prefix'], 'test',
                         self.collector.config['path_suffix'], 'test'])
        self.collector.collect_snmp('test', None, None, None)
        # First positional argument of the mocked publish_metric call.
        metric = collect_mock.call_args[0][0]
        self.assertEqual(metric.metric_type, 'GAUGE')
        self.assertEqual(metric.ttl, None)
        self.assertEqual(metric.value, self.collector._get_value())
        self.assertEqual(metric.precision, self.collector._precision(5))
        self.assertEqual(metric.host, None)
        self.assertEqual(metric.path, path)
        # time.time() is patched to return 1000.
        self.assertEqual(metric.timestamp, 1000)
###############################################################################
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
minesense/VisTrails | refs/heads/master | vistrails/gui/modules/stringformat_configuration.py | 2 | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core.system import get_vistrails_basic_pkg_id
from vistrails.gui.modules.module_configure import StandardModuleConfigurationWidget
class StringFormatConfigurationWidget(StandardModuleConfigurationWidget):
    """
    Configuration widget creating the input ports corresponding to the
    placeholders of the module's 'format' string.
    """
    def __init__(self, module, controller, parent=None):
        """ StringFormatConfigurationWidget(
                                           module: Module,
                                           controller: VistrailController,
                                           parent: QWidget)
                                           -> TupleConfigurationWidget
        Let StandardModuleConfigurationWidget constructor store the
        controller/module object from the builder and set up the
        configuration widget.
        After StandardModuleConfigurationWidget constructor, all of
        these will be available:
        self.module : the Module object in the pipeline
        self.controller: the current vistrail controller
        """
        StandardModuleConfigurationWidget.__init__(self, module,
                                                   controller, parent)
        # Give it a nice window title
        self.setWindowTitle("StringFormat Configuration")
        # Add an empty vertical layout
        centralLayout = QtGui.QVBoxLayout()
        centralLayout.setMargin(0)
        centralLayout.setSpacing(0)
        self.setLayout(centralLayout)
        # Add the configuration button
        self.button = QtGui.QPushButton("Sync ports")
        self.connect(self.button, QtCore.SIGNAL('clicked()'),
                     self.saveTriggered)
        centralLayout.addWidget(self.button)
    def activate(self):
        # NOTE(review): focusWidget() returns the widget that currently has
        # focus; to give the button keyboard focus this probably should be
        # self.button.setFocus(QtCore.Qt.ActiveWindowFocusReason) -- confirm.
        self.button.focusWidget(QtCore.Qt.ActiveWindowFocusReason)
    def saveTriggered(self, checked = False):
        """ saveTriggered(checked: bool) -> None
        Update vistrail controller and module when the user clicks Ok.
        """
        if self.updateVistrail():
            self.emit(QtCore.SIGNAL('stateChanged'))
            self.emit(QtCore.SIGNAL('doneConfigure'), self.module.id)
    def get_format(self):
        """Return the value of the module's 'format' function, or ''."""
        for i in xrange(self.module.getNumFunctions()):
            func = self.module.functions[i]
            if func.name == 'format':
                return func.params[0].strValue
        # for/else: runs when no 'format' function was found above.
        else:
            return ''
    def updateVistrail(self):
        """ updateVistrail() -> None
        Sync the module's input ports with the format's placeholders:
        missing ports are added, ports no longer referenced are deleted.
        """
        from vistrails.core.modules.basic_modules import StringFormat
        args, kwargs = StringFormat.list_placeholders(self.get_format())
        # Positional placeholders become ports named _0, _1, ...
        wanted_ports = set('_%d' % n for n in xrange(args)) | kwargs
        current_ports = set(port_spec.name
                            for port_spec in self.module.input_port_specs)
        sigstring = '(org.vistrails.vistrails.basic:Variant)'
        add_ports = [('input', n, sigstring, -1)
                     for n in (wanted_ports - current_ports)]
        delete_ports = [('input', n)
                        for n in (current_ports - wanted_ports)]
        self.controller.update_ports(self.module.id, delete_ports, add_ports)
        return True
|
DANCEcollaborative/forum-xblock | refs/heads/master | XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/django/db/models/sql/compiler.py | 32 | from itertools import izip
from django.core.exceptions import FieldError
from django.db import transaction
from django.db.backends.util import truncate_name
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
    def pre_sql_setup(self):
        """
        Does any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        # TODO: after the query has been executed, the altered state should be
        # cleaned. We are not using a clone() of the query here.
        """
        # Ensure the query has at least the model's base table to join from.
        if not self.query.tables:
            self.query.join((None, self.query.model._meta.db_table, None, None))
        # Pull in columns of inherited parent models when selecting default
        # columns and that has not already been done.
        if (not self.query.select and self.query.default_cols and not
                self.query.included_inherited_models):
            self.query.setup_inherited_models()
        # Prepare the extra columns/joins needed by select_related().
        if self.query.select_related and not self.query.related_select_cols:
            self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        # A zero-width slice can never match rows; skip the round trip.
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()
        self.pre_sql_setup()
        # After executing the query, we must get rid of any joins the query
        # setup created. So, take note of alias counts before the query ran.
        # However we do not want to get rid of stuff done in pre_sql_setup(),
        # as the pre_sql_setup will modify query state in a way that forbids
        # another run of it.
        self.refcounts_before = self.query.alias_refcount.copy()
        out_cols = self.get_columns(with_col_aliases)
        ordering, ordering_group_by = self.get_ordering()
        distinct_fields = self.get_distinct()
        # This must come after 'select', 'ordering' and 'distinct' -- see
        # docstring of get_from_clause() for details.
        from_, f_params = self.get_from_clause()
        qn = self.quote_name_unless_alias
        where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
        having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
        params = []
        # Extra-select parameters come first in the final parameter list,
        # matching the position of those expressions in the SELECT clause.
        for val in self.query.extra_select.itervalues():
            params.extend(val[1])
        result = ['SELECT']
        if self.query.distinct:
            result.append(self.connection.ops.distinct_sql(distinct_fields))
        result.append(', '.join(out_cols + self.query.ordering_aliases))
        result.append('FROM')
        result.extend(from_)
        params.extend(f_params)
        if where:
            result.append('WHERE %s' % where)
            params.extend(w_params)
        grouping, gb_params = self.get_grouping()
        if grouping:
            if distinct_fields:
                raise NotImplementedError(
                    "annotate() + distinct(fields) not implemented.")
            if ordering:
                # If the backend can't group by PK (i.e., any database
                # other than MySQL), then any fields mentioned in the
                # ordering clause needs to be in the group by clause.
                if not self.connection.features.allows_group_by_pk:
                    for col, col_params in ordering_group_by:
                        if col not in grouping:
                            grouping.append(str(col))
                            gb_params.extend(col_params)
            else:
                ordering = self.connection.ops.force_no_ordering()
            result.append('GROUP BY %s' % ', '.join(grouping))
            params.extend(gb_params)
        if having:
            result.append('HAVING %s' % having)
            params.extend(h_params)
        if ordering:
            result.append('ORDER BY %s' % ', '.join(ordering))
        if with_limits:
            if self.query.high_mark is not None:
                result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
            if self.query.low_mark:
                if self.query.high_mark is None:
                    # OFFSET without LIMIT needs the backend's "no limit"
                    # marker on databases that require LIMIT to use OFFSET.
                    val = self.connection.ops.no_limit_value()
                    if val:
                        result.append('LIMIT %d' % val)
                result.append('OFFSET %d' % self.query.low_mark)
        if self.query.select_for_update and self.connection.features.has_select_for_update:
            # If we've been asked for a NOWAIT query but the backend does not support it,
            # raise a DatabaseError otherwise we could get an unexpected deadlock.
            nowait = self.query.select_for_update_nowait
            if nowait and not self.connection.features.has_select_for_update_nowait:
                raise DatabaseError('NOWAIT is not supported on this database backend.')
            result.append(self.connection.ops.for_update_sql(nowait=nowait))
        # Finally do cleanup - get rid of the joins we created above.
        self.query.reset_refcounts(self.refcounts_before)
        return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
    def get_columns(self, with_aliases=False):
        """
        Returns the list of columns to use in the select statement. If no
        columns have been specified, returns all columns relating to fields in
        the model.
        If 'with_aliases' is true, any column names that are duplicated
        (without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        # Extra-select expressions always come first in the column list.
        result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()]
        aliases = set(self.query.extra_select.keys())
        if with_aliases:
            col_aliases = aliases.copy()
        else:
            col_aliases = set()
        if self.query.select:
            only_load = self.deferred_to_columns()
            for col in self.query.select:
                if isinstance(col, (list, tuple)):
                    # (alias, column) pair referring to a real table column.
                    alias, column = col
                    table = self.query.alias_map[alias][TABLE_NAME]
                    # Skip columns excluded by defer()/only().
                    if table in only_load and column not in only_load[table]:
                        continue
                    r = '%s.%s' % (qn(alias), qn(column))
                    if with_aliases:
                        if col[1] in col_aliases:
                            # Name collision: synthesize a unique alias.
                            c_alias = 'Col%d' % len(col_aliases)
                            result.append('%s AS %s' % (r, c_alias))
                            aliases.add(c_alias)
                            col_aliases.add(c_alias)
                        else:
                            result.append('%s AS %s' % (r, qn2(col[1])))
                            aliases.add(r)
                            col_aliases.add(col[1])
                    else:
                        result.append(r)
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    # An expression object (e.g. an aggregate) that knows how
                    # to render itself.
                    result.append(col.as_sql(qn, self.connection))
                    if hasattr(col, 'alias'):
                        aliases.add(col.alias)
                        col_aliases.add(col.alias)
        elif self.query.default_cols:
            cols, new_aliases = self.get_default_columns(with_aliases,
                    col_aliases)
            result.extend(cols)
            aliases.update(new_aliases)
        max_name_length = self.connection.ops.max_name_length()
        result.extend([
            '%s%s' % (
                aggregate.as_sql(qn, self.connection),
                alias is not None
                    and ' AS %s' % qn(truncate_name(alias, max_name_length))
                    or ''
            )
            for alias, aggregate in self.query.aggregate_select.items()
        ])
        # Columns pulled in by select_related() come last.
        for table, col in self.query.related_select_cols:
            r = '%s.%s' % (qn(table), qn(col))
            if with_aliases and col in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (r, c_alias))
                aliases.add(c_alias)
                col_aliases.add(c_alias)
            else:
                result.append(r)
                aliases.add(r)
                col_aliases.add(col)
        # Remember the full alias set for get_ordering()'s distinct handling.
        self._select_aliases = aliases
        return result
    def get_default_columns(self, with_aliases=False, col_aliases=None,
            start_alias=None, opts=None, as_pairs=False, local_only=False):
        """
        Computes the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.
        Returns a list of strings, quoted appropriately for use in SQL
        directly, as well as a set of aliases used in the select statement (if
        'as_pairs' is True, returns a list of (alias, col_name) pairs instead
        of strings as the first component and None as the second component).
        """
        result = []
        if opts is None:
            opts = self.query.model._meta
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        aliases = set()
        only_load = self.deferred_to_columns()
        # Skip all proxy to the root proxied model
        proxied_model = opts.concrete_model
        if start_alias:
            # Map each inheritance-chain model to the alias that joins it in.
            seen = {None: start_alias}
        for field, model in opts.get_fields_with_model():
            # 'model' is non-None for fields inherited from a parent model.
            if local_only and model is not None:
                continue
            if start_alias:
                try:
                    alias = seen[model]
                except KeyError:
                    if model is proxied_model:
                        alias = start_alias
                    else:
                        # Join to the parent table that actually holds the
                        # inherited field.
                        link_field = opts.get_ancestor_link(model)
                        alias = self.query.join((start_alias, model._meta.db_table,
                                link_field.column, model._meta.pk.column))
                    seen[model] = alias
            else:
                # If we're starting from the base model of the queryset, the
                # aliases will have already been set up in pre_sql_setup(), so
                # we can save time here.
                alias = self.query.included_inherited_models[model]
            table = self.query.alias_map[alias][TABLE_NAME]
            # Honour defer()/only() restrictions.
            if table in only_load and field.column not in only_load[table]:
                continue
            if as_pairs:
                result.append((alias, field.column))
                aliases.add(alias)
                continue
            if with_aliases and field.column in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s.%s AS %s' % (qn(alias),
                    qn2(field.column), c_alias))
                col_aliases.add(c_alias)
                aliases.add(c_alias)
            else:
                r = '%s.%s' % (qn(alias), qn2(field.column))
                result.append(r)
                aliases.add(r)
                if with_aliases:
                    col_aliases.add(field.column)
        return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.model._meta
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
field, col, alias, _, _ = self._setup_joins(parts, opts, None)
col, alias = self._final_join_removal(col, alias)
result.append("%s.%s" % (qn(alias), qn2(col)))
return result
    def get_ordering(self):
        """
        Returns a tuple containing a list representing the SQL elements in the
        "order by" clause, and the list of SQL elements that need to be added
        to the GROUP BY clause as a result of the ordering.
        Also sets the ordering_aliases attribute on this instance to a list of
        extra aliases needed in the select.
        Determining the ordering SQL can change the tables we need to include,
        so this should be run *before* get_from_clause().
        """
        # Precedence: extra(order_by=...) > explicit order_by() > Meta.ordering.
        if self.query.extra_order_by:
            ordering = self.query.extra_order_by
        elif not self.query.default_ordering:
            ordering = self.query.order_by
        else:
            ordering = (self.query.order_by
                        or self.query.model._meta.ordering
                        or [])
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        distinct = self.query.distinct
        select_aliases = self._select_aliases
        result = []
        group_by = []
        ordering_aliases = []
        if self.query.standard_ordering:
            asc, desc = ORDER_DIR['ASC']
        else:
            # reverse() flips the meaning of every ordering term.
            asc, desc = ORDER_DIR['DESC']
        # It's possible, due to model inheritance, that normal usage might try
        # to include the same field more than once in the ordering. We track
        # the table/column pairs we use and discard any after the first use.
        processed_pairs = set()
        for field in ordering:
            if field == '?':
                # Random ordering delegated to the backend.
                result.append(self.connection.ops.random_function_sql())
                continue
            if isinstance(field, int):
                # Legacy ordinal ordering: order by select-column position.
                if field < 0:
                    order = desc
                    field = -field
                else:
                    order = asc
                result.append('%s %s' % (field, order))
                group_by.append((field, []))
                continue
            col, order = get_order_dir(field, asc)
            if col in self.query.aggregate_select:
                result.append('%s %s' % (qn(col), order))
                continue
            if '.' in field:
                # This came in through an extra(order_by=...) addition. Pass it
                # on verbatim.
                table, col = col.split('.', 1)
                if (table, col) not in processed_pairs:
                    elt = '%s.%s' % (qn(table), col)
                    processed_pairs.add((table, col))
                    if not distinct or elt in select_aliases:
                        result.append('%s %s' % (elt, order))
                        group_by.append((elt, []))
            elif get_order_dir(field)[0] not in self.query.extra_select:
                # 'col' is of the form 'field' or 'field1__field2' or
                # '-field1__field2__field', etc.
                for table, col, order in self.find_ordering_name(field,
                        self.query.model._meta, default_order=asc):
                    if (table, col) not in processed_pairs:
                        elt = '%s.%s' % (qn(table), qn2(col))
                        processed_pairs.add((table, col))
                        if distinct and elt not in select_aliases:
                            # SELECT DISTINCT requires ordered-by columns to
                            # appear in the select list; add them as extras.
                            ordering_aliases.append(elt)
                        result.append('%s %s' % (elt, order))
                        group_by.append((elt, []))
            else:
                elt = qn2(col)
                if distinct and col not in select_aliases:
                    ordering_aliases.append(elt)
                result.append('%s %s' % (elt, order))
                group_by.append(self.query.extra_select[col])
        self.query.ordering_aliases = ordering_aliases
        return result, group_by
    def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
            already_seen=None):
        """
        Returns the table alias (the name might be ambiguous, the alias will
        not be) and column name for ordering by the given 'name' parameter.
        The 'name' is of the form 'field1__field2__...__fieldN'.
        """
        name, order = get_order_dir(name, default_order)
        pieces = name.split(LOOKUP_SEP)
        field, col, alias, joins, opts = self._setup_joins(pieces, opts, alias)
        # If we get to this point and the field is a relation to another model,
        # append the default ordering for that model.
        if field.rel and len(joins) > 1 and opts.ordering:
            # Firstly, avoid infinite loops.
            if not already_seen:
                already_seen = set()
            join_tuple = tuple([self.query.alias_map[j][TABLE_NAME] for j in joins])
            if join_tuple in already_seen:
                raise FieldError('Infinite loop caused by ordering.')
            already_seen.add(join_tuple)
            # Recurse into the related model's Meta.ordering entries.
            results = []
            for item in opts.ordering:
                results.extend(self.find_ordering_name(item, opts, alias,
                        order, already_seen))
            return results
        # Drop any trailing joins that aren't needed for the final column.
        col, alias = self._final_join_removal(col, alias)
        return [(alias, col, order)]
    def _setup_joins(self, pieces, opts, alias):
        """
        A helper method for get_ordering and get_distinct. This method will
        call query.setup_joins, handle refcounts and then promote the joins.
        Note that get_ordering and get_distinct must produce same target
        columns on same input, as the prefixes of get_ordering and get_distinct
        must match. Executing SQL where this is not true is an error.
        """
        if not alias:
            alias = self.query.get_initial_alias()
        field, target, opts, joins, _, _ = self.query.setup_joins(pieces,
                opts, alias, False)
        # The target column lives on the last join in the chain.
        alias = joins[-1]
        col = target.column
        if not field.rel:
            # To avoid inadvertent trimming of a necessary alias, use the
            # refcount to show that we are referencing a non-relation field on
            # the model.
            self.query.ref_alias(alias)
        # Must use left outer joins for nullable fields and their relations.
        # Ordering or distinct must not affect the returned set, and INNER
        # JOINS for nullable fields could do this.
        self.query.promote_alias_chain(joins,
            self.query.alias_map[joins[0]][JOIN_TYPE] == self.query.LOUTER)
        return field, col, alias, joins, opts
def _final_join_removal(self, col, alias):
"""
A helper method for get_distinct and get_ordering. This method will
trim extra not-needed joins from the tail of the join chain.
This is very similar to what is done in trim_joins, but we will
trim LEFT JOINS here. It would be a good idea to consolidate this
method and query.trim_joins().
"""
if alias:
while 1:
join = self.query.alias_map[alias]
if col != join[RHS_JOIN_COL]:
break
self.query.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
return col, alias
    def get_from_clause(self):
        """
        Returns a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list any extra parameters that
        need to be included. Sub-classes, can override this to create a
        from-clause via a "select".
        This should only be called after any SQL construction methods that
        might change the tables we need. This means the select columns,
        ordering and distinct must be done first.
        """
        result = []
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        first = True
        for alias in self.query.tables:
            # Aliases with a zero refcount were trimmed from the query.
            if not self.query.alias_refcount[alias]:
                continue
            try:
                name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias]
            except KeyError:
                # Extra tables can end up in self.tables, but not in the
                # alias_map if they aren't in a join. That's OK. We skip them.
                continue
            # Only emit " <alias>" when it differs from the table name.
            alias_str = (alias != name and ' %s' % alias or '')
            if join_type and not first:
                result.append('%s %s%s ON (%s.%s = %s.%s)'
                        % (join_type, qn(name), alias_str, qn(lhs),
                           qn2(lhs_col), qn(alias), qn2(col)))
            else:
                connector = not first and ', ' or ''
                result.append('%s%s%s' % (connector, qn(name), alias_str))
            first = False
        for t in self.query.extra_tables:
            alias, unused = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # calls increments the refcount, so an alias refcount of one means
            # this is the only reference.
            if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
                connector = not first and ', ' or ''
                result.append('%s%s' % (connector, qn(alias)))
                first = False
        # No parameters are ever needed for the FROM clause itself.
        return result, []
    def get_grouping(self):
        """
        Returns a tuple representing the SQL elements in the "group by" clause.
        """
        qn = self.quote_name_unless_alias
        result, params = [], []
        if self.query.group_by is not None:
            # If every model field is selected and the backend (MySQL) allows
            # grouping by primary key, collapse the GROUP BY to just the pk.
            if (len(self.query.model._meta.fields) == len(self.query.select) and
                self.connection.features.allows_group_by_pk):
                self.query.group_by = [
                    (self.query.model._meta.db_table, self.query.model._meta.pk.column)
                ]
            group_by = self.query.group_by or []
            extra_selects = []
            for extra_select, extra_params in self.query.extra_select.itervalues():
                extra_selects.append(extra_select)
                params.extend(extra_params)
            cols = (group_by + self.query.select +
                self.query.related_select_cols + extra_selects)
            # Deduplicate while preserving order.
            seen = set()
            for col in cols:
                if col in seen:
                    continue
                seen.add(col)
                if isinstance(col, (list, tuple)):
                    result.append('%s.%s' % (qn(col[0]), qn(col[1])))
                elif hasattr(col, 'as_sql'):
                    result.append(col.as_sql(qn, self.connection))
                else:
                    result.append('(%s)' % str(col))
        return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
used=None, requested=None, restricted=None, nullable=None,
dupe_set=None, avoid_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
self.query.related_select_fields = []
if not used:
used = set()
if dupe_set is None:
dupe_set = set()
if avoid_set is None:
avoid_set = set()
orig_dupe_set = dupe_set
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
if not select_related_descend(f, restricted, requested):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = f.rel.to._meta.db_table
promote = nullable or f.null
if model:
int_opts = opts
alias = root_alias
alias_chain = []
for int_model in opts.get_base_chain(model):
# Proxy model have elements in base chain
# with no parents, assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
promote=promote)
alias_chain.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if self.query.alias_map[root_alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(alias_chain, True)
else:
alias = root_alias
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join((alias, table, f.column,
f.rel.get_related_field().column),
exclusions=used.union(avoid), promote=promote)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(columns)
if self.query.alias_map[alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(aliases, True)
self.query.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
used, next, restricted, new_nullable, dupe_set, avoid)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested, reverse=True):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = model._meta.db_table
int_opts = opts
alias = root_alias
alias_chain = []
chain = opts.get_base_chain(f.rel.to)
if chain is not None:
for int_model in chain:
# Proxy model have elements in base chain
# with no parents, assign the new options
# object and skip to the next base in that
# case
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update((self.query.dupe_avoidance.get(id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join(
(alias, int_opts.db_table, lhs_col, int_opts.pk.column),
exclusions=used, promote=True, reuse=used
)
alias_chain.append(alias)
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join(
(alias, table, f.rel.get_related_field().column, f.column),
exclusions=used.union(avoid),
promote=True
)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, local_only=True)
self.query.related_select_cols.extend(columns)
self.query.related_select_fields.extend(model._meta.fields)
next = requested.get(f.related_query_name(), {})
new_nullable = f.null or None
self.fill_related_selections(model._meta, table, cur_depth+1,
used, next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        # Subclasses with a resolve_columns() hook (e.g. Oracle/GIS backends)
        # get each row post-processed through it.
        resolve_columns = hasattr(self, 'resolve_columns')
        fields = None
        has_aggregate_select = bool(self.query.aggregate_select)
        # Set transaction dirty if we're using SELECT FOR UPDATE to ensure
        # a subsequent commit/rollback is executed, so any database locks
        # are released.
        if self.query.select_for_update and transaction.is_managed(self.using):
            transaction.set_dirty(self.using)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                if resolve_columns:
                    if fields is None:
                        # We only set this up here because
                        # related_select_fields isn't populated until
                        # execute_sql() has been called.
                        if self.query.select_fields:
                            fields = self.query.select_fields + self.query.related_select_fields
                        else:
                            fields = self.query.model._meta.fields
                        # If the field was deferred, exclude it from being passed
                        # into `resolve_columns` because it wasn't selected.
                        only_load = self.deferred_to_columns()
                        if only_load:
                            db_table = self.query.model._meta.db_table
                            fields = [f for f in fields if db_table in only_load and
                                      f.column in only_load[db_table]]
                    row = self.resolve_columns(row, fields)
                if has_aggregate_select:
                    # Aggregate columns sit between the extra/select columns
                    # and the related-selection columns; convert just that
                    # slice through resolve_aggregate().
                    aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select)
                    aggregate_end = aggregate_start + len(self.query.aggregate_select)
                    row = tuple(row[:aggregate_start]) + tuple([
                        self.query.resolve_aggregate(value, aggregate, self.connection)
                        for (alias, aggregate), value
                        in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
                    ]) + tuple(row[aggregate_end:])
                yield row
    def execute_sql(self, result_type=MULTI):
        """
        Run the query against the database and returns the result(s). The
        return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.
        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery). It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            # An empty result set still needs to look like an iterator for
            # MULTI callers; everyone else gets None.
            if result_type == MULTI:
                return empty_iter()
            else:
                return
        cursor = self.connection.cursor()
        cursor.execute(sql, params)
        if not result_type:
            return cursor
        if result_type == SINGLE:
            if self.query.ordering_aliases:
                # Trim the extra ordering-support columns off the row.
                return cursor.fetchone()[:-len(self.query.ordering_aliases)]
            return cursor.fetchone()
        # The MULTI case.
        if self.query.ordering_aliases:
            result = order_modified_iter(cursor, len(self.query.ordering_aliases),
                    self.connection.features.empty_fetchmany_value)
        else:
            result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
                    self.connection.features.empty_fetchmany_value)
        if not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normally, but ensure it is all read into memory
            # before going any further.
            return list(result)
        return result
class SQLInsertCompiler(SQLCompiler):
    def placeholder(self, field, val):
        # Return the SQL placeholder text for one value of 'field'.
        if field is None:
            # A field value of None means the value is raw.
            return val
        elif hasattr(field, 'get_placeholder'):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
            return field.get_placeholder(val, self.connection)
        else:
            # Return the common case for the placeholder
            return '%s'
    def as_sql(self):
        # Build one or more (sql, params) pairs implementing the INSERT.
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.model._meta
        result = ['INSERT INTO %s' % qn(opts.db_table)]
        has_fields = bool(self.query.fields)
        # With no explicit fields, insert a database-default primary key.
        fields = self.query.fields if has_fields else [opts.pk]
        result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
        if has_fields:
            params = values = [
                [
                    f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        # A single multi-row VALUES statement is only possible when every
        # placeholder is the plain '%s' and no inserted id must be returned.
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
            not self.return_id and self.connection.features.has_bulk_insert)
        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in izip(fields, val)]
                for val in values
            ]
            # Oracle Spatial needs to remove some values due to #10888
            params = self.connection.ops.modify_insert_params(placeholders, params)
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            result.append(r_fmt % col)
            params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple([v for val in values for v in val]))]
        else:
            # One statement per object.
            return [
                (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
                for p, vals in izip(placeholders, params)
            ]
    def execute_sql(self, return_id=False):
        # Returning an id only makes sense when exactly one object is inserted.
        assert not (return_id and len(self.query.objs) != 1)
        self.return_id = return_id
        cursor = self.connection.cursor()
        for sql, params in self.as_sql():
            cursor.execute(sql, params)
        if not (return_id and cursor):
            return
        if self.connection.features.can_return_id_from_insert:
            return self.connection.ops.fetch_returned_insert_id(cursor)
        return self.connection.ops.last_insert_id(cursor,
                self.query.model._meta.db_table, self.query.model._meta.pk.column)
class SQLDeleteCompiler(SQLCompiler):
    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        assert len(self.query.tables) == 1, \
            "Can only delete from one table at a time."
        qn = self.quote_name_unless_alias
        table_sql = qn(self.query.tables[0])
        where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
        return 'DELETE FROM %s WHERE %s' % (table_sql, where), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        self.pre_sql_setup()
        if not self.query.values:
            # Nothing to update.
            return '', ()
        table = self.query.tables[0]
        qn = self.quote_name_unless_alias
        result = ['UPDATE %s' % qn(table)]
        result.append('SET')
        values, update_params = [], []
        for field, model, val in self.query.values:
            if hasattr(val, 'prepare_database_save'):
                # Model instances know how to turn themselves into the
                # value stored for this field.
                val = val.prepare_database_save(field)
            else:
                val = field.get_db_prep_save(val, connection=self.connection)
            # Getting the placeholder for the field.
            if hasattr(field, 'get_placeholder'):
                placeholder = field.get_placeholder(val, self.connection)
            else:
                placeholder = '%s'
            if hasattr(val, 'evaluate'):
                # An F()-style expression; compile it against this query.
                val = SQLEvaluator(val, self.query, allow_joins=False)
            name = field.column
            if hasattr(val, 'as_sql'):
                sql, params = val.as_sql(qn, self.connection)
                values.append('%s = %s' % (qn(name), sql))
                update_params.extend(params)
            elif val is not None:
                values.append('%s = %s' % (qn(name), placeholder))
                update_params.append(val)
            else:
                values.append('%s = NULL' % qn(name))
        if not values:
            return '', ()
        result.append(', '.join(values))
        where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
        if where:
            result.append('WHERE %s' % where)
        return ' '.join(result), tuple(update_params + params)
    def execute_sql(self, result_type):
        """
        Execute the specified update. Returns the number of rows affected by
        the primary update query. The "primary update query" is the first
        non-empty query that is executed. Row counts for any subsequent,
        related queries are not available.
        """
        cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
        rows = cursor and cursor.rowcount or 0
        is_empty = cursor is None
        del cursor
        # Run updates for any related (inherited parent) tables as well.
        for query in self.query.get_related_updates():
            aux_rows = query.get_compiler(self.using).execute_sql(result_type)
            if is_empty:
                # Report the row count of the first query that did anything.
                rows = aux_rows
                is_empty = False
        return rows
    def pre_sql_setup(self):
        """
        If the update depends on results from other tables, we need to do some
        munging of the "where" conditions to match the format required for
        (portable) SQL updates. That is done here.
        Further, if we are going to be running multiple updates, we pull out
        the id values to update at this point so that they don't change as a
        result of the progressive updates.
        """
        self.query.select_related = False
        self.query.clear_ordering(True)
        super(SQLUpdateCompiler, self).pre_sql_setup()
        count = self.query.count_active_tables()
        if not self.query.related_updates and count == 1:
            # Simple single-table update; no munging needed.
            return
        # We need to use a sub-select in the where clause to filter on things
        # from other tables.
        query = self.query.clone(klass=Query)
        query.bump_prefix()
        query.extra = {}
        query.select = []
        query.add_fields([query.model._meta.pk.name])
        must_pre_select = count > 1 and not self.connection.features.update_can_self_select
        # Now we adjust the current query: reset the where clause and get rid
        # of all the tables we don't need (since they're in the sub-select).
        self.query.where = self.query.where_class()
        if self.query.related_updates or must_pre_select:
            # Either we're using the idents in multiple update queries (so
            # don't want them to change), or the db backend doesn't support
            # selecting from the updating table (e.g. MySQL).
            idents = []
            for rows in query.get_compiler(self.using).execute_sql(MULTI):
                idents.extend([r[0] for r in rows])
            self.query.add_filter(('pk__in', idents))
            self.query.related_ids = idents
        else:
            # The fast path. Filters and updates in one query.
            self.query.add_filter(('pk__in', query))
        for alias in self.query.tables[1:]:
            self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
    def as_sql(self, qn=None):
        """
        Build the SQL for an aggregate-over-subquery statement.

        Returns a ``(sql, params)`` pair: the outer SELECT projects one
        column per requested aggregate and wraps the pre-rendered subquery
        stored on the query object.
        """
        if qn is None:
            qn = self.quote_name_unless_alias
        aggregate_sql = ', '.join(
            aggregate.as_sql(qn, self.connection)
            for aggregate in self.query.aggregate_select.values()
        )
        sql = 'SELECT %s FROM (%s) subquery' % (aggregate_sql,
                                                self.query.subquery)
        return (sql, self.query.sub_params)
class SQLDateCompiler(SQLCompiler):
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.

        Yields one date value per result row, converting each to a real
        datetime object by whichever mechanism the backend provides.
        """
        # Some backends attach a resolve_columns() method that converts raw
        # values into field-typed values; use it when present.
        resolve_columns = hasattr(self, 'resolve_columns')
        if resolve_columns:
            from django.db.models.fields import DateTimeField
            fields = [DateTimeField()]
        else:
            # Otherwise fall back to string typecasting for backends that
            # return datetimes as strings.
            from django.db.backends.util import typecast_timestamp
            needs_string_cast = self.connection.features.needs_datetime_string_cast
        # The date column sits immediately after any extra-select columns.
        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                date = row[offset]
                if resolve_columns:
                    date = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    date = typecast_timestamp(str(date))
                yield date
def empty_iter():
    """
    Returns an iterator containing no results.
    """
    # A bare ``return`` before ``yield`` makes this a generator that yields
    # nothing.  The previous ``yield iter([]).next()`` relied on Python 2's
    # ``.next()`` method and on StopIteration escaping from a generator
    # body, which is Python 3-incompatible and forbidden by PEP 479.
    return
    yield
def order_modified_iter(cursor, trim, sentinel):
    """
    Yield blocks of rows from ``cursor`` with ordering-only columns removed.

    This iterator covers the special case where extra output columns were
    appended purely to satisfy ordering requirements: the last ``trim``
    columns of every row are stripped before anything else sees the rows,
    since those columns exist only to make the SQL valid.
    """
    fetch_chunk = lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
    for rows in iter(fetch_chunk, sentinel):
        yield [row[:-trim] for row in rows]
|
rishirajsurti/BuildingMachineLearningSystemsWithPython | refs/heads/master | ch05/chose_instances.py | 24 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
# Prefer UltraJSON when installed: a drop-in, faster json implementation.
try:
    import ujson as json  # UltraJSON if available
except:
    import json
import sys
from collections import defaultdict
# Spell checking is optional: without PyEnchant every word is reported as
# correctly spelled, so the misspelling feature is effectively disabled.
try:
    import enchant
    speller = enchant.Dict("en_US")
except:
    print("""\
Enchant is not installed, which is not a problem since spell correction features
will not be used in the chapter. If, however, you want to experiment with them
(highly encouraged!), you can get the library from http://packages.python.org/pyenchant/.
""")

    class EnchantMock:
        # Stand-in for enchant.Dict that accepts every word as correct.
        def __init__(self):
            pass

        def check(self, word):
            return True
    speller = EnchantMock()

from data import chosen, chosen_meta, filtered, filtered_meta

# filtered_meta maps question id -> list of (Id, IsAccepted, TimeToAnswer,
# Score) tuples describing the question's answers (see the unpacking below).
filtered_meta = json.load(open(filtered_meta, "r"))
def misspelled_fraction(p):
    """Return the fraction of whitespace-separated tokens of *p* that the
    spell checker rejects (0.0 for an empty string)."""
    words = p.split()
    if not words:
        return 0.0
    correct = sum(speller.check(w) for w in words)
    return 1 - float(correct) / len(words)
def data(filename, col=None):
    """Yield rows (or one column) from a tab-separated posts file.

    Each line of *filename* must hold exactly ten tab-separated fields:
    Id, ParentId, IsAccepted, TimeToAnswer, Score, Text, NumTextTokens,
    NumCodeLines, LinkCount, NumImages.

    When *col* is None the whole row (a list of strings) is yielded,
    otherwise only the field at index *col*.
    """
    # ``with`` closes the file deterministically; the old version left the
    # handle open until garbage collection.
    with open(filename, "r") as src:
        for line in src:
            # Renamed from ``data`` to avoid shadowing this function's name.
            fields = line.strip().split("\t")
            # Unpacking doubles as a format check: exactly ten columns.
            (Id, ParentId, IsAccepted, TimeToAnswer, Score, Text,
             NumTextTokens, NumCodeLines, LinkCount, NumImages) = fields
            # ``col is not None`` instead of truthiness, so that column 0
            # (the Id) can be selected as well.
            if col is not None:
                yield fields[col]
            else:
                yield fields
# Select which post ids survive into the "chosen" dataset according to the
# strategy named by filter_method, then write the chosen posts plus their
# per-post metadata to disk.
posts_to_keep = set()
found_questions = 0
num_qestion_sample = 1000
# keep the best and worst, but only if we have one with positive and one with negative score
# filter_method = "negative_positive"
# if true, only keep the lowest scoring answer per class in addition to the accepted one
# filter_method = "only_one_per_class "
# if not None, specifies the number of unaccepted per question
# filter_method = "sample_per_question"
filter_method = "negative_positive"  # warning: this does not retrieve many!
# filter_method = "only_one_per_class"
MaxAnswersPerQuestions = 10  # filter_method == "sample_per_question"
# filter_method = "all"
# equal share of questions that are unanswered and those that are answered
# filter_method = "half-half"
unaccepted_scores = {}
has_q_accepted_a = {}
num_q_with_accepted_a = 0
num_q_without_accepted_a = 0
for ParentId, posts in filtered_meta.items():
    assert ParentId != -1
    # Only questions with at least two answers are interesting.
    if len(posts) < 2:
        continue
    ParentId = int(ParentId)
    AllIds = set([ParentId])
    AcceptedId = None
    UnacceptedId = None
    UnacceptedIds = []
    UnacceptedScore = sys.maxsize
    NegativeScoreIds = []
    PositiveScoreIds = []
    if filter_method == "half-half":
        # Keep equally many questions with and without an accepted answer.
        has_accepted_a = False
        for post in posts:
            Id, IsAccepted, TimeToAnswer, Score = post
            if IsAccepted:
                has_accepted_a = True
                break
        has_q_accepted_a[ParentId] = has_accepted_a
        if has_accepted_a:
            if num_q_with_accepted_a < num_qestion_sample / 2:
                num_q_with_accepted_a += 1
                posts_to_keep.add(ParentId)
        else:
            if num_q_without_accepted_a < num_qestion_sample / 2:
                num_q_without_accepted_a += 1
                posts_to_keep.add(ParentId)
        if num_q_without_accepted_a + num_q_with_accepted_a > num_qestion_sample:
            assert -1 not in posts_to_keep
            break
    else:
        # First pass over this question's answers: collect candidates
        # according to the active filter method.
        for post in posts:
            Id, IsAccepted, TimeToAnswer, Score = post
            if filter_method == "all":
                AllIds.add(int(Id))
            elif filter_method == "only_one_per_class":
                if IsAccepted:
                    AcceptedId = Id
                elif Score < UnacceptedScore:
                    UnacceptedScore = Score
                    UnacceptedId = Id
            elif filter_method == "sample_per_question":
                if IsAccepted:
                    AcceptedId = Id
                else:
                    UnacceptedIds.append(Id)
            elif filter_method == "negative_positive":
                if Score < 0:
                    NegativeScoreIds.append((Score, Id))
                elif Score > 0:
                    PositiveScoreIds.append((Score, Id))
            else:
                raise ValueError(filter_method)
        # Second step: decide whether this question (and which of its
        # answers) makes it into the selection.
        added = False
        if filter_method == "all":
            posts_to_keep.update(AllIds)
            added = True
        elif filter_method == "only_one_per_class":
            if AcceptedId is not None and UnacceptedId is not None:
                posts_to_keep.add(ParentId)
                posts_to_keep.add(AcceptedId)
                posts_to_keep.add(UnacceptedId)
                added = True
        elif filter_method == "sample_per_question":
            if AcceptedId is not None and UnacceptedIds is not None:
                posts_to_keep.add(ParentId)
                posts_to_keep.add(AcceptedId)
                posts_to_keep.update(UnacceptedIds[:MaxAnswersPerQuestions])
                added = True
        elif filter_method == "negative_positive":
            # Keep the best-scoring positive and the worst-scoring negative
            # answer, but only when both exist for this question.
            if PositiveScoreIds and NegativeScoreIds:
                posts_to_keep.add(ParentId)
                posScore, posId = sorted(PositiveScoreIds)[-1]
                posts_to_keep.add(posId)
                negScore, negId = sorted(NegativeScoreIds)[0]
                posts_to_keep.add(negId)
                print("%i: %i/%i %i/%i" % (ParentId, posId,
                                           posScore, negId, negScore))
                added = True
        if added:
            found_questions += 1
    # Stop once enough questions have been sampled.
    if num_qestion_sample and found_questions >= num_qestion_sample:
        break
# Second phase: stream the filtered posts file, copy the kept posts into
# the "chosen" file and build the per-post metadata dictionary.
total = 0
kept = 0
already_written = set()
chosen_meta_dict = defaultdict(dict)
with open(chosen, "w") as f:
    for line in data(filtered):
        strId, ParentId, IsAccepted, TimeToAnswer, Score, Text, NumTextTokens, NumCodeLines, LinkCount, NumImages = line
        Text = Text.strip()
        total += 1
        Id = int(strId)
        if Id in posts_to_keep:
            if Id in already_written:
                print(Id, "is already written")
                continue
            # Progress indicator every 100 kept posts.
            if kept % 100 == 0:
                print(kept)
            # setting meta info
            post = chosen_meta_dict[Id]
            post['ParentId'] = int(ParentId)
            post['IsAccepted'] = int(IsAccepted)
            post['TimeToAnswer'] = int(TimeToAnswer)
            post['Score'] = int(Score)
            post['NumTextTokens'] = int(NumTextTokens)
            post['NumCodeLines'] = int(NumCodeLines)
            post['LinkCount'] = int(LinkCount)
            post['MisSpelledFraction'] = misspelled_fraction(Text)
            post['NumImages'] = int(NumImages)
            post['idx'] = kept  # index into the file
            if int(ParentId) == -1:
                # A question: make sure it has an Answers list.
                q = chosen_meta_dict[Id]
                if not 'Answers' in q:
                    q['Answers'] = []
                if filter_method == "half-half":
                    q['HasAcceptedAnswer'] = has_q_accepted_a[Id]
            else:
                # An answer: register it with its question's metadata.
                q = chosen_meta_dict[int(ParentId)]
                if int(IsAccepted) == 1:
                    assert 'HasAcceptedAnswer' not in q
                    q['HasAcceptedAnswer'] = True
                if 'Answers' not in q:
                    q['Answers'] = [Id]
                else:
                    q['Answers'].append(Id)
            f.writelines("%s\t%s\n" % (Id, Text))
            kept += 1
with open(chosen_meta, "w") as fm:
    json.dump(chosen_meta_dict, fm)
print("total=", total)
print("kept=", kept)
|
hynekcer/django | refs/heads/master | django/conf/locale/sq/__init__.py | 12133432 | |
medghaim/PokemonGo-Map | refs/heads/master | pogom/pgoapi/protos/__init__.py | 12133432 | |
zenist/ZLib | refs/heads/master | io/codec/url_response.py | 1 | # -*- coding: utf-8 -*-
import unittest
import codecs
class TestUrlResponseCodec(unittest.TestCase):
    # NOTE: Python 2 only -- uses print statements and str.decode().

    def test_01(self):
        # In Python 2 this byte string holds the literal characters
        # "\u5267\u60c5"; unicode-escape decoding turns them into the
        # corresponding CJK characters.
        __target = "\u5267\u60c5"
        print __target.decode('unicode-escape')

    def test(self):
        # Sample httpbin.org POST response body containing \uXXXX escapes;
        # decode the escapes, then re-encode as UTF-8 for printing.
        __str = '''{
  "args": {},
  "data": ""{\"q\": \"\\u5f20\\u827a\\u8c0b\"}"",
  "files": {},
  "form": {},
  "headers": {
    "Accept": "*/*",
    "Accept-Encoding": "gzip, deflate",
    "Content-Length": "36",
    "Content-Type": "application/json",
    "Host": "httpbin.org",
    "User-Agent": "python-requests/2.9.1"
  },
  "json": "{"q": "\u5f20\u827a\u8c0b"}",
  "origin": "124.193.184.2",
  "url": "http://httpbin.org/post"
}'''
        print __str.decode('unicode-escape').encode('utf-8')
ImmaculateObsession/nest | refs/heads/master | nest/wsgi.py | 1 | """
WSGI config for nest project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "nest.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nest.settings.base")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.  WSGI servers look up the module-level name
# ``application`` by convention.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
gisadmin/vb2py | refs/heads/master | vb2py/test/testclassmethods.py | 2 | from complexframework import *
# Each entry appended to ``tests`` is a 3-tuple: (module type marker,
# VB source code as a string, tuple of Python assertion snippets that are
# executed against the converted code).
# << ClassMethod tests >> (1 of 7)
#
# Simple public method
tests.append((
    VBClassModule(),
    """
Public my_a As String
Public Sub SetA(Value As Integer)
    my_a = Value
End Sub
""",
    ("A = MyClass()\n"
     "A.SetA('hello')\n"
     "assert A.my_a == 'hello', 'A.my_a was (%s)' % (A.my_a,)\n",)
))
#
# Simple public method with a local variable shadowing a class variable
tests.append((
    VBClassModule(),
    """
Public my_a As String
Public my_b As String
Public Sub SetA(Value As Integer)
    Dim my_b
    my_b = "other"
    my_a = Value + my_b
End Sub
Public Sub SetB(Value As Integer)
    my_b = Value
End Sub
""",
    ("A = MyClass()\n"
     "A.SetB('thisisb')\n"
     "A.SetA('thisisa')\n"
     "assert A.my_a == 'thisisaother', 'A.my_a was (%s)' % (A.my_a,)\n"
     "assert A.my_b == 'thisisb', 'A.my_b was (%s)' % (A.my_b,)\n",)
))
#
# Simple public method calling another method
tests.append((
    VBClassModule(),
    """
Public my_a As String
Public my_b As String
Public Sub SetA(Value As Integer)
    SetB Value
    my_a = my_b
End Sub
Public Sub SetB(Value As Integer)
    my_b = Value
End Sub
""",
    ("A = MyClass()\n"
     "A.SetA('thisisa')\n"
     "assert A.my_a == 'thisisa', 'A.my_a was (%s)' % (A.my_a,)\n"
     "assert A.my_b == 'thisisa', 'A.my_b was (%s)' % (A.my_b,)\n",)
))
#
# Simple public method with a parameter shadowing a class variable
tests.append((
    VBClassModule(),
    """
Public my_a As String
Public my_b As String
Public Sub SetA(my_b As Integer)
    my_a = my_b
End Sub
Public Sub SetB(Value As Integer)
    my_b = Value
End Sub
""",
    ("A = MyClass()\n"
     "A.SetB('thisisb')\n"
     "A.SetA('thisisa')\n"
     "assert A.my_a == 'thisisa', 'A.my_a was (%s)' % (A.my_a,)\n"
     "assert A.my_b == 'thisisb', 'A.my_b was (%s)' % (A.my_b,)\n",)
))
# << ClassMethod tests >> (2 of 7)
#
# Simple public function
tests.append((
    VBClassModule(),
    """
Public lower_bound As Integer
Public Sub setLowerBound(Value As Integer)
    lower_bound = Value
End Sub
Public Function Factorial(Value As Integer)
    If Value = lower_bound Then
        Factorial = 1
    Else
        Factorial = Value * Factorial(Value-1)
    End If
End Function
""",
    ("A = MyClass()\n"
     "A.setLowerBound(1)\n"
     "assert A.Factorial(6) == 720, 'A.Factorial(6) was (%s)' % (A.Factorial(6),)\n",)
))
# << ClassMethod tests >> (3 of 7)
#
# Simple public function
tests.append((
    VBClassModule(),
    """
Public a1, a2, a3, a4
Public Function add(Optional X=10, Optional Y=20, Optional Z=30)
    add = X + Y + Z
End Function
Public Sub set()
    a1 = add(1,2,3)
    a2 = add(,2,3)
    a3 = add(,,3)
    a4 = add()
End Sub
""",
    ("A = MyClass()\n",
     "A.set()\n",
     "assert A.a1 == 6\n",
     "assert A.a2 == 15\n",
     "assert A.a3 == 33\n",
     "assert A.a4 == 60\n",
     )
))
# << ClassMethod tests >> (4 of 7)
#
# Simple private method
tests.append((
    VBClassModule(),
    """
Public my_a As String
Private Sub SetA(Value As Integer)
    my_a = Value
End Sub
""",
    ("A = MyClass()\n"
     "try:\n"
     "    A.SetA('hello')\n"
     "except AttributeError:\n"
     "    pass\n"
     "else:\n"
     "    assert 0, 'Method should be private'\n",)
))
# << ClassMethod tests >> (5 of 7)
#
# Simple init method called automatically
tests.append((
    VBClassModule(),
    """
Public my_a As String
Public Sub Class_Initialize()
    my_a = "hello"
End Sub
Public Sub SetA(Value As Integer)
    my_a = Value
End Sub
""",
    ("A = MyClass()\n"
     "assert A.my_a == 'hello', 'A.my_a was (%s)' % (A.my_a,)\n"
     "A.SetA('bye')\n"
     "assert A.my_a == 'bye', 'A.my_a was (%s)' % (A.my_a,)\n",)
))
#
# Explicitely calling the init method
tests.append((
    VBClassModule(),
    """
Public my_a As String
Public Sub Class_Initialize()
    my_a = "hello"
End Sub
Public Sub ReInit()
    Class_Initialize
End Sub
Public Sub SetA(Value As Integer)
    my_a = Value
End Sub
""",
    ("A = MyClass()\n"
     "A.SetA('bye')\n"
     "A.ReInit()\n"
     "assert A.my_a == 'hello', 'A.my_a was (%s)' % (A.my_a,)\n",)
))
#
# Explicitely calling the terminate method
tests.append((
    VBClassModule(),
    """
Public my_a As String
Public Sub Class_Terminate()
    my_a = "hello"
End Sub
Public Sub Reset()
    Class_Terminate
End Sub
Public Sub SetA(Value As Integer)
    my_a = Value
End Sub
""",
    ("A = MyClass()\n"
     "A.SetA('bye')\n"
     "A.Reset()\n"
     "assert A.my_a == 'hello', 'A.my_a was (%s)' % (A.my_a,)\n",)
))
#
# init method is private
tests.append((
    VBClassModule(),
    """
Public my_a As String
Sub Class_Initialize()
    my_a = "hello"
End Sub
Public Sub ReInit()
    Class_Initialize
End Sub
Public Sub SetA(Value As Integer)
    my_a = Value
End Sub
""",
    ("A = MyClass()\n"
     "A.SetA('bye')\n"
     "A.ReInit()\n"
     "assert A.my_a == 'hello', 'A.my_a was (%s)' % (A.my_a,)\n",)
))
#
# Terminate method is private
tests.append((
    VBClassModule(),
    """
Public my_a As String
Sub Class_Terminate()
    my_a = "hello"
End Sub
Public Sub Reset()
    Class_Terminate
End Sub
Public Sub SetA(Value As Integer)
    my_a = Value
End Sub
""",
    ("A = MyClass()\n"
     "A.SetA('bye')\n"
     "A.Reset()\n"
     "assert A.my_a == 'hello', 'A.my_a was (%s)' % (A.my_a,)\n",)
))
# The "$assert" form checks the generated Python text itself rather than
# executing it.
tests.append((
    VBClassModule(),
    """
Public my_a As String
Sub Class_Terminate()
    'my_a = 1/0
End Sub
Public Sub SetA(Value As Integer)
    my_a = Value
End Sub
""",
    ("$assert python.find('def __del__(self') <> -1, '__del__ method not created'", )
))
# << ClassMethod tests >> (6 of 7)
#
# Class properties
tests.append((
    VBClassModule(),
    """
Public arr()
Public Sub DoIt(Value As Integer)
    ReDim arr(Value)
End Sub
""",
    ("A = MyClass()\n"
     "B = MyClass()\n"
     "A.DoIt(10)\n"
     "B.DoIt(20)\n"
     "assert len(A.arr) == 11, 'len(A.arr) was (%s)' % (len(A.arr),)\n"
     "assert len(B.arr) == 21, 'len(B.arr) was (%s)' % (len(B.arr),)\n",)
))
#
# Make sure class properties are not shared
tests.append((
    VBClassModule(),
    """
Public arr(20)
Public Sub DoIt(Value As Integer)
    arr(10) = Value
End Sub
""",
    ("A = MyClass()\n"
     "B = MyClass()\n"
     "A.DoIt(10)\n"
     "B.DoIt(20)\n"
     "assert A.arr[10] == 10, 'A.arr[10] was (%s)' % (A.arr[10],)\n"
     "assert B.arr[10] == 20, 'B.arr[10] was (%s)' % (B.arr[10],)\n",)
))
# << ClassMethod tests >> (7 of 7)
#
# Me in an expression
tests.append((
    VBClassModule(),
    """
Public Val
Public Sub DoIt(Value As Integer)
    Me.Val = Value
End Sub
""",
    ("A = MyClass()\n"
     "A.DoIt(10)\n"
     "assert A.Val==10, 'A.Val was (%s)' % (A.Val,)\n",)
))
#
# Me in a call
tests.append((
    VBClassModule(),
    """
Public Val
Public Sub DoIt(Value As Integer)
    Val = Value
    Me.AddOne
End Sub
Public Sub AddOne()
    Val = Val + 1
End Sub
""",
    ("A = MyClass()\n"
     "A.DoIt(10)\n"
     "assert A.Val==11, 'A.Val was (%s)' % (A.Val,)\n",)
))
# -- end -- << ClassMethod tests >>

import vb2py.vbparser
vb2py.vbparser.log.setLevel(0)  # Don't print all logging stuff

# Turn the accumulated (module, vb_code, assertions) tuples into unittest
# test methods on the shared BasicTest case.
TestClass = addTestsTo(BasicTest, tests)

if __name__ == "__main__":
    main()
|
redhatrises/freeipa | refs/heads/master | ipalib/rpc.py | 2 | # Authors:
# Jason Gerard DeRose <jderose@redhat.com>
# Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
RPC client and shared RPC client/server functionality.
This module adds some additional functionality on top of the ``xmlrpc.client``
module in the Python standard library (``xmlrpclib`` in Python 2).
For documentation on the ``xmlrpclib`` module, see:
http://docs.python.org/2/library/xmlrpclib.html
Also see the `ipaserver.rpcserver` module.
"""
from __future__ import absolute_import
from decimal import Decimal
import datetime
import os
import locale
import base64
import json
import re
import socket
import gzip
import gssapi
from dns import resolver, rdatatype
from dns.exception import DNSException
from ssl import SSLError
import six
from six.moves import urllib
from ipalib.backend import Connectible
from ipalib.constants import LDAP_GENERALIZED_TIME_FORMAT
from ipalib.errors import (public_errors, UnknownError, NetworkError,
KerberosError, XMLRPCMarshallError, JSONError)
from ipalib import errors, capabilities
from ipalib.request import context, Connection
from ipapython.ipa_log_manager import root_logger
from ipapython import ipautil
from ipapython import session_storage
from ipapython.cookie import Cookie
from ipapython.dnsutil import DNSName
from ipalib.text import _
from ipalib.util import create_https_connection
from ipalib.krb_utils import KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN, KRB5KRB_AP_ERR_TKT_EXPIRED, \
KRB5_FCC_PERM, KRB5_FCC_NOFILE, KRB5_CC_FORMAT, \
KRB5_REALM_CANT_RESOLVE, KRB5_CC_NOTFOUND, get_principal
from ipapython.dn import DN
from ipapython.kerberos import Principal
from ipalib.capabilities import VERSION_WITHOUT_CAPABILITIES
from ipalib import api
# The XMLRPC client is in "six.moves.xmlrpc_client", but pylint
# cannot handle that
try:
from xmlrpclib import (Binary, Fault, DateTime, dumps, loads, ServerProxy,
Transport, ProtocolError, MININT, MAXINT)
except ImportError:
# pylint: disable=import-error
from xmlrpc.client import (Binary, Fault, DateTime, dumps, loads, ServerProxy,
Transport, ProtocolError, MININT, MAXINT)
# pylint: disable=import-error
if six.PY3:
from http.client import RemoteDisconnected
else:
from httplib import BadStatusLine as RemoteDisconnected
# pylint: enable=import-error
if six.PY3:
unicode = str
COOKIE_NAME = 'ipa_session'
CCACHE_COOKIE_KEY = 'X-IPA-Session-Cookie'
errors_by_code = dict((e.errno, e) for e in public_errors)
def update_persistent_client_session_data(principal, data):
    '''
    Create or update the stored session data for *principal* in the
    persistent secure storage.

    Any underlying failure is reported as a ValueError.
    '''
    try:
        session_storage.store_data(principal, CCACHE_COOKIE_KEY, data)
    except Exception as exc:
        raise ValueError(str(exc))
def read_persistent_client_session_data(principal):
    '''
    Return the session data stored for *principal* in the persistent
    secure storage.

    Any underlying failure is reported as a ValueError.
    '''
    try:
        return session_storage.get_data(principal, CCACHE_COOKIE_KEY)
    except Exception as exc:
        raise ValueError(str(exc))
def delete_persistent_client_session_data(principal):
    '''
    Remove the session data stored for *principal* from the persistent
    secure storage.

    Any underlying failure is reported as a ValueError.
    '''
    try:
        session_storage.remove_data(principal, CCACHE_COOKIE_KEY)
    except Exception as exc:
        raise ValueError(str(exc))
def xml_wrap(value, version):
    """
    Wrap all ``str`` in ``xmlrpc.client.Binary``.

    Because ``xmlrpc.client.dumps()`` will itself convert all ``unicode``
    instances into UTF-8 encoded ``str`` instances, we don't do it here.

    So in total, when encoding data for an XML-RPC packet, the following
    transformations occur:

        * All ``str`` instances are treated as binary data and are wrapped in
          an ``xmlrpc.client.Binary()`` instance.

        * Only ``unicode`` instances are treated as character data. They get
          converted to UTF-8 encoded ``str`` instances (although as
          mentioned, not by this function).

    Also see `xml_unwrap()`.

    :param value: The simple scalar or simple compound value to wrap.
    :param version: Client version string; gates which wire representations
        (datetime/DNS name markers) the client can understand.
    """
    # NOTE: several branches deliberately use exact ``type() is`` checks
    # rather than isinstance(), so subclasses fall through to later checks.
    if type(value) in (list, tuple):
        return tuple(xml_wrap(v, version) for v in value)
    if isinstance(value, dict):
        return dict(
            (k, xml_wrap(v, version)) for (k, v) in value.items()
        )
    if type(value) is bytes:
        return Binary(value)
    if type(value) is Decimal:
        # transfer Decimal as a string
        return unicode(value)
    # Integers outside the 32-bit XML-RPC range must travel as strings.
    if isinstance(value, six.integer_types) and (value < MININT or value > MAXINT):
        return unicode(value)
    if isinstance(value, DN):
        return str(value)

    # Encode datetime.datetime objects as xmlrpc.client.DateTime objects
    if isinstance(value, datetime.datetime):
        if capabilities.client_has_capability(version, 'datetime_values'):
            return DateTime(value)
        else:
            # Old clients get the LDAP generalized-time string form.
            return value.strftime(LDAP_GENERALIZED_TIME_FORMAT)

    if isinstance(value, DNSName):
        if capabilities.client_has_capability(version, 'dns_name_values'):
            return {'__dns_name__': unicode(value)}
        else:
            return unicode(value)

    if isinstance(value, Principal):
        return unicode(value)

    # Anything else must already be a scalar XML-RPC can represent.
    assert type(value) in (unicode, float, bool, type(None)) + six.integer_types
    return value
def xml_unwrap(value, encoding='UTF-8'):
    """
    Unwrap all ``xmlrpc.Binary``, decode all ``str`` into ``unicode``.

    When decoding data from an XML-RPC packet, the following transformations
    occur:

        * The binary payloads of all ``xmlrpc.client.Binary`` instances are
          returned as ``str`` instances.

        * All ``str`` instances are treated as UTF-8 encoded Unicode
          strings. They are decoded and the resulting ``unicode`` instance
          is returned.

    Also see `xml_wrap()`.

    :param value: The value to unwrap.
    :param encoding: The Unicode encoding to use (defaults to ``'UTF-8'``).
    """
    # Compound values are unwrapped recursively; lists become tuples.
    if type(value) in (list, tuple):
        return tuple(xml_unwrap(v, encoding) for v in value)
    if type(value) is dict:
        # A {'__dns_name__': ...} marker dict represents a DNSName value.
        if '__dns_name__' in value:
            return DNSName(value['__dns_name__'])
        else:
            return dict(
                (k, xml_unwrap(v, encoding)) for (k, v) in value.items()
            )
    if isinstance(value, bytes):
        return value.decode(encoding)
    if isinstance(value, Binary):
        assert type(value.data) is bytes
        return value.data
    if isinstance(value, DateTime):
        # xmlprc DateTime is converted to string of %Y%m%dT%H:%M:%S format
        return datetime.datetime.strptime(str(value), "%Y%m%dT%H:%M:%S")
    assert type(value) in (unicode, int, float, bool, type(None))
    return value
def xml_dumps(params, version, methodname=None, methodresponse=False,
              encoding='UTF-8'):
    """
    Encode an XML-RPC request or response packet.

    ``params`` is either a tuple of call arguments, which is first wrapped
    via `xml_wrap()`, or an ``xmlrpc.client.Fault`` instance, which is
    serialized as-is.  The actual encoding is delegated to
    ``xmlrpc.client.dumps()`` with ``allow_none`` enabled.

    Also see `xml_loads()`.

    :param params: A ``tuple`` or an ``xmlrpc.client.Fault`` instance.
    :param version: Client version string passed through to `xml_wrap()`.
    :param methodname: The name of the method to call if this is a request.
    :param methodresponse: Set this to ``True`` if this is a response.
    :param encoding: The Unicode encoding to use (defaults to ``'UTF-8'``).
    """
    if isinstance(params, Fault):
        payload = params
    else:
        assert type(params) is tuple
        payload = xml_wrap(params, version)
    return dumps(payload,
                 methodname=methodname,
                 methodresponse=methodresponse,
                 encoding=encoding,
                 allow_none=True)
class _JSONPrimer(dict):
    """Fast JSON primer and pre-converter

    Prepare a data structure for JSON serialization. In an ideal world,
    priming could be handled by the default hook of json.dumps().
    Unfortunately the hook treats Python 2 str as text while FreeIPA
    considers str as bytes.

    The primer uses a couple of tricks to archive maximum performance:

    * O(1) type look instead of O(n) chain of costly isinstance() calls
    * __missing__ and __mro__ with caching to handle subclasses
    * inline code with minor code duplication (func lookup in enc_list/dict)
    * avoid surplus function calls (e.g. func is _identity, obj.__class__
      instead if type(obj))
    * function default arguments to turn global into local lookups
    * avoid re-creation of bound method objects (e.g. result.append)
    * on-demand lookup of client capabilities with cached values

    Depending on the client version number, the primer converts:

    * bytes -> {'__base64__': b64encode}
    * datetime -> {'__datetime__': LDAP_GENERALIZED_TIME}
    * DNSName -> {'__dns_name__': unicode}

    The _ipa_obj_hook() functions unserializes the marked JSON objects to
    bytes, datetime and DNSName.

    :see: _ipa_obj_hook
    """
    # The instance itself is the type->converter dispatch table, so no
    # instance __dict__ is needed beyond these slots.
    __slots__ = ('version', '_cap_datetime', '_cap_dnsname')

    # Sentinel meaning "value passes through unchanged".
    _identity = object()

    def __init__(self, version, _identity=_identity):
        super(_JSONPrimer, self).__init__()
        self.version = version
        # Capability flags are resolved lazily on first use, then cached.
        self._cap_datetime = None
        self._cap_dnsname = None
        self.update({
            unicode: _identity,
            bool: _identity,
            type(None): _identity,
            float: _identity,
            Decimal: unicode,
            DN: str,
            Principal: unicode,
            DNSName: self._enc_dnsname,
            datetime.datetime: self._enc_datetime,
            bytes: self._enc_bytes,
            list: self._enc_list,
            tuple: self._enc_list,
            dict: self._enc_dict,
        })
        # int, long
        for t in six.integer_types:
            self[t] = _identity

    def __missing__(self, typ):
        # walk MRO to find best match
        for c in typ.__mro__:
            if c in self:
                self[typ] = self[c]
                return self[c]
        # use issubclass to check for registered ABCs
        for c in self:
            if issubclass(typ, c):
                self[typ] = self[c]
                return self[c]
        raise TypeError(typ)

    def convert(self, obj, _identity=_identity):
        # obj.__class__ is twice as fast as type(obj)
        func = self[obj.__class__]
        return obj if func is _identity else func(obj)

    def _enc_datetime(self, val):
        cap = self._cap_datetime
        if cap is None:
            # Resolve and cache the client capability once per primer.
            cap = capabilities.client_has_capability(self.version,
                                                     'datetime_values')
            self._cap_datetime = cap
        if cap:
            return {'__datetime__': val.strftime(LDAP_GENERALIZED_TIME_FORMAT)}
        else:
            return val.strftime(LDAP_GENERALIZED_TIME_FORMAT)

    def _enc_dnsname(self, val):
        cap = self._cap_dnsname
        if cap is None:
            # Resolve and cache the client capability once per primer.
            cap = capabilities.client_has_capability(self.version,
                                                     'dns_name_values')
            self._cap_dnsname = cap
        if cap:
            return {'__dns_name__': unicode(val)}
        else:
            return unicode(val)

    def _enc_bytes(self, val):
        encoded = base64.b64encode(val)
        if not six.PY2:
            # b64encode returns bytes on Python 3; JSON needs text.
            encoded = encoded.decode('ascii')
        return {'__base64__': encoded}

    def _enc_list(self, val, _identity=_identity):
        result = []
        append = result.append
        for v in val:
            func = self[v.__class__]
            append(v if func is _identity else func(v))
        return result

    def _enc_dict(self, val, _identity=_identity, _iteritems=six.iteritems):
        result = {}
        for k, v in _iteritems(val):
            func = self[v.__class__]
            result[k] = v if func is _identity else func(v)
        return result
def json_encode_binary(val, version, pretty_print=False):
    """Serialize a Python object structure to JSON

    :param object val: Python object structure
    :param str version: client version
    :param bool pretty_print: indent and sort JSON (warning: slow!)
    :return: text
    :note: pretty printing triggers a slow path in Python's JSON module.
        Only use pretty_print in debug mode.
    """
    primed = _JSONPrimer(version).convert(val)
    if pretty_print:
        return json.dumps(primed, indent=4, sort_keys=True)
    return json.dumps(primed)
def _ipa_obj_hook(dct, _iteritems=six.iteritems, _list=list):
    """JSON object hook

    Reverses the ``{'__base64__': ...}``, ``{'__datetime__': ...}`` and
    ``{'__dns_name__': ...}`` markers produced by the primer, and converts
    plain list values of ordinary objects back into tuples.

    :see: _JSONPrimer
    """
    if '__base64__' in dct:
        return base64.b64decode(dct['__base64__'])
    if '__datetime__' in dct:
        return datetime.datetime.strptime(
            dct['__datetime__'], LDAP_GENERALIZED_TIME_FORMAT)
    if '__dns_name__' in dct:
        return DNSName(dct['__dns_name__'])
    # XXX tests assume tuples. Is this really necessary?
    for key, value in _iteritems(dct):
        if value.__class__ is _list:
            dct[key] = tuple(value)
    return dct
def json_decode_binary(val):
    """Convert a serialized JSON string back to a Python data structure.

    :param val: JSON text (``str``, or UTF-8 encoded ``bytes``)
    :return: Python data structure
    :see: _ipa_obj_hook, _JSONPrimer
    """
    text = val.decode('utf-8') if isinstance(val, bytes) else val
    return json.loads(text, object_hook=_ipa_obj_hook)
def decode_fault(e, encoding='UTF-8'):
    """Return *e* with its ``faultString`` decoded to text.

    A new ``Fault`` is created only when the fault string is still
    ``bytes``; otherwise the original instance is returned unchanged.
    """
    assert isinstance(e, Fault)
    if not isinstance(e.faultString, bytes):
        return e
    return Fault(e.faultCode, e.faultString.decode(encoding))
def xml_loads(data, encoding='UTF-8'):
    """
    Decode the XML-RPC packet in ``data``, transparently unwrapping its
    params.

    The packet is parsed with ``xmlrpc.client.loads()``.  If it carries a
    fault, the resulting ``Fault`` is re-raised after having its fault
    string decoded to text via `decode_fault()`.  Otherwise a
    ``(params, methodname)`` tuple is returned, with the params run through
    `xml_unwrap()`; ``methodname`` is ``None`` when the packet does not
    name a method.

    Also see `xml_dumps()`.

    :param data: The XML-RPC packet to decode.
    """
    try:
        raw_params, methodname = loads(data)
        return (xml_unwrap(raw_params), methodname)
    except Fault as e:
        raise decode_fault(e)
class DummyParser(object):
    """Parser stand-in that simply accumulates raw response chunks.

    Used for the JSON protocol, where no XML parsing is wanted: ``feed()``
    collects the chunks and ``close()`` hands everything back as a single
    bytes object.
    """

    def __init__(self):
        self.data = []

    def feed(self, data):
        self.data.append(data)

    def close(self):
        return b''.join(self.data)
class MultiProtocolTransport(Transport):
    """Transport that handles both XML-RPC and JSON.

    The wire protocol is selected via the ``protocol`` keyword argument:
    ``'json'`` picks JSON, anything else falls back to XML-RPC.
    """
    def __init__(self, *args, **kwargs):
        Transport.__init__(self)
        self.protocol = kwargs.get('protocol', None)

    def getparser(self):
        # JSON responses are not XML-parsed; collect the raw bytes instead.
        if self.protocol != 'json':
            return Transport.getparser(self)
        collector = DummyParser()
        return collector, collector

    def send_content(self, connection, request_body):
        if self.protocol == 'json':
            content_type = "application/json"
        else:
            content_type = "text/xml"
        connection.putheader("Content-Type", content_type)
        # gzip compression would be set up here, but we have it turned off
        # (encode_threshold is None)
        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders(request_body)
class LanguageAwareTransport(MultiProtocolTransport):
    """Transport sending Accept-Language header"""

    def get_host_info(self, host):
        # Let the parent split host/credentials and collect any extra
        # headers already configured for this host.
        host, extra_headers, x509 = MultiProtocolTransport.get_host_info(
            self, host)

        try:
            # NOTE(review): locale.setlocale(..., '') *sets* the process
            # locale from the environment as a side effect while returning
            # it -- presumably intentional here; confirm before changing.
            lang = locale.setlocale(locale.LC_ALL, '').split('.')[0].lower()
        except locale.Error:
            # fallback to default locale
            lang = 'en_us'

        if not isinstance(extra_headers, list):
            extra_headers = []

        # Advertise the client locale ('en_us' -> 'en-us') and point the
        # Referer at the server's XML-RPC endpoint.
        extra_headers.append(
            ('Accept-Language', lang.replace('_', '-'))
        )
        extra_headers.append(
            ('Referer', 'https://%s/ipa/xml' % str(host))
        )

        return (host, extra_headers, x509)
class SSLTransport(LanguageAwareTransport):
    """Handles an HTTPS transaction to an XML-RPC server."""

    def make_connection(self, host):
        host, self._extra_headers, _x509 = self.get_host_info(host)

        # self._connection caches a (host, connection) pair; reuse it for
        # the same host to get HTTP keep-alive.
        if self._connection and host == self._connection[0]:
            root_logger.debug("HTTP connection keep-alive (%s)", host)
            return self._connection[1]

        # TLS parameters (CA certificate, min/max protocol version) come
        # from the global API environment.
        conn = create_https_connection(
            host, 443,
            api.env.tls_ca_cert,
            tls_version_min=api.env.tls_version_min,
            tls_version_max=api.env.tls_version_max)

        conn.connect()
        root_logger.debug("New HTTP connection (%s)", host)

        self._connection = host, conn

        return self._connection[1]
class KerbTransport(SSLTransport):
    """
    Handles Kerberos Negotiation authentication to an XML-RPC server.
    """
    # Request mutual authentication and replay/out-of-order detection
    # from GSSAPI.
    flags = [gssapi.RequirementFlag.mutual_authentication,
             gssapi.RequirementFlag.out_of_sequence_detection]

    def __init__(self, *args, **kwargs):
        SSLTransport.__init__(self, *args, **kwargs)
        # In-progress GSSAPI negotiation state; None when no handshake is
        # under way.
        self._sec_context = None
        self.service = kwargs.pop("service", "HTTP")
        self.ccache = kwargs.pop("ccache", None)

    def _handle_exception(self, e, service=None):
        """Translate a GSSError's minor code into the matching IPA error."""
        minor = e.min_code
        if minor == KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN:
            raise errors.ServiceError(service=service)
        elif minor == KRB5_FCC_NOFILE:
            raise errors.NoCCacheError()
        elif minor == KRB5KRB_AP_ERR_TKT_EXPIRED:
            raise errors.TicketExpired()
        elif minor == KRB5_FCC_PERM:
            raise errors.BadCCachePerms()
        elif minor == KRB5_CC_FORMAT:
            raise errors.BadCCacheFormat()
        elif minor == KRB5_REALM_CANT_RESOLVE:
            raise errors.CannotResolveKDC()
        elif minor == KRB5_CC_NOTFOUND:
            raise errors.CCacheError()
        else:
            raise errors.KerberosError(message=unicode(e))

    def _get_host(self):
        # Host of the currently cached connection.
        return self._connection[0]

    def _remove_extra_header(self, name):
        """Drop the first extra header called *name*, if present."""
        for (h, v) in self._extra_headers:
            if h == name:
                self._extra_headers.remove((h, v))
                break

    def get_auth_info(self, use_cookie=True):
        """
        Two things can happen here. If we have a session we will add
        a cookie for that. If not we will set an Authorization header.
        """
        if not isinstance(self._extra_headers, list):
            self._extra_headers = []

        # Remove any existing Cookie first
        self._remove_extra_header('Cookie')

        if use_cookie:
            session_cookie = getattr(context, 'session_cookie', None)
            if session_cookie:
                self._extra_headers.append(('Cookie', session_cookie))
                return

        # Set the remote host principal
        host = self._get_host()
        service = self.service + "@" + host.split(':')[0]

        try:
            creds = None
            if self.ccache:
                creds = gssapi.Credentials(usage='initiate',
                                           store={'ccache': self.ccache})
            name = gssapi.Name(service, gssapi.NameType.hostbased_service)
            # Start Negotiate; the first token is sent in the
            # Authorization header below.
            self._sec_context = gssapi.SecurityContext(creds=creds, name=name,
                                                       flags=self.flags)
            response = self._sec_context.step()
        except gssapi.exceptions.GSSError as e:
            # Always raises a translated IPA error.
            self._handle_exception(e, service=service)

        self._set_auth_header(response)

    def _set_auth_header(self, token):
        # Remove any existing authorization header first
        self._remove_extra_header('Authorization')

        if token:
            self._extra_headers.append(
                ('Authorization', 'negotiate %s' % base64.b64encode(token).decode('ascii'))
            )

    def _auth_complete(self, response):
        """Advance the Negotiate handshake; return True once authenticated.

        Returning False signals the caller to re-send the request with the
        freshly prepared Authorization header.
        """
        if self._sec_context:
            # Extract the server's negotiate token from WWW-Authenticate.
            header = response.getheader('www-authenticate', '')
            token = None
            for field in header.split(','):
                k, _dummy, v = field.strip().partition(' ')
                if k.lower() == 'negotiate':
                    try:
                        token = base64.b64decode(v.encode('ascii'))
                        break
                    # b64decode raises TypeError on invalid input
                    except (TypeError, UnicodeError):
                        pass
            if not token:
                raise KerberosError(
                    message=u"No valid Negotiate header in server response")
            token = self._sec_context.step(token=token)
            if self._sec_context.complete:
                self._sec_context = None
                return True
            self._set_auth_header(token)
            return False
        elif response.status == 401:
            # Rejected without an ongoing negotiation: restart auth,
            # skipping the (evidently invalid) session cookie.
            self.get_auth_info(use_cookie=False)
            return False
        return True

    def single_request(self, host, handler, request_body, verbose=0):
        # Based on Python 2.7's xmllib.Transport.single_request
        try:
            h = self.make_connection(host)

            if verbose:
                h.set_debuglevel(1)

            self.get_auth_info()

            # Loop until the Negotiate handshake completes; each 401 round
            # trip re-sends the request with a fresh token.
            while True:
                if six.PY2:
                    # pylint: disable=no-value-for-parameter
                    self.send_request(h, handler, request_body)
                    # pylint: enable=no-value-for-parameter
                    self.send_host(h, host)
                    self.send_user_agent(h)
                    self.send_content(h, request_body)
                    response = h.getresponse(buffering=True)
                else:
                    self.__send_request(h, host, handler, request_body, verbose)
                    response = h.getresponse()

                if response.status != 200:
                    # Drain the body so the keep-alive connection stays
                    # usable.
                    if (response.getheader("content-length", 0)):
                        response.read()

                    if response.status == 401:
                        if not self._auth_complete(response):
                            continue

                    raise ProtocolError(
                        host + handler,
                        response.status, response.reason,
                        response.msg)

                self.verbose = verbose
                if not self._auth_complete(response):
                    continue
                return self.parse_response(response)
        except gssapi.exceptions.GSSError as e:
            # Always raises a translated IPA error.
            self._handle_exception(e)
        except RemoteDisconnected:
            # keep-alive connection was terminated by remote peer, close
            # connection and let transport handle reconnect for us.
            self.close()
            root_logger.debug("HTTP server has closed connection (%s)", host)
            raise
        except BaseException as e:
            # Unexpected exception may leave connections in a bad state.
            self.close()
            root_logger.debug("HTTP connection destroyed (%s)",
                              host, exc_info=True)
            raise

    if six.PY3:
        def __send_request(self, connection, host, handler, request_body, debug):
            # Based on xmlrpc.client.Transport.send_request
            headers = self._extra_headers[:]
            if debug:
                connection.set_debuglevel(1)
            if self.accept_gzip_encoding and gzip:
                connection.putrequest("POST", handler, skip_accept_encoding=True)
                connection.putheader("Accept-Encoding", "gzip")
                headers.append(("Accept-Encoding", "gzip"))
            else:
                connection.putrequest("POST", handler)
            headers.append(("User-Agent", self.user_agent))
            self.send_headers(connection, headers)  # pylint: disable=E1101
            self.send_content(connection, request_body)
            return connection

    # Find all occurrences of the expiry component
    expiry_re = re.compile(r'.*?(&expiry=\d+).*?')

    def _slice_session_cookie(self, session_cookie):
        # Keep only the cookie value and strip away all other info.
        # This is to reduce the churn on FILE ccaches which grow every time we
        # set new data. The expiration time for the cookie is set in the
        # encrypted data anyway and will be enforced by the server
        http_cookie = session_cookie.http_cookie()
        # We also remove the "expiry" part from the data which is not required
        for exp in self.expiry_re.findall(http_cookie):
            http_cookie = http_cookie.replace(exp, '')
        return http_cookie

    def store_session_cookie(self, cookie_header):
        '''
        Given the contents of a Set-Cookie header scan the header and
        extract each cookie contained within until the session cookie
        is located. Examine the session cookie if the domain and path
        are specified, if not update the cookie with those values from
        the request URL. Then write the session cookie into the key
        store for the principal. If the cookie header is None or the
        session cookie is not present in the header no action is
        taken.

        Context Dependencies:

        The per thread context is expected to contain:
            principal
                The current pricipal the HTTP request was issued for.
            request_url
                The URL of the HTTP request.

        '''
        if cookie_header is None:
            return

        principal = getattr(context, 'principal', None)
        request_url = getattr(context, 'request_url', None)
        root_logger.debug("received Set-Cookie (%s)'%s'", type(cookie_header),
                          cookie_header)

        if not isinstance(cookie_header, list):
            cookie_header = [cookie_header]

        # Search for the session cookie
        session_cookie = None
        try:
            for cookie in cookie_header:
                session_cookie = (
                    Cookie.get_named_cookie_from_string(
                        cookie, COOKIE_NAME, request_url,
                        timestamp=datetime.datetime.utcnow())
                )
                if session_cookie is not None:
                    break
        except Exception as e:
            root_logger.error("unable to parse cookie header '%s': %s", cookie_header, e)
            return

        if session_cookie is None:
            return

        cookie_string = self._slice_session_cookie(session_cookie)
        root_logger.debug("storing cookie '%s' for principal %s", cookie_string, principal)
        try:
            update_persistent_client_session_data(principal, cookie_string)
        except Exception as e:
            # Not fatal, we just can't use the session cookie we were sent.
            pass

    def parse_response(self, response):
        # Capture any session cookie the server handed back before the
        # normal response parsing.
        if six.PY2:
            header = response.msg.getheaders('Set-Cookie')
        else:
            header = response.msg.get_all('Set-Cookie')
        self.store_session_cookie(header)
        return SSLTransport.parse_response(self, response)
class DelegatedKerbTransport(KerbTransport):
    """
    Handles Kerberos Negotiation authentication and TGT delegation to an
    XML-RPC server.
    """
    # Same as KerbTransport.flags, plus delegation of the client's
    # credentials to the server.
    flags = [gssapi.RequirementFlag.delegate_to_peer,
             gssapi.RequirementFlag.mutual_authentication,
             gssapi.RequirementFlag.out_of_sequence_detection]
class RPCClient(Connectible):
    """
    Forwarding backend plugin for XML-RPC client.

    Also see the `ipaserver.rpcserver.xmlserver` plugin.
    """

    # Values to set on subclasses:
    session_path = None
    server_proxy_class = ServerProxy
    protocol = None
    env_rpc_uri_key = None

    def get_url_list(self, rpc_uri):
        """
        Create a list of urls consisting of the available IPA servers.
        """
        # the configured URL defines what we use for the discovered servers
        (_scheme, _netloc, path, _params, _query, _fragment
            ) = urllib.parse.urlparse(rpc_uri)
        servers = []

        # Discover additional servers through the domain's LDAP SRV records.
        name = '_ldap._tcp.%s.' % self.env.domain

        try:
            answers = resolver.query(name, rdatatype.SRV)
        except DNSException:
            answers = []

        for answer in answers:
            server = str(answer.target).rstrip(".")
            servers.append('https://%s%s' % (ipautil.format_netloc(server), path))

        # De-duplicate the discovered servers.
        servers = list(set(servers))
        # the list/set conversion won't preserve order so stick in the
        # local config file version here.
        cfg_server = rpc_uri
        if cfg_server in servers:
            # make sure the configured master server is there just once and
            # it is the first one
            servers.remove(cfg_server)
            servers.insert(0, cfg_server)
        else:
            servers.insert(0, cfg_server)

        return servers

    def get_session_cookie_from_persistent_storage(self, principal):
        '''
        Retrieves the session cookie for the given principal from the
        persistent secure storage. Returns None if not found or unable
        to retrieve the session cookie for any reason, otherwise
        returns a Cookie object containing the session cookie.
        '''
        # Get the session data, it should contain a cookie string
        # (possibly with more than one cookie).
        try:
            cookie_string = read_persistent_client_session_data(principal)
        except Exception:
            return None

        # Search for the session cookie within the cookie string
        try:
            session_cookie = Cookie.get_named_cookie_from_string(
                cookie_string, COOKIE_NAME,
                timestamp=datetime.datetime.utcnow())
        except Exception as e:
            self.log.debug(
                'Error retrieving cookie from the persistent storage: {err}'
                .format(err=e))
            return None

        return session_cookie

    def apply_session_cookie(self, url):
        '''
        Attempt to load a session cookie for the current principal
        from the persistent secure storage. If the cookie is
        successfully loaded adjust the input url's to point to the
        session path and insert the session cookie into the per thread
        context for later insertion into the HTTP request. If the
        cookie is not successfully loaded then the original url is
        returned and the per thread context is not modified.

        Context Dependencies:

        The per thread context is expected to contain:
            principal
                The current pricipal the HTTP request was issued for.

        The per thread context will be updated with:
            session_cookie
                A cookie string to be inserted into the Cookie header
                of the HTPP request.

        '''
        original_url = url
        principal = getattr(context, 'principal', None)

        session_cookie = self.get_session_cookie_from_persistent_storage(principal)
        if session_cookie is None:
            self.log.debug("failed to find session_cookie in persistent storage for principal '%s'",
                           principal)
            return original_url
        else:
            # NOTE(review): self.debug vs self.log.debug is used
            # inconsistently in this method -- confirm both resolve on
            # the Connectible base.
            self.debug("found session_cookie in persistent storage for principal '%s', cookie: '%s'",
                       principal, session_cookie)

        # Decide if we should send the cookie to the server
        try:
            session_cookie.http_return_ok(original_url)
        except Cookie.Expired as e:
            self.debug("deleting session data for principal '%s': %s", principal, e)
            try:
                delete_persistent_client_session_data(principal)
            except Exception as e:
                # Best effort only; fall back to the non-session URL.
                pass
            return original_url
        except Cookie.URLMismatch as e:
            self.debug("not sending session cookie, URL mismatch: %s", e)
            return original_url
        except Exception as e:
            self.error("not sending session cookie, unknown error: %s", e)
            return original_url

        # O.K. session_cookie is valid to be returned, stash it away where it will will
        # get included in a HTTP Cookie headed sent to the server.
        self.log.debug("setting session_cookie into context '%s'", session_cookie.http_cookie())
        setattr(context, 'session_cookie', session_cookie.http_cookie())

        # Form the session URL by substituting the session path into the original URL
        scheme, netloc, path, params, query, fragment = urllib.parse.urlparse(original_url)
        path = self.session_path
        # urlencode *can* take one argument
        # pylint: disable=too-many-function-args
        session_url = urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))

        return session_url

    def create_connection(self, ccache=None, verbose=None, fallback=None,
                          delegate=None, ca_certfile=None):
        """Connect to the first responsive IPA server and return its proxy.

        Unspecified options default to the corresponding api.env settings.
        '''fallback''' controls whether other discovered servers are tried
        after a failure.
        """
        if verbose is None:
            verbose = self.api.env.verbose
        if fallback is None:
            fallback = self.api.env.fallback
        if delegate is None:
            delegate = self.api.env.delegate
        if ca_certfile is None:
            ca_certfile = self.api.env.tls_ca_cert
        try:
            rpc_uri = self.env[self.env_rpc_uri_key]
            principal = get_principal(ccache_name=ccache)
            stored_principal = getattr(context, 'principal', None)
            if principal != stored_principal:
                # Different principal than last time: any stored session
                # cookie belongs to someone else, so drop it.
                try:
                    delattr(context, 'session_cookie')
                except AttributeError:
                    pass
            setattr(context, 'principal', principal)
            # We have a session cookie, try using the session URI to see if it
            # is still valid
            if not delegate:
                rpc_uri = self.apply_session_cookie(rpc_uri)
        except (errors.CCacheError, ValueError):
            # No session key, do full Kerberos auth
            pass
        context.ca_certfile = ca_certfile

        urls = self.get_url_list(rpc_uri)
        serverproxy = None
        for url in urls:
            kw = dict(allow_none=True, encoding='UTF-8')
            kw['verbose'] = verbose
            if url.startswith('https://'):
                if delegate:
                    transport_class = DelegatedKerbTransport
                else:
                    transport_class = KerbTransport
            else:
                transport_class = LanguageAwareTransport
            kw['transport'] = transport_class(protocol=self.protocol,
                                              service='HTTP', ccache=ccache)
            self.log.info('trying %s' % url)
            setattr(context, 'request_url', url)
            serverproxy = self.server_proxy_class(url, **kw)
            if len(urls) == 1:
                # if we have only 1 server and then let the
                # main requester handle any errors. This also means it
                # must handle a 401 but we save a ping.
                return serverproxy
            try:
                command = getattr(serverproxy, 'ping')
                try:
                    command([], {})
                except Fault as e:
                    e = decode_fault(e)
                    if e.faultCode in errors_by_code:
                        error = errors_by_code[e.faultCode]
                        raise error(message=e.faultString)
                    else:
                        raise UnknownError(
                            code=e.faultCode,
                            error=e.faultString,
                            server=url,
                        )
                # We don't care about the response, just that we got one
                break
            except KerberosError as krberr:
                # kerberos error on one server is likely on all
                raise errors.KerberosError(message=unicode(krberr))
            except ProtocolError as e:
                if hasattr(context, 'session_cookie') and e.errcode == 401:
                    # Unauthorized. Remove the session and try again.
                    delattr(context, 'session_cookie')
                    try:
                        delete_persistent_client_session_data(principal)
                    except Exception as e:
                        # This shouldn't happen if we have a session but it isn't fatal.
                        pass
                    # Retry the whole connection without the session cookie.
                    return self.create_connection(ccache, verbose, fallback, delegate)
                if not fallback:
                    raise
                serverproxy = None
            except Exception as e:
                if not fallback:
                    raise
                else:
                    self.log.info('Connection to %s failed with %s', url, e)
                serverproxy = None

        if serverproxy is None:
            raise NetworkError(uri=_('any of the configured servers'),
                               error=', '.join(urls))
        return serverproxy

    def destroy_connection(self):
        """Close the transport of this thread's cached connection, if any."""
        conn = getattr(context, self.id, None)
        if conn is not None:
            conn = conn.conn._ServerProxy__transport
            conn.close()

    def _call_command(self, command, params):
        """Call the command with given params"""
        # For XML, this method will wrap/unwrap binary values
        # For JSON we do that in the proxy
        return command(*params)

    def forward(self, name, *args, **kw):
        """
        Forward call to command named ``name`` over XML-RPC.

        This method will encode and forward an XML-RPC request, and will then
        decode and return the corresponding XML-RPC response.

        :param command: The name of the command being forwarded.
        :param args: Positional arguments to pass to remote command.
        :param kw: Keyword arguments to pass to remote command.
        """
        server = getattr(context, 'request_url', None)
        self.log.info("Forwarding '%s' to %s server '%s'",
                      name, self.protocol, server)
        command = getattr(self.conn, name)
        params = [args, kw]
        try:
            return self._call_command(command, params)
        except Fault as e:
            e = decode_fault(e)
            self.debug('Caught fault %d from server %s: %s', e.faultCode,
                       server, e.faultString)
            if e.faultCode in errors_by_code:
                error = errors_by_code[e.faultCode]
                raise error(message=e.faultString)
            raise UnknownError(
                code=e.faultCode,
                error=e.faultString,
                server=server,
            )
        except SSLError as e:
            raise NetworkError(uri=server, error=str(e))
        except ProtocolError as e:
            # By catching a 401 here we can detect the case where we have
            # a single IPA server and the session is invalid. Otherwise
            # we always have to do a ping().
            session_cookie = getattr(context, 'session_cookie', None)
            if session_cookie and e.errcode == 401:
                # Unauthorized. Remove the session and try again.
                delattr(context, 'session_cookie')
                try:
                    principal = getattr(context, 'principal', None)
                    delete_persistent_client_session_data(principal)
                except Exception as e:
                    # This shouldn't happen if we have a session but it isn't fatal.
                    pass
                # Create a new serverproxy with the non-session URI
                serverproxy = self.create_connection(os.environ.get('KRB5CCNAME'), self.env.verbose, self.env.fallback, self.env.delegate)
                setattr(context, self.id, Connection(serverproxy, self.disconnect))
                return self.forward(name, *args, **kw)
            raise NetworkError(uri=server, error=e.errmsg)
        except socket.error as e:
            raise NetworkError(uri=server, error=str(e))
        except (OverflowError, TypeError) as e:
            raise XMLRPCMarshallError(error=str(e))
class xmlclient(RPCClient):
    # XML-RPC flavor of RPCClient: session endpoint, proxy class, protocol
    # tag and env key holding the server URI.
    session_path = '/ipa/session/xml'
    server_proxy_class = ServerProxy
    protocol = 'xml'
    env_rpc_uri_key = 'xmlrpc_uri'

    def _call_command(self, command, params):
        """Wrap binary values for the XML transport, call, then unwrap."""
        version = params[1].get('version', VERSION_WITHOUT_CAPABILITIES)
        params = xml_wrap(params, version)
        result = command(*params)
        return xml_unwrap(result)
class JSONServerProxy(object):
    """JSON-RPC counterpart to xmlrpc's ServerProxy (same call interface)."""

    def __init__(self, uri, transport, encoding, verbose, allow_none):
        split_uri = urllib.parse.urlsplit(uri)
        if split_uri.scheme not in ("http", "https"):
            raise IOError("unsupported XML-RPC protocol")
        self.__host = split_uri.netloc
        self.__handler = split_uri.path
        self.__transport = transport

        # Only the defaults used elsewhere in this module are supported.
        assert encoding == 'UTF-8'
        assert allow_none
        self.__verbose = verbose

        # FIXME: Some of our code requires ServerProxy internals.
        # But, xmlrpc.client.ServerProxy's _ServerProxy__transport can be accessed
        # by calling serverproxy('transport')
        self._ServerProxy__transport = transport

    def __request(self, name, args):
        # Verbose level >= 2 pretty-prints and logs request/response JSON.
        print_json = self.__verbose >= 2
        payload = {'method': unicode(name), 'params': args, 'id': 0}
        version = args[1].get('version', VERSION_WITHOUT_CAPABILITIES)
        payload = json_encode_binary(
            payload, version, pretty_print=print_json)

        if print_json:
            root_logger.info(
                'Request: %s',
                payload
            )

        response = self.__transport.request(
            self.__host,
            self.__handler,
            payload.encode('utf-8'),
            verbose=self.__verbose >= 3,
        )

        if print_json:
            root_logger.info(
                'Response: %s',
                json.dumps(json.loads(response), sort_keys=True, indent=4)
            )

        try:
            response = json_decode_binary(response)
        except ValueError as e:
            raise JSONError(error=str(e))

        # Translate a JSON-RPC error object into the matching IPA error.
        error = response.get('error')
        if error:
            try:
                error_class = errors_by_code[error['code']]
            except KeyError:
                raise UnknownError(
                    code=error.get('code'),
                    error=error.get('message'),
                    server=self.__host,
                )
            else:
                kw = error.get('data', {})
                kw['message'] = error['message']
                raise error_class(**kw)

        return response['result']

    def __getattr__(self, name):
        # Any attribute access becomes a remote method call.
        def _call(*args):
            return self.__request(name, args)
        return _call
class jsonclient(RPCClient):
    # JSON flavor of RPCClient (see xmlclient for the XML counterpart).
    session_path = '/ipa/session/json'
    server_proxy_class = JSONServerProxy
    protocol = 'json'
    env_rpc_uri_key = 'jsonrpc_uri'
|
clhernandez/foris | refs/heads/master | ex2_1.py | 1 | import math
def fib(n):
    """Iteratively compute a Fibonacci number.

    Returns 0 for n <= 1; thereafter fib(2) == 1, fib(3) == 1, ...,
    fib(10) == 34 (the classic sequence, shifted by one index).

    Fix: removed a stray Python 2 debug statement (``print n``), which is
    a SyntaxError under Python 3 and polluted stdout.
    """
    prev, cur = 1, 0
    for _ in range(n - 1):
        prev, cur = cur, prev + cur
    return cur
def divisoresPrimos(n):
    """Count the divisors of n via its prime factorization.

    Repeatedly divides n by successive "primes" (isPrimo treats 1 as
    prime, so the scan starts there), collecting each prime's exponent in
    listPrimos, then returns the product of (exponent + 1) over the proper
    prime factors -- the classic divisor-count formula.
    NOTE(review): the cnt bookkeeping looks fragile around repeated
    factors; verify results for small n before relying on this.
    """
    primo = 1
    listPrimos = {}
    resultado = False
    cnt = 1
    while resultado != True:
        cociente = n // primo
        resto = n % primo
        if resto == 0 and primo == 1:
            # Record the trivial factor 1, then move on to the first
            # real prime candidate.
            listPrimos[primo] = cnt
            primo = nextPrimo(primo + 1)
            cnt = 1
        else:
            if resto != 0:
                # primo does not divide n: advance to the next prime.
                primo = nextPrimo(primo + 1)
                cnt = 1
            else:
                if resto == 0:
                    # primo divides n: record its running exponent and
                    # continue with the quotient.
                    listPrimos[primo] = cnt
                    n = cociente
                    cnt += 1
        if cociente == 1 and resto == 0:
            # n fully factored.
            resultado = True

    suma = 1
    if len(listPrimos) == 1:
        # Only the trivial factor 1 was found (n == 1): one divisor.
        return 1
    else:
        # Drop the placeholder for 1 and apply prod(exponent + 1).
        del listPrimos[1]
        for item in listPrimos:
            suma *= (listPrimos.get(item) + 1)
    return suma
def divisores(n):
    """Unfinished stub, apparently meant to count divisors of n.

    NOTE(review): ``fin`` is never set to True and ``n // 2 == 0`` only
    holds for n in {0, 1}, so this loops forever for most inputs (and the
    Python 2 ``print`` statement is a SyntaxError on Python 3).  Left
    byte-identical; do not call.
    """
    fin = False
    cnt = 0
    while fin != True:
        if n // 2 == 0:
            print "nugger"
def isPrimo(n):
    """Return True when n has at most two divisors in 1..n.

    Deliberately mirrors the original semantics: 1 (and any n <= 0, for
    which the loop body never runs) is reported as "prime", because
    nextPrimo/divisoresPrimos rely on that.

    Fixes: ``xrange`` is Python 2-only (NameError under Python 3); the
    scan now also bails out as soon as a third divisor is found instead of
    always trial-dividing the full range.
    """
    cnt = 0
    for i in range(1, n + 1):
        if n % i == 0:
            cnt += 1
            if cnt > 2:
                return False
    return True
def nextPrimo(n):
    """Return the smallest candidate >= n that isPrimo() accepts."""
    candidate = n
    while not isPrimo(candidate):
        candidate += 1
    return candidate
def matFib(n):
    """Fast (O(log n)) Fibonacci via binary matrix exponentiation.

    Returns F(n) with F(1) == F(2) == 1, and 0 for n <= 0.

    Fixes over the original:
    * The accumulator update wrote ``mat['a']`` and then read the *new*
      value when computing ``mat['b']`` (likewise ``c`` before ``d``), so
      e.g. matFib(2) returned 0.  Both pairs are now updated
      simultaneously from the old values.
    * Removed the Python 2 debug ``print`` statements (SyntaxErrors under
      Python 3) and used explicit integer division for the halving step.
    """
    if n <= 0:
        return 0
    i = n - 1
    # (a, b): accumulated result pair; (c, d): current doubling pair.
    mat = {'a': 1, 'b': 0, 'c': 0, 'd': 1}
    while i > 0:
        if i % 2 != 0:
            # Fold the current power into the accumulator (simultaneous
            # assignment so both formulas see the old a/b).
            mat['a'], mat['b'] = (
                (mat['d'] * mat['b']) + (mat['c'] * mat['a']),
                (mat['d'] * (mat['b'] + mat['a'])) + (mat['c'] * mat['b']),
            )
        # Square the doubling pair (again simultaneously).
        mat['c'], mat['d'] = (
            (mat['c'] ** 2) + (mat['d'] ** 2),
            mat['d'] * ((2 * mat['c']) + mat['d']),
        )
        i = i // 2
    return mat['a'] + mat['b']
def fibIndex(n):
    """Return the (fractional) sequence index of Fibonacci number ``n``.

    Uses the inverse of Binet's formula, index = log_phi(n*sqrt(5) + 1/2),
    where phi is the golden ratio; rounding the result yields the index,
    e.g. round(fibIndex(55)) == 10.  Returns 1 for n < 1.

    Fix: the logarithm was taken in base ``math.pi``; the correct base for
    the inverse Binet formula is phi = (1 + sqrt(5)) / 2.
    """
    if n < 1:
        return 1
    phi = (1 + math.sqrt(5)) / 2
    index = math.log(n * math.sqrt(5) + 1.0 / 2.0, phi)
    return index
#for i in xrange(0,10):
#print "Print fib 100: ",matFib(2)

# Fix: ``print fibIndex(2)`` is Python 2 statement syntax, which made the
# whole module a SyntaxError under Python 3.
print(fibIndex(2))
harmslab/pdbtools | refs/heads/master | pdbtools/dist_filter.py | 1 | #!/usr/bin/env python
# Copyright 2007, Michael J. Harms
# This program is distributed under General Public License v. 3. See the file
# COPYING for a copy of the license.
__author__ = "Michael J. Harms"
__date__ = "070709"
__description__ = \
"""
pdb_dist-filter.py
Takes pdb file and calculates the distance between "residue" "atom" and all
other atoms of this type in the pdb file with "column" matching "select".
"""
from math import sqrt
def extractCoor(line):
    """
    Take a line out of a pdb and extract coordinates.

    The x/y/z values occupy three consecutive 8-character columns
    starting at column 30.
    """
    coordinates = []
    for axis in range(3):
        start = 30 + 8 * axis
        coordinates.append(float(line[start:start + 8]))
    return coordinates
def distFilter(pdb, residue, atom="N", column=[60, 66], select=" NA"):
    """
    Calculate the distance between "residue" "atom" and the "atom" of residues
    that have column defined by "column" == "select". Default is to compare
    nitrogen of "residue" to nitrogens of residues with b-factor == NA.

    Fix: the IndexError branch raised the misspelled name ``IOErro``, which
    would itself have crashed with a NameError instead of reporting the
    intended IOError.
    """
    # Make sure atom entry is in correct format (left-justified, width 3)
    atom = "%-3s" % (atom.strip())

    # Grab only "atom" lines out of pdb
    pdb = [l for l in pdb if l[0:4] == "ATOM" and l[13:16] == atom]
    if len(pdb) == 0:
        err = "pdb file does not contain any atoms of type \"%s\"" % atom
        raise IOError(err)

    # Pull residue coordinates
    res_coord = [extractCoor(l) for l in pdb if l[22:26] == "%4i" % residue][0]

    # Pull selected residue coordinates
    try:
        na_coord = [extractCoor(l) for l in pdb
                    if l[column[0]:column[1]] == select]
    except IndexError:
        err = "Invalid column defined by line[%i:%i]" % (column[0], column[1])
        raise IOError(err)

    if len(na_coord) == 0:
        err = "Column line[%i:%i] does not contain any \"%s\" entries!" % \
              (column[0], column[1], select)
        raise IOError(err)

    # Calculate Euclidean distances to every selected atom
    dist = []
    for c in na_coord:
        dist.append(sqrt(sum([(c[i] - res_coord[i])**2 for i in range(3)])))

    return dist
|
ostrovok-team/django-mediagenerator | refs/heads/master | blocks_project/demo/views.py | 2 | # Create your views here.
from django.shortcuts import render
def index(request):
    """Render the demo landing page template."""
    return render(request, 'index.html')
|
jonashagstedt/django-jsx | refs/heads/master | tests/test_template_tag.py | 2 | from django.template import RequestContext
from django.test import TestCase
from django_jsx.templatetags import djangojs
from django_jsx.templatetags.djangojs import JsMissingTemplateDirException
class TestLoader(TestCase):
    """Tests for the django_jsx ``include_js`` template tag."""

    def test_render_template(self):
        """
        Render a template with the template tag
        """
        context = RequestContext(request=None)
        template = djangojs.include_js(context, template_name='test-component.js')
        self.assertEqual(template, '<span>Test component</span>')

    def test_try_to_render_template_without_the_js_backend(self):
        """
        Raises JsMissingTemplateDirException if the backend is not specified
        """
        context = RequestContext(request=None)
        # Override TEMPLATES so only the stock Django backend (and no js
        # template dirs) is configured.
        with self.settings(TEMPLATES=[{'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': []}]):
            with self.assertRaises(JsMissingTemplateDirException):
                djangojs.include_js(context, template_name='test-component.js')
|
AltSchool/django | refs/heads/master | django/conf/locale/is/__init__.py | 12133432 | |
masonm12/SavePics | refs/heads/master | savepics.py | 1 | #!/usr/bin/env python
import argparse
import os
import re
import time
import filecmp
import shutil
import glob
# Program version reported by --version.
version = 'v2.1.0'

# Filenames skipped entirely while scanning the input tree.
ignored = {
    'Thumbs.db'
}

# First run of digits in a picture filename (e.g. "1234" in IMG_1234.JPG).
fileNumberPattern = re.compile('([0-9]+)')

# Command-line interface definition.
argParser = argparse.ArgumentParser(prog='savepics',
    description='Sorts picture files from an input directory into an output directory, using a folder structure with the pattern YYYY/YYYY.MM SUFFIX.'
)
argParser.add_argument('--version', action='version', version='%(prog)s {}'.format(version))
argParser.add_argument('-n', '--dry-run', action='store_true',
    help='prints out what would be written without actually writing'
)
argParser.add_argument('-s', '--suffix', action='store', default='Pictures', const=None, nargs='?',
    help='suffix to insert after folder name, defaults to %(default)s, can be overriden to nothing'
)
argParser.add_argument('-v', '--verbose', action='count',
    help='enable extra output'
)
argParser.add_argument('-m', '--month', action='store', type=int, choices=range(1, 13), metavar='MONTH',
    help='override the detected month of files for sorting purposes'
)
argParser.add_argument('-y', '--year', action='store', type=int,
    help='override the detected year of files for sorting purposes'
)
argParser.add_argument('input', action='store',
    help='directory to read and sort'
)
argParser.add_argument('output', action='store',
    help='directory to write sorted pictures to'
)
def getFileNumber(filename):
    """Return the first run of digits in *filename*, or '0' if none."""
    match = fileNumberPattern.search(filename)
    if match:
        return match.group(0)
    return '0'
def getFileYearMonth(filename, args):
    """Return (year, month) from the file's mtime, honoring the
    --year/--month overrides carried on *args*."""
    modified = time.localtime(os.path.getmtime(filename))
    year = args.year if args.year else modified.tm_year
    month = args.month if args.month else modified.tm_mon
    return year, month
def walkFiles(inDir):
    """Yield every non-ignored file path under *inDir*, with filenames
    visited in sorted order within each directory."""
    for dirpath, _dirnames, names in os.walk(inDir):
        for name in sorted(names):
            if name not in ignored:
                yield os.path.join(dirpath, name)
def getFileOutPath(filename, outDir, args):
    """Build the sorted destination: outDir/YYYY/"YYYY.MM [suffix]"/name."""
    year, month = getFileYearMonth(filename, args)
    folder = '{}.{:02}'.format(year, month)
    if args.suffix:
        folder = '{} {}'.format(folder, args.suffix)
    return os.path.join(outDir, str(year), folder, os.path.basename(filename))
def splitFilename(filename):
    """Split *filename* into (directory, stem, extension)."""
    directory, tail = os.path.split(filename)
    stem, extension = os.path.splitext(tail)
    return directory, stem, extension
def getNextFilename(filename):
    """Return *filename* with its embedded number incremented, keeping the
    original zero padding (IMG_0009.jpg -> IMG_0010.jpg)."""
    directory, stem, extension = splitFilename(filename)
    number = getFileNumber(stem)
    template = '{{:0{}}}'.format(len(number))
    stem = stem.replace(number, template.format(int(number) + 1))
    return os.path.join(directory, stem + extension)
def getOrInsert(dictObj, key, default):
    """Return dictObj[key]; when missing *or None*, store and return
    *default* (note: an existing None value is treated as missing)."""
    value = dictObj.get(key)
    if value is not None:
        return value
    dictObj[key] = default
    return default
# Cache: directory -> set of basenames already claimed there this run.
cachedBasenameMap = {}
def isFilenameUsed(filename):
    """Return True if *filename*'s stem is already claimed in its directory.

    "Claimed" means any file sharing the stem with any extension exists
    (IMG_1111.MOV counts as used if IMG_1111.JPG exists), checked first
    against the in-run cache and then the filesystem via glob.
    """
    dirname, basename, ext = splitFilename(filename)
    cachedBasenames = getOrInsert(cachedBasenameMap, dirname, set())
    if basename in cachedBasenames:
        return True
    # Strip the extension characters (keeping the dot) and match any
    # extension.  NOTE(review): for an empty ext this slices to
    # filename[:1] -- looks like a latent bug for extension-less files;
    # confirm intent before relying on it.
    globPattern = filename[:-(len(ext) - 1)] + '*'
    for match in glob.iglob(globPattern):
        # At least one match on disk: remember and report it as used.
        cachedBasenames.add(basename)
        return True
    return False
def useFilename(filename):
    """Record *filename*'s stem as claimed in the per-directory cache."""
    dirname, basename, _ext = splitFilename(filename)
    cachedBasenameMap[dirname].add(basename)
def main():
    """Scan args.input and copy new pictures into sorted folders under
    args.output, skipping exact duplicates and renumbering name clashes."""
    args = argParser.parse_args()

    inDir = os.path.abspath(args.input)
    outDir = os.path.abspath(args.output)

    copies = []          # (source, destination) pairs to perform
    fileCount = 0        # total files scanned
    duplicates = set()   # sources already present at the destination
    renames = set()      # sources renumbered to avoid a clash

    for filename in walkFiles(inDir):
        fileCount += 1
        outpath = getFileOutPath(filename, outDir, args)

        # see if this name is already in use
        while isFilenameUsed(outpath):
            # see if this file has already been copied
            if os.path.exists(outpath) and filecmp.cmp(filename, outpath, False):
                duplicates.add(filename)
                break
            # find a new name
            else:
                renames.add(filename)
                outpath = getNextFilename(outpath)

        if filename in duplicates:
            # A renumbered candidate that turned out to be a duplicate is
            # not a rename after all.
            if filename in renames:
                renames.remove(filename)
            continue

        useFilename(outpath)
        copies.append((filename, outpath))

    # Perform (or just report, under --dry-run) the planned copies.
    for input, output in copies:
        print('Copying {} to {}.'.format(input, output))
        if args.dry_run:
            continue
        dirname = os.path.dirname(output)
        if not os.path.exists(dirname):
            os.makedirs(os.path.dirname(output))
        shutil.copy2(input, output)

    # Summary report.
    print('{} pictures scanned.'.format(fileCount))
    print('{} pictures copied.'.format(len(copies)))
    print('{} duplicates found.'.format(len(duplicates)))
    if args.verbose:
        for duplicate in sorted(duplicates):
            print('\t{}'.format(duplicate))
    print('{} renames.'.format(len(renames)))
    if args.verbose:
        for rename in sorted(renames):
            print('\t{}'.format(rename))

if __name__ == '__main__':
    main()
marxin/youtube-dl | refs/heads/master | youtube_dl/extractor/keek.py | 119 | from __future__ import unicode_literals
from .common import InfoExtractor
class KeekIE(InfoExtractor):
    """Extractor for keek.com short-video pages."""

    # Both the /!ID and /user/keeks/ID URL forms are accepted.
    _VALID_URL = r'https?://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<id>\w+)'
    IE_NAME = 'keek'
    _TEST = {
        'url': 'https://www.keek.com/ytdl/keeks/NODfbab',
        'md5': '09c5c109067536c1cec8bac8c21fea05',
        'info_dict': {
            'id': 'NODfbab',
            'ext': 'mp4',
            'uploader': 'youtube-dl project',
            'uploader_id': 'ytdl',
            'title': 'test chars: "\'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de .',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Media and thumbnail live at predictable CDN paths keyed by id.
        video_url = 'http://cdn.keek.com/keek/video/%s' % video_id
        thumbnail = 'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
        webpage = self._download_webpage(url, video_id)

        # Uploader info is embedded in the meta description as
        # "Watch NAME (@id) ...", when present.
        raw_desc = self._html_search_meta('description', webpage)
        if raw_desc:
            uploader = self._html_search_regex(
                r'Watch (.*?)\s+\(', raw_desc, 'uploader', fatal=False)
            uploader_id = self._html_search_regex(
                r'Watch .*?\(@(.+?)\)', raw_desc, 'uploader_id', fatal=False)
        else:
            uploader = None
            uploader_id = None

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
        }
|
stweil/letsencrypt | refs/heads/master | acme/acme/jose/jwa_test.py | 66 | """Tests for acme.jose.jwa."""
import unittest
from acme import test_util
from acme.jose import errors
# RSA private-key fixtures of increasing modulus size, loaded from the
# package's test data files.
RSA256_KEY = test_util.load_rsa_private_key('rsa256_key.pem')
RSA512_KEY = test_util.load_rsa_private_key('rsa512_key.pem')
RSA1024_KEY = test_util.load_rsa_private_key('rsa1024_key.pem')
class JWASignatureTest(unittest.TestCase):
    """Tests for acme.jose.jwa.JWASignature."""

    def setUp(self):
        from acme.jose.jwa import JWASignature

        # Concrete-but-inert subclass: sign/verify are never exercised
        # here, only equality/repr/serialization behavior.
        class MockSig(JWASignature):
            # pylint: disable=missing-docstring,too-few-public-methods
            # pylint: disable=abstract-class-not-used
            def sign(self, key, msg):
                raise NotImplementedError()  # pragma: no cover

            def verify(self, key, msg, sig):
                raise NotImplementedError()  # pragma: no cover

        # pylint: disable=invalid-name
        self.Sig1 = MockSig('Sig1')
        self.Sig2 = MockSig('Sig2')

    def test_eq(self):
        self.assertEqual(self.Sig1, self.Sig1)

    def test_ne(self):
        self.assertNotEqual(self.Sig1, self.Sig2)

    def test_ne_other_type(self):
        self.assertNotEqual(self.Sig1, 5)

    def test_repr(self):
        self.assertEqual('Sig1', repr(self.Sig1))
        self.assertEqual('Sig2', repr(self.Sig2))

    def test_to_partial_json(self):
        self.assertEqual(self.Sig1.to_partial_json(), 'Sig1')
        self.assertEqual(self.Sig2.to_partial_json(), 'Sig2')

    def test_from_json(self):
        from acme.jose.jwa import JWASignature
        from acme.jose.jwa import RS256
        self.assertTrue(JWASignature.from_json('RS256') is RS256)
class JWAHSTest(unittest.TestCase):  # pylint: disable=too-few-public-methods

    def test_it(self):
        from acme.jose.jwa import HS256
        key, msg = b'some key', b'foo'
        sig = (
            b"\xceR\xea\xcd\x94\xab\xcf\xfb\xe0\xacA.:\x1a'\x08i\xe2\xc4"
            b"\r\x85+\x0e\x85\xaeUZ\xd4\xb3\x97zO"
        )
        # HMAC signing is deterministic; verification must reject a
        # tampered signature.
        self.assertEqual(HS256.sign(key, msg), sig)
        self.assertTrue(HS256.verify(key, msg, sig) is True)
        self.assertTrue(HS256.verify(key, msg, sig + b'!') is False)
class JWARSTest(unittest.TestCase):

    def test_sign_no_private_part(self):
        from acme.jose.jwa import RS256
        self.assertRaises(
            errors.Error, RS256.sign, RSA512_KEY.public_key(), b'foo')

    def test_sign_key_too_small(self):
        from acme.jose.jwa import RS256
        from acme.jose.jwa import PS256
        # A 256-bit modulus is below the minimum for both padding schemes.
        for alg in (RS256, PS256):
            self.assertRaises(errors.Error, alg.sign, RSA256_KEY, b'foo')

    def test_rs(self):
        from acme.jose.jwa import RS256
        # PKCS#1 v1.5 signatures are deterministic, so the exact bytes
        # can be pinned.
        sig = (
            b'|\xc6\xb2\xa4\xab(\x87\x99\xfa*:\xea\xf8\xa0N&}\x9f\x0f\xc0O'
            b'\xc6t\xa3\xe6\xfa\xbb"\x15Y\x80Y\xe0\x81\xb8\x88)\xba\x0c\x9c'
            b'\xa4\x99\x1e\x19&\xd8\xc7\x99S\x97\xfc\x85\x0cOV\xe6\x07\x99'
            b'\xd2\xb9.>}\xfd'
        )
        self.assertEqual(RS256.sign(RSA512_KEY, b'foo'), sig)
        self.assertTrue(RS256.verify(RSA512_KEY.public_key(), b'foo', sig))
        self.assertFalse(RS256.verify(
            RSA512_KEY.public_key(), b'foo', sig + b'!'))

    def test_ps(self):
        from acme.jose.jwa import PS256
        # PSS is randomized, so only a sign/verify round trip is checked.
        sig = PS256.sign(RSA1024_KEY, b'foo')
        self.assertTrue(PS256.verify(RSA1024_KEY.public_key(), b'foo', sig))
        self.assertFalse(PS256.verify(
            RSA1024_KEY.public_key(), b'foo', sig + b'!'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()  # pragma: no cover
|
endolith/scipy | refs/heads/master | scipy/stats/_generate_pyx.py | 7 | import pathlib
from shutil import copyfile
import subprocess
import sys
def isNPY_OLD():
    '''
    Return True when the installed numpy predates 1.19.

    A new random C API was added in 1.18 and became stable in 1.19.
    Prefer the new random C API when building with recent numpy.
    '''
    import numpy as np
    major, minor = (int(part) for part in np.__version__.split('.')[:2])
    return (major, minor) < (1, 19)
def make_biasedurn():
    '''Substitute True/False values for NPY_OLD Cython build variable.'''
    base = (pathlib.Path(__file__).parent / 'biasedurn').absolute()
    template = base.with_suffix('.pyx.templ').read_text()
    rendered = template.format(NPY_OLD=str(bool(isNPY_OLD())))
    base.with_suffix('.pyx').write_text(rendered)
def make_boost():
    # Run the code generator that lives inside the _boost directory.
    code_gen = pathlib.Path(__file__).parent / '_boost' / 'include' / 'code_gen.py'
    subprocess.run([sys.executable, str(code_gen)], check=True)
# Regenerate the generated Cython/boost sources when run as a script.
if __name__ == '__main__':
    make_biasedurn()
    make_boost()
|
minhphung171093/OpenERP_V7 | refs/heads/master | openerp/addons/base/tests/test_ir_attachment.py | 68 | import hashlib
import os
import unittest2
import openerp
import openerp.tests.common
class test_ir_attachment(openerp.tests.common.TransactionCase):
    # NOTE(review): Python 2 only code -- uses str.encode('base64') and
    # the file() builtin; do not run under Python 3.
    def test_00_attachment_flow(self):
        """Exercise ir.attachment storage: DB storage by default, then
        file storage (deduplicated by SHA1) after setting
        'ir_attachment.location', including create/read/unlink/write."""
        registry, cr, uid = self.registry, self.cr, self.uid
        root_path = openerp.tools.config['root_path']
        ira = registry('ir.attachment')
        # Blob1: content, its base64 form and its SHA1-based store path.
        blob1 = 'blob1'
        blob1_b64 = blob1.encode('base64')
        blob1_hash = hashlib.sha1(blob1).hexdigest()
        blob1_fname = blob1_hash[:3] + '/' + blob1_hash
        # Blob2
        blob2 = 'blob2'
        blob2_b64 = blob2.encode('base64')
        blob2_hash = hashlib.sha1(blob2).hexdigest()
        blob2_fname = blob2_hash[:3] + '/' + blob2_hash
        # 'ir_attachment.location' is undefined test database storage
        a1 = ira.create(cr, uid, {'name': 'a1', 'datas': blob1_b64})
        a1_read = ira.read(cr, uid, [a1], ['datas'])
        self.assertEqual(a1_read[0]['datas'], blob1_b64)
        # The data must have been stored in the db_datas column.
        cr.execute("select id,db_datas from ir_attachment where id = %s", (a1,) )
        a1_db_datas = str(cr.fetchall()[0][1])
        self.assertEqual(a1_db_datas, blob1_b64)
        # define a location for filestore
        registry('ir.config_parameter').set_param(cr, uid, 'ir_attachment.location', 'file:///filestore')
        # Test file storage
        a2 = ira.create(cr, uid, {'name': 'a2', 'datas': blob1_b64})
        a2_read = ira.read(cr, uid, [a2], ['datas'])
        self.assertEqual(a2_read[0]['datas'], blob1_b64)
        # store_fname must point at the hash-addressed file on disk.
        cr.execute("select id,store_fname from ir_attachment where id = %s", (a2,) )
        a2_store_fname = cr.fetchall()[0][1]
        self.assertEqual(a2_store_fname, blob1_fname)
        a2_fn = os.path.join(root_path, 'filestore', cr.dbname, blob1_hash[:3], blob1_hash)
        fc = file(a2_fn).read()
        self.assertEqual(fc, blob1)
        # create a3 with same blob -- deduplicated to the same store file
        a3 = ira.create(cr, uid, {'name': 'a3', 'datas': blob1_b64})
        a3_read = ira.read(cr, uid, [a3], ['datas'])
        self.assertEqual(a3_read[0]['datas'], blob1_b64)
        cr.execute("select id,store_fname from ir_attachment where id = %s", (a3,) )
        a3_store_fname = cr.fetchall()[0][1]
        self.assertEqual(a3_store_fname, a2_store_fname)
        # create a4 blob2
        a4 = ira.create(cr, uid, {'name': 'a4', 'datas': blob2_b64})
        a4_read = ira.read(cr, uid, [a4], ['datas'])
        self.assertEqual(a4_read[0]['datas'], blob2_b64)
        a4_fn = os.path.join(root_path, 'filestore', cr.dbname, blob2_hash[:3], blob2_hash)
        self.assertTrue(os.path.isfile(a4_fn))
        # delete a3 but file stays (still referenced by a2)
        ira.unlink(cr, uid, [a3])
        self.assertTrue(os.path.isfile(a2_fn))
        # delete a2 it is unlinked (last reference gone)
        ira.unlink(cr, uid, [a2])
        self.assertFalse(os.path.isfile(a2_fn))
        # update a4 blob2 by blob1
        ira.write(cr, uid, [a4], {'datas': blob1_b64})
        a4_read = ira.read(cr, uid, [a4], ['datas'])
        self.assertEqual(a4_read[0]['datas'], blob1_b64)
        # file of a4 disapear and a2 reappear
        self.assertFalse(os.path.isfile(a4_fn))
        self.assertTrue(os.path.isfile(a2_fn))
        # everybody applause
|
sfepy/sfepy | refs/heads/master | sfepy/homogenization/coefs_base.py | 3 | from __future__ import absolute_import
import os
import numpy as nm
import scipy as sc
from collections.abc import Iterable
from sfepy.base.base import output, assert_, get_default, debug, Struct
from sfepy.base.timing import Timer
from sfepy.discrete.evaluate import eval_equations
from sfepy.solvers.ts import TimeStepper
from sfepy.solvers import Solver, eig
from sfepy.linalg import MatrixAction
from .utils import iter_sym, iter_nonsym, create_pis, create_scalar_pis,\
rm_multi
import six
from six.moves import range
class MiniAppBase(Struct):
    """
    Base class of homogenization mini-applications (correctors and
    coefficients). Wraps a micro-problem and common options.
    """
    def any_from_conf(name, problem, kwargs):
        """Instantiate the class given in kwargs['class']; raise KeyError
        with a helpful message when it is missing."""
        try:
            cls = kwargs['class']
        except KeyError:
            raise KeyError("set 'class' for MiniApp %s!" % name)
        obj = cls(name, problem, kwargs)
        return obj
    any_from_conf = staticmethod(any_from_conf)
    def __init__(self, name, problem, kwargs):
        Struct.__init__(self, name=name, problem=problem, **kwargs)
        if self.problem is not None:
            self.problem.clear_equations()
        # Defaults are only set when not already given via kwargs.
        self.set_default('requires', [])
        self.set_default('is_linear', False)
        self.set_default('dtype', nm.float64)
        self.set_default('term_mode', None)
        self.set_default('set_volume', 'total')
        # Application-specific options.
        self.app_options = self.process_options()
    def process_options(self):
        """
        Setup application-specific options.
        Subclasses should implement this method as needed.
        Returns
        -------
        app_options : Struct instance
            The application options.
        """
    def init_solvers(self, problem):
        """
        Setup solvers. Use local options if these are defined,
        otherwise use the global ones.
        For linear problems, assemble the matrix and try to presolve the
        linear system.
        """
        if hasattr(self, 'solvers'):
            opts = self.solvers
        else:
            opts = problem.conf.options
        problem.set_conf_solvers(problem.conf.solvers, opts)
        problem.init_solvers()
        if self.is_linear:
            output('linear problem, trying to presolve...')
            timer = Timer(start=True)
            ev = problem.get_evaluator()
            state = problem.create_state()
            try:
                mtx_a = ev.eval_tangent_matrix(state(), is_full=True)
            except ValueError:
                output('matrix evaluation failed, giving up...')
                raise
            problem.set_linear(True)
            problem.try_presolve(mtx_a)
            output('...done in %.2f s' % timer.stop())
        else:
            problem.set_linear(False)
    def _get_volume(self, volume):
        """Return the volume value; for a dict, pick the entry selected by
        the 'set_volume' option."""
        if isinstance(volume, dict):
            return volume[self.set_volume]
        else:
            return volume
class CorrSolution(Struct):
    """
    Class for holding solutions of corrector problems.
    """

    def iter_solutions(self):
        """Yield (key, state) pairs, one per corrector component, or a
        single ('', state) pair for scalar correctors."""
        if hasattr(self, 'components'):
            for indx in self.components:
                yield ('%d' * len(indx)) % indx, self.states[indx]
        else:
            yield '', self.state

    def iter_time_steps(self):
        """Yield one per-step CorrSolution for time-dependent correctors,
        or self for steady ones."""
        if getattr(self, 'n_step', 0) > 0:
            for step in range(self.n_step):
                yield self.get_ts_val(step)
        else:
            yield self

    def get_ts_val(self, step):
        """Return a new CorrSolution restricted to time step `step`."""
        if hasattr(self, 'states'):
            states = nm.zeros(self.states.shape, dtype=nm.object)
            for idx in self.components:
                states[idx] = {key: val[step]
                               for key, val in six.iteritems(self.states[idx])}
            return CorrSolution(name=self.name, states=states,
                                components=self.components)
        else:
            state = {key: val[step]
                     for key, val in six.iteritems(self.state)}
            return CorrSolution(name=self.name, state=state)
class CorrMiniApp(MiniAppBase):
    """
    Base class of corrector mini-applications; adds output/saving support
    on top of MiniAppBase.
    """
    def __init__(self, name, problem, kwargs):
        MiniAppBase.__init__(self, name, problem, kwargs)
        self.output_dir = self.problem.output_dir
        self.set_default('save_name', None)
        if self.save_name is not None:
            # Make the save name relative to the problem output directory.
            self.save_name = os.path.normpath(os.path.join(self.output_dir,
                                                           self.save_name))
    def setup_output(self, save_formats=None, post_process_hook=None,
                     file_per_var=None):
        """Instance attributes have precedence!"""
        self.set_default('save_formats', save_formats)
        self.set_default('post_process_hook', post_process_hook)
        self.set_default('file_per_var', file_per_var)
    def get_save_name_base(self):
        # Base file name without extension/time stamp, or None.
        return self.save_name
    def get_save_name(self, save_format='.h5', stamp=''):
        """Return '<base><stamp>.<format>' or None when no save name is set."""
        save_name_base = self.get_save_name_base()
        if save_name_base is not None:
            return '.'.join((save_name_base + stamp, save_format))
    def get_output(self, corr_sol, is_dump=False, extend=True,
                   variables=None, var_map=None):
        """Convert corrector solutions to output dicts; `is_dump` produces
        raw DOF-vector Structs, otherwise state_to_output() format.
        `var_map` renames solution keys to actual variable names."""
        if variables is None:
            variables = self.problem.get_variables()
        to_output = variables.state_to_output
        if is_dump:
            extend = False
        out = {}
        for key, sol in corr_sol.iter_solutions():
            for var_name in six.iterkeys(sol):
                # Map auxiliary keys (e.g. time derivatives) to variables.
                if var_name not in variables.ordered_state\
                   and var_map is not None\
                   and var_name in var_map:
                    vname = var_map[var_name]
                else:
                    vname = var_name
                dof_vector = sol[var_name]
                if is_dump:
                    skey = var_name + '_' + key if key else var_name
                    var = variables[vname]
                    shape = (var.n_dof // var.n_components,
                             var.n_components)
                    out[skey] = Struct(name='dump', mode='vertex',
                                       data=dof_vector,
                                       shape=shape,
                                       var_name=vname)
                else:
                    aux = to_output(dof_vector,
                                    var_info={vname: (True, var_name)},
                                    extend=extend)
                    if self.post_process_hook is not None:
                        aux = self.post_process_hook(aux, self.problem,
                                                     None,
                                                     extend=extend)
                    for _key, val in six.iteritems(aux):
                        if key:
                            new_key = _key + '_' + key
                        else:
                            new_key = _key
                        out[new_key] = val
        return out
    def save(self, state, problem, variables=None, ts=None, var_map=None):
        """Save corrector state in all configured formats; 'h5' gets a dump,
        other formats a per-time-step visualization file."""
        if ts is not None:
            # Zero-padded time step stamp appended to the file name.
            n_digit = int(nm.log10(ts.n_step)) + 1
            time_stamp = ('_%s' % ('%%0%dd' % n_digit)) % ts.step
        else:
            time_stamp = ''
        for save_format in self.save_formats:
            if self.get_save_name_base() is not None:
                if save_format in ['h5']:
                    save_name = self.get_save_name(save_format)
                    is_dump, file_per_var, extend = True, False, False,
                else:
                    save_name = self.get_save_name(save_format, time_stamp)
                    file_per_var, is_dump = self.file_per_var, False
                    extend = not file_per_var
                out = self.get_output(state, extend=extend, is_dump=is_dump,
                                      variables=variables, var_map=var_map)
                problem.save_state(save_name, out=out,
                                   file_per_var=file_per_var, ts=ts)
class ShapeDimDim(CorrMiniApp):

    def __call__(self, problem=None, data=None):
        """Generate the dim x dim family of shape-based correctors (pis)."""
        problem = get_default(problem, self.problem)
        components, pis = create_pis(problem, self.variables[0])
        corr_sol = CorrSolution(name=self.name, states=pis,
                                components=components)
        aux = problem.create_variables([self.variables[0]])
        self.save(corr_sol, problem, variables=aux)
        return corr_sol
class ShapeDim(CorrMiniApp):

    def __call__(self, problem=None, data=None):
        """Generate the scalar shape-based correctors (scalar pis)."""
        problem = get_default(problem, self.problem)
        components, pis = create_scalar_pis(problem, self.variables[0])
        corr_sol = CorrSolution(name=self.name, states=pis,
                                components=components)
        aux = problem.create_variables([self.variables[0]])
        self.save(corr_sol, problem, variables=aux)
        return corr_sol
class OnesDim(CorrMiniApp):

    def __call__(self, problem=None, data=None):
        """Generate constant unit vector fields, one per space dimension."""
        problem = get_default(problem, self.problem)
        var_name = self.variables[0]
        var = problem.get_variables(auto_create=True)[var_name]
        dim = problem.domain.mesh.dim
        nnod = var.n_nod
        ones = nm.zeros((dim,), dtype=nm.object)
        clist = []
        for ir in range(dim):
            # Unit field: component `ir` is one everywhere, others zero.
            val = nm.zeros((nnod, dim), dtype=var.dtype)
            val[:, ir] = nm.ones((nnod,), dtype=var.dtype)
            ones[ir] = {var_name: nm.ascontiguousarray(val)}
            clist.append((ir,))
        corr_sol = CorrSolution(name=self.name, states=ones,
                                components=clist)
        self.save(corr_sol, problem,
                  variables=problem.create_variables([self.variables[0]]))
        return corr_sol
class CorrEval(CorrMiniApp):
    """
    Corrector obtained by evaluating a Python expression over the required
    correctors, referenced by name inside the expression.
    """
    def __call__(self, problem=None, data=None):
        problem = get_default(problem, self.problem)
        expr = self.expression
        # Substitute requirement names with lookups into `data`.
        for req in map(rm_multi, self.requires):
            expr = expr.replace(req, "data['%s']" % req)
        # NOTE(review): eval() on a configuration-supplied expression --
        # safe only with trusted problem definition files.
        val = eval(expr)
        if type(val) is dict:
            corr_sol = CorrSolution(name=self.name,
                                    state=val)
        elif type(val) is nm.ndarray:
            if val.dtype == nm.object:
                # Object array: one state dict per component.
                corr_sol = CorrSolution(name=self.name,
                                        states=val,
                                        components=['data'])
            else:
                # Plain array: flatten to a DOF vector of self.variable.
                ndof, ndim = val.shape
                state = {self.variable: val.reshape((ndof * ndim,))}
                corr_sol = CorrSolution(name=self.name,
                                        state=state)
        else:
            # Assume the expression already produced a CorrSolution.
            corr_sol = val
        cvars = problem.create_variables([self.variable])
        self.save(corr_sol, problem, variables=cvars)
        return corr_sol
class CorrNN(CorrMiniApp):
    """
    Corrector problem solved for each (ir, ic) pair of indices in
    dim x dim. __init__() kwargs:
        {
             'ebcs' : [],
             'epbcs' : [],
             'equations' : {},
             'set_variables' : None,
        },
    """
    def set_variables_default(variables, ir, ic, set_var, data):
        """Set parameter variables from the (ir, ic) component of the
        required corrector states."""
        for (var, req, comp) in set_var:
            variables[var].set_data(data[req].states[ir,ic][comp])
    set_variables_default = staticmethod(set_variables_default)
    def __init__(self, name, problem, kwargs):
        """When dim is not in kwargs, problem dimension is used."""
        CorrMiniApp.__init__(self, name, problem, kwargs)
        self.set_default('dim', problem.get_dim())
    def __call__(self, problem=None, data=None):
        problem = get_default(problem, self.problem)
        problem.set_equations(self.equations)
        problem.select_bcs(ebc_names=self.ebcs, epbc_names=self.epbcs,
                           lcbc_names=self.get('lcbcs', []))
        problem.update_materials(problem.ts)
        self.init_solvers(problem)
        variables = problem.get_variables()
        states = nm.zeros((self.dim, self.dim), dtype=nm.object)
        clist = []
        for ir in range(self.dim):
            for ic in range(self.dim):
                if isinstance(self.set_variables, list):
                    self.set_variables_default(variables, ir, ic,
                                               self.set_variables, data)
                else:
                    self.set_variables(variables, ir, ic, **data)
                # Materials were already updated above.
                state = problem.solve(update_materials=False)
                assert_(state.has_ebc())
                states[ir,ic] = state.get_parts()
                clist.append((ir, ic))
        corr_sol = CorrSolution(name=self.name,
                                states=states,
                                components=clist)
        self.save(corr_sol, problem)
        return corr_sol
class CorrN(CorrMiniApp):
    """
    Corrector problem solved once per index ir in range(dim).
    """
    def set_variables_default(variables, ir, set_var, data):
        """Set parameter variables from the ir-th component of the
        required corrector states."""
        for (var, req, comp) in set_var:
            variables[var].set_data(data[req].states[ir][comp])
    set_variables_default = staticmethod(set_variables_default)
    def __init__(self, name, problem, kwargs):
        """When dim is not in kwargs, problem dimension is used."""
        CorrMiniApp.__init__(self, name, problem, kwargs)
        self.set_default('dim', problem.get_dim())
    def __call__(self, problem=None, data=None):
        problem = get_default(problem, self.problem)
        problem.set_equations(self.equations)
        problem.select_bcs(ebc_names=self.ebcs, epbc_names=self.epbcs,
                           lcbc_names=self.get('lcbcs', []))
        problem.update_materials(problem.ts)
        self.init_solvers(problem)
        variables = problem.get_variables()
        states = nm.zeros((self.dim,), dtype=nm.object)
        clist = []
        for ir in range(self.dim):
            if isinstance(self.set_variables, list):
                self.set_variables_default(variables, ir,
                                           self.set_variables, data)
            else:
                self.set_variables(variables, ir, **data)
            state = problem.solve()
            assert_(state.has_ebc())
            states[ir] = state.get_parts()
            clist.append((ir,))
        corr_sol = CorrSolution(name=self.name,
                                states=states,
                                components=clist)
        self.save(corr_sol, problem)
        return corr_sol
class CorrDimDim(CorrNN):
    """Correctors with two spatial-dimension indices."""
    pass
class CorrDim(CorrN):
    """Correctors with one spatial-dimension index."""
    pass
class CorrOne(CorrMiniApp):
    """
    A single corrector problem (no component indices).
    """
    def set_variables_default(variables, set_var, data):
        """Set parameter variables from the required corrector states."""
        for (var, req, comp) in set_var:
            variables[var].set_data(data[req].state[comp])
    set_variables_default = staticmethod(set_variables_default)
    def __call__(self, problem=None, data=None):
        problem = get_default(problem, self.problem)
        problem.set_equations(self.equations)
        problem.select_bcs(ebc_names=self.ebcs, epbc_names=self.epbcs,
                           lcbc_names=self.get('lcbcs', []))
        problem.update_materials(problem.ts)
        self.init_solvers(problem)
        variables = problem.get_variables()
        if hasattr(self, 'set_variables'):
            if isinstance(self.set_variables, list):
                self.set_variables_default(variables, self.set_variables,
                                           data)
            else:
                self.set_variables(variables, **data)
        state = problem.solve()
        assert_(state.has_ebc())
        corr_sol = CorrSolution(name=self.name,
                                state=state.get_parts())
        self.save(corr_sol, problem)
        return corr_sol
class CorrSetBCS(CorrMiniApp):
    """
    'Corrector' that only applies the given boundary conditions to a zero
    state -- no equation is solved.
    """
    def __call__(self, problem=None, data=None):
        from sfepy.base.base import select_by_names
        from sfepy.discrete.variables import Variables
        from sfepy.discrete.state import State
        from sfepy.discrete.conditions import Conditions
        problem = get_default(problem, self.problem)
        conf_ebc = select_by_names(problem.conf.ebcs, self.ebcs)
        conf_epbc = select_by_names(problem.conf.epbcs, self.epbcs)
        ebcs = Conditions.from_conf(conf_ebc, problem.domain.regions)
        epbcs = Conditions.from_conf(conf_epbc, problem.domain.regions)
        conf_variables = select_by_names(problem.conf.variables, self.variable)
        variables = Variables.from_conf(conf_variables, problem.fields)
        variables.equation_mapping(ebcs, epbcs, problem.ts, problem.functions)
        # Zero state with essential BC values applied.
        state = State(variables)
        state.fill(0.0)
        state.apply_ebc()
        corr_sol = CorrSolution(name=self.name,
                                state=state.get_parts())
        self.save(corr_sol, problem, variables=variables)
        return corr_sol
class CorrEqPar(CorrOne):
    """
    The corrector which equation can be parametrized via 'eq_pars',
    the dimension is given by the number of parameters.
    Example:
        'equations': 'dw_diffusion.5.Y(mat.k, q, p) =
            dw_surface_integrate.5.%s(q)',
        'eq_pars': ('bYMp', 'bYMm'),
        'class': cb.CorrEqPar,
    """
    def __init__(self, name, problem, kwargs):
        """When dim is not in kwargs, problem dimension is used."""
        CorrMiniApp.__init__(self, name, problem, kwargs)
        self.set_default('dim', len(self.eq_pars))
    def __call__(self, problem=None, data=None):
        problem = get_default(problem, self.problem)
        states = nm.zeros((self.dim,), dtype=nm.object)
        clist = []
        eqns ={}
        for ir in range(self.dim):
            # Substitute the ir-th parameter into every equation.
            for key_eq, val_eq in six.iteritems(self.equations):
                eqns[key_eq] = val_eq % self.eq_pars[ir]
            problem.set_equations(eqns)
            problem.select_bcs(ebc_names=self.ebcs, epbc_names=self.epbcs,
                               lcbc_names=self.get('lcbcs', []))
            problem.update_materials(problem.ts)
            self.init_solvers(problem)
            variables = problem.get_variables()
            if hasattr(self, 'set_variables'):
                if isinstance(self.set_variables, list):
                    self.set_variables_default(variables, self.set_variables,
                                               data)
                else:
                    self.set_variables(variables, **data)
            state = problem.solve()
            assert_(state.has_ebc())
            states[ir] = state.get_parts()
            clist.append((ir,))
        corr_sol = CorrSolution(name=self.name,
                                states=states,
                                components=clist)
        self.save(corr_sol, problem)
        return corr_sol
class PressureEigenvalueProblem(CorrMiniApp):
    """Pressure eigenvalue problem solver for time-dependent correctors."""
    def presolve(self, mtx):
        """Prepare A^{-1} B^T for the Schur complement."""
        mtx_a = mtx['A']
        mtx_bt = mtx['BT']
        output('full A size: %.3f MB' % (8.0 * nm.prod(mtx_a.shape) / 1e6))
        output('full B size: %.3f MB' % (8.0 * nm.prod(mtx_bt.shape) / 1e6))
        ls = Solver.any_from_conf(self.problem.ls_conf
                                  + Struct(use_presolve=True), mtx=mtx_a)
        if self.mode == 'explicit':
            # Explicit mode: materialize A^{-1} B^T column by column.
            timer = Timer(start=True)
            mtx_aibt = nm.zeros(mtx_bt.shape, dtype=mtx_bt.dtype)
            for ic in range(mtx_bt.shape[1]):
                mtx_aibt[:,ic] = ls(mtx_bt[:,ic].toarray().squeeze())
            output('mtx_aibt: %.2f s' % timer.stop())
            action_aibt = MatrixAction.from_array(mtx_aibt)
        else:
            # Implicit mode: apply A^{-1} B^T lazily via a linear solve.
            ##
            # c: 30.08.2007, r: 13.02.2008
            def fun_aibt(vec):
                # Fix me for sparse mtx_bt...
                rhs = sc.dot(mtx_bt, vec)
                out = ls(rhs)
                return out
            action_aibt = MatrixAction.from_function(fun_aibt,
                                                    (mtx_a.shape[0],
                                                     mtx_bt.shape[1]),
                                                    nm.float64)
        mtx['action_aibt'] = action_aibt
    def solve_pressure_eigenproblem(self, mtx, eig_problem=None,
                                    n_eigs=0, check=False):
        """G = B*AI*BT or B*AI*BT+D"""
        def get_slice(n_eigs, nn):
            # Positive: first n_eigs; negative: last |n_eigs|; zero: empty.
            if n_eigs > 0:
                ii = slice(0, n_eigs)
            elif n_eigs < 0:
                ii = slice(nn + n_eigs, nn)
            else:
                ii = slice(0, 0)
            return ii
        eig_problem = get_default(eig_problem, self.eig_problem)
        n_eigs = get_default(n_eigs, self.n_eigs)
        check = get_default(check, self.check)
        mtx_c, mtx_b, action_aibt = mtx['C'], mtx['B'], mtx['action_aibt']
        mtx_g = mtx_b * action_aibt.to_array() # mtx_b must be sparse!
        if eig_problem == 'B*AI*BT+D':
            mtx_g += mtx['D'].toarray()
        mtx['G'] = mtx_g
        output(mtx_c.shape, mtx_g.shape)
        eigs, mtx_q = eig(mtx_c.toarray(), mtx_g, method='eig.sgscipy')
        if check:
            # Verify generalized-eigenproblem orthogonality relations.
            ee = nm.diag(sc.dot(mtx_q.T * mtx_c, mtx_q)).squeeze()
            oo = nm.diag(sc.dot(sc.dot(mtx_q.T, mtx_g), mtx_q)).squeeze()
            try:
                assert_(nm.allclose(ee, eigs))
                assert_(nm.allclose(oo, nm.ones_like(eigs)))
            except ValueError:
                debug()
        nn = mtx_c.shape[0]
        if isinstance(n_eigs, tuple):
            # Keep the smallest n_eigs[0] and the largest n_eigs[1].
            output('required number of eigenvalues: (%d, %d)' % n_eigs)
            if sum(n_eigs) < nn:
                ii0 = get_slice(n_eigs[0], nn)
                ii1 = get_slice(-n_eigs[1], nn)
                eigs = nm.concatenate((eigs[ii0], eigs[ii1]))
                mtx_q = nm.concatenate((mtx_q[:,ii0], mtx_q[:,ii1]), 1)
        else:
            output('required number of eigenvalues: %d' % n_eigs)
            if (n_eigs != 0) and (abs(n_eigs) < nn):
                ii = get_slice(n_eigs, nn)
                eigs = eigs[ii]
                mtx_q = mtx_q[:,ii]
        out = Struct(eigs=eigs, mtx_q=mtx_q)
        return out
    def __call__(self, problem=None, data=None):
        """Assemble the block matrices, presolve A, and solve the pressure
        eigenvalue problem; return matrices and eigensolution."""
        problem = get_default(problem, self.problem)
        problem.set_equations(self.equations)
        problem.select_bcs(ebc_names=self.ebcs, epbc_names=self.epbcs,
                           lcbc_names=self.get('lcbcs', []))
        problem.update_materials()
        mtx = problem.equations.eval_tangent_matrices(problem.create_state()(),
                                                      problem.mtx_a,
                                                      by_blocks=True)
        self.presolve(mtx)
        evp = self.solve_pressure_eigenproblem(mtx)
        return Struct(name=self.name, ebcs=self.ebcs, epbcs=self.epbcs,
                      mtx=mtx, evp=evp)
class TCorrectorsViaPressureEVP(CorrMiniApp):
    """
    Time correctors via the pressure eigenvalue problem.
    """
    def setup_equations(self, equations, problem=None):
        """
        Set equations, update boundary conditions and materials.
        """
        problem = get_default(problem, self.problem)
        problem.set_equations(equations)
        problem.select_bcs(ebc_names=self.ebcs, epbc_names=self.epbcs,
                           lcbc_names=self.get('lcbcs', []))
        problem.update_materials() # Assume parameters constant in time.
    def compute_correctors(self, evp, sign, state0, ts,
                           problem=None, vec_g=None):
        """Evaluate time-dependent correctors from the eigensolution `evp`
        for all steps of the time stepper `ts`; returns a dict of arrays
        keyed by the u/p variables and the time derivative 'd<p>'."""
        problem = get_default(problem, self.problem)
        eigs = evp.evp.eigs
        mtx_q = evp.evp.mtx_q
        mtx = evp.mtx
        nr, nc = mtx_q.shape
        if vec_g is not None:
            output('nonzero pressure EBC: max = %e, min = %e' \
                   % (vec_g.max(), vec_g.min()))
            one = nm.ones((nc,), dtype=nm.float64)
        vu, vp = self.up_variables
        variables = problem.get_variables()
        var_u = variables[vu]
        var_p = variables[vp]
        ##
        # follow_epbc = False -> R1 = - R2 as required. ? for other correctors?
        vec_p0 = sign * var_p.get_reduced(state0[vp], follow_epbc=False)
        # xi0 = Q^{-1} p(0) = Q^T G p(0)
        vec_xi0 = sc.dot(mtx_q.T, sc.dot(mtx['G'],
                                         vec_p0[:,nm.newaxis])).squeeze()
        action_aibt = mtx['action_aibt']
        e_e_qg = 0.0
        iee_e_qg = 0.0
        format = '====== time %%e (step %%%dd of %%%dd) ====='\
                 % ((ts.n_digit,) * 2)
        vu, vp = self.up_variables
        state = {k: [] for k in [vu, vp, 'd' + vp]}
        for step, time in ts:
            output(format % (time, step + 1, ts.n_step))
            e_e = nm.exp(- eigs * time)
            e_e_qp = e_e * vec_xi0 # exp(-Et) Q^{-1} p(0)
            if vec_g is not None:
                Qg = sc.dot(mtx_q.T, vec_g)
                e_e_qg = e_e * Qg
                iee_e_qg = ((one - e_e) / eigs) * Qg
            vec_p = sc.dot(mtx_q, e_e_qp + iee_e_qg)
            vec_dp = - sc.dot(mtx_q, (eigs * e_e_qp - e_e_qg))
            vec_u = action_aibt(vec_dp)
            vec_u = var_u.get_full(vec_u)
            vec_p = var_p.get_full(vec_p)
            # BC nodes - time derivative of constant is zero!
            vec_dp = var_p.get_full(vec_dp, force_value=0.0)
            state[vu].append(vec_u)
            state[vp].append(vec_p)
            state['d' + vp].append(vec_dp)
        return {k: nm.asarray(v) for k, v in state.items()}
    def save(self, corrs, problem, ts):
        """Save each time step, mapping the derivative key 'd<p>' onto the
        pressure variable for output purposes."""
        ts0 = TimeStepper(0, 1)
        ts0.set_from_ts(ts, step=0)
        _, vp = self.up_variables
        for step, _ in ts0:
            icorrs = corrs.get_ts_val(step)
            super(TCorrectorsViaPressureEVP, self).save(icorrs, problem, ts=ts0,
                                                        var_map={'d' + vp: vp})
def create_ts_coef(cls):
    """
    Define a new class with modified call method which accepts
    time dependent data (correctors).

    Parameters
    ----------
    cls : class
        A coefficient mini-application class whose ``__call__`` is to be
        applied separately for each time step.

    Returns
    -------
    TSCoef : class
        Subclass of `cls` whose ``__call__`` evaluates the coefficient for
        every time step found in the data and returns an array with a
        leading time axis of shape (n_step, n_row, n_col).
    """
    class TSCoef(cls):
        def __call__(self, volume=None, problem=None, data=None):
            problem = get_default(problem, self.problem)
            # Split requirements into time-dependent corrector solutions
            # (ts_keys) and steady data (ts_data).
            ts_keys = []
            ts_data = {}
            n_step = None
            for key, val in six.iteritems(data):
                if isinstance(val, CorrSolution) and hasattr(val, 'n_step'):
                    if n_step is None:
                        n_step = val.n_step
                    else:
                        if not(n_step == val.n_step):
                            # All time-dependent correctors must share the
                            # same number of steps.
                            # (fixed message: was 'time' + 'steps' ->
                            # "timesteps" with no space)
                            raise ValueError('incorrect number of time '
                                             'steps in %s!' % self.name)
                    ts_keys.append(key)
                else:
                    ts_data[key] = val
            if n_step is None:
                raise ValueError('no time steps found in %s!' % self.name)
            n_digit = int(nm.log10(n_step)) + 1
            # Renamed from `format` to avoid shadowing the builtin.
            fmt = '====== step %%%dd of %%%dd =====' % ((n_digit,) * 2)
            out = []
            for step in range(n_step):
                output(fmt % (step + 1, n_step))
                for key in ts_keys:
                    ts_data[key] = data[key].get_ts_val(step)
                out.append(cls.__call__(self, volume, problem, ts_data))
            # Normalize to shape (n_step, n_row, n_col).
            out = nm.asarray(out)
            sh = out.shape
            if len(sh) == 2:
                out = out.reshape((sh[0], 1, sh[1]))
            elif len(sh) == 1:
                out = out.reshape((sh[0], 1, 1))
            return out
    return TSCoef
class CoefDummy(MiniAppBase):
    """
    Dummy class serving for computing and returning its requirements.
    """
    def __call__(self, volume=None, problem=None, data=None):
        # Simply pass through the gathered requirements.
        return data
class TSTimes(MiniAppBase):
    """Coefficient-like class, returns times of the time stepper."""
    def __call__(self, volume=None, problem=None, data=None):
        problem = get_default(problem, self.problem)
        # The time stepper is created by the (time-stepping) solver.
        problem.init_solvers()
        return problem.get_timestepper().times
class VolumeFractions(MiniAppBase):
    """Coefficient-like class, returns volume fractions of given regions within
    the whole domain."""

    def __call__(self, volume=None, problem=None, data=None):
        problem = get_default(problem, self.problem)
        vf = {}
        for region_name in self.regions:
            # Absolute region volume, evaluated from the expression...
            equations, variables = problem.create_evaluable(
                self.expression % region_name)
            value = eval_equations(equations, variables).real
            vf['volume_%s' % region_name] = nm.asarray(value,
                                                       dtype=nm.float64)
            # ...and its fraction of the reference volume.
            vf['fraction_%s' % region_name] = \
                vf['volume_%s' % region_name] / self._get_volume(volume)
        return vf
class CoefMN(MiniAppBase):
    """
    Homogenized coefficient with two (row, column) indices; evaluated by
    setting corrector data per row/column and integrating the expression.
    """
    @staticmethod
    def set_variables_default(variables, ir, ic, mode, set_var, data, dtype):
        """Set parameter variables from corrector data for the given
        (ir, ic) indices; `mode` selects the row or column variable set."""
        def get_corr_state(corr, ir, ic):
            # Pick the corrector component matching the active indices.
            if hasattr(corr, 'states'):
                if ir is None:
                    return corr.states[ic]
                elif ic is None:
                    return corr.states[ir]
                else:
                    return corr.states[ir, ic]
            else:
                return corr.state
        if mode == 'row_only':
            act_set_var = set_var
        else:
            mode2var = {'row': 0, 'col': 1}
            aux = set_var[mode2var[mode]]
            act_set_var = aux[:] if isinstance(aux, list) else [aux]
            # Entries beyond the first two apply to both modes.
            act_set_var += set_var[2:]
        for (var, req, comp) in act_set_var:
            if type(req) is tuple:
                # A tuple of requirements means their states are summed.
                val = get_corr_state(data[req[0]], ir, ic)[comp].copy()
                val = nm.asarray(val, dtype=dtype)
                for ii in req[1:]:
                    val += get_corr_state(data[ii], ir, ic)[comp]
            else:
                val = get_corr_state(data[req], ir, ic)[comp]
            variables[var].set_data(val)
    def __init__(self, name, problem, kwargs):
        """When dim is not in kwargs, problem dimension is used."""
        MiniAppBase.__init__(self, name, problem, kwargs)
        self.set_default('dim', problem.get_dim())
    def get_coef(self, row, col, volume, problem, data):
        """Evaluate the coefficient matrix for all (row, col) index pairs,
        normalized by the volume."""
        problem = get_default(problem, self.problem)
        term_mode = self.term_mode
        equations, variables = problem.create_evaluable(self.expression,
                                                        term_mode=term_mode)
        coef = nm.zeros((len(row), len(col)), dtype=self.dtype)
        for ir, (irr, icr) in enumerate(row):
            if isinstance(self.set_variables, list):
                self.set_variables_default(variables, irr, icr, 'row',
                                           self.set_variables, data,
                                           self.dtype)
            else:
                self.set_variables(variables, irr, icr, 'row', **data)
            for ic, (irc, icc) in enumerate(col):
                if isinstance(self.set_variables, list):
                    self.set_variables_default(variables, irc, icc, 'col',
                                               self.set_variables, data,
                                               self.dtype)
                else:
                    self.set_variables(variables, irc, icc, 'col', **data)
                val = eval_equations(equations, variables, term_mode=term_mode)
                coef[ir, ic] = val
        coef /= self._get_volume(volume)
        return coef
    def __call__(self, volume, problem=None, data=None):
        # dim may be a scalar or a (dim1, dim2) pair for rectangular
        # coefficient matrices.
        if isinstance(self.dim, Iterable) and len(self.dim) >= 2:
            dim1, dim2 = self.dim[:2]
        else:
            dim1 = dim2 = self.dim
        row = [(ii, None) for ii in range(dim1)]
        col = [(None, ii) for ii in range(dim2)]
        return self.get_coef(row, col, volume, problem, data)
class CoefDimDim(CoefMN):
    """Coefficient with two spatial-dimension indices (dim x dim matrix)."""
    pass
class CoefSymSym(CoefMN):
    """Coefficient with two symmetric-tensor indices (sym x sym matrix)."""
    iter_sym = staticmethod(iter_sym)
    is_sym = True
    def __call__(self, volume, problem=None, data=None):
        problem = get_default(problem, self.problem)
        # All symmetric second-order tensor index pairs for the dimension.
        isym = [ii for ii in self.iter_sym(problem.get_dim())]
        return self.get_coef(isym, isym, volume, problem, data)
class CoefNonSymNonSym(CoefSymSym):
    """As CoefSymSym, but iterating over all (non-symmetric) index pairs."""
    iter_sym = staticmethod(iter_nonsym)
    is_sym = False
class CoefDimSym(CoefMN):

    def __call__(self, volume, problem=None, data=None):
        """Evaluate a dim x sym coefficient matrix."""
        problem = get_default(problem, self.problem)
        dim = problem.get_dim()
        rows = [(ir, None) for ir in range(dim)]
        cols = list(iter_sym(dim))
        return self.get_coef(rows, cols, volume, problem, data)
class CoefN(CoefMN):
    """
    Coefficient with a single (row) index; column index is unused.
    """
    @staticmethod
    def set_variables_default(variables, ir, ic, mode, set_var, data, dtype):
        # Single-index coefficients only use the row variable set.
        mode = mode + '_only'
        CoefMN.set_variables_default(variables, ir, ic, mode, set_var, data,
                                     dtype)
    def get_coef(self, row, volume, problem, data):
        """Evaluate the coefficient vector for all row indices, normalized
        by the volume."""
        problem = get_default(problem, self.problem)
        term_mode = self.term_mode
        equations, variables = problem.create_evaluable(self.expression,
                                                        term_mode=term_mode)
        coef = nm.zeros((len(row),), dtype=self.dtype)
        for ii, (ir, ic) in enumerate(row):
            if isinstance(self.set_variables, list):
                self.set_variables_default(variables, ir, ic, 'row',
                                           self.set_variables, data, self.dtype)
            else:
                self.set_variables(variables, ir, ic, 'row', **data)
            val = eval_equations(equations, variables, term_mode=term_mode)
            coef[ii] = val
        coef /= self._get_volume(volume)
        return coef
    def __call__(self, volume, problem=None, data=None):
        row = [(ii, None) for ii in range(self.dim)]
        return self.get_coef(row, volume, problem, data)
class CoefDim(CoefN):
    """Coefficient with one spatial-dimension index."""
    pass
class CoefSym(CoefN):
    iter_sym = staticmethod(iter_sym)
    is_sym = True

    def __call__(self, volume, problem=None, data=None):
        """Evaluate a coefficient with one symmetric-tensor index."""
        problem = get_default(problem, self.problem)
        sym_indices = list(self.iter_sym(problem.get_dim()))
        return self.get_coef(sym_indices, volume, problem, data)
class CoefNonSym(CoefSym):
    """As CoefSym, but iterating over all (non-symmetric) index pairs."""
    iter_sym = staticmethod(iter_nonsym)
    is_sym = False
class CoefOne(MiniAppBase):
    """
    A single scalar coefficient (no component indices).
    """
    def set_variables_default(variables, set_var, data, dtype):
        """Set parameter variables from the required corrector states; a
        tuple of requirements means their states are summed."""
        for (var, req, comp) in set_var:
            if type(req) is tuple:
                val = data[req[0]].state[comp].copy()
                val = nm.asarray(val, dtype=dtype)
                for ii in req[1:]:
                    val += data[ii].state[comp]
            else:
                val = data[req].state[comp]
            variables[var].set_data(val)
    set_variables_default = staticmethod(set_variables_default)
    def __call__(self, volume, problem=None, data=None):
        problem = get_default(problem, self.problem)
        term_mode = self.term_mode
        equations, variables = problem.create_evaluable(self.expression,
                                                        term_mode=term_mode)
        if hasattr(self, 'set_variables'):
            if isinstance(self.set_variables, list):
                self.set_variables_default(variables, self.set_variables,
                                           data, self.dtype)
            else:
                self.set_variables(variables, **data)
        val = eval_equations(equations, variables,
                             term_mode=term_mode)
        # Normalize by the (selected) volume.
        coef = val / self._get_volume(volume)
        return coef
class CoefSum(MiniAppBase):
    """Sum of the already-computed coefficients named in ``requires``."""

    def __call__(self, volume, problem=None, data=None):
        total = nm.zeros_like(data[self.requires[0]])
        for key in map(rm_multi, self.requires):
            total = total + data[key]
        return total
class CoefEval(MiniAppBase):
    """
    Evaluate expression.
    """

    def __call__(self, volume, problem=None, data=None):
        expr = self.expression
        # Textually substitute each requirement name with a data[...] lookup.
        # NOTE(review): plain str.replace() — a requirement name that is a
        # substring of another name would be substituted incorrectly.
        for req in map(rm_multi, self.requires):
            expr = expr.replace(req, "data['%s']" % req)
        # SECURITY: eval() of a configuration-supplied expression; safe only
        # as long as problem descriptions come from trusted sources.
        coef = eval(expr)
        return coef
class CoefNone(MiniAppBase):
    """Placeholder coefficient: always evaluates to zero."""

    def __call__(self, volume, problem=None, data=None):
        return 0.0
class CoefExprPar(MiniAppBase):
    """
    The coefficient which expression can be parametrized via 'expr_pars',
    the dimension is given by the number of parameters.

    Example:

        'expression': 'dw_surface_ndot.5.Ys(mat_norm.k%d, corr1)',
        'expr_pars': [ii for ii in range(dim)],
        'class': cb.CoefExprPar,
    """

    def set_variables_default(variables, ir, set_var, data):
        # Per-parameter variable setter: prefers the ir-th entry of a
        # multi-state requirement ('states'), else the single 'state'.
        for (var, req, comp) in set_var:
            if hasattr(data[req], 'states'):
                variables[var].set_data(data[req].states[ir][comp])
            else:
                variables[var].set_data(data[req].state[comp])
    set_variables_default = staticmethod(set_variables_default)

    def __init__(self, name, problem, kwargs):
        """When dim is not in kwargs, problem dimension is used."""
        MiniAppBase.__init__(self, name, problem, kwargs)
        # One coefficient component per expression parameter.
        dim = len(self.expr_pars)
        self.set_default('dim', dim)

    def __call__(self, volume, problem=None, data=None):
        """Evaluate the expression once per parameter; normalize by volume."""
        problem = get_default(problem, self.problem)
        coef = nm.zeros((self.dim,), dtype=self.dtype)
        term_mode = self.term_mode
        for ir in range(self.dim):
            # Substitute the ir-th parameter into the expression template.
            expression = self.expression % self.expr_pars[ir]
            equations, variables = \
                problem.create_evaluable(expression, term_mode=term_mode)
            if isinstance(self.set_variables, list):
                self.set_variables_default(variables, ir, self.set_variables,
                                           data)
            else:
                self.set_variables(variables, ir, **data)
            val = eval_equations(equations, variables,
                                 term_mode=term_mode)
            coef[ir] = val
        coef /= self._get_volume(volume)
        return coef
|
DLR-SC/tigl | refs/heads/master | thirdparty/googletest/googletest/test/googletest-param-test-invalid-name2-test.py | 122 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
import gtest_test_utils
binary_name = 'googletest-param-test-invalid-name2-test_'
COMMAND = gtest_test_utils.GetTestExecutablePath(binary_name)
def Assert(condition, msg=None):
  """Raises AssertionError if condition is falsy.

  Args:
    condition: value checked for truthiness.
    msg: optional message attached to the raised AssertionError; the bare
        `raise AssertionError` of the original gave no context at all.
  """
  if not condition:
    raise AssertionError(msg if msg is not None else 'Assertion failed')
def TestExitCodeAndOutput(command):
  """Runs the given command and verifies its exit code and output."""
  expected_error = "Duplicate parameterized test name 'a'"
  process = gtest_test_utils.Subprocess(command)
  # The child binary is expected to abort (die by signal) ...
  Assert(process.terminated_by_signal)
  # ... and to report the duplicate-name diagnostic.
  Assert(expected_error in process.output)
class GTestParamTestInvalidName2Test(gtest_test_utils.TestCase):
  # Thin TestCase wrapper; the real work is in the module-level helper.

  def testExitCodeAndOutput(self):
    TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
|
tboyce1/home-assistant | refs/heads/dev | homeassistant/components/snips.py | 4 | """
Support for Snips on-device ASR and NLU.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/snips/
"""
import asyncio
import json
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.helpers import intent, config_validation as cv
import homeassistant.components.mqtt as mqtt
DOMAIN = 'snips'
DEPENDENCIES = ['mqtt']
CONF_INTENTS = 'intents'
CONF_ACTION = 'action'
SERVICE_SAY = 'say'
SERVICE_SAY_ACTION = 'say_action'
INTENT_TOPIC = 'hermes/intent/#'
ATTR_TEXT = 'text'
ATTR_SITE_ID = 'site_id'
ATTR_CUSTOM_DATA = 'custom_data'
ATTR_CAN_BE_ENQUEUED = 'can_be_enqueued'
ATTR_INTENT_FILTER = 'intent_filter'
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({
DOMAIN: {}
}, extra=vol.ALLOW_EXTRA)
INTENT_SCHEMA = vol.Schema({
vol.Required('input'): str,
vol.Required('intent'): {
vol.Required('intentName'): str
},
vol.Optional('slots'): [{
vol.Required('slotName'): str,
vol.Required('value'): {
vol.Required('kind'): str,
vol.Optional('value'): cv.match_all,
vol.Optional('rawValue'): cv.match_all
}
}]
}, extra=vol.ALLOW_EXTRA)
SERVICE_SCHEMA_SAY = vol.Schema({
vol.Required(ATTR_TEXT): str,
vol.Optional(ATTR_SITE_ID, default='default'): str,
vol.Optional(ATTR_CUSTOM_DATA, default=''): str
})
SERVICE_SCHEMA_SAY_ACTION = vol.Schema({
vol.Required(ATTR_TEXT): str,
vol.Optional(ATTR_SITE_ID, default='default'): str,
vol.Optional(ATTR_CUSTOM_DATA, default=''): str,
vol.Optional(ATTR_CAN_BE_ENQUEUED, default=True): cv.boolean,
vol.Optional(ATTR_INTENT_FILTER): vol.All(cv.ensure_list),
})
@asyncio.coroutine
def async_setup(hass, config):
    """Activate Snips component.

    Subscribes to the Hermes intent topic on MQTT and registers the
    ``snips.say`` / ``snips.say_action`` services.
    """
    @asyncio.coroutine
    def message_received(topic, payload, qos):
        """Handle new messages on MQTT."""
        _LOGGER.debug("New intent: %s", payload)
        try:
            request = json.loads(payload)
        except (TypeError, ValueError):
            # json.loads raises TypeError for non-string payloads but
            # ValueError (json.JSONDecodeError) for malformed JSON; the
            # original code caught only TypeError, so a bad payload
            # crashed the handler instead of being logged.
            _LOGGER.error('Received invalid JSON: %s', payload)
            return
        try:
            request = INTENT_SCHEMA(request)
        except vol.Invalid as err:
            _LOGGER.error('Intent has invalid schema: %s. %s', err, request)
            return
        # Strip the console-generated 'user_XX__' / 'owner:' style prefixes
        # so Home Assistant sees the plain intent name.
        if request['intent']['intentName'].startswith('user_'):
            intent_type = request['intent']['intentName'].split('__')[-1]
        else:
            intent_type = request['intent']['intentName'].split(':')[-1]
        snips_response = None
        slots = {}
        for slot in request.get('slots', []):
            slots[slot['slotName']] = {'value': resolve_slot_values(slot)}
        try:
            intent_response = yield from intent.async_handle(
                hass, DOMAIN, intent_type, slots, request['input'])
            if 'plain' in intent_response.speech:
                snips_response = intent_response.speech['plain']['speech']
        except intent.UnknownIntent:
            _LOGGER.warning("Received unknown intent %s",
                            request['intent']['intentName'])
        except intent.IntentError:
            _LOGGER.exception("Error while handling intent: %s.", intent_type)
        if snips_response:
            # End the dialogue session with the spoken answer.
            notification = {'sessionId': request.get('sessionId', 'default'),
                            'text': snips_response}
            _LOGGER.debug("send_response %s", json.dumps(notification))
            mqtt.async_publish(hass, 'hermes/dialogueManager/endSession',
                               json.dumps(notification))

    yield from hass.components.mqtt.async_subscribe(
        INTENT_TOPIC, message_received)

    @asyncio.coroutine
    def snips_say(call):
        """Send a Snips notification message."""
        notification = {'siteId': call.data.get(ATTR_SITE_ID, 'default'),
                        'customData': call.data.get(ATTR_CUSTOM_DATA, ''),
                        'init': {'type': 'notification',
                                 'text': call.data.get(ATTR_TEXT)}}
        mqtt.async_publish(hass, 'hermes/dialogueManager/startSession',
                           json.dumps(notification))
        return

    @asyncio.coroutine
    def snips_say_action(call):
        """Send a Snips action message (expects a follow-up answer)."""
        notification = {'siteId': call.data.get(ATTR_SITE_ID, 'default'),
                        'customData': call.data.get(ATTR_CUSTOM_DATA, ''),
                        'init': {'type': 'action',
                                 'text': call.data.get(ATTR_TEXT),
                                 'canBeEnqueued': call.data.get(
                                     ATTR_CAN_BE_ENQUEUED, True),
                                 'intentFilter':
                                     call.data.get(ATTR_INTENT_FILTER, [])}}
        mqtt.async_publish(hass, 'hermes/dialogueManager/startSession',
                           json.dumps(notification))
        return

    hass.services.async_register(
        DOMAIN, SERVICE_SAY, snips_say,
        schema=SERVICE_SCHEMA_SAY)
    hass.services.async_register(
        DOMAIN, SERVICE_SAY_ACTION, snips_say_action,
        schema=SERVICE_SCHEMA_SAY_ACTION)
    return True
def resolve_slot_values(slot):
    """Convert snips builtin types to usable values.

    Returns the resolved slot value; for ``snips/duration`` entities the
    value is the duration expressed in whole seconds.
    """
    if 'value' in slot['value']:
        value = slot['value']['value']
    else:
        # Hermes puts the raw (unresolved) text at the slot's top level.
        value = slot['rawValue']

    if slot.get('entity') == "snips/duration":
        delta = timedelta(weeks=slot['value']['weeks'],
                          days=slot['value']['days'],
                          hours=slot['value']['hours'],
                          minutes=slot['value']['minutes'],
                          seconds=slot['value']['seconds'])
        # Bug fix: `delta.seconds` is only the 0-86399 seconds component
        # and silently drops the days/weeks contribution; total_seconds()
        # covers the whole duration.
        value = int(delta.total_seconds())

    return value
|
duncanHsu/CowMQ-Python | refs/heads/master | cow_mq/__init__.py | 12133432 | |
tralamazza/micropython | refs/heads/master | tests/cpydiff/modules2/subpkg/bar.py | 12133432 | |
eneldoserrata/marcos_openerp | refs/heads/master | addons/report_geraldo/lib/geraldo/site/newsite/site-geraldo/django/contrib/localflavor/no/__init__.py | 12133432 | |
varunarya10/rally | refs/heads/master | tests/unit/benchmark/__init__.py | 12133432 | |
jpirates1/Django-python-pro | refs/heads/master | venv/lib/python2.7/site-packages/setuptools/command/build_py.py | 301 | from glob import glob
from distutils.util import convert_path
import distutils.command.build_py as orig
import os
import sys
import fnmatch
import textwrap
try:
    from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
    # Fallback when lib2to3 support is not available: the 2to3 conversion
    # step becomes a no-op so build_py still works.
    class Mixin2to3:
        def run_2to3(self, files, doctests=True):
            "do nothing"
class build_py(orig.build_py, Mixin2to3):
    """Enhanced 'build_py' command that includes data files with packages

    The data files are specified via a 'package_data' argument to 'setup()'.
    See 'setuptools.dist.Distribution' for more details.

    Also, this version of the 'build_py' command allows you to specify both
    'py_modules' and 'packages' in the same setup operation.
    """

    def finalize_options(self):
        orig.build_py.finalize_options(self)
        self.package_data = self.distribution.package_data
        self.exclude_package_data = (self.distribution.exclude_package_data or
                                     {})
        if 'data_files' in self.__dict__:
            # Force lazy recomputation through __getattr__.
            del self.__dict__['data_files']
        self.__updated_files = []
        self.__doctests_2to3 = []

    def run(self):
        """Build modules, packages, and copy data files to build directory"""
        if not self.py_modules and not self.packages:
            return

        if self.py_modules:
            self.build_modules()

        if self.packages:
            self.build_packages()
            self.build_package_data()

        self.run_2to3(self.__updated_files, False)
        self.run_2to3(self.__updated_files, True)
        self.run_2to3(self.__doctests_2to3, True)

        # Only compile actual .py files, using our base class' idea of what our
        # output files are.
        self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))

    def __getattr__(self, attr):
        if attr == 'data_files':  # lazily compute data files
            self.data_files = files = self._get_data_files()
            return files
        return orig.build_py.__getattr__(self, attr)

    def build_module(self, module, module_file, package):
        outfile, copied = orig.build_py.build_module(self, module, module_file,
                                                     package)
        if copied:
            # Remember copied modules so run_2to3 can convert them later.
            self.__updated_files.append(outfile)
        return outfile, copied

    def _get_data_files(self):
        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
        self.analyze_manifest()
        data = []
        for package in self.packages or ():
            # Locate package source directory
            src_dir = self.get_package_dir(package)

            # Compute package build directory
            build_dir = os.path.join(*([self.build_lib] + package.split('.')))

            # Length of path to strip from found files
            plen = len(src_dir) + 1

            # Strip directory from globbed filenames
            filenames = [
                file[plen:] for file in self.find_data_files(package, src_dir)
            ]
            data.append((package, src_dir, build_dir, filenames))
        return data

    def find_data_files(self, package, src_dir):
        """Return filenames for package's data files in 'src_dir'"""
        globs = (self.package_data.get('', [])
                 + self.package_data.get(package, []))
        files = self.manifest_files.get(package, [])[:]
        for pattern in globs:
            # Each pattern has to be converted to a platform-specific path
            files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
        return self.exclude_data_files(package, src_dir, files)

    def build_package_data(self):
        """Copy data files into build directory"""
        for package, src_dir, build_dir, filenames in self.data_files:
            for filename in filenames:
                target = os.path.join(build_dir, filename)
                self.mkpath(os.path.dirname(target))
                srcfile = os.path.join(src_dir, filename)
                outf, copied = self.copy_file(srcfile, target)
                srcfile = os.path.abspath(srcfile)
                if (copied and
                        srcfile in self.distribution.convert_2to3_doctests):
                    self.__doctests_2to3.append(outf)

    def analyze_manifest(self):
        self.manifest_files = mf = {}
        if not self.distribution.include_package_data:
            return
        src_dirs = {}
        for package in self.packages or ():
            # Locate package source directory
            src_dirs[assert_relative(self.get_package_dir(package))] = package

        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        for path in ei_cmd.filelist.files:
            d, f = os.path.split(assert_relative(path))
            prev = None
            oldf = f
            # Walk up until we find a directory that is a package source dir.
            while d and d != prev and d not in src_dirs:
                prev = d
                d, df = os.path.split(d)
                f = os.path.join(df, f)
            if d in src_dirs:
                if path.endswith('.py') and f == oldf:
                    continue  # it's a module, not data
                mf.setdefault(src_dirs[d], []).append(path)

    def get_data_files(self):
        pass  # Lazily compute data files in _get_data_files() function.

    def check_package(self, package, package_dir):
        """Check namespace packages' __init__ for declare_namespace"""
        try:
            return self.packages_checked[package]
        except KeyError:
            pass

        init_py = orig.build_py.check_package(self, package, package_dir)
        self.packages_checked[package] = init_py

        if not init_py or not self.distribution.namespace_packages:
            return init_py

        for pkg in self.distribution.namespace_packages:
            if pkg == package or pkg.startswith(package + '.'):
                break
        else:
            return init_py

        # Bug fix: the original used open(init_py, 'rbU') -- the 'U' flag is
        # invalid in combination with 'b' on Python 3 (and was removed
        # entirely in 3.11) -- and leaked the file handle when the check
        # raised. Read in binary via a context manager instead.
        with open(init_py, 'rb') as f:
            contents = f.read()
        if b'declare_namespace' not in contents:
            from distutils.errors import DistutilsError

            raise DistutilsError(
                "Namespace package problem: %s is a namespace package, but "
                "its\n__init__.py does not call declare_namespace()! Please "
                'fix it.\n(See the setuptools manual under '
                '"Namespace Packages" for details.)\n"' % (package,)
            )
        return init_py

    def initialize_options(self):
        # Cache of package -> __init__.py results for check_package().
        self.packages_checked = {}
        orig.build_py.initialize_options(self)

    def get_package_dir(self, package):
        res = orig.build_py.get_package_dir(self, package)
        if self.distribution.src_root is not None:
            return os.path.join(self.distribution.src_root, res)
        return res

    def exclude_data_files(self, package, src_dir, files):
        """Filter filenames for package's data files in 'src_dir'"""
        globs = (self.exclude_package_data.get('', [])
                 + self.exclude_package_data.get(package, []))
        bad = []
        for pattern in globs:
            bad.extend(
                fnmatch.filter(
                    files, os.path.join(src_dir, convert_path(pattern))
                )
            )
        bad = dict.fromkeys(bad)
        seen = {}
        return [
            f for f in files if f not in bad
            and f not in seen and seen.setdefault(f, 1)  # ditch dupes
        ]
def exclude_data_files(self, package, src_dir, files):
"""Filter filenames for package's data files in 'src_dir'"""
globs = (self.exclude_package_data.get('', [])
+ self.exclude_package_data.get(package, []))
bad = []
for pattern in globs:
bad.extend(
fnmatch.filter(
files, os.path.join(src_dir, convert_path(pattern))
)
)
bad = dict.fromkeys(bad)
seen = {}
return [
f for f in files if f not in bad
and f not in seen and seen.setdefault(f, 1) # ditch dupes
]
def assert_relative(path):
    """Return *path* unchanged when it is relative; abort setup otherwise."""
    if os.path.isabs(path):
        from distutils.errors import DistutilsSetupError
        msg = textwrap.dedent("""
            Error: setup script specifies an absolute path:

                %s

            setup() arguments must *always* be /-separated paths relative to the
            setup.py directory, *never* absolute paths.
            """).lstrip() % path
        raise DistutilsSetupError(msg)
    return path
|
jeremycline/pulp | refs/heads/master | playpen/deploy/utils/__init__.py | 12133432 | |
dusenberrymw/incubator-systemml | refs/heads/master | src/main/python/systemml/random/sampling.py | 13 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = ['normal', 'uniform', 'poisson']
from ..defmatrix import *
# Special object used internally to specify the placeholder which will be replaced by output ID
# This helps to provide dml containing output ID in constructSamplingNode
OUTPUT_ID = '$$OutputID$$'
def constructSamplingNode(inputs, dml):
    """
    Convenient utility to create an intermediate of AST.

    Parameters
    ----------
    inputs = list of input matrix objects and/or DMLOp
    dml = list of DML string (which will be eventually joined before execution). To specify out.ID, please use the placeholder
    """
    dmlOp = DMLOp(inputs)
    out = matrix(None, op=dmlOp)
    # Substitute the OUTPUT_ID placeholder with the freshly assigned ID of
    # the output matrix; other fragments pass through unchanged.
    dmlOp.dml = [out.ID if x==OUTPUT_ID else x for x in dml]
    return out
INPUTS = []
def asStr(arg):
    """
    Internal use only: Convenient utility to update inputs and return
    appropriate string value.
    """
    if isinstance(arg, matrix):
        # Bug fix: the original did ``INPUTS = INPUTS + [arg]`` which rebinds
        # INPUTS as a *local* before it is read, raising UnboundLocalError
        # for every matrix argument. Mutate the module-level list in place.
        INPUTS.append(arg)
        return arg.ID
    else:
        return str(arg)
def normal(loc=0.0, scale=1.0, size=(1, 1), sparsity=1.0):
    """
    Draw random samples from a normal (Gaussian) distribution.

    Parameters
    ----------
    loc: Mean ("centre") of the distribution.
    scale: Standard deviation (spread or "width") of the distribution.
    size: Output shape (only tuple of length 2, i.e. (m, n), supported).
    sparsity: Sparsity (between 0.0 and 1.0).

    Examples
    --------
    >>> import systemml as sml
    >>> import numpy as np
    >>> sml.setSparkContext(sc)
    >>> from systemml import random
    >>> m1 = sml.random.normal(loc=3, scale=2, size=(3,3))
    >>> m1.toNumPy()
    array([[ 3.48857226,  6.17261819,  2.51167259],
           [ 3.60506708, -1.90266305,  3.97601633],
           [ 3.62245706,  5.9430881 ,  2.53070413]])
    """
    if len(size) != 2:
        raise TypeError('Incorrect type for size. Expected tuple of length 2')
    # Bug fix: the original reset a *local* INPUTS and relied on asStr()
    # (which crashed on matrix args), so matrix-valued parameters were never
    # registered as inputs of the sampling node. Collect them locally.
    inputs = []

    def _as_str(arg):
        if isinstance(arg, matrix):
            inputs.append(arg)
            return arg.ID
        return str(arg)

    rows = _as_str(size[0])
    cols = _as_str(size[1])
    loc = _as_str(loc)
    scale = _as_str(scale)
    sparsity = _as_str(sparsity)
    # loc + scale*standard normal
    return constructSamplingNode(inputs, [
        OUTPUT_ID, ' = ', loc, ' + ', scale,
        ' * random.normal(', rows, ',', cols, ',', sparsity, ')\n'])
def uniform(low=0.0, high=1.0, size=(1, 1), sparsity=1.0):
    """
    Draw samples from a uniform distribution.

    Parameters
    ----------
    low: Lower boundary of the output interval.
    high: Upper boundary of the output interval.
    size: Output shape (only tuple of length 2, i.e. (m, n), supported).
    sparsity: Sparsity (between 0.0 and 1.0).

    Examples
    --------
    >>> import systemml as sml
    >>> import numpy as np
    >>> sml.setSparkContext(sc)
    >>> from systemml import random
    >>> m1 = sml.random.uniform(size=(3,3))
    >>> m1.toNumPy()
    array([[ 0.54511396,  0.11937437,  0.72975775],
           [ 0.14135946,  0.01944448,  0.52544478],
           [ 0.67582422,  0.87068849,  0.02766852]])
    """
    if len(size) != 2:
        raise TypeError('Incorrect type for size. Expected tuple of length 2')
    # Bug fix: register matrix-valued arguments as sampling-node inputs
    # locally (the module-level asStr()/INPUTS protocol was broken).
    inputs = []

    def _as_str(arg):
        if isinstance(arg, matrix):
            inputs.append(arg)
            return arg.ID
        return str(arg)

    rows = _as_str(size[0])
    cols = _as_str(size[1])
    low = _as_str(low)
    high = _as_str(high)
    sparsity = _as_str(sparsity)
    return constructSamplingNode(inputs, [
        OUTPUT_ID, ' = random.uniform(', rows, ',', cols, ',',
        sparsity, ',', low, ',', high, ')\n'])
def poisson(lam=1.0, size=(1, 1), sparsity=1.0):
    """
    Draw samples from a Poisson distribution.

    Parameters
    ----------
    lam: Expectation of interval, should be > 0.
    size: Output shape (only tuple of length 2, i.e. (m, n), supported).
    sparsity: Sparsity (between 0.0 and 1.0).

    Examples
    --------
    >>> import systemml as sml
    >>> import numpy as np
    >>> sml.setSparkContext(sc)
    >>> from systemml import random
    >>> m1 = sml.random.poisson(lam=1, size=(3,3))
    >>> m1.toNumPy()
    array([[ 1.,  0.,  2.],
           [ 1.,  0.,  0.],
           [ 0.,  0.,  0.]])
    """
    if len(size) != 2:
        raise TypeError('Incorrect type for size. Expected tuple of length 2')
    # Bug fix: register matrix-valued arguments as sampling-node inputs
    # locally (the module-level asStr()/INPUTS protocol was broken).
    inputs = []

    def _as_str(arg):
        if isinstance(arg, matrix):
            inputs.append(arg)
            return arg.ID
        return str(arg)

    rows = _as_str(size[0])
    cols = _as_str(size[1])
    lam = _as_str(lam)
    sparsity = _as_str(sparsity)
    return constructSamplingNode(inputs, [
        OUTPUT_ID, ' = random.poisson(', rows, ',', cols, ',',
        sparsity, ',', lam, ')\n'])
|
donkirkby/django | refs/heads/master | django/contrib/gis/gdal/field.py | 355 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
    """
    This class wraps an OGR Field, and needs to be instantiated
    from a Feature object.
    """

    def __init__(self, feat, index):
        """
        Initializes on the feature object and the integer index of
        the field within the feature.
        """
        # Setting the feature pointer and index.
        self._feat = feat
        self._index = index

        # Getting the pointer for this field.
        fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
        if not fld_ptr:
            raise GDALException('Cannot create OGR Field, invalid pointer given.')
        self.ptr = fld_ptr

        # Setting the class depending upon the OGR Field Type (OFT).
        # Note: the instance is re-classed in place to the matching subclass.
        self.__class__ = OGRFieldTypes[self.type]

        # OFTReal with no precision should be an OFTInteger.
        if isinstance(self, OFTReal) and self.precision == 0:
            self.__class__ = OFTInteger
            self._double = True

    def __str__(self):
        "Returns the string representation of the Field."
        return str(self.value).strip()

    # #### Field Methods ####
    def as_double(self):
        "Retrieves the Field's value as a double (float)."
        return capi.get_field_as_double(self._feat.ptr, self._index)

    def as_int(self, is_64=False):
        "Retrieves the Field's value as an integer."
        if is_64:
            return capi.get_field_as_integer64(self._feat.ptr, self._index)
        else:
            return capi.get_field_as_integer(self._feat.ptr, self._index)

    def as_string(self):
        "Retrieves the Field's value as a string."
        string = capi.get_field_as_string(self._feat.ptr, self._index)
        return force_text(string, encoding=self._feat.encoding, strings_only=True)

    def as_datetime(self):
        "Retrieves the Field's value as a tuple of date & time components."
        # The components come back as ctypes c_int wrappers; callers read
        # the plain integers via their .value attribute.
        yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
        status = capi.get_field_as_datetime(
            self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
            byref(hh), byref(mn), byref(ss), byref(tz))
        if status:
            return (yy, mm, dd, hh, mn, ss, tz)
        else:
            raise GDALException('Unable to retrieve date & time information from the field.')

    # #### Field Properties ####
    @property
    def name(self):
        "Returns the name of this Field."
        name = capi.get_field_name(self.ptr)
        return force_text(name, encoding=self._feat.encoding, strings_only=True)

    @property
    def precision(self):
        "Returns the precision of this Field."
        return capi.get_field_precision(self.ptr)

    @property
    def type(self):
        "Returns the OGR type of this Field."
        return capi.get_field_type(self.ptr)

    @property
    def type_name(self):
        "Return the OGR field type name for this Field."
        return capi.get_field_type_name(self.type)

    @property
    def value(self):
        "Returns the value of this Field."
        # Default is to get the field as a string.
        return self.as_string()

    @property
    def width(self):
        "Returns the width of this Field."
        return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
    # _double is set to True by Field.__init__ when an OFTReal field with
    # precision 0 is re-classed to OFTInteger; the value must then be read
    # as a double. _bit64 selects the 64-bit getter (see OFTInteger64).
    _double = False
    _bit64 = False

    @property
    def value(self):
        "Returns an integer contained in this field."
        if self._double:
            # If this is really from an OFTReal field with no precision,
            # read as a double and cast as Python int (to prevent overflow).
            return int(self.as_double())
        else:
            return self.as_int(self._bit64)

    @property
    def type(self):
        """
        GDAL uses OFTReals to represent OFTIntegers in created
        shapefiles -- forcing the type here since the underlying field
        type may actually be OFTReal.
        """
        return 0
class OFTReal(Field):
    # Floating-point field; reads through the C-level double getter.
    @property
    def value(self):
        "Returns a float contained in this field."
        return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field):
    # String field; inherits Field.value (string-based) unchanged.
    pass
class OFTWideString(Field):
    # Wide-string field; inherits Field.value (string-based) unchanged.
    pass
class OFTBinary(Field):
    # Binary field; inherits Field.value (string-based) unchanged.
    pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
    @property
    def value(self):
        "Returns a Python `date` object for the OFTDate field."
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return date(yy.value, mm.value, dd.value)
        except (ValueError, GDALException):
            # Unset/invalid dates yield None instead of raising.
            return None
class OFTDateTime(Field):
    @property
    def value(self):
        "Returns a Python `datetime` object for this OFTDateTime field."
        # TODO: Adapt timezone information.
        #  See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
        #  The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
        #  100=GMT, 104=GMT+1, 80=GMT-5, etc.
        # NOTE(review): `tz` is currently discarded, so the returned datetime
        # is naive.
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            # Unset/invalid timestamps yield None instead of raising.
            return None
class OFTTime(Field):
    @property
    def value(self):
        "Returns a Python `time` object for this OFTTime field."
        try:
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return time(hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            # Unset/invalid times yield None instead of raising.
            return None
class OFTInteger64(OFTInteger):
    # Same as OFTInteger but reads via the 64-bit getter (GDAL 2+).
    _bit64 = True
# List fields are also just subclasses
class OFTIntegerList(Field):
    # Integer-list field; inherits Field.value (string-based) unchanged.
    pass
class OFTRealList(Field):
    # Real-list field; inherits Field.value (string-based) unchanged.
    pass
class OFTStringList(Field):
    # String-list field; inherits Field.value (string-based) unchanged.
    pass
class OFTWideStringList(Field):
    # Wide-string-list field; inherits Field.value (string-based) unchanged.
    pass
class OFTInteger64List(Field):
    # 64-bit integer-list field; inherits Field.value (string-based) unchanged.
    pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = {
0: OFTInteger,
1: OFTIntegerList,
2: OFTReal,
3: OFTRealList,
4: OFTString,
5: OFTStringList,
6: OFTWideString,
7: OFTWideStringList,
8: OFTBinary,
9: OFTDate,
10: OFTTime,
11: OFTDateTime,
# New 64-bit integer types in GDAL 2
12: OFTInteger64,
13: OFTInteger64List,
}
ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
|
JazzeYoung/VeryDeepAutoEncoder | refs/heads/master | pylearn2/pylearn2/costs/mlp/missing_target_cost.py | 34 | """
The MissingTargetCost class.
"""
__author__ = 'Vincent Archambault-Bouffard'
from functools import wraps
import theano.tensor as T
from pylearn2.costs.cost import Cost
from pylearn2.space import CompositeSpace
class MissingTargetCost(Cost):
    """
    Dropout but with some targets optionally missing. The missing target is
    indicated by a value of -1.

    Parameters
    ----------
    dropout_args : WRITEME
        When given, forwarded as keyword arguments to
        ``model.dropout_fprop``; otherwise a plain ``model.fprop`` is used.
    """
    supervised = True

    def __init__(self, dropout_args=None):
        # Store all constructor arguments as attributes in one go.
        self.__dict__.update(locals())
        del self.self

    @wraps(Cost.expr)
    def expr(self, model, data):
        space, sources = self.get_data_specs(model)
        space.validate(data)
        (X, Y) = data
        if self.dropout_args:
            Y_hat = model.dropout_fprop(X, **self.dropout_args)
        else:
            Y_hat = model.fprop(X)
        costMatrix = model.layers[-1].cost_matrix(Y, Y_hat)
        # This sets to zero all elements where Y == -1, so missing targets
        # contribute nothing to the total cost.
        costMatrix *= T.neq(Y, -1)
        return model.cost_from_cost_matrix(costMatrix)

    @wraps(Cost.get_data_specs)
    def get_data_specs(self, model):
        # (input, target) composite specification for supervised training.
        space = CompositeSpace([model.get_input_space(),
                                model.get_output_space()])
        sources = (model.get_input_source(), model.get_target_source())
        return (space, sources)
|
grahamu/django-test-plus | refs/heads/master | test_plus/test.py | 1 | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.core.exceptions import ImproperlyConfigured
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import Q
from django.shortcuts import resolve_url
from django.test import RequestFactory, signals, TestCase as DjangoTestCase
from django.test.client import store_rendered_templates
from django.test.utils import CaptureQueriesContext
from django.utils.functional import curry
from .compat import reverse, NoReverseMatch, APIClient
class NoPreviousResponse(Exception):
    # Name suggests it signals a missing `last_response`; no raising site is
    # visible in this chunk -- part of the module's public API.
    pass
# Build a real context
User = get_user_model()
CAPTURE = True
class _AssertNumQueriesLessThanContext(CaptureQueriesContext):
    # Context manager that fails the owning test case if more than `num`
    # queries were executed on `connection` while the block ran.

    def __init__(self, test_case, num, connection):
        self.test_case = test_case
        self.num = num
        super(_AssertNumQueriesLessThanContext, self).__init__(connection)

    def __exit__(self, exc_type, exc_value, traceback):
        super(_AssertNumQueriesLessThanContext, self).__exit__(exc_type, exc_value, traceback)
        if exc_type is not None:
            # An exception escaped the block; let it propagate unchanged.
            return
        # len(self) is the number of captured queries (CaptureQueriesContext).
        executed = len(self)
        self.test_case.assertTrue(
            executed < self.num, "%d queries executed, expected less than %d" % (
                executed, self.num
            )
        )
class login(object):
    """
    A useful login context for Django tests. If the first argument is
    a User, we will login with that user's username. If no password is
    given we will use 'password'.
    """
    def __init__(self, testcase, *args, **credentials):
        self.testcase = testcase

        if args and isinstance(args[0], User):
            # Pull the username off the given User via its USERNAME_FIELD.
            USERNAME_FIELD = getattr(User, 'USERNAME_FIELD', 'username')
            credentials.update({
                USERNAME_FIELD: getattr(args[0], USERNAME_FIELD),
            })

        # NOTE(review): any falsy password (including an explicit empty
        # string) is replaced by the default 'password'.
        if not credentials.get('password', False):
            credentials['password'] = 'password'

        success = testcase.client.login(**credentials)
        self.testcase.assertTrue(
            success,
            "login failed with credentials=%r" % (credentials)
        )

    def __enter__(self):
        pass

    def __exit__(self, *args):
        # Always log out on exit so auth state does not leak between blocks.
        self.testcase.client.logout()
class TestCase(DjangoTestCase):
"""
Django TestCase with helpful additional features
"""
user_factory = None
    def __init__(self, *args, **kwargs):
        # Tracks the most recent response made through request(); it is the
        # implicit argument of the response_XXX assertion helpers.
        self.last_response = None
        super(TestCase, self).__init__(*args, **kwargs)
    def tearDown(self):
        # Log the test client out after every test so authentication state
        # never leaks between test methods.
        self.client.logout()
    def print_form_errors(self, response_or_form=None):
        """A utility method for quickly debugging responses with form errors."""
        # Default to the last response captured by request().
        if response_or_form is None:
            response_or_form = self.last_response

        # Accept either a form instance (has .errors) or an HTTP response
        # whose template context contains a 'form'.
        if hasattr(response_or_form, 'errors'):
            form = response_or_form
        elif hasattr(response_or_form, 'context'):
            form = response_or_form.context['form']
        else:
            raise Exception('print_form_errors requires the response_or_form argument to either be a Django http response or a form instance.')

        print(form.errors.as_text())
    def request(self, method_name, url_name, *args, **kwargs):
        """
        Request url by name using reverse() through method

        If reverse raises NoReverseMatch attempt to use it as a URL.
        """
        # Options are popped off kwargs; whatever remains is forwarded to
        # reverse() as URL keyword arguments.
        follow = kwargs.pop("follow", False)
        extra = kwargs.pop("extra", {})
        data = kwargs.pop("data", {})

        valid_method_names = [
            'get',
            'post',
            'put',
            'patch',
            'head',
            'trace',
            'options',
            'delete'
        ]

        if method_name in valid_method_names:
            method = getattr(self.client, method_name)
        else:
            raise LookupError("Cannot find the method {0}".format(method_name))

        try:
            self.last_response = method(reverse(url_name, args=args, kwargs=kwargs), data=data, follow=follow, **extra)
        except NoReverseMatch:
            # Fall back to treating url_name as a literal URL.
            self.last_response = method(url_name, data=data, follow=follow, **extra)

        # Cache the template context for convenient access in tests.
        self.context = self.last_response.context
        return self.last_response
    def get(self, url_name, *args, **kwargs):
        """Issue a GET request via ``request()``; returns the response."""
        return self.request('get', url_name, *args, **kwargs)
    def post(self, url_name, *args, **kwargs):
        """Issue a POST request via ``request()``; returns the response."""
        return self.request('post', url_name, *args, **kwargs)
    def put(self, url_name, *args, **kwargs):
        """Issue a PUT request via ``request()``; returns the response."""
        return self.request('put', url_name, *args, **kwargs)
    def patch(self, url_name, *args, **kwargs):
        """Issue a PATCH request via ``request()``; returns the response."""
        return self.request('patch', url_name, *args, **kwargs)
    def head(self, url_name, *args, **kwargs):
        """Issue a HEAD request via ``request()``; returns the response."""
        return self.request('head', url_name, *args, **kwargs)
# def trace(self, url_name, *args, **kwargs):
# if LooseVersion(django.get_version()) >= LooseVersion('1.8.2'):
# return self.request('trace', url_name, *args, **kwargs)
# else:
# raise LookupError("client.trace is not available for your version of django. Please\
# update your django version.")
def options(self, url_name, *args, **kwargs):
return self.request('options', url_name, *args, **kwargs)
def delete(self, url_name, *args, **kwargs):
return self.request('delete', url_name, *args, **kwargs)
def _which_response(self, response=None):
if response is None and self.last_response is not None:
return self.last_response
else:
return response
def response_200(self, response=None):
""" Given response has status_code 200 """
response = self._which_response(response)
self.assertEqual(response.status_code, 200)
def response_201(self, response=None):
""" Given response has status_code 201 """
response = self._which_response(response)
self.assertEqual(response.status_code, 201)
def response_301(self, response=None):
""" Given response has status_code 301 """
response = self._which_response(response)
self.assertEqual(response.status_code, 301)
def response_302(self, response=None):
""" Given response has status_code 302 """
response = self._which_response(response)
self.assertEqual(response.status_code, 302)
def response_400(self, response=None):
""" Given response has status_code 400 """
response = self._which_response(response)
self.assertEqual(response.status_code, 400)
def response_401(self, response=None):
""" Given response has status_code 401 """
response = self._which_response(response)
self.assertEqual(response.status_code, 401)
def response_403(self, response=None):
""" Given response has status_code 403 """
response = self._which_response(response)
self.assertEqual(response.status_code, 403)
def response_404(self, response=None):
""" Given response has status_code 404 """
response = self._which_response(response)
self.assertEqual(response.status_code, 404)
def response_405(self, response=None):
""" Given response has status_code 405 """
response = self._which_response(response)
self.assertEqual(response.status_code, 405)
def response_410(self, response=None):
""" Given response has status_code 410 """
response = self._which_response(response)
self.assertEqual(response.status_code, 410)
def get_check_200(self, url, *args, **kwargs):
""" Test that we can GET a page and it returns a 200 """
response = self.get(url, *args, **kwargs)
self.response_200(response)
return response
def assertLoginRequired(self, url, *args, **kwargs):
""" Ensure login is required to GET this URL """
response = self.get(url, *args, **kwargs)
reversed_url = reverse(url, args=args, kwargs=kwargs)
login_url = str(resolve_url(settings.LOGIN_URL))
expected_url = "{0}?next={1}".format(login_url, reversed_url)
self.assertRedirects(response, expected_url)
def login(self, *args, **credentials):
""" Login a user """
return login(self, *args, **credentials)
def reverse(self, name, *args, **kwargs):
""" Reverse a url, convenience to avoid having to import reverse in tests """
return reverse(name, args=args, kwargs=kwargs)
    def make_user(self, username='testuser', password='password', perms=None):
        """
        Build a user with <username> and password of 'password' for testing
        purposes.

        Uses ``self.user_factory`` (e.g. a factory_boy factory) when one is
        configured, otherwise falls back to ``User.objects.create_user``.

        ``perms`` is an optional iterable of permission specs, each either
        ``app_label.codename`` or ``app_label.*`` (grants every permission in
        the app).  Raises ``ImproperlyConfigured`` for specs without a dot.
        """
        if self.user_factory:
            # Respect custom user models whose username field isn't 'username'.
            USERNAME_FIELD = getattr(
                self.user_factory._meta.model, 'USERNAME_FIELD', 'username')
            test_user = self.user_factory(**{
                USERNAME_FIELD: username,
            })
            test_user.set_password(password)
            test_user.save()
        else:
            test_user = User.objects.create_user(
                username,
                '{0}@example.com'.format(username),
                password,
            )
        if perms:
            # Build one OR'd Q filter covering all requested permissions.
            _filter = Q()
            for perm in perms:
                if '.' not in perm:
                    raise ImproperlyConfigured(
                        'The permission in the perms argument needs to be either '
                        'app_label.codename or app_label.* (e.g. accounts.change_user or accounts.*)'
                    )
                # NOTE(review): a spec with two dots (e.g. 'a.b.c') raises
                # ValueError here rather than ImproperlyConfigured.
                app_label, codename = perm.split('.')
                if codename == '*':
                    _filter = _filter | Q(content_type__app_label=app_label)
                else:
                    _filter = _filter | Q(content_type__app_label=app_label, codename=codename)
            test_user.user_permissions.add(*list(Permission.objects.filter(_filter)))
        return test_user
def assertNumQueriesLessThan(self, num, *args, **kwargs):
func = kwargs.pop('func', None)
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertNumQueriesLessThanContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
def assertGoodView(self, url_name, *args, **kwargs):
"""
Quick-n-dirty testing of a given url name.
Ensures URL returns a 200 status and that generates less than 50
database queries.
"""
query_count = kwargs.pop('test_query_count', 50)
with self.assertNumQueriesLessThan(query_count):
response = self.get(url_name, *args, **kwargs)
self.response_200(response)
return response
def assertInContext(self, key):
if self.last_response is not None:
self.assertTrue(key in self.last_response.context)
else:
raise NoPreviousResponse("There isn't a previous response to query")
def assertResponseContains(self, text, response=None, html=True, **kwargs):
""" Convenience wrapper for assertContains """
response = self._which_response(response)
self.assertContains(response, text, html=html, **kwargs)
def assertResponseNotContains(self, text, response=None, html=True, **kwargs):
""" Convenience wrapper for assertNotContains """
response = self._which_response(response)
self.assertNotContains(response, text, html=html, **kwargs)
def assertResponseHeaders(self, headers, response=None):
"""
Check that the headers in the response are as expected.
Only headers defined in `headers` are compared, other keys present on
the `response` will be ignored.
:param headers: Mapping of header names to expected values
:type headers: :class:`collections.Mapping`
:param response: Response to check headers against
:type response: :class:`django.http.response.HttpResponse`
"""
response = self._which_response(response)
compare = {h: response.get(h) for h in headers}
self.assertEqual(compare, headers)
def get_context(self, key):
if self.last_response is not None:
self.assertTrue(key in self.last_response.context)
return self.last_response.context[key]
else:
raise NoPreviousResponse("There isn't a previous response to query")
def assertContext(self, key, value):
if self.last_response is not None:
self.assertEqual(self.last_response.context[key], value)
else:
raise NoPreviousResponse("There isn't a previous response to query")
class APITestCase(TestCase):
    """TestCase variant that issues requests through ``APIClient``.

    NOTE(review): ``APIClient`` is presumably Django REST framework's client
    -- confirm against the imports at the top of this file.
    """
    client_class = APIClient
# Note this class inherits from TestCase defined above.
class CBVTestCase(TestCase):
    """
    Directly calls class-based generic view methods,
    bypassing the Django test Client.

    This process bypasses middleware invocation and URL resolvers.

    Example usage:

        from myapp.views import MyClass

        class MyClassTest(CBVTestCase):

            def test_special_method(self):
                request = RequestFactory().get('/')
                instance = self.get_instance(MyClass, request=request)

                # invoke a MyClass method
                result = instance.special_method()

                # make assertions
                self.assertTrue(result)
    """

    def get_instance(self, cls, *args, **kwargs):
        """
        Returns a decorated instance of a class-based generic view class.

        Use `initkwargs` to set expected class attributes.
        For example, set the `object` attribute on MyDetailView class:

            instance = self.get_instance(MyDetailView, initkwargs={'object': obj}, request)

        because SingleObjectMixin (part of generic.DetailView)
        expects self.object to be set before invoking get_context_data().

        Pass a "request" kwarg in order for your tests to have particular
        request attributes.
        """
        initkwargs = kwargs.pop('initkwargs', None)
        request = kwargs.pop('request', None)
        if initkwargs is None:
            initkwargs = {}
        instance = cls(**initkwargs)
        instance.request = request
        # Remaining positional/keyword args stand in for URLconf args/kwargs.
        instance.args = args
        instance.kwargs = kwargs
        return instance

    def get(self, cls, *args, **kwargs):
        """
        Calls cls.get() method after instantiating view class.
        Renders view templates and sets context if appropriate.

        NOTE: unlike ``TestCase.get``, the first argument here is a view
        *class*, not a URL name.
        """
        instance = self.get_instance(cls, *args, **kwargs)
        if not instance.request:
            # Use a basic request
            instance.request = RequestFactory().get('/')
        self.last_response = self.get_response(instance.request, instance.get)
        self.context = self.last_response.context
        return self.last_response

    def post(self, cls, *args, **kwargs):
        """
        Calls cls.post() method after instantiating view class.
        Renders view templates and sets context if appropriate.

        The optional ``data`` kwarg becomes the POST payload.
        """
        data = kwargs.pop('data', None)
        if data is None:
            data = {}
        instance = self.get_instance(cls, *args, **kwargs)
        if not instance.request:
            # Use a basic request
            instance.request = RequestFactory().post('/', data)
        self.last_response = self.get_response(instance.request, instance.post)
        self.context = self.last_response.context
        return self.last_response

    def get_response(self, request, view_func):
        """
        Obtain response from view class method (typically get or post).

        No middleware is invoked, but templates are rendered
        and context saved if appropriate.
        """
        # Curry a data dictionary into an instance of
        # the template renderer callback function.
        # NOTE(review): `curry` is presumably django.utils.functional.curry,
        # removed in Django 3.0 -- confirm the import at the top of this file.
        data = {}
        on_template_render = curry(store_rendered_templates, data)
        signal_uid = "template-render-%s" % id(request)
        signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
        try:
            response = view_func(request)
            if hasattr(response, 'render') and callable(response.render):
                response = response.render()
                # Add any rendered template detail to the response.
                response.templates = data.get("templates", [])
                response.context = data.get("context")
            else:
                response.templates = None
                response.context = None
            return response
        finally:
            # Always disconnect the signal handler, even if the view raised.
            signals.template_rendered.disconnect(dispatch_uid=signal_uid)

    def get_check_200(self, url, *args, **kwargs):
        """ Test that we can GET a page and it returns a 200 """
        # super() resolves to TestCase.get(), so `url` is a URL *name* here,
        # not a view class.
        response = super(CBVTestCase, self).get(url, *args, **kwargs)
        self.response_200(response)
        return response

    def assertLoginRequired(self, url, *args, **kwargs):
        """ Ensure login is required to GET this URL """
        # Uses TestCase.get() (URL-name based) via super().
        response = super(CBVTestCase, self).get(url, *args, **kwargs)
        reversed_url = reverse(url, args=args, kwargs=kwargs)
        login_url = str(resolve_url(settings.LOGIN_URL))
        expected_url = "{0}?next={1}".format(login_url, reversed_url)
        self.assertRedirects(response, expected_url)

    def assertGoodView(self, url_name, *args, **kwargs):
        """
        Quick-n-dirty testing of a given view.
        Ensures view returns a 200 status and that generates less than 50
        database queries.
        """
        query_count = kwargs.pop('test_query_count', 50)
        with self.assertNumQueriesLessThan(query_count):
            response = super(CBVTestCase, self).get(url_name, *args, **kwargs)
        self.response_200(response)
        return response
|
WelcomeHUME/svn-caucho-com-resin | refs/heads/master | doc.py | 5 | #! /usr/bin/env python
import argparse as ap
import os
import sys
import re
from string import join
from glob import glob
#re_title_element = re.compile(r'<title>(.*)</title>')
#re_description = re.compile(r'<description>(.*)</description>')
#re_product = re.compile(r'<description>(.*)</description>')
re_title_attribute = re.compile(r'title=["|\'](.*)["|\']')
re_viewfile_link = re.compile(r'<viewfile-link\s+file=["|\'](.*)["|\']\s*/>')
re_link = re.compile(r'<a\s+href=["|\'](.*)["|\']\s*>(.*)</a>')
def die(msg, code):
    """Print *msg* and terminate the process with exit status *code*."""
    print(msg)
    sys.exit(code)
def read_tag(line, f, tag):
    """Collect the text content of a simple XML element.

    When the closing tag already sits on *line*, the text between the
    tags is returned stripped.  Otherwise further lines are read from
    *f* until the closing tag appears; those lines (excluding the one
    carrying the closing tag) are concatenated after a single space.
    """
    opening = "<%s>" % tag
    closing = "</%s>" % tag
    if closing in line:
        # Single-line element: drop both tags and trim whitespace.
        return line.replace(closing, " ").replace(opening, " ").strip()
    pieces = [" "]
    while line:
        if closing in line:
            break
        line = f.readline()
        if closing in line:
            break
        pieces.append(line)
    return "".join(pieces)
def read_title(line):
    """Extract the value of a ``title="..."`` attribute from *line*, or ''."""
    if "title" not in line:
        return ""
    match = re_title_attribute.search(line)
    return match.group(1) if match else ""
def replace_line(line):
    """Rewrite one line of xtp markup into wiki markup.

    Converts <var>/<deftable> tags, <viewfile-link> elements and HTML
    anchors to their wiki equivalents.
    """
    substitutions = (
        ("<var>", "'''''"),
        ("</var>", "'''''"),
        ("<deftable>", "<table>"),
        ("</deftable>", "</table>"),
    )
    for old, new in substitutions:
        line = line.replace(old, new)
    if "<viewfile-link" in line:
        match = re_viewfile_link.search(line)
        if match:
            line = line.replace(match.group(0), "<code>%s</code>" % match.group(1))
    if "<a" in line:
        match = re_link.search(line)
        if match:
            line = line.replace(match.group(0), "[%s %s]" % (match.group(1), match.group(2)))
    return line
def example_extract(line, content, f):
    """If *line* opens an <example>, consume it from *f* into a wiki <pre>
    block appended to *content*; otherwise pass (line, content) through.

    Returns a (next_line, content) tuple.
    NOTE(review): if EOF is hit inside an example the function falls off the
    end and returns None, which the callers then fail to unpack -- confirm.
    """
    if "<example" in line:
        title = read_title(line)
        if verbose: print "in example " + title
        content = content + "====%s====\n" % title
        content = content + "<pre>\n"
        while line:
            line = f.readline()
            if "</example>" in line:
                content = content + "</pre>\n"
                return f.readline(), content
            # HTML-escape the example body, then restore literal </pre> tags
            # that the escaping just mangled.
            line = line.replace("<", "&lt;")
            line = line.replace(">", "&gt;")
            line = line.replace("&lt;/pre&gt;", "</pre>")
            content = content + line
        if verbose: print "done with example " + title
    else:
        return line, content
def result_extract(line, content, f):
    """If *line* opens a <results> element, copy its body verbatim (no
    escaping, unlike example_extract) into a wiki <pre> block.

    Returns a (next_line, content) tuple.
    NOTE(review): like example_extract, EOF inside the element makes this
    return None -- confirm callers never hit that case.
    """
    if "<results" in line:
        if verbose: print "in results "
        content = content + "====%s====\n" % "results"
        content = content + "<pre>\n"
        while line:
            line = f.readline()
            if "</results>" in line:
                content = content + "</pre>\n"
                return f.readline(), content
            content = content + line
    else: return line, content
class Document:
    """Top-level parse node for an xtp file: holds a Header and a Body."""

    def read_file(self, line, f):
        """Scan *f* for <header> and <body> elements and parse each one."""
        if verbose: print "read document"
        while line:
            line = f.readline()
            if "<header>" in line:
                if verbose: print "read header"
                self.header = Header()
                self.header.read_file(line, f)
            elif "<body>" in line:
                if verbose: print "read body"
                self.body = Body()
                self.body.read_file(line, f)
class Header:
    """Parsed <header> element: carries the document title and an optional
    description."""

    def __init__(self):
        self.description = None

    def read_file(self, line, f):
        """Consume lines from *f* until </header>, capturing title and
        description via read_tag()."""
        while line:
            line = f.readline()
            if "<title>" in line:
                self.title = read_tag(line, f, "title")
            elif "<description>" in line:
                self.description = read_tag(line, f, "description")
            elif "</header>" in line:
                break
class Body:
    """Parsed <body> element: a summary plus a list of Section objects."""

    def read_file(self, line, f):
        """Consume lines from *f* until </body>, collecting <s1> sections
        and the <summary>."""
        sections = []
        while line:
            line = f.readline()
            line = replace_line(line)
            if "<s1" in line:
                if verbose: print "reading section 1"
                section = Section()
                section.read_file(line, f)
                sections.append(section)
                # NOTE(review): missing `print` -- this line is a no-op string
                # expression, so the debug message is never emitted.
                if verbose: "done reading section 1"
            if "<summary>" in line:
                if verbose: print "reading summary"
                self.summary = read_tag(line, f, "summary")
                if verbose: print "done reading summary"
            if "</body>" in line:
                break
        self.sections = sections
class Section:
    """Parsed <s1> section: a title plus accumulated wiki-markup content,
    with nested <s2>/<s3> subsections flattened into the content string."""

    def read_file(self, line, f):
        """Consume lines from *f* until </s1>, converting markup as it goes."""
        self.title = read_title(line)
        if verbose: print "section title " + self.title
        content = ""
        while line:
            line = f.readline()
            line = replace_line(line)
            # example/result extractors either pass (line, content) through or
            # swallow a whole <example>/<results> element.
            line,content = example_extract(line, content, f)
            #print ("is tuple example", type(line))
            line,content = result_extract(line, content,f)
            #print ("is tuple results", type(line))
            if "<s2" in line or "<s3" in line:
                title = read_title(line)
                if "<s2" in line:
                    content = content + "===%s===\n" % title
                if "<s3" in line:
                    content = content + "====%s====\n" % title
                content = content + "\n"
                # Inner loop: copy the subsection body until its closing tag.
                while line:
                    line = f.readline()
                    line = replace_line(line)
                    line,content = example_extract(line, content, f)
                    #print ("1", type(line))
                    line,content = result_extract(line, content,f)
                    #print ("2", type(line))
                    if "</s2>" in line:
                        break
                    if "</s3>" in line:
                        break
                    content = content + line
            elif "</s1>" in line:
                break
            else:
                content = content + line
        self.content = content
class FileProcessor:
def __init__(self, args):
self.verbose = args.verbose
self.files = args.files
if args.output:
self.output = args.output
def create_wiki_pages (self):
if self.verbose: print ('files', self.files)
for f in self.files:
files = glob(f)
for myfile in files:
self.create_wiki_page(myfile)
def create_wiki_page (self, file):
if self.verbose: print ('file', file)
if not os.path.exists(file):
die ("%s file does not exist" % file, 6)
f = open (file, 'r')
line = "used to read line by line from the file"
try:
while line:
line = f.readline()
#print (line)
if "<document>" in line:
doc = Document()
doc.read_file(line, f)
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
finally:
f.close()
global outputfile
if self.output:
if not outputfile:
outputfile = open(self.output, 'w')
def printit(it):
global outputfile
if self.output:
outputfile.write(it + "\n")
else:
print (it)
if verbose: print ("\n" * 5)
if verbose: print ("DOC:", doc.header.title, file)
printit ("=%s=" % doc.header.title)
if doc.header.description:
printit(doc.header.description)
for section in doc.body.sections:
if section.title:
printit("==%s==" % section.title)
printit (section.content)
# Module-wide verbosity flag; set from parsed CLI args in create_wiki_pages().
verbose = False
# Shared handle for the --output file; lazily opened in create_wiki_page().
outputfile = None
def create_wiki_pages(args):
    """Entry point for the --wiki action: convert every input file."""
    global verbose
    verbose = args.verbose
    FileProcessor(args).create_wiki_pages()
# Command-line interface: --wiki is both the default and the only action.
arg_parser = ap.ArgumentParser(description="Convert an xtp into a wiki page", epilog=
    """Generates files for Eclipse and Wiki.""")
arg_parser.add_argument('files', metavar='FILES', nargs="+",
    help="Files to process")
arg_parser.add_argument('--verbose', '-v', dest='verbose', action='store_true',
    help='Verbose mode')
arg_parser.add_argument('--wiki', '-w', dest='action', action='store_const',
    const=create_wiki_pages, default=create_wiki_pages,
    help='Create a wiki page based on a xtp test')
arg_parser.add_argument('--output', '-o', dest='output', action='store')
args = arg_parser.parse_args()
# Dispatch to the selected action with the parsed namespace.
args.action(args)
# Close the shared output file if one was opened (Python 2 print statement).
if outputfile:
    print "Done"
    outputfile.close()
|
nrwahl2/ansible | refs/heads/devel | test/integration/targets/module_utils/module_utils/sub/bam/bam.py | 298 | #!/usr/bin/env python
bam = "BAM FROM sub/bam/bam.py"
|
takaaptech/sky_engine | refs/heads/master | build/gypi_to_gn.py | 106 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Converts a given gypi file to a python scope and writes the result to stdout.
It is assumed that the file contains a toplevel dictionary, and this script
will return that dictionary as a GN "scope" (see example below). This script
does not know anything about GYP and it will not expand variables or execute
conditions.
It will strip conditions blocks.
A variables block at the top level will be flattened so that the variables
appear in the root dictionary. This way they can be returned to the GN code.
Say your_file.gypi looked like this:
{
'sources': [ 'a.cc', 'b.cc' ],
'defines': [ 'ENABLE_DOOM_MELON' ],
}
You would call it like this:
gypi_values = exec_script("//build/gypi_to_gn.py",
[ rebase_path("your_file.gypi") ],
"scope",
[ "your_file.gypi" ])
Notes:
- The rebase_path call converts the gypi file from being relative to the
current build file to being system absolute for calling the script, which
will have a different current directory than this file.
- The "scope" parameter tells GN to interpret the result as a series of GN
variable assignments.
- The last file argument to exec_script tells GN that the given file is a
dependency of the build so Ninja can automatically re-run GN if the file
changes.
Read the values into a target like this:
component("mycomponent") {
sources = gypi_values.sources
defines = gypi_values.defines
}
Sometimes your .gypi file will include paths relative to a different
directory than the current .gn file. In this case, you can rebase them to
be relative to the current directory.
sources = rebase_path(gypi_values.sources, ".",
"//path/gypi/input/values/are/relative/to")
This script will tolerate a 'variables' in the toplevel dictionary or not. If
the toplevel dictionary just contains one item called 'variables', it will be
collapsed away and the result will be the contents of that dictionary. Some
.gypi files are written with or without this, depending on how they expect to
be embedded into a .gyp file.
This script also has the ability to replace certain substrings in the input.
Generally this is used to emulate GYP variable expansion. If you passed the
argument "--replace=<(foo)=bar" then all instances of "<(foo)" in strings in
the input will be replaced with "bar":
gypi_values = exec_script("//build/gypi_to_gn.py",
[ rebase_path("your_file.gypi"),
"--replace=<(foo)=bar"],
"scope",
[ "your_file.gypi" ])
"""
import gn_helpers
from optparse import OptionParser
import sys
def LoadPythonDictionary(path):
    """Evaluate the .gypi file at *path* and return its toplevel dict.

    'variables' entries are flattened into the root; 'conditions',
    'target_conditions' and 'targets' are stripped.  Evaluation runs with
    __builtins__ disabled, but the input file is still trusted by design.
    (Python 2 `except X, e` syntax -- this script targets Python 2.)
    """
    file_string = open(path).read()
    try:
        file_data = eval(file_string, {'__builtins__': None}, None)
    except SyntaxError, e:
        # Point the traceback at the gypi file rather than this script.
        e.filename = path
        raise
    except Exception, e:
        raise Exception("Unexpected error while reading %s: %s" % (path, str(e)))
    assert isinstance(file_data, dict), "%s does not eval to a dictionary" % path
    # Flatten any variables to the top level.
    if 'variables' in file_data:
        file_data.update(file_data['variables'])
        del file_data['variables']
    # Strip any conditions.
    if 'conditions' in file_data:
        del file_data['conditions']
    if 'target_conditions' in file_data:
        del file_data['target_conditions']
    # Strip targets in the toplevel, since some files define these and we can't
    # slurp them in.
    if 'targets' in file_data:
        del file_data['targets']
    return file_data
def ReplaceSubstrings(values, search_for, replace_with):
    """Recursively replace occurrences of *search_for* with *replace_with*.

    Walks strings, lists and dicts (both keys and values). Values of any
    other type are returned unchanged.
    """
    if isinstance(values, str):
        return values.replace(search_for, replace_with)
    if isinstance(values, list):
        return [ReplaceSubstrings(item, search_for, replace_with)
                for item in values]
    if isinstance(values, dict):
        # Replace in both the keys and the values.
        return {
            ReplaceSubstrings(key, search_for, replace_with):
                ReplaceSubstrings(value, search_for, replace_with)
            for key, value in values.items()
        }
    # Assume everything else is unchanged.
    return values
def main():
parser = OptionParser()
parser.add_option("-r", "--replace", action="append",
help="Replaces substrings. If passed a=b, replaces all substrs a with b.")
(options, args) = parser.parse_args()
if len(args) != 1:
raise Exception("Need one argument which is the .gypi file to read.")
data = LoadPythonDictionary(args[0])
if options.replace:
# Do replacements for all specified patterns.
for replace in options.replace:
split = replace.split('=')
# Allow "foo=" to replace with nothing.
if len(split) == 1:
split.append('')
assert len(split) == 2, "Replacement must be of the form 'key=value'."
data = ReplaceSubstrings(data, split[0], split[1])
# Sometimes .gypi files use the GYP syntax with percents at the end of the
# variable name (to indicate not to overwrite a previously-defined value):
# 'foo%': 'bar',
# Convert these to regular variables.
for key in data:
if len(key) > 1 and key[len(key) - 1] == '%':
data[key[:-1]] = data[key]
del data[key]
print gn_helpers.ToGNString(data)
# Script entry point: report any failure on stdout and exit non-zero so GN's
# exec_script surfaces the error.  (Python 2 `except X, e` syntax.)
if __name__ == '__main__':
    try:
        main()
    except Exception, e:
        print str(e)
        sys.exit(1)
|
simplyguru-dot/ansible-modules-core | refs/heads/devel | web_infrastructure/django_manage.py | 55 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Scott Anderson <scottanderson42@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application.
description:
- Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
- The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag.
required: true
app_path:
description:
- The path to the root of the Django application where B(manage.py) lives.
required: true
settings:
description:
- The Python path to the application's settings module, such as 'myapp.settings'.
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory.
required: false
virtualenv:
description:
- An optional path to a I(virtualenv) installation to use while running the manage application.
required: false
apps:
description:
- A list of space-delimited apps to target. Used by the 'test' command.
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the 'createcachetable' command.
required: false
database:
description:
- The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands.
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the 'test' command.
required: false
default: "no"
choices: [ "yes", "no" ]
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command.
required: false
skip:
description:
- Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate)
required: false
version_added: "1.3"
merge:
description:
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command
required: false
version_added: "1.3"
link:
description:
- Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command
required: false
version_added: "1.3"
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings
- To be able to use the collectstatic command, you must have enabled staticfiles in your settings
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
- django_manage: command=cleanup app_path={{ django_dir }}
# Load the initial_data fixture into the application
- django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }}
# Run syncdb on the application
- django_manage: >
command=syncdb
app_path={{ django_dir }}
settings={{ settings_app_name }}
pythonpath={{ settings_dir }}
virtualenv={{ virtualenv_dir }}
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest
# Create an initial superuser.
- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }}
"""
import os
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(os.path.expanduser(venv_param), 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
virtualenv = module.get_bin_path('virtualenv', True)
vcmd = '%s %s' % (virtualenv, venv_param)
vcmd = [virtualenv, venv_param]
rc, out_venv, err_venv = module.run_command(vcmd)
if rc != 0:
_fail(module, vcmd, out_venv, err_venv)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_filter_output(line):
    """True for lines indicating real work (the table did not already exist)."""
    return line.find("Already exists") == -1
def flush_filter_output(line):
    """True when the line reports a non-zero number of installed objects."""
    if "Installed" not in line:
        return False
    return "Installed 0 object" not in line
def loaddata_filter_output(line):
    """True when loaddata reported a non-zero number of installed objects."""
    return ("Installed" in line) and not ("Installed 0 object" in line)
def syncdb_filter_output(line):
    """True for lines showing table creation or non-zero object installs."""
    if "Creating table " in line:
        return True
    return "Installed" in line and "Installed 0 object" not in line
def migrate_filter_output(line):
    """True for lines indicating migrations were applied or objects installed."""
    for marker in ("Migrating forwards ", "Applying"):
        if marker in line:
            return True
    return "Installed" in line and "Installed 0 object" not in line
def collectstatic_filter_output(line):
return "0 static files" not in line
def main():
    """Validate parameters, build the './manage.py <command> ...' invocation,
    run it (inside the optional virtualenv) and report changed/ok status."""
    # Which extra parameters each built-in command accepts.
    command_allowed_param_map = dict(
        cleanup=(),
        createcachetable=('cache_table', 'database', ),
        flush=('database', ),
        loaddata=('database', 'fixtures', ),
        syncdb=('database', ),
        test=('failfast', 'testrunner', 'liveserver', 'apps', ),
        validate=(),
        migrate=('apps', 'skip', 'merge', 'database',),
        collectstatic=('clear', 'link', ),
    )

    command_required_param_map = dict(
        loaddata=('fixtures', ),
        createcachetable=('cache_table', ),
    )

    # forces --noinput on every command that needs it
    noinput_commands = (
        'flush',
        'syncdb',
        'migrate',
        'test',
        'collectstatic',
    )

    # These params are allowed for certain commands only
    specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')

    # These params are automatically added to the command if present
    general_params = ('settings', 'pythonpath', 'database',)
    specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
    end_of_command_params = ('apps', 'cache_table', 'fixtures')

    module = AnsibleModule(
        argument_spec=dict(
            command     = dict(default=None, required=True),
            app_path    = dict(default=None, required=True),
            settings    = dict(default=None, required=False),
            pythonpath  = dict(default=None, required=False, aliases=['python_path']),
            virtualenv  = dict(default=None, required=False, aliases=['virtual_env']),

            apps        = dict(default=None, required=False),
            cache_table = dict(default=None, required=False),
            clear       = dict(default=None, required=False, type='bool'),
            database    = dict(default=None, required=False),
            failfast    = dict(default='no', required=False, type='bool', aliases=['fail_fast']),
            fixtures    = dict(default=None, required=False),
            liveserver  = dict(default=None, required=False, aliases=['live_server']),
            testrunner  = dict(default=None, required=False, aliases=['test_runner']),
            skip        = dict(default=None, required=False, type='bool'),
            merge       = dict(default=None, required=False, type='bool'),
            link        = dict(default=None, required=False, type='bool'),
        ),
    )

    command = module.params['command']
    app_path = os.path.expanduser(module.params['app_path'])
    virtualenv = module.params['virtualenv']

    # Reject command-specific params that don't apply to this command.
    # NOTE(review): for a non-built-in command this raises KeyError on
    # command_allowed_param_map[command] when such a param is set -- confirm.
    for param in specific_params:
        value = module.params[param]
        if param in specific_boolean_params:
            value = module.boolean(value)
        if value and param not in command_allowed_param_map[command]:
            module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))

    for param in command_required_param_map.get(command, ()):
        if not module.params[param]:
            module.fail_json(msg='%s param is required for command=%s' % (param, command))

    _ensure_virtualenv(module)

    # Build the command line piece by piece.
    cmd = "./manage.py %s" % (command, )

    if command in noinput_commands:
        cmd = '%s --noinput' % cmd

    for param in general_params:
        if module.params[param]:
            cmd = '%s --%s=%s' % (cmd, param, module.params[param])

    for param in specific_boolean_params:
        if module.boolean(module.params[param]):
            cmd = '%s --%s' % (cmd, param)

    # these params always get tacked on the end of the command
    for param in end_of_command_params:
        if module.params[param]:
            cmd = '%s %s' % (cmd, module.params[param])

    rc, out, err = module.run_command(cmd, cwd=os.path.expanduser(app_path))
    if rc != 0:
        # 'createcachetable' failing because the table exists is not an error.
        if command == 'createcachetable' and 'table' in err and 'already exists' in err:
            out = 'Already exists.'
        else:
            if "Unknown command:" in err:
                # NOTE(review): arguments are shifted relative to _fail's
                # (module, cmd, out, err) signature -- stderr is passed as
                # `out` and the message as `err`, so the labels in the
                # resulting failure message are misleading.
                _fail(module, cmd, err, "Unknown django command: %s" % command)
            # NOTE(review): `sys` is only in scope via the star-import from
            # ansible.module_utils.basic at the bottom of the file -- confirm.
            _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)

    changed = False

    # A command counts as "changed" when its filter function (named
    # '<command>_filter_output') keeps at least one output line.
    lines = out.split('\n')
    filt = globals().get(command + "_filter_output", None)
    if filt:
        filtered_output = filter(filt, out.split('\n'))
        if len(filtered_output):
            changed = filtered_output

    module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
        settings=module.params['settings'], pythonpath=module.params['pythonpath'])
# import module snippets
# NOTE: this star-import also supplies names used above (e.g. sys).
from ansible.module_utils.basic import *
main()
|
harry-ops/opencloud | refs/heads/master | webvirtmgr/storages/views.py | 1 | from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from servers.models import Compute
from storages.forms import AddStgPool, AddImage, CloneImage
from vrtManager.storage import wvmStorage, wvmStorages
from libvirt import libvirtError
def storages(request, host_id):
    """
    Storage pool block

    List the libvirt storage pools of one compute host and handle the
    "create pool" form (plain dir, netfs and ceph/rbd pool types).
    Renders 'storages.html' with locals(): the template reads
    'storages', 'secrets', 'errors' and the form from there.
    """
    # Anonymous users are bounced to the login page.
    if not request.user.is_authenticated():
        return HttpResponseRedirect(reverse('login'))
    errors = []
    compute = Compute.objects.get(id=host_id)
    try:
        # Open a libvirt connection to the selected host.
        conn = wvmStorages(compute.hostname,
                           compute.login,
                           compute.password,
                           compute.type)
        storages = conn.get_storages_info()
        secrets = conn.get_secrets()
        if request.method == 'POST':
            if 'create' in request.POST:
                form = AddStgPool(request.POST)
                if form.is_valid():
                    data = form.cleaned_data
                    # Validate before touching libvirt; collect all
                    # messages so the template can show every problem.
                    if data['name'] in storages:
                        msg = _("Pool name already use")
                        errors.append(msg)
                    if data['stg_type'] == 'rbd':
                        # Ceph pools additionally require a libvirt
                        # secret plus connection details.
                        if not data['secret']:
                            msg = _("You need create secret for pool")
                            errors.append(msg)
                        if not data['ceph_pool'] and not data['ceph_host'] and not data['ceph_user']:
                            msg = _("You need input all fields for creating ceph pool")
                            errors.append(msg)
                    if not errors:
                        # Dispatch on pool type; on success jump to the
                        # detail page of the new pool.
                        if data['stg_type'] == 'rbd':
                            conn.create_storage_ceph(data['stg_type'], data['name'],
                                                     data['ceph_pool'], data['ceph_host'],
                                                     data['ceph_user'], data['secret'])
                        elif data['stg_type'] == 'netfs':
                            conn.create_storage_netfs(data['stg_type'], data['name'],
                                                      data['netfs_host'], data['source'],
                                                      data['source_format'], data['target'])
                        else:
                            conn.create_storage(data['stg_type'], data['name'], data['source'], data['target'])
                        return HttpResponseRedirect(reverse('storage', args=[host_id, data['name']]))
        conn.close()
    except libvirtError as err:
        # Surface libvirt failures to the template instead of a 500.
        errors.append(err)
    return render_to_response('storages.html', locals(), context_instance=RequestContext(request))
def storage(request, host_id, pool):
    """
    Storage pool block

    Detail view for a single storage pool: shows state/size/volumes and
    handles the pool actions posted from the page (start, stop, delete,
    autostart toggles, volume create/delete/clone, ISO upload).
    Renders 'storage.html' with locals().
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect(reverse('login'))

    def handle_uploaded_file(path, f_name):
        # Stream the uploaded file chunk-by-chunk into the pool's
        # target directory (avoids loading large ISOs into memory).
        target = path + '/' + str(f_name)
        destination = open(target, 'wb+')
        for chunk in f_name.chunks():
            destination.write(chunk)
        destination.close()

    errors = []
    compute = Compute.objects.get(id=host_id)
    meta_prealloc = False
    try:
        conn = wvmStorage(compute.hostname,
                          compute.login,
                          compute.password,
                          compute.type,
                          pool)
        storages = conn.get_storages()
        state = conn.is_active()
        size, free = conn.get_size()
        used = (size - free)
        # Usage percentage only makes sense for an active pool.
        if state:
            percent = (used * 100) / size
        else:
            percent = 0
        status = conn.get_status()
        path = conn.get_target_path()
        # NOTE(review): 'type' shadows the builtin; kept because the
        # template expects a variable with exactly this name.
        type = conn.get_type()
        autostart = conn.get_autostart()
        if state:
            # Refresh so newly added files show up in the volume list.
            conn.refresh()
            volumes = conn.update_volumes()
        else:
            volumes = None
    except libvirtError as err:
        errors.append(err)

    # NOTE(review): if the connection above failed, 'conn' (and 'path')
    # are unbound here and the POST handlers below would raise
    # NameError — confirm whether that code path is reachable.
    if request.method == 'POST':
        if 'start' in request.POST:
            try:
                conn.start()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'stop' in request.POST:
            try:
                conn.stop()
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'delete' in request.POST:
            try:
                conn.delete()
                return HttpResponseRedirect(reverse('storages', args=[host_id]))
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'set_autostart' in request.POST:
            try:
                conn.set_autostart(1)
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'unset_autostart' in request.POST:
            try:
                conn.set_autostart(0)
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'add_volume' in request.POST:
            form = AddImage(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                # Metadata preallocation is only meaningful for qcow2.
                if data['meta_prealloc'] and data['format'] == 'qcow2':
                    meta_prealloc = True
                try:
                    conn.create_volume(data['name'], data['size'], data['format'], meta_prealloc)
                    return HttpResponseRedirect(request.get_full_path())
                except libvirtError as err:
                    errors.append(err)
        if 'del_volume' in request.POST:
            volname = request.POST.get('volname', '')
            try:
                vol = conn.get_volume(volname)
                vol.delete(0)
                return HttpResponseRedirect(request.get_full_path())
            except libvirtError as error_msg:
                errors.append(error_msg.message)
        if 'iso_upload' in request.POST:
            # Refuse to overwrite an existing image of the same name.
            if str(request.FILES['file']) in conn.update_volumes():
                msg = _("ISO image already exist")
                errors.append(msg)
            else:
                handle_uploaded_file(path, request.FILES['file'])
                return HttpResponseRedirect(request.get_full_path())
        if 'cln_volume' in request.POST:
            form = CloneImage(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                img_name = data['name'] + '.img'
                meta_prealloc = 0
                if img_name in conn.update_volumes():
                    msg = _("Name of volume name already use")
                    errors.append(msg)
                if not errors:
                    # Optional format conversion while cloning.
                    if data['convert']:
                        format = data['format']
                        if data['meta_prealloc'] and data['format'] == 'qcow2':
                            meta_prealloc = True
                    else:
                        format = None
                    try:
                        conn.clone_volume(data['image'], data['name'], format, meta_prealloc)
                        return HttpResponseRedirect(request.get_full_path())
                    except libvirtError as err:
                        errors.append(err)
    conn.close()
    return render_to_response('storage.html', locals(), context_instance=RequestContext(request))
|
cdht/androguard | refs/heads/master | androguard/core/analysis/sign.py | 22 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from androguard.core.analysis.analysis import TAINTED_PACKAGE_CREATE, TAINTED_PACKAGE_CALL
from androguard.core.bytecodes import dvm
# Extra package-access kind (beyond CREATE/CALL imported above): a call
# whose destination class belongs to the analysed application itself.
TAINTED_PACKAGE_INTERNAL_CALL = 2
# Signature encoding of a field access: read -> 0, write -> 1.
FIELD_ACCESS = { "R" : 0, "W" : 1 }
# Signature encoding of a package access kind.
PACKAGE_ACCESS = { TAINTED_PACKAGE_CREATE : 0, TAINTED_PACKAGE_CALL : 1, TAINTED_PACKAGE_INTERNAL_CALL : 2 }
class Sign(object):
    """Signature values of one method, stored per level.

    Levels are remembered in insertion order so the concatenated
    signature string is reproducible.
    """

    def __init__(self):
        # Mapping of level name -> signature value, plus the order in
        # which the levels were added.
        self.levels = {}
        self.hlevels = []

    def add(self, level, value):
        """Store *value* under *level* and remember the insertion order."""
        self.levels[level] = value
        self.hlevels.append(level)

    def get_level(self, l):
        """Return the value of numeric level *l* (stored under key "L<l>")."""
        return self.levels["L%d" % l]

    def get_string(self):
        """Return all stored values concatenated in insertion order."""
        return "".join(self.levels[name] for name in self.hlevels)

    def get_list(self):
        """Return the value stored under the special "sequencebb" level."""
        return self.levels["sequencebb"]
class Signature(object):
    """Builds textual "signatures" of methods from an androguard
    analysis object (vmx), for use in similarity/matching.

    Each signature level (L0..L3, hex, sequencebb) encodes a different
    view of a method: basic-block structure, strings, exceptions, etc.
    Results are memoized in the internal caches.
    """

    def __init__(self, vmx):
        self.vmx = vmx
        self.tainted_packages = self.vmx.get_tainted_packages()
        self.tainted_variables = self.vmx.get_tainted_variables()
        # Memoization caches; _global_cached holds per-method partial
        # results shared between levels.
        self._cached_signatures = {}
        self._cached_fields = {}
        self._cached_packages = {}
        self._global_cached = {}
        # Level name -> extractor method names used to build that level.
        self.levels = {
            # Classical method signature with basic blocks, strings, fields, packages
            "L0" : {
                0 : ( "_get_strings_a", "_get_fields_a", "_get_packages_a" ),
                1 : ( "_get_strings_pa", "_get_fields_a", "_get_packages_a" ),
                2 : ( "_get_strings_a", "_get_fields_a", "_get_packages_pa_1" ),
                3 : ( "_get_strings_a", "_get_fields_a", "_get_packages_pa_2" ),
            },
            # strings
            "L1" : [ "_get_strings_a1" ],
            # exceptions
            "L2" : [ "_get_exceptions" ],
            # fill array data
            "L3" : [ "_get_fill_array_data" ],
        }
        self.classes_names = None
        self._init_caches()

    def _get_method_info(self, m):
        """Return a unique "class-name-descriptor" key for a method."""
        m1 = m.get_method()
        return "%s-%s-%s" % (m1.get_class_name(), m1.get_name(), m1.get_descriptor())

    def _get_sequence_bb(self, analysis_method):
        """Return a list of opcode-name strings, one per basic block
        that has more than 5 instructions."""
        l = []
        for i in analysis_method.basic_blocks.get():
            buff = ""
            instructions = [j for j in i.get_instructions()]
            # Short blocks carry little signal; skip them.
            if len(instructions) > 5:
                for ins in instructions:
                    buff += ins.get_name()
            if buff != "":
                l.append( buff )
        return l

    def _get_hex(self, analysis_method):
        """Return the method's bytecode as a cleaned hex-like string."""
        code = analysis_method.get_method().get_code()
        if code == None:
            return ""
        buff = ""
        for i in code.get_bc().get_instructions():
            buff += dvm.clean_name_instruction( i )
            buff += dvm.static_operand_instruction( i )
        return buff

    def _get_bb(self, analysis_method, functions, options):
        """Build the per-basic-block signature strings for L0.

        Each block becomes "B[...]" where the interior is the sorted
        merge of control-flow markers (R/I/G) and the events produced
        by the extractor *functions* that fall inside the block.
        """
        bbs = []
        for b in analysis_method.basic_blocks.get():
            l = []
            l.append( (b.start, "B") )
            l.append( (b.start, "[") )
            internal = []
            # Classify the block's last instruction by opcode range.
            op_value = b.get_last().get_op_value()
            # return
            if op_value >= 0x0e and op_value <= 0x11:
                internal.append( (b.end-1, "R") )
            # if
            elif op_value >= 0x32 and op_value <= 0x3d:
                internal.append( (b.end-1, "I") )
            # goto
            elif op_value >= 0x28 and op_value <= 0x2a:
                internal.append( (b.end-1, "G") )
            # sparse or packed switch
            elif op_value >= 0x2b and op_value <= 0x2c:
                internal.append( (b.end-1, "G") )
            for f in functions:
                # Some extractors take options, some do not; fall back
                # on TypeError to the single-argument form.
                try:
                    internal.extend( getattr( self, f )( analysis_method, options ) )
                except TypeError:
                    internal.extend( getattr( self, f )( analysis_method ) )
            internal.sort()
            # Keep only the events located inside this block.
            for i in internal:
                if i[0] >= b.start and i[0] < b.end:
                    l.append( i )
            del internal
            l.append( (b.end, "]") )
            bbs.append( ''.join(i[1] for i in l) )
        return bbs

    def _init_caches(self):
        """Assign a stable integer id to every known field and package
        (sorted by name), filling the lookup caches once."""
        if self._cached_fields == {}:
            for f_t, f in self.tainted_variables.get_fields():
                self._cached_fields[ f ] = f_t.get_paths_length()
            n = 0
            for f in sorted( self._cached_fields ):
                self._cached_fields[ f ] = n
                n += 1
        if self._cached_packages == {}:
            for m_t, m in self.tainted_packages.get_packages():
                self._cached_packages[ m ] = m_t.get_paths_length()
            n = 0
            for m in sorted( self._cached_packages ):
                self._cached_packages[ m ] = n
                n += 1

    def _get_fill_array_data(self, analysis_method):
        """Return the concatenated escaped bytes of every
        FILL-ARRAY-DATA payload in the method (level L3)."""
        buff = ""
        for b in analysis_method.basic_blocks.get():
            for i in b.get_instructions():
                if i.get_name() == "FILL-ARRAY-DATA":
                    buff_tmp = i.get_operands()
                    for j in range(0, len(buff_tmp)):
                        buff += "\\x%02x" % ord( buff_tmp[j] )
        return buff

    def _get_exceptions(self, analysis_method):
        """Return the concatenated type names of every exception
        handler in the method (level L2)."""
        buff = ""
        method = analysis_method.get_method()
        code = method.get_code()
        if code == None or code.get_tries_size() <= 0:
            return buff
        handler_catch_list = code.get_handlers()
        for handler_catch in handler_catch_list.get_list():
            for handler in handler_catch.get_handlers():
                buff += analysis_method.get_vm().get_cm_type( handler.get_type_idx() )
        return buff

    def _get_strings_a1(self, analysis_method):
        """Return every string used by the method, concatenated, with
        newlines flattened to spaces (level L1)."""
        buff = ""
        strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
        for s in strings_method:
            for path in strings_method[s]:
                buff += s.replace('\n', ' ')
        return buff

    def _get_strings_pa(self, analysis_method):
        """Return (offset, "S<len>") events for each string use — the
        length-annotated variant of _get_strings_a."""
        l = []
        strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
        for s in strings_method:
            for path in strings_method[s]:
                l.append( ( path[1], "S%d" % len(s.var) ) )
        return l

    def _get_strings_a(self, analysis_method):
        """Return (offset, "S") events for each string use, memoized
        per method."""
        key = "SA-%s" % self._get_method_info(analysis_method)
        if key in self._global_cached:
            return self._global_cached[ key ]
        l = []
        strings_method = self.tainted_variables.get_strings_by_method( analysis_method.get_method() )
        for s in strings_method:
            for path in strings_method[s]:
                l.append( ( path[1], "S") )
        self._global_cached[ key ] = l
        return l

    def _get_fields_a(self, analysis_method):
        """Return (offset, "F<access>") events for each field access
        (0 = read, 1 = write, per FIELD_ACCESS), memoized per method."""
        key = "FA-%s" % self._get_method_info(analysis_method)
        if key in self._global_cached:
            return self._global_cached[ key ]
        fields_method = self.tainted_variables.get_fields_by_method( analysis_method.get_method() )
        l = []
        for f in fields_method:
            for path in fields_method[ f ]:
                l.append( (path[1], "F%d" % FIELD_ACCESS[ path[0] ]) )
        self._global_cached[ key ] = l
        return l

    def _get_packages_a(self, analysis_method):
        """Return (offset, "P<access>") events for each package access,
        without any package-name detail."""
        packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
        l = []
        for m in packages_method:
            for path in packages_method[ m ]:
                l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
        return l

    def _get_packages(self, analysis_method, include_packages):
        """Return the package events of _get_packages_pa_1 joined into
        a single string (used by the L4 level in get_method)."""
        l = self._get_packages_pa_1( analysis_method, include_packages )
        return "".join([ i[1] for i in l ])

    def _get_packages_pa_1(self, analysis_method, include_packages):
        """Return package events; calls into classes of the analysed
        app are tagged as internal, and packages matching a prefix in
        *include_packages* keep their full destination name. Memoized."""
        key = "PA1-%s-%s" % (self._get_method_info(analysis_method), include_packages)
        if key in self._global_cached:
            return self._global_cached[ key ]
        packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
        if self.classes_names == None:
            self.classes_names = analysis_method.get_vm().get_classes_names()
        l = []
        for m in packages_method:
            for path in packages_method[ m ]:
                # Does this package match one of the whitelisted prefixes?
                present = False
                for i in include_packages:
                    if m.find(i) == 0:
                        present = True
                        break
                if path.get_access_flag() == 1:
                    dst_class_name, dst_method_name, dst_descriptor = path.get_dst( analysis_method.get_vm().get_class_manager() )
                    if dst_class_name in self.classes_names:
                        # Call into the application's own code.
                        l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ 2 ]) ) )
                    else:
                        if present == True:
                            l.append( (path.get_idx(), "P%s{%s%s%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], dst_class_name, dst_method_name, dst_descriptor ) ) )
                        else:
                            l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
                else:
                    if present == True:
                        l.append( (path.get_idx(), "P%s{%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], m) ) )
                    else:
                        l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
        self._global_cached[ key ] = l
        return l

    def _get_packages_pa_2(self, analysis_method, include_packages):
        """Variant of _get_packages_pa_1: whitelisted packages are
        anonymized and everything else keeps its name. Not memoized."""
        packages_method = self.tainted_packages.get_packages_by_method( analysis_method.get_method() )
        l = []
        for m in packages_method:
            for path in packages_method[ m ]:
                present = False
                for i in include_packages:
                    if m.find(i) == 0:
                        present = True
                        break
                if present == True:
                    l.append( (path.get_idx(), "P%s" % (PACKAGE_ACCESS[ path.get_access_flag() ]) ) )
                    continue
                if path.get_access_flag() == 1:
                    dst_class_name, dst_method_name, dst_descriptor = path.get_dst( analysis_method.get_vm().get_class_manager() )
                    l.append( (path.get_idx(), "P%s{%s%s%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], dst_class_name, dst_method_name, dst_descriptor ) ) )
                else:
                    l.append( (path.get_idx(), "P%s{%s}" % (PACKAGE_ACCESS[ path.get_access_flag() ], m) ) )
        return l

    def get_method(self, analysis_method, signature_type, signature_arguments={}):
        """Build (or fetch from cache) the Sign object of a method.

        *signature_type* is a colon-separated list of level names
        (e.g. "L0:L1"); *signature_arguments* supplies per-level
        options such as the L0 variant and package whitelists.
        NOTE(review): the mutable default for signature_arguments is
        shared across calls — safe only while callers never mutate it.
        """
        key = "%s-%s-%s" % (self._get_method_info(analysis_method), signature_type, signature_arguments)
        if key in self._cached_signatures:
            return self._cached_signatures[ key ]
        s = Sign()
        #print signature_type, signature_arguments
        for i in signature_type.split(":"):
            # print i, signature_arguments[ i ]
            if i == "L0":
                _type = self.levels[ i ][ signature_arguments[ i ][ "type" ] ]
                try:
                    _arguments = signature_arguments[ i ][ "arguments" ]
                except KeyError:
                    _arguments = []
                value = self._get_bb( analysis_method, _type, _arguments )
                s.add( i, ''.join(z for z in value) )
            elif i == "L4":
                try:
                    _arguments = signature_arguments[ i ][ "arguments" ]
                except KeyError:
                    _arguments = []
                value = self._get_packages( analysis_method, _arguments )
                s.add( i , value )
            elif i == "hex":
                value = self._get_hex( analysis_method )
                s.add( i, value )
            elif i == "sequencebb":
                _type = ('_get_strings_a', '_get_fields_a', '_get_packages_pa_1')
                _arguments = ['Landroid', 'Ljava']
                #value = self._get_bb( analysis_method, _type, _arguments )
                #s.add( i, value )
                value = self._get_sequence_bb( analysis_method )
                s.add( i, value )
            else:
                # Simple levels: run each extractor and store its output.
                for f in self.levels[ i ]:
                    value = getattr( self, f )( analysis_method )
                    s.add( i, value )
        self._cached_signatures[ key ] = s
        return s
|
40223151/2014c2g9 | refs/heads/master | w2/static/Brython2.0.0-20140209-164925/Lib/_functools.py | 727 | def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
def reduce(func, iterable, initializer=None):
    """Pure-Python functools.reduce: apply *func* cumulatively to the
    items of *iterable*, left to right, reducing it to a single value.

    If *initializer* is given it is placed before the items; otherwise
    the first item is the starting value.

    Raises:
        TypeError: if *iterable* is empty and no *initializer* is given
            (matches CPython's functools.reduce; the original leaked a
            bare StopIteration here instead).
    """
    it = iter(iterable)
    if initializer is not None:
        res = initializer
    else:
        try:
            res = next(it)
        except StopIteration:
            raise TypeError('reduce() of empty sequence with no initial value')
    # Use a for-loop so that a StopIteration raised *inside* func
    # propagates instead of being silently swallowed (the original
    # while/try loop treated it as normal termination).
    for item in it:
        res = func(res, item)
    return res
|
niceguydave/wagtail-cookiecutter-foundation | refs/heads/master | {{cookiecutter.repo_name}}/products/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
phvu/CarND-behavioral-cloning | refs/heads/master | test.py | 1 | from keras.models import load_model
from scipy.ndimage import imread
model = load_model('model.h5')
print('Start')
image_array = imread('./data/IMG/center_2016_12_01_13_36_16_767.jpg')
transformed_image_array = image_array[None, :, 1:-1, :]
transformed_image_array = ((transformed_image_array / 255.) - 0.5) * 2
steering_angle = float(model.predict(transformed_image_array, batch_size=1))
print(steering_angle)
|
mgbarrero/xbob.db.biosecuridsigngf | refs/heads/master | xbob/db/biosecuridsigngf/__init__.py | 1 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The BiosecurID Signature Global Features database
"""
from .query import Database
from .models import Client, File, Protocol, ProtocolPurpose
__all__ = dir()
|
DDShadoww/grab | refs/heads/master | grab/script/start_project.py | 12 | import os
import logging
import shutil
import re
from grab.error import GrabError
logger = logging.getLogger('grab.script.start_project')
def setup_arg_parser(parser):
    """Register the command-line arguments of the start_project script.

    Args:
        parser: an argparse.ArgumentParser (or sub-parser) to extend.
    """
    # Positional: name of the project directory to create.
    parser.add_argument('project_name')
    # Optional: path to a custom template directory (default template
    # shipped with grab is used when omitted).
    parser.add_argument('--template')
def process_content(content, context):
    """Expand ``{{ KEY }}`` style macros in *content*.

    Each key of *context* is looked up as a macro name (whitespace
    inside the braces is tolerated) and replaced by its value.
    """
    for macro_name, replacement in context.items():
        pattern = re.compile(r'\{\{\s*%s\s*\}\}' % re.escape(macro_name))
        # Only substitute when the macro actually occurs in the text.
        if pattern.search(content):
            content = pattern.sub(replacement, content)
    return content
def process_file_path(path, context):
    """Return *path* with every occurrence of each context key replaced
    by its value (plain substring replacement, no macro braces)."""
    for token, value in context.items():
        path = path.replace(token, value)
    return path
def underscore_to_camelcase(val):
    """Convert an underscore_name to CamelCase.

    Example: "my_project" -> "MyProject". The input is lower-cased
    first, so "FOO_bar" also becomes "FooBar".
    """
    return ''.join(part.title() for part in val.lower().split('_'))
def main(project_name, template, **kwargs):
    """Create a new project directory from a template.

    Copies the template tree to ``<cwd>/<project_name>`` and expands
    the ``{{ PROJECT_NAME }}`` / ``{{ PROJECT_NAME_CAMELCASE }}``
    macros in every ``*.py`` file, both in file contents and in file
    names.

    Args:
        project_name: name of the project directory to create; also
            used as the macro value.
        template: path to a template directory, or None to use the
            default template shipped with grab.

    Raises:
        GrabError: if the target directory already exists.
    """
    cur_dir = os.getcwd()
    project_dir = os.path.join(cur_dir, project_name)
    if template is None:
        grab_root = os.path.dirname(
            os.path.dirname(os.path.realpath(__file__)))
        template_path = os.path.join(grab_root, 'util/default_project')
    else:
        template_path = template
    if os.path.exists(project_dir):
        raise GrabError('Directory %s already exists' % project_dir)
    else:
        logger.debug('Copying %s to %s' % (template_path, project_dir))
        shutil.copytree(template_path, project_dir)
    project_name_camelcase = underscore_to_camelcase(project_name)
    context = {
        'PROJECT_NAME': project_name,
        'PROJECT_NAME_CAMELCASE': project_name_camelcase,
    }
    for base, dir_names, file_names in os.walk(project_dir):
        for file_name in file_names:
            # Compute the path unconditionally: the original only bound
            # file_path inside the .py branch, so the else-print below
            # raised NameError (or printed a stale path) for non-.py
            # files.
            file_path = os.path.join(base, file_name)
            if file_name.endswith('.py'):
                # Context managers close the handles promptly (the
                # original leaked the read handle).
                with open(file_path) as inp:
                    content = process_content(inp.read(), context)
                new_file_path = process_file_path(file_path, context)
                with open(new_file_path, 'w') as out:
                    out.write(content)
                if file_path != new_file_path:
                    os.unlink(file_path)
                print('%s: OK' % new_file_path)
            else:
                print('%s: OK' % file_path)
|
zstackorg/zstack-woodpecker | refs/heads/master | integrationtest/vm/virtualrouter/vlan/test_reboot_vm.py | 4 | '''
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
    """Create a VM on a VLAN virtual-router network, reboot it, verify
    it, then destroy it.

    Created resources are registered in the module-global test_obj_dict
    so error_cleanup() can release them if a step fails.
    """
    global test_obj_dict
    test_util.test_dsc('Create test vm with Vlan SR and check')
    vm = test_stub.create_vlan_vm()
    # Track the VM for cleanup before running any check that may fail.
    test_obj_dict.add_vm(vm)
    vm.check()
    test_util.test_dsc('Reboot vm and check again')
    vm.reboot()
    # Verify the VM is healthy again after the reboot.
    vm.check()
    vm.destroy()
    test_util.test_pass('Vlan VR VM reboot Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Release all resources tracked in test_obj_dict.

    Invoked by the test framework only if test() raises an exception.
    """
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
|
alexsanjoseph/duolingo-save-streak | refs/heads/master | urllib3/util/__init__.py | 204 | from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
IS_PYOPENSSL,
IS_SECURETRANSPORT,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import (
current_time,
Timeout,
)
from .retry import Retry
from .url import (
get_host,
parse_url,
split_first,
Url,
)
from .wait import (
wait_for_read,
wait_for_write
)
__all__ = (
'HAS_SNI',
'IS_PYOPENSSL',
'IS_SECURETRANSPORT',
'SSLContext',
'Retry',
'Timeout',
'Url',
'assert_fingerprint',
'current_time',
'is_connection_dropped',
'is_fp_closed',
'get_host',
'parse_url',
'make_headers',
'resolve_cert_reqs',
'resolve_ssl_version',
'split_first',
'ssl_wrap_socket',
'wait_for_read',
'wait_for_write'
)
|
HoracioAlvarado/fwd | refs/heads/master | venv/Lib/site-packages/setuptools/command/sdist.py | 111 | from glob import glob
from distutils import log
import distutils.command.sdist as orig
import os
import sys
import io
from setuptools.extern import six
from setuptools.utils import cs_path_exists
import pkg_resources
# Readme file names that sdist recognises and includes automatically.
READMES = 'README', 'README.rst', 'README.txt'

# Default "revision control file finder": a callable returning no files.
_default_revctrl = list
def walk_revctrl(dirname=''):
    """Find all files under revision control"""
    # Ask every installed 'setuptools.file_finders' plugin (e.g. the
    # svn/git/hg finders) for the files it knows about under *dirname*
    # and yield each reported path.
    for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
        for item in ep.load()(dirname):
            yield item
class sdist(orig.sdist):
    """Smart sdist that finds anything supported by revision control"""

    user_options = [
        ('formats=', None,
         "formats for source distribution (comma-separated list)"),
        ('keep-temp', 'k',
         "keep the distribution tree around after creating " +
         "archive file(s)"),
        ('dist-dir=', 'd',
         "directory to put the source distribution archive(s) in "
         "[default: dist]"),
    ]

    # Unlike distutils' sdist, no negated option aliases are offered.
    negative_opt = {}

    def run(self):
        """Build the source distribution.

        The file list comes from egg_info's SOURCES machinery rather
        than distutils' own MANIFEST handling.
        """
        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        self.filelist = ei_cmd.filelist
        self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
        self.check_readme()

        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        # Call check_metadata only if no 'check' command
        # (distutils <= 2.6)
        import distutils.command

        if 'check' not in distutils.command.__all__:
            self.check_metadata()

        self.make_distribution()

        # Record the produced archives for upload/register commands.
        dist_files = getattr(self.distribution, 'dist_files', [])
        for file in self.archive_files:
            data = ('sdist', '', file)
            if data not in dist_files:
                dist_files.append(data)

    def __read_template_hack(self):
        # This grody hack closes the template file (MANIFEST.in) if an
        # exception occurs during read_template.
        # Doing so prevents an error when easy_install attempts to delete the
        # file.
        try:
            orig.sdist.read_template(self)
        except:
            _, _, tb = sys.exc_info()
            tb.tb_next.tb_frame.f_locals['template'].close()
            raise

    # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
    # has been fixed, so only override the method if we're using an earlier
    # Python.
    has_leaky_handle = (
        sys.version_info < (2, 7, 2)
        or (3, 0) <= sys.version_info < (3, 1, 4)
        or (3, 2) <= sys.version_info < (3, 2, 1)
    )
    if has_leaky_handle:
        read_template = __read_template_hack

    def add_defaults(self):
        """Add the standard files (readme, setup script, package
        sources, data files, scripts...) to the file list."""
        standards = [READMES,
                     self.distribution.script_name]
        for fn in standards:
            if isinstance(fn, tuple):
                # A tuple means "any one of these alternatives".
                alts = fn
                got_it = 0
                for fn in alts:
                    if cs_path_exists(fn):
                        got_it = 1
                        self.filelist.append(fn)
                        break

                if not got_it:
                    self.warn("standard file not found: should have one of " +
                              ', '.join(alts))
            else:
                if cs_path_exists(fn):
                    self.filelist.append(fn)
                else:
                    self.warn("standard file '%s' not found" % fn)

        optional = ['test/test*.py', 'setup.cfg']

        for pattern in optional:
            files = list(filter(cs_path_exists, glob(pattern)))
            if files:
                self.filelist.extend(files)

        # getting python files
        if self.distribution.has_pure_modules():
            build_py = self.get_finalized_command('build_py')
            self.filelist.extend(build_py.get_source_files())
            # This functionality is incompatible with include_package_data, and
            # will in fact create an infinite recursion if include_package_data
            # is True.  Use of include_package_data will imply that
            # distutils-style automatic handling of package_data is disabled
            if not self.distribution.include_package_data:
                for _, src_dir, _, filenames in build_py.data_files:
                    self.filelist.extend([os.path.join(src_dir, filename)
                                          for filename in filenames])

        if self.distribution.has_ext_modules():
            build_ext = self.get_finalized_command('build_ext')
            self.filelist.extend(build_ext.get_source_files())

        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.filelist.extend(build_clib.get_source_files())

        if self.distribution.has_scripts():
            build_scripts = self.get_finalized_command('build_scripts')
            self.filelist.extend(build_scripts.get_source_files())

    def check_readme(self):
        """Warn when none of the recognised readme files is present."""
        for f in READMES:
            if os.path.exists(f):
                return
        else:
            self.warn(
                "standard file not found: should have one of " +
                ', '.join(READMES)
            )

    def make_release_tree(self, base_dir, files):
        """Build the release tree, then write an unversioned setup.cfg
        into it so the sdist records the egg_info options used."""
        orig.sdist.make_release_tree(self, base_dir, files)

        # Save any egg_info command line options used to create this sdist
        dest = os.path.join(base_dir, 'setup.cfg')
        if hasattr(os, 'link') and os.path.exists(dest):
            # unlink and re-copy, since it might be hard-linked, and
            # we don't want to change the source version
            os.unlink(dest)
            self.copy_file('setup.cfg', dest)

        self.get_finalized_command('egg_info').save_version_info(dest)

    def _manifest_is_not_generated(self):
        # check for special comment used in 2.7.1 and higher
        if not os.path.isfile(self.manifest):
            return False

        with io.open(self.manifest, 'rb') as fp:
            first_line = fp.readline()
        return (first_line !=
                '# file GENERATED by distutils, do NOT edit\n'.encode())

    def read_manifest(self):
        """Read the manifest file (named by 'self.manifest') and use it to
        fill in 'self.filelist', the list of files to include in the source
        distribution.
        """
        log.info("reading manifest file '%s'", self.manifest)
        manifest = open(self.manifest, 'rbU')
        for line in manifest:
            # The manifest must contain UTF-8. See #303.
            if six.PY3:
                try:
                    line = line.decode('UTF-8')
                except UnicodeDecodeError:
                    log.warn("%r not UTF-8 decodable -- skipping" % line)
                    continue
            # ignore comments and blank lines
            line = line.strip()
            if line.startswith('#') or not line:
                continue
            self.filelist.append(line)
        manifest.close()
|
etherkit/OpenBeacon2 | refs/heads/master | client/win/venv/Lib/site-packages/setuptools/command/install.py | 529 | from distutils.errors import DistutilsArgError
import inspect
import glob
import warnings
import platform
import distutils.command.install as orig
import setuptools
# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for
# now. See https://github.com/pypa/setuptools/issues/199/
_install = orig.install
class install(orig.install):
"""Use easy_install to install the package, w/dependencies"""
user_options = orig.install.user_options + [
('old-and-unmanageable', None, "Try not to use this!"),
('single-version-externally-managed', None,
"used by system package builders to create 'flat' eggs"),
]
boolean_options = orig.install.boolean_options + [
'old-and-unmanageable', 'single-version-externally-managed',
]
new_commands = [
('install_egg_info', lambda self: True),
('install_scripts', lambda self: True),
]
_nc = dict(new_commands)
def initialize_options(self):
orig.install.initialize_options(self)
self.old_and_unmanageable = None
self.single_version_externally_managed = None
def finalize_options(self):
orig.install.finalize_options(self)
if self.root:
self.single_version_externally_managed = True
elif self.single_version_externally_managed:
if not self.root and not self.record:
raise DistutilsArgError(
"You must specify --record or --root when building system"
" packages"
)
def handle_extra_path(self):
if self.root or self.single_version_externally_managed:
# explicit backward-compatibility mode, allow extra_path to work
return orig.install.handle_extra_path(self)
# Ignore extra_path when installing an egg (or being run by another
# command without --root or --single-version-externally-managed
self.path_file = None
self.extra_dirs = ''
def run(self):
# Explicit request for old-style install? Just do it
if self.old_and_unmanageable or self.single_version_externally_managed:
return orig.install.run(self)
if not self._called_from_setup(inspect.currentframe()):
# Run in backward-compatibility mode to support bdist_* commands.
orig.install.run(self)
else:
self.do_egg_install()
@staticmethod
def _called_from_setup(run_frame):
"""
Attempt to detect whether run() was called from setup() or by another
command. If called by setup(), the parent caller will be the
'run_command' method in 'distutils.dist', and *its* caller will be
the 'run_commands' method. If called any other way, the
immediate caller *might* be 'run_command', but it won't have been
called by 'run_commands'. Return True in that case or if a call stack
is unavailable. Return False otherwise.
"""
if run_frame is None:
msg = "Call stack not available. bdist_* commands may fail."
warnings.warn(msg)
if platform.python_implementation() == 'IronPython':
msg = "For best results, pass -X:Frames to enable call stack."
warnings.warn(msg)
return True
res = inspect.getouterframes(run_frame)[2]
caller, = res[:1]
info = inspect.getframeinfo(caller)
caller_module = caller.f_globals.get('__name__', '')
return (
caller_module == 'distutils.dist'
and info.function == 'run_commands'
)
def do_egg_install(self):
easy_install = self.distribution.get_command_class('easy_install')
cmd = easy_install(
self.distribution, args="x", root=self.root, record=self.record,
)
cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
cmd.always_copy_from = '.' # make sure local-dir eggs get installed
# pick up setup-dir .egg files only: no .egg-info
cmd.package_index.scan(glob.glob('*.egg'))
self.run_command('bdist_egg')
args = [self.distribution.get_command_obj('bdist_egg').egg_output]
if setuptools.bootstrap_install_from:
# Bootstrap self-installation of setuptools
args.insert(0, setuptools.bootstrap_install_from)
cmd.args = args
cmd.run()
setuptools.bootstrap_install_from = None
# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = (
[cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
install.new_commands
)
|
caioserra/apiAdwords | refs/heads/master | tests/adspygoogle/dfp/v201211/creative_set_service_unittest.py | 4 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover company service examples."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
import unittest
from examples.adspygoogle.dfp.v201211.creative_set_service import associate_creative_set_to_line_item
from examples.adspygoogle.dfp.v201211.creative_set_service import create_creative_set
from examples.adspygoogle.dfp.v201211.creative_set_service import get_all_creative_sets
from examples.adspygoogle.dfp.v201211.creative_set_service import get_creative_sets_by_statement
from examples.adspygoogle.dfp.v201211.creative_set_service import update_creative_set
from tests.adspygoogle.dfp import client
from tests.adspygoogle.dfp import SERVER_V201211
from tests.adspygoogle.dfp import TEST_VERSION_V201211
from tests.adspygoogle.dfp import util
from tests.adspygoogle.dfp import VERSION_V201211
class CreativeSetServiceTest(unittest.TestCase):
    """Unittest suite for CreativeSetService."""
    # Suppress SOAP debug logging from the shared DFP test client.
    client.debug = False
    # Class-level guard: the expensive remote fixtures below are created once
    # per test run and shared by every test method.
    loaded = False

    def setUp(self):
        """Prepare unittest."""
        if not self.__class__.loaded:
            # Build the object graph a creative set needs: an advertiser with
            # master/companion creatives, plus an order and line item to
            # associate sets with.  Later calls consume ids from earlier ones,
            # so the order of these fixture calls matters.
            advertiser_id = util.CreateTestAdvertiser(client, SERVER_V201211,
                                                      VERSION_V201211)
            self.__class__.test_master_creative_id = util.CreateTestCreative(
                client, SERVER_V201211, VERSION_V201211, advertiser_id)
            self.__class__.test_companion_creative_id = util.CreateTestCreative(
                client, SERVER_V201211, VERSION_V201211, advertiser_id)
            trafficker_id = util.GetTrafficker(client, SERVER_V201211,
                                               VERSION_V201211)
            order_id = util.CreateTestOrder(client, SERVER_V201211, VERSION_V201211,
                                            advertiser_id, trafficker_id)
            ad_unit_id = util.CreateTestAdUnit(client, SERVER_V201211,
                                               VERSION_V201211)
            placement_id = util.CreateTestPlacement(client, SERVER_V201211,
                                                    VERSION_V201211, [ad_unit_id])
            self.__class__.test_line_item_id = util.CreateTestLineItem(
                client, SERVER_V201211, VERSION_V201211, order_id, [placement_id])
            self.__class__.loaded = True

    def testAssociateCreativeSetToLineItem(self):
        """Test whether we can associate creative set to a line item."""
        creative_set_id = util.CreateTestCreativeSet(
            client, SERVER_V201211, VERSION_V201211,
            self.__class__.test_master_creative_id,
            self.__class__.test_companion_creative_id)
        associate_creative_set_to_line_item.main(client, creative_set_id,
                                                 self.__class__.test_line_item_id)

    def testCreateCreativeSets(self):
        """Test whether we can create creative sets."""
        create_creative_set.main(client, self.__class__.test_master_creative_id,
                                 self.__class__.test_companion_creative_id)

    def testGetAllCreativeSets(self):
        """Test whether we can get all creative sets."""
        get_all_creative_sets.main(client)

    def testGetCreativeSetsByStatement(self):
        """Test whether we can get creative sets by statement."""
        get_creative_sets_by_statement.main(client,
                                            self.__class__.test_master_creative_id)

    def testUpdateCreativeSets(self):
        """Test whether we can update creative sets."""
        creative_set_id = util.CreateTestCreativeSet(
            client, SERVER_V201211, VERSION_V201211,
            self.__class__.test_master_creative_id,
            self.__class__.test_companion_creative_id)
        update_creative_set.main(client, creative_set_id,
                                 self.__class__.test_companion_creative_id)
if __name__ == '__main__':
    # Only run the suite when v201211 testing is enabled in the test config.
    if TEST_VERSION_V201211:
        unittest.main()
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-3.3.0/Lib/encodings/utf_32_be.py | 703 | """
Python 'utf-32-be' Codec
"""
import codecs
### Codec APIs
# Stateless module-level encoder: the C-implemented UTF-32-BE encode function.
encode = codecs.utf_32_be_encode
def decode(input, errors='strict'):
    """Decode UTF-32-BE *input*, treating the data as final.

    Returns the usual codec pair (decoded string, bytes consumed).
    """
    output, consumed = codecs.utf_32_be_decode(input, errors, True)
    return (output, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental UTF-32-BE encoder.

    The encoding is stateless (no BOM, fixed-width units), so each chunk
    can be encoded independently of the previous ones.
    """

    def encode(self, input, final=False):
        encoded, _consumed = codecs.utf_32_be_encode(input, self.errors)
        return encoded
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """Incremental UTF-32-BE decoder.

    The buffered base class keeps incomplete 4-byte units between calls and
    hands the accumulated bytes to _buffer_decode.
    """

    def _buffer_decode(self, input, errors, final):
        return codecs.utf_32_be_decode(input, errors, final)
class StreamWriter(codecs.StreamWriter):
    # Delegate directly to the C codec function; assigning the builtin as a
    # class attribute avoids Python-level method binding overhead.
    encode = codecs.utf_32_be_encode
class StreamReader(codecs.StreamReader):
    # Delegate directly to the C codec function (see StreamWriter).
    decode = codecs.utf_32_be_decode
### encodings module API
def getregentry():
    """Return the CodecInfo that registers this codec with the codecs machinery."""
    return codecs.CodecInfo(
        name='utf-32-be',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
miyazaki-tm/aoj | refs/heads/master | Volume0/0043.py | 2 | """
Puzzle
"""
def solve(hand, count, janto):
    """Return True if the `count` tiles left in `hand` can be fully arranged.

    hand  -- list of 9 counts, one per tile value 1..9
    count -- number of tiles still unplaced (kept in sync by recursion)
    janto -- True once the single allowed pair has been used

    A complete arrangement consumes every tile using exactly one pair plus
    any mix of triplets (three identical tiles) and runs (three consecutive
    tile values).
    """
    if count == 0:
        return True

    # Always branch on the lowest tile value still present; any valid
    # arrangement must consume it by one of the three group types below.
    low = next(i for i in range(9) if hand[i] > 0)

    # Pair: allowed only once per hand (toggles the janto flag).
    if not janto and hand[low] >= 2:
        if solve(decrease(hand, [low, low]), count - 2, not janto):
            return True
    # Triplet of identical tiles.
    if hand[low] >= 3:
        if solve(decrease(hand, [low] * 3), count - 3, janto):
            return True
    # Run of three consecutive values (cannot start above tile 7).
    if low <= 6 and hand[low + 1] >= 1 and hand[low + 2] >= 1:
        if solve(decrease(hand, [low, low + 1, low + 2]), count - 3, janto):
            return True
    return False
def decrease(hand, remove):
    """Return a copy of *hand* with one tile removed per index in *remove*.

    The input list is left untouched; indices may repeat to remove several
    tiles of the same value.
    """
    copied = list(hand)
    for idx in remove:
        copied[idx] -= 1
    return copied
if __name__ == '__main__':
    # Read 13-tile hands (digit strings) until EOF.  For each hand, print
    # every tile value 1-9 whose addition yields a winning 14-tile
    # arrangement, or 0 when no tile completes the hand.  (Python 2 script.)
    while True:
        try:
            result = []
            hand = [0 for i in xrange(9)]
            for x in raw_input():
                hand[int(x) - 1] += 1
            for x in xrange(9):
                # At most four copies of each tile exist in the game.
                if hand[x] < 4:
                    hand[x] += 1
                    if solve(hand, sum(hand), False):
                        result.append(x + 1)
                    hand[x] -= 1
            if len(result) == 0:
                result.append(0)
            print " ".join(map(str, sorted(result)))
        except:
            # Bare except is how this contest script detects end of input
            # (raw_input raises EOFError); malformed lines also end the loop.
            break
|
winnerineast/Origae-6 | refs/heads/master | origae/inference/text/__init__.py | 1 | from __future__ import absolute_import
from .job import TextInferenceJob
__all__ = ['TextInferenceJob']
|
xbmc/xbmc-antiquated | refs/heads/master | xbmc/lib/libPython/Python/Demo/tkinter/guido/brownian.py | 50 | # Brownian motion -- an example of a multi-threaded Tkinter program.
from Tkinter import *
import random
import threading
import time
import sys
WIDTH = 400    # canvas width, pixels
HEIGHT = 300   # canvas height, pixels
SIGMA = 10     # std-dev of a particle's initial offset from the canvas center
BUZZ = 2       # std-dev of each random step (the "buzz" of the motion)
RADIUS = 2     # particle radius, pixels
LAMBDA = 10    # rate of the exponential delay between steps (mean 1/LAMBDA s)
FILL = 'red'   # particle fill color

stop = 0                                # Set when main loop exits
def particle(canvas):
    """Worker-thread body: draw one particle and jitter it forever.

    Runs until the module-level ``stop`` flag is set by main(), or until the
    canvas is destroyed (canvas.move then raises TclError).
    """
    r = RADIUS
    x = random.gauss(WIDTH/2.0, SIGMA)
    y = random.gauss(HEIGHT/2.0, SIGMA)
    p = canvas.create_oval(x-r, y-r, x+r, y+r, fill=FILL)
    while not stop:
        dx = random.gauss(0, BUZZ)
        dy = random.gauss(0, BUZZ)
        # Exponential inter-step delay gives the motion its Poisson timing.
        dt = random.expovariate(LAMBDA)
        try:
            canvas.move(p, dx, dy)
        except TclError:
            # Window was closed while we slept; exit the thread quietly.
            break
        time.sleep(dt)
def main():
    """Create the canvas, spawn one thread per particle, run the Tk loop.

    The particle count defaults to 30 and can be overridden by the first
    command-line argument.
    """
    global stop
    root = Tk()
    canvas = Canvas(root, width=WIDTH, height=HEIGHT)
    canvas.pack(fill='both', expand=1)
    np = 30
    if sys.argv[1:]:
        np = int(sys.argv[1])
    for i in range(np):
        t = threading.Thread(target=particle, args=(canvas,))
        t.start()
    try:
        root.mainloop()
    finally:
        # Tell every particle thread to exit once the GUI loop ends.
        stop = 1

main()
|
Microsoft/PTVS | refs/heads/master | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32comext/axdebug/stackframe.py | 11 | """Support for stack-frames.
Provides Implements a nearly complete wrapper for a stack frame.
"""
import sys
from .util import _wrap, RaiseNotImpl
import expressions, gateways, axdebug, winerror
import pythoncom
from win32com.server.exception import COMException
from .util import trace
#def trace(*args):
# pass
class EnumDebugStackFrames(gateways.EnumDebugStackFrames):
    """A class that given a debugger object, can return an enumerator
    of DebugStackFrame objects.
    """
    def __init__(self, debugger):
        # Walk the Python frame chain from the debugger's current frame,
        # keeping only frames whose source file the code-container provider
        # recognizes (others are not debuggable).
        infos = []
        frame = debugger.currentframe
        # print "Stack check"
        while frame:
            # print " Checking frame", frame.f_code.co_filename, frame.f_lineno-1, frame.f_trace,
            # Get a DebugCodeContext for the stack frame. If we fail, then it
            # is not debuggable, and therefore not worth displaying.
            cc = debugger.codeContainerProvider.FromFileName(frame.f_code.co_filename)
            if cc is not None:
                try:
                    address = frame.f_locals['__axstack_address__']
                except KeyError:
                    # print "Couldnt find stack address for",frame.f_code.co_filename, frame.f_lineno-1
                    # Use this one, even tho it is wrong :-(
                    address = axdebug.GetStackAddress()
                # Tuple layout expected by the gateway: (frame, min, lim,
                # fFinal, obFinal) -- see _wrap below.
                frameInfo = DebugStackFrame(frame, frame.f_lineno-1, cc), address, address+1, 0, None
                infos.append(frameInfo)
                # print "- Kept!"
            # else:
            # print "- rejected"
            frame = frame.f_back
        gateways.EnumDebugStackFrames.__init__(self, infos, 0)
    # def __del__(self):
    # print "EnumDebugStackFrames dieing"

    def Next(self, count):
        return gateways.EnumDebugStackFrames.Next(self, count)

    # def _query_interface_(self, iid):
    # from win32com.util import IIDToInterfaceName
    # print "EnumDebugStackFrames QI with %s (%s)" % (IIDToInterfaceName(iid), str(iid))
    # return 0

    def _wrap(self, obj):
        # This enum returns a tuple, with 2 com objects in it.
        obFrame, min, lim, fFinal, obFinal = obj
        obFrame = _wrap(obFrame, axdebug.IID_IDebugStackFrame)
        if obFinal:
            obFinal = _wrap(obFinal, pythoncom.IID_IUnknown)
        return obFrame, min, lim, fFinal, obFinal
class DebugStackFrame(gateways.DebugStackFrame):
    """Implements IDebugStackFrame for one Python frame at a given line."""
    def __init__(self, frame, lineno, codeContainer):
        self.frame = frame
        self.lineno = lineno
        self.codeContainer = codeContainer
        # Lazily created (and cached) wrapped expression context -- see
        # _query_interface_.
        self.expressionContext = None
    # def __del__(self):
    # print "DSF dieing"

    def _query_interface_(self, iid):
        if iid==axdebug.IID_IDebugExpressionContext:
            if self.expressionContext is None:
                self.expressionContext = _wrap(expressions.ExpressionContext(self.frame), axdebug.IID_IDebugExpressionContext)
            return self.expressionContext
        # from win32com.util import IIDToInterfaceName
        # print "DebugStackFrame QI with %s (%s)" % (IIDToInterfaceName(iid), str(iid))
        return 0
    #
    # The following need implementation
    def GetThread(self):
        """ Returns the thread associated with this stack frame.

        Result must be a IDebugApplicationThread
        """
        RaiseNotImpl("GetThread")

    def GetCodeContext(self):
        # Map our (0-based) line number to a character offset in the
        # container, then ask the container for the code context there.
        offset = self.codeContainer.GetPositionOfLine(self.lineno)
        return self.codeContainer.GetCodeContextAtPosition(offset)
    #
    # The following are usefully implemented
    def GetDescriptionString(self, fLong):
        # Short description is just the code object's name; anonymous code
        # objects are shown as <lambda>.
        filename = self.frame.f_code.co_filename
        s = ""
        if 0: #fLong:
            s = s + filename
        if self.frame.f_code.co_name:
            s = s + self.frame.f_code.co_name
        else:
            s = s + "<lambda>"
        return s

    def GetLanguageString(self, fLong):
        if fLong:
            return "Python ActiveX Scripting Engine"
        else:
            return "Python"

    def GetDebugProperty(self):
        return _wrap(StackFrameDebugProperty(self.frame), axdebug.IID_IDebugProperty)
class DebugStackFrameSniffer:
    """Exposes IDebugStackFrameSniffer: lets a host enumerate our stack frames."""
    _public_methods_ = ["EnumStackFrames"]
    _com_interfaces_ = [axdebug.IID_IDebugStackFrameSniffer]

    def __init__(self, debugger):
        self.debugger = debugger
        trace("DebugStackFrameSniffer instantiated")

    def EnumStackFrames(self):
        trace("DebugStackFrameSniffer.EnumStackFrames called")
        enumerator = EnumDebugStackFrames(self.debugger)
        return _wrap(enumerator, axdebug.IID_IEnumDebugStackFrames)
# A DebugProperty for a stack frame.
class StackFrameDebugProperty:
    """IDebugProperty implementation for a stack frame's locals.

    Only EnumMembers is functional; the remaining methods are
    not-implemented stubs.
    """
    _com_interfaces_ = [axdebug.IID_IDebugProperty]
    _public_methods_ = ['GetPropertyInfo', 'GetExtendedInfo', 'SetValueAsString',
                        'EnumMembers', 'GetParent'
                        ]
    def __init__(self, frame):
        self.frame = frame

    def GetPropertyInfo(self, dwFieldSpec, nRadix):
        RaiseNotImpl("StackFrameDebugProperty::GetPropertyInfo")

    def GetExtendedInfo(self): ### Note - not in the framework.
        RaiseNotImpl("StackFrameDebugProperty::GetExtendedInfo")

    def SetValueAsString(self, value, radix):
        #
        RaiseNotImpl("DebugProperty::SetValueAsString")

    def EnumMembers(self, dwFieldSpec, nRadix, iid):
        # NOTE(review): debug print left in -- confirm it is intentional.
        print("EnumMembers", dwFieldSpec, nRadix, iid)
        from . import expressions
        return expressions.MakeEnumDebugProperty(self.frame.f_locals, dwFieldSpec, nRadix, iid, self.frame)

    def GetParent(self):
        # return IDebugProperty
        RaiseNotImpl("DebugProperty::GetParent")
|
twiest/openshift-tools | refs/heads/stg | openshift/installer/vendored/openshift-ansible-git-2016-04-27/roles/os_firewall/library/os_firewall_manage_iptables.py | 68 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=fixme, missing-docstring
import subprocess
from subprocess import call, check_output
# Ansible self-documentation blocks, rendered by `ansible-doc`.
DOCUMENTATION = '''
---
module: os_firewall_manage_iptables
short_description: This module manages iptables rules for a given chain
author: Jason DeTiberus
requirements: [ ]
'''

EXAMPLES = '''
'''
class IpTablesError(Exception):
    """Base error for iptables operations.

    Carries the failing command, its exit code and its captured output so
    callers can report exactly what went wrong.
    """

    def __init__(self, msg, cmd, exit_code, output):
        super(IpTablesError, self).__init__(msg)
        self.msg = msg
        self.cmd = cmd
        self.exit_code = exit_code
        self.output = output
class IpTablesAddRuleError(IpTablesError):
    """Raised when appending an ACCEPT rule to the managed chain fails."""
    pass
class IpTablesRemoveRuleError(IpTablesError):
    """Raised when deleting a rule from the managed chain fails."""
    pass
class IpTablesSaveError(IpTablesError):
    """Raised when persisting the current rules via iptables.init fails."""
    pass
class IpTablesCreateChainError(IpTablesError):
    """Raised when creating the managed chain (or a rule in it) fails.

    Extends the base error with the chain name involved.
    """
    def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
        super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
                                                       output)
        self.chain = chain
class IpTablesCreateJumpRuleError(IpTablesError):
    """Raised when installing the jump rule into the jump chain fails.

    Extends the base error with the chain name involved.
    """
    def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
        super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
                                                          output)
        self.chain = chain
# TODO: impliment rollbacks for any events that where successful and an
# exception was thrown later. for example, when the chain is created
# successfully, but the add/remove rule fails.
class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
    """Wraps the iptables/ip6tables CLI to manage rules in a dedicated chain.

    Honors Ansible check mode: when module.check_mode is set, no command
    that would mutate the firewall is executed; intended actions are only
    recorded in self.output and self.changed.
    """

    def __init__(self, module):
        self.module = module
        self.ip_version = module.params['ip_version']
        self.check_mode = module.check_mode
        self.chain = module.params['chain']
        self.create_jump_rule = module.params['create_jump_rule']
        self.jump_rule_chain = module.params['jump_rule_chain']
        self.cmd = self.gen_cmd()
        self.save_cmd = self.gen_save_cmd()
        self.output = []
        self.changed = False

    def save(self):
        """Persist the in-kernel rules so they survive a reboot."""
        try:
            self.output.append(check_output(self.save_cmd,
                                            stderr=subprocess.STDOUT))
        except subprocess.CalledProcessError as ex:
            raise IpTablesSaveError(
                msg="Failed to save iptables rules",
                cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)

    def verify_chain(self):
        """Ensure the managed chain exists and (optionally) is jumped to."""
        if not self.chain_exists():
            self.create_chain()
        if self.create_jump_rule and not self.jump_rule_exists():
            self.create_jump()

    def add_rule(self, port, proto):
        """Add an ACCEPT rule for proto/port to the chain (idempotent)."""
        rule = self.gen_rule(port, proto)
        if not self.rule_exists(rule):
            self.verify_chain()

            if self.check_mode:
                self.changed = True
                self.output.append("Create rule for %s %s" % (proto, port))
            else:
                cmd = self.cmd + ['-A'] + rule
                try:
                    self.output.append(check_output(cmd))
                    self.changed = True
                    self.save()
                except subprocess.CalledProcessError as ex:
                    # NOTE(review): historically raises the create-chain error
                    # type here; kept for backward compatibility with callers.
                    raise IpTablesCreateChainError(
                        chain=self.chain,
                        msg="Failed to create rule for "
                            "%s %s" % (proto, port),
                        cmd=ex.cmd, exit_code=ex.returncode,
                        output=ex.output)

    def remove_rule(self, port, proto):
        """Remove the ACCEPT rule for proto/port from the chain (idempotent)."""
        rule = self.gen_rule(port, proto)
        if self.rule_exists(rule):
            if self.check_mode:
                self.changed = True
                self.output.append("Remove rule for %s %s" % (proto, port))
            else:
                cmd = self.cmd + ['-D'] + rule
                try:
                    self.output.append(check_output(cmd))
                    self.changed = True
                    self.save()
                except subprocess.CalledProcessError as ex:
                    # Bug fix: IpTablesRemoveRuleError does not accept a
                    # `chain` argument (it inherits IpTablesError.__init__),
                    # so passing chain= here raised TypeError and masked the
                    # real failure.
                    raise IpTablesRemoveRuleError(
                        msg="Failed to remove rule for %s %s" % (proto, port),
                        cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)

    def rule_exists(self, rule):
        """Return True when iptables -C reports the rule is present."""
        check_cmd = self.cmd + ['-C'] + rule
        return True if call(check_cmd) == 0 else False

    def gen_rule(self, port, proto):
        """Build the argument list for a NEW-state ACCEPT rule."""
        return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
                '-m', proto, '--dport', str(port), '-j', 'ACCEPT']

    def create_jump(self):
        """Install a jump to the managed chain inside the jump chain."""
        if self.check_mode:
            self.changed = True
            self.output.append("Create jump rule for chain %s" % self.chain)
        else:
            try:
                cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
                output = check_output(cmd, stderr=subprocess.STDOUT)

                # break the input rules into rows and columns
                input_rules = [s.split() for s in output.split('\n')]

                # Find the last numbered rule
                last_rule_num = None
                last_rule_target = None
                for rule in input_rules[:-1]:
                    if rule:
                        try:
                            last_rule_num = int(rule[0])
                        except ValueError:
                            continue
                        last_rule_target = rule[1]

                # Naively assume that if the last row is a REJECT rule, then
                # we can add insert our rule right before it, otherwise we
                # assume that we can just append the rule.
                if (last_rule_num and last_rule_target
                        and last_rule_target == 'REJECT'):
                    # insert rule
                    cmd = self.cmd + ['-I', self.jump_rule_chain,
                                      str(last_rule_num)]
                else:
                    # append rule
                    cmd = self.cmd + ['-A', self.jump_rule_chain]
                cmd += ['-j', self.chain]
                output = check_output(cmd, stderr=subprocess.STDOUT)
                self.changed = True
                self.output.append(output)
                self.save()
            except subprocess.CalledProcessError as ex:
                if '--line-numbers' in ex.cmd:
                    raise IpTablesCreateJumpRuleError(
                        chain=self.chain,
                        msg=("Failed to query existing " +
                             self.jump_rule_chain +
                             " rules to determine jump rule location"),
                        cmd=ex.cmd, exit_code=ex.returncode,
                        output=ex.output)
                else:
                    raise IpTablesCreateJumpRuleError(
                        chain=self.chain,
                        msg=("Failed to create jump rule for chain " +
                             self.chain),
                        cmd=ex.cmd, exit_code=ex.returncode,
                        output=ex.output)

    def create_chain(self):
        """Create the managed chain with iptables -N."""
        if self.check_mode:
            self.changed = True
            self.output.append("Create chain %s" % self.chain)
        else:
            try:
                cmd = self.cmd + ['-N', self.chain]
                self.output.append(check_output(cmd,
                                                stderr=subprocess.STDOUT))
                self.changed = True
                self.output.append("Successfully created chain %s" %
                                   self.chain)
                self.save()
            except subprocess.CalledProcessError as ex:
                raise IpTablesCreateChainError(
                    chain=self.chain,
                    msg="Failed to create chain: %s" % self.chain,
                    cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
                )

    def jump_rule_exists(self):
        """Return True when the jump chain already jumps to our chain."""
        cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
        return True if call(cmd) == 0 else False

    def chain_exists(self):
        """Return True when the managed chain exists (iptables -L succeeds)."""
        cmd = self.cmd + ['-L', self.chain]
        return True if call(cmd) == 0 else False

    def gen_cmd(self):
        """Select the binary matching the configured IP version."""
        cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
        return ["/usr/sbin/%s" % cmd]

    def gen_save_cmd(self): # pylint: disable=no-self-use
        """Command used to persist rules across reboots."""
        return ['/usr/libexec/iptables/iptables.init', 'save']
def main():
    """Ansible entry point: parse arguments and dispatch to IpTablesManager.

    Actions: 'add'/'remove' manage a single proto/port ACCEPT rule (both
    require protocol and port); 'verify_chain' only ensures the chain and
    jump rule exist.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            action=dict(required=True, choices=['add', 'remove',
                                                'verify_chain']),
            chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
            create_jump_rule=dict(required=False, type='bool', default=True),
            jump_rule_chain=dict(required=False, default='INPUT'),
            protocol=dict(required=False, choices=['tcp', 'udp']),
            port=dict(required=False, type='int'),
            ip_version=dict(required=False, default='ipv4',
                            choices=['ipv4', 'ipv6']),
        ),
        supports_check_mode=True
    )

    action = module.params['action']
    protocol = module.params['protocol']
    port = module.params['port']

    # protocol/port are only meaningful (and mandatory) for rule actions.
    if action in ['add', 'remove']:
        if not protocol:
            error = "protocol is required when action is %s" % action
            module.fail_json(msg=error)
        if not port:
            error = "port is required when action is %s" % action
            module.fail_json(msg=error)

    iptables_manager = IpTablesManager(module)

    try:
        if action == 'add':
            iptables_manager.add_rule(port, protocol)
        elif action == 'remove':
            iptables_manager.remove_rule(port, protocol)
        elif action == 'verify_chain':
            iptables_manager.verify_chain()
    except IpTablesError as ex:
        module.fail_json(msg=ex.msg)

    return module.exit_json(changed=iptables_manager.changed,
                            output=iptables_manager.output)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
|
harikrishnakanchi/jeeves | refs/heads/master | grammifier/__init__.py | 12133432 | |
filias/django | refs/heads/master | tests/i18n/other/__init__.py | 12133432 | |
jaeilepp/eggie | refs/heads/master | ui/__init__.py | 12133432 | |
Maccimo/intellij-community | refs/heads/master | python/testData/completion/relativeFromImportInNamespacePackage/nspkg1/nspkg2/bar.py | 12133432 | |
harshilasu/GraphicMelon | refs/heads/master | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/cloudfront/identity.py | 170 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
class OriginAccessIdentity(object):
    """A CloudFront origin access identity (OAI), with its config and ETag."""

    def __init__(self, connection=None, config=None, id='',
                 s3_user_id='', comment=''):
        self.connection = connection
        self.config = config
        self.id = id
        self.s3_user_id = s3_user_id
        self.comment = comment
        self.etag = None

    def startElement(self, name, attrs, connection):
        # Only the nested config element produces a sub-object for the
        # SAX-style response parser; everything else is handled flat.
        if name != 'CloudFrontOriginAccessIdentityConfig':
            return None
        self.config = OriginAccessIdentityConfig()
        return self.config

    def endElement(self, name, value, connection):
        # Known tags map onto snake_case attributes; any other tag is stored
        # verbatim under its own name.
        known = {'Id': 'id', 'S3CanonicalUserId': 's3_user_id',
                 'Comment': 'comment'}
        setattr(self, known.get(name, name), value)

    def update(self, comment=None):
        """Push a new config (optionally with a new comment) to CloudFront."""
        fresh = OriginAccessIdentityConfig(self.connection,
                                           self.config.caller_reference,
                                           self.config.comment)
        if comment is not None:
            fresh.comment = comment
        self.etag = self.connection.set_origin_identity_config(self.id, self.etag, fresh)
        self.config = fresh

    def delete(self):
        return self.connection.delete_origin_access_identity(self.id, self.etag)

    def uri(self):
        return 'origin-access-identity/cloudfront/%s' % self.id
class OriginAccessIdentityConfig(object):
    """Mutable configuration (caller reference + comment) for an OAI."""

    def __init__(self, connection=None, caller_reference='', comment=''):
        self.connection = connection
        # A caller reference must be unique per create call; generate one
        # when the caller does not supply it.
        self.caller_reference = caller_reference or str(uuid.uuid4())
        self.comment = comment

    def to_xml(self):
        """Serialize to the CloudFront 2009-09-09 request XML."""
        parts = ['<?xml version="1.0" encoding="UTF-8"?>\n']
        parts.append('<CloudFrontOriginAccessIdentityConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-09-09/">\n')
        parts.append(' <CallerReference>%s</CallerReference>\n' % self.caller_reference)
        if self.comment:
            parts.append(' <Comment>%s</Comment>\n' % self.comment)
        parts.append('</CloudFrontOriginAccessIdentityConfig>\n')
        return ''.join(parts)

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Map known response tags onto attributes; keep unknown tags as-is.
        fields = {'Comment': 'comment', 'CallerReference': 'caller_reference'}
        setattr(self, fields.get(name, name), value)
class OriginAccessIdentitySummary(object):
    """One summary entry from a list-origin-access-identities response."""

    def __init__(self, connection=None, id='',
                 s3_user_id='', comment=''):
        self.connection = connection
        self.id = id
        self.s3_user_id = s3_user_id
        self.comment = comment
        self.etag = None

    def startElement(self, name, attrs, connection):
        # Summaries contain no nested elements of interest.
        return None

    def endElement(self, name, value, connection):
        # Route known tags to their attributes; keep unknown tags verbatim.
        targets = {'Id': 'id', 'S3CanonicalUserId': 's3_user_id',
                   'Comment': 'comment'}
        setattr(self, targets.get(name, name), value)

    def get_origin_access_identity(self):
        """Fetch the full identity object for this summary via the connection."""
        return self.connection.get_origin_access_identity_info(self.id)
|
alvin319/CarnotKE | refs/heads/master | jyhton/lib-python/2.7/plat-mac/Carbon/OSA.py | 81 | from _OSA import *
|
sdodson/openshift-ansible | refs/heads/master | roles/lib_openshift/src/lib/clusterrole.py | 64 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-public-methods
class ClusterRole(Yedit):
    ''' Class to model an openshift ClusterRole'''
    # YAML path of the rules list inside the resource document.
    rules_path = "rules"

    def __init__(self, name=None, content=None):
        ''' Constructor for clusterrole '''
        if content is None:
            # No document supplied: synthesize a minimal ClusterRole skeleton.
            content = ClusterRole.builder(name).yaml_dict

        super(ClusterRole, self).__init__(content=content)

        self.__rules = Rule.parse_rules(self.get(ClusterRole.rules_path)) or []

    @property
    def rules(self):
        # Parsed Rule objects backing the "rules" section of the document.
        return self.__rules

    @rules.setter
    def rules(self, data):
        # Keep the parsed list and the underlying YAML document in sync.
        self.__rules = data
        self.put(ClusterRole.rules_path, self.__rules)

    def rule_exists(self, inc_rule):
        '''attempt to find the inc_rule in the rules list'''
        for rule in self.rules:
            if rule == inc_rule:
                return True

        return False

    def compare(self, other, verbose=False):
        '''compare function for clusterrole

        Returns True only when both roles contain exactly the same rules
        (checked in both directions, since rules may be unordered).
        '''
        for rule in other.rules:
            if rule not in self.rules:
                if verbose:
                    print('Rule in other not found in self. [{}]'.format(rule))
                return False

        for rule in self.rules:
            if rule not in other.rules:
                if verbose:
                    print('Rule in self not found in other. [{}]'.format(rule))
                return False

        return True

    @staticmethod
    def builder(name='default_clusterrole', rules=None):
        '''return a clusterrole with name and/or rules'''
        if rules is None:
            # Default: a single empty rule stanza the caller can fill in.
            rules = [{'apiGroups': [""],
                      'attributeRestrictions': None,
                      'verbs': [],
                      'resources': []}]
        content = {
            'apiVersion': 'v1',
            'kind': 'ClusterRole',
            'metadata': {'name': '{}'.format(name)},
            'rules': rules,
        }

        return ClusterRole(content=content)
|
bytor99999/vertx-web | refs/heads/master | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py | 169 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
def parse(doc, treebuilder="etree", encoding=None,
          namespaceHTMLElements=True):
    """Parse a string or file-like object into a tree"""
    builder = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
                  namespaceHTMLElements=True):
    """Parse an HTML fragment into a tree, as the content of *container*."""
    builder = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parseFragment(doc, container=container, encoding=encoding)
def method_decorator_metaclass(function):
    """Build a metaclass that, at class-creation time, wraps every plain
    function attribute of the class with *function*.

    Non-function attributes are stored back unchanged.
    """
    class Decorated(type):
        def __new__(meta, classname, bases, classDict):
            rewritten = {}
            for name, value in classDict.items():
                if isinstance(value, types.FunctionType):
                    value = function(value)
                rewritten[name] = value
            classDict.update(rewritten)
            return type.__new__(meta, classname, bases, classDict)
    return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
    def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
                 strict=False, namespaceHTMLElements=True, debug=False):
        """
        strict - raise an exception when a parse error is encountered

        tree - a treebuilder class controlling the type of tree that will be
        returned. Built in treebuilders can be accessed through
        html5lib.treebuilders.getTreeBuilder(treeType)

        tokenizer - a class that provides a stream of tokens to the treebuilder.
        This may be replaced for e.g. a sanitizer which converts some tags to
        text
        """

        # Raise an exception on the first error encountered
        self.strict = strict

        if tree is None:
            tree = treebuilders.getTreeBuilder("etree")
        self.tree = tree(namespaceHTMLElements)
        self.tokenizer_class = tokenizer
        self.errors = []

        # One phase object per insertion mode; these drive the
        # tree-construction state machine in mainLoop.
        self.phases = dict([(name, cls(self, self.tree)) for name, cls in
                            getPhases(debug).items()])
    def _parse(self, stream, innerHTML=False, container="div",
               encoding=None, parseMeta=True, useChardet=True, **kwargs):
        # Shared driver for parse()/parseFragment(): set up the tokenizer,
        # then run mainLoop, restarting from scratch whenever the tokenizer
        # signals that the whole parse must be redone (ReparseException).
        self.innerHTMLMode = innerHTML
        self.container = container
        self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
                                              parseMeta=parseMeta,
                                              useChardet=useChardet,
                                              parser=self, **kwargs)
        self.reset()

        while True:
            try:
                self.mainLoop()
                break
            except ReparseException:
                self.reset()
    def reset(self):
        """Reset all per-parse state so the document can be (re)parsed."""
        self.tree.reset()
        self.firstStartTag = False
        self.errors = []
        self.log = []  # only used with debug mode
        # "quirks" / "limited quirks" / "no quirks"
        self.compatMode = "no quirks"

        if self.innerHTMLMode:
            self.innerHTML = self.container.lower()

            # Fragment parsing: choose the tokenizer start state matching the
            # containing element's content model.
            if self.innerHTML in cdataElements:
                self.tokenizer.state = self.tokenizer.rcdataState
            elif self.innerHTML in rcdataElements:
                self.tokenizer.state = self.tokenizer.rawtextState
            elif self.innerHTML == 'plaintext':
                self.tokenizer.state = self.tokenizer.plaintextState
            else:
                # state already is data state
                # self.tokenizer.state = self.tokenizer.dataState
                pass
            self.phase = self.phases["beforeHtml"]
            self.phase.insertHtmlElement()
            self.resetInsertionMode()
        else:
            self.innerHTML = False
            self.phase = self.phases["initial"]

        self.lastPhase = None
        self.beforeRCDataPhase = None
        self.framesetOK = True
    def isHTMLIntegrationPoint(self, element):
        # A MathML annotation-xml element counts as an HTML integration point
        # only when its encoding attribute is text/html or
        # application/xhtml+xml (compared ASCII case-insensitively); any
        # other element is looked up in the static table.
        if (element.name == "annotation-xml" and
                element.namespace == namespaces["mathml"]):
            return ("encoding" in element.attributes and
                    element.attributes["encoding"].translate(
                        asciiUpper2Lower) in
                    ("text/html", "application/xhtml+xml"))
        else:
            return (element.namespace, element.name) in htmlIntegrationPointElements
    def isMathMLTextIntegrationPoint(self, element):
        # Membership test against the static (namespace, name) table.
        return (element.namespace, element.name) in mathmlTextIntegrationPointElements
    def mainLoop(self):
        """Pump tokens from the tokenizer through the tree-construction phases.

        A phase handler may return a replacement token, in which case the new
        token is reprocessed (possibly by a different phase) until a handler
        returns None.  After the token stream is exhausted, EOF is fed to the
        phases until one of them stops requesting reprocessing.
        """
        # Hoist the token-type constants out of the loop.
        CharactersToken = tokenTypes["Characters"]
        SpaceCharactersToken = tokenTypes["SpaceCharacters"]
        StartTagToken = tokenTypes["StartTag"]
        EndTagToken = tokenTypes["EndTag"]
        CommentToken = tokenTypes["Comment"]
        DoctypeToken = tokenTypes["Doctype"]
        ParseErrorToken = tokenTypes["ParseError"]

        for token in self.normalizedTokens():
            new_token = token
            while new_token is not None:
                currentNode = self.tree.openElements[-1] if self.tree.openElements else None
                currentNodeNamespace = currentNode.namespace if currentNode else None
                currentNodeName = currentNode.name if currentNode else None

                type = new_token["type"]

                if type == ParseErrorToken:
                    self.parseError(new_token["data"], new_token.get("datavars", {}))
                    new_token = None
                else:
                    # Decide whether the token is processed by the current
                    # phase or by the foreign-content (SVG/MathML) phase,
                    # based on the current node and integration points.
                    if (len(self.tree.openElements) == 0 or
                        currentNodeNamespace == self.tree.defaultNamespace or
                        (self.isMathMLTextIntegrationPoint(currentNode) and
                         ((type == StartTagToken and
                           token["name"] not in frozenset(["mglyph", "malignmark"])) or
                          type in (CharactersToken, SpaceCharactersToken))) or
                        (currentNodeNamespace == namespaces["mathml"] and
                         currentNodeName == "annotation-xml" and
                         token["name"] == "svg") or
                        (self.isHTMLIntegrationPoint(currentNode) and
                         type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
                        phase = self.phase
                    else:
                        phase = self.phases["inForeignContent"]

                    if type == CharactersToken:
                        new_token = phase.processCharacters(new_token)
                    elif type == SpaceCharactersToken:
                        new_token = phase.processSpaceCharacters(new_token)
                    elif type == StartTagToken:
                        new_token = phase.processStartTag(new_token)
                    elif type == EndTagToken:
                        new_token = phase.processEndTag(new_token)
                    elif type == CommentToken:
                        new_token = phase.processComment(new_token)
                    elif type == DoctypeToken:
                        new_token = phase.processDoctype(new_token)

                if (type == StartTagToken and token["selfClosing"]
                        and not token["selfClosingAcknowledged"]):
                    self.parseError("non-void-element-with-trailing-solidus",
                                    {"name": token["name"]})

        # When the loop finishes it's EOF
        reprocess = True
        phases = []
        while reprocess:
            phases.append(self.phase)
            reprocess = self.phase.processEOF()
            if reprocess:
                # Guard against an infinite EOF-reprocessing cycle.
                assert self.phase not in phases
    def normalizedTokens(self):
        # Generator: every raw tokenizer token, passed through normalizeToken.
        for token in self.tokenizer:
            yield self.normalizeToken(token)
    def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
        """Parse a HTML document into a well-formed tree

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        self._parse(stream, innerHTML=False, encoding=encoding,
                    parseMeta=parseMeta, useChardet=useChardet)
        return self.tree.getDocument()
    def parseFragment(self, stream, container="div", encoding=None,
                      parseMeta=False, useChardet=True):
        """Parse a HTML fragment into a well-formed tree fragment

        container - name of the element we're setting the innerHTML property
        if set to None, default to 'div'

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        self._parse(stream, True, container=container, encoding=encoding)
        return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars=None):
    """Record a parse error at the current stream position.

    errorcode - key identifying the error message
    datavars  - optional dict of values interpolated into the message

    Raises ParseError immediately when the parser is in strict mode.
    """
    # XXX The idea is to make errorcode mandatory.
    # Fix: `datavars={}` was a shared mutable default; use None sentinel.
    if datavars is None:
        datavars = {}
    self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
    if self.strict:
        raise ParseError
def normalizeToken(self, token):
    """ HTML5 specific normalizations to the token stream """
    if token["type"] == tokenTypes["StartTag"]:
        # The attribute list may contain duplicates; building the dict
        # from the reversed pair list makes the FIRST occurrence win,
        # since earlier pairs overwrite later ones.
        raw_attrs = token["data"]
        token["data"] = dict(reversed(raw_attrs))
    return token
def adjustMathMLAttributes(self, token):
    """Fix the casing of MathML attribute names on a start-tag token."""
    # MathML has exactly one mixed-case attribute to restore.
    attrs = token["data"]
    if "definitionurl" in attrs:
        attrs["definitionURL"] = attrs.pop("definitionurl")
def adjustSVGAttributes(self, token):
    """Restore the mixed-case spellings of SVG attribute names.

    The tokenizer lowercases all attribute names; SVG requires the
    camelCase forms below, so rename any that appear on this token.
    """
    replacements = {
        "attributename": "attributeName",
        "attributetype": "attributeType",
        "basefrequency": "baseFrequency",
        "baseprofile": "baseProfile",
        "calcmode": "calcMode",
        "clippathunits": "clipPathUnits",
        "contentscripttype": "contentScriptType",
        "contentstyletype": "contentStyleType",
        "diffuseconstant": "diffuseConstant",
        "edgemode": "edgeMode",
        "externalresourcesrequired": "externalResourcesRequired",
        "filterres": "filterRes",
        "filterunits": "filterUnits",
        "glyphref": "glyphRef",
        "gradienttransform": "gradientTransform",
        "gradientunits": "gradientUnits",
        "kernelmatrix": "kernelMatrix",
        "kernelunitlength": "kernelUnitLength",
        "keypoints": "keyPoints",
        "keysplines": "keySplines",
        "keytimes": "keyTimes",
        "lengthadjust": "lengthAdjust",
        "limitingconeangle": "limitingConeAngle",
        "markerheight": "markerHeight",
        "markerunits": "markerUnits",
        "markerwidth": "markerWidth",
        "maskcontentunits": "maskContentUnits",
        "maskunits": "maskUnits",
        "numoctaves": "numOctaves",
        "pathlength": "pathLength",
        "patterncontentunits": "patternContentUnits",
        "patterntransform": "patternTransform",
        "patternunits": "patternUnits",
        "pointsatx": "pointsAtX",
        "pointsaty": "pointsAtY",
        "pointsatz": "pointsAtZ",
        "preservealpha": "preserveAlpha",
        "preserveaspectratio": "preserveAspectRatio",
        "primitiveunits": "primitiveUnits",
        "refx": "refX",
        "refy": "refY",
        "repeatcount": "repeatCount",
        "repeatdur": "repeatDur",
        "requiredextensions": "requiredExtensions",
        "requiredfeatures": "requiredFeatures",
        "specularconstant": "specularConstant",
        "specularexponent": "specularExponent",
        "spreadmethod": "spreadMethod",
        "startoffset": "startOffset",
        "stddeviation": "stdDeviation",
        "stitchtiles": "stitchTiles",
        "surfacescale": "surfaceScale",
        "systemlanguage": "systemLanguage",
        "tablevalues": "tableValues",
        "targetx": "targetX",
        "targety": "targetY",
        "textlength": "textLength",
        "viewbox": "viewBox",
        "viewtarget": "viewTarget",
        "xchannelselector": "xChannelSelector",
        "ychannelselector": "yChannelSelector",
        "zoomandpan": "zoomAndPan"
    }
    attrs = token["data"]
    # Snapshot the matching names first, since the loop mutates attrs.
    for lowercased in [name for name in attrs if name in replacements]:
        attrs[replacements[lowercased]] = attrs.pop(lowercased)
def adjustForeignAttributes(self, token):
    """Map namespaced attribute names (xlink:*, xml:*, xmlns*) onto
    (prefix, localName, namespace) tuples on this token."""
    replacements = {
        "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
        "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
        "xlink:href": ("xlink", "href", namespaces["xlink"]),
        "xlink:role": ("xlink", "role", namespaces["xlink"]),
        "xlink:show": ("xlink", "show", namespaces["xlink"]),
        "xlink:title": ("xlink", "title", namespaces["xlink"]),
        "xlink:type": ("xlink", "type", namespaces["xlink"]),
        "xml:base": ("xml", "base", namespaces["xml"]),
        "xml:lang": ("xml", "lang", namespaces["xml"]),
        "xml:space": ("xml", "space", namespaces["xml"]),
        "xmlns": (None, "xmlns", namespaces["xmlns"]),
        "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
    }
    # Fix: iterate over a snapshot of the keys -- the loop body inserts
    # into and deletes from token["data"], and mutating a dict while
    # iterating its live key view is unsafe in Python 3 (the original
    # adjustSVGAttributes already snapshots with list()).
    for originalName in list(token["data"].keys()):
        if originalName in replacements:
            foreignName = replacements[originalName]
            token["data"][foreignName] = token["data"][originalName]
            del token["data"][originalName]
def reparseTokenNormal(self, token):
    # NOTE(review): this looks like dead or broken code -- `token` is
    # never used, and nothing in this file shows HTMLParser having a
    # `parser` attribute, so calling this would presumably raise
    # AttributeError. Confirm against callers before relying on it.
    self.parser.phase()
def resetInsertionMode(self):
    """Pick the correct phase by examining the stack of open elements.

    Implements the spec's "reset the insertion mode appropriately"
    algorithm: walk the open-element stack innermost-first and choose
    the phase that handles content inside the first recognized element.
    """
    # The name of this method is mostly historical. (It's also used in the
    # specification.)
    last = False
    # Element name -> name of the phase that handles its contents.
    newModes = {
        "select": "inSelect",
        "td": "inCell",
        "th": "inCell",
        "tr": "inRow",
        "tbody": "inTableBody",
        "thead": "inTableBody",
        "tfoot": "inTableBody",
        "caption": "inCaption",
        "colgroup": "inColumnGroup",
        "table": "inTable",
        "head": "inBody",
        "body": "inBody",
        "frameset": "inFrameset",
        "html": "beforeHead"
    }
    for node in self.tree.openElements[::-1]:
        nodeName = node.name
        new_phase = None
        if node == self.tree.openElements[0]:
            # Reached the root: only possible when parsing a fragment,
            # where the context element's name decides the mode.
            assert self.innerHTML
            last = True
            nodeName = self.innerHTML
        # Check for conditions that should only happen in the innerHTML
        # case
        if nodeName in ("select", "colgroup", "head", "html"):
            assert self.innerHTML
        if not last and node.namespace != self.tree.defaultNamespace:
            # Foreign (non-HTML-namespace) elements never determine
            # the insertion mode.
            continue
        if nodeName in newModes:
            new_phase = self.phases[newModes[nodeName]]
            break
        elif last:
            # Fallback once the root has been reached.
            new_phase = self.phases["inBody"]
            break
    self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
    """Generic RCDATA/RAWTEXT Parsing algorithm

    contentType - RCDATA or RAWTEXT
    """
    assert contentType in ("RAWTEXT", "RCDATA")

    self.tree.insertElement(token)

    # Put the tokenizer into the matching content-model state so the
    # element's text content is not parsed as markup.
    if contentType == "RAWTEXT":
        next_state = self.tokenizer.rawtextState
    else:
        next_state = self.tokenizer.rcdataState
    self.tokenizer.state = next_state

    # Remember where to return once the text content has been consumed.
    self.originalPhase = self.phase
    self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
    """Logger that records which phase processes each token"""
    type_names = dict((value, key) for key, value in
                      constants.tokenTypes.items())

    def wrapped(self, *args, **kwargs):
        # For process* handlers, append (tokenizer state, parser phase,
        # handling class, handler name, token info) to the parser's log
        # before delegating to the real handler.
        if function.__name__.startswith("process") and len(args) > 0:
            token = args[0]
            # Fix: removed a pointless `try: ... except: raise` wrapper
            # (a bare except that only re-raised) and merged the two
            # identical return statements into one.
            info = {"type": type_names[token['type']]}
            if token['type'] in constants.tagTokenTypes:
                info["name"] = token['name']
            self.parser.log.append((self.parser.tokenizer.state.__name__,
                                    self.parser.phase.__class__.__name__,
                                    self.__class__.__name__,
                                    function.__name__,
                                    info))
        return function(self, *args, **kwargs)
    return wrapped
def getMetaclass(use_metaclass, metaclass_func):
    """Return the metaclass for Phase classes: the decorating metaclass
    when debug logging is on, plain ``type`` otherwise."""
    return method_decorator_metaclass(metaclass_func) if use_metaclass else type
class Phase(with_metaclass(getMetaclass(debug, log))):
    """Base class for helper object that implements each phase of processing
    """

    def __init__(self, parser, tree):
        self.parser = parser
        self.tree = tree

    def processEOF(self):
        # Each concrete phase must decide what end-of-file means for it.
        raise NotImplementedError

    def processComment(self, token):
        # For most phases the following is correct. Where it's not it will be
        # overridden.
        self.tree.insertComment(token, self.tree.openElements[-1])

    def processDoctype(self, token):
        self.parser.parseError("unexpected-doctype")

    def processCharacters(self, token):
        self.tree.insertText(token["data"])

    def processSpaceCharacters(self, token):
        self.tree.insertText(token["data"])

    def processStartTag(self, token):
        handler = self.startTagHandler[token["name"]]
        return handler(token)

    def startTagHtml(self, token):
        if token["name"] == "html" and not self.parser.firstStartTag:
            self.parser.parseError("non-html-root")
        # XXX Need a check here to see if the first start tag token emitted is
        # this token... If it's not, invoke self.parser.parseError().
        root = self.tree.openElements[0]
        for attr_name, attr_value in token["data"].items():
            if attr_name not in root.attributes:
                root.attributes[attr_name] = attr_value
        self.parser.firstStartTag = False

    def processEndTag(self, token):
        handler = self.endTagHandler[token["name"]]
        return handler(token)
class InitialPhase(Phase):
    """Phase before the DOCTYPE has been seen.

    Determines the document's compatibility mode ("no quirks", "quirks"
    or "limited quirks") from the DOCTYPE token, then moves to the
    "before html" phase.
    """
    def processSpaceCharacters(self, token):
        # Whitespace before the doctype is dropped.
        pass

    def processComment(self, token):
        self.tree.insertComment(token, self.tree.document)

    def processDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        correct = token["correct"]

        # Anything other than a bare <!DOCTYPE html> (optionally with the
        # "about:legacy-compat" system id) is reported as unknown.
        if (name != "html" or publicId is not None or
                systemId is not None and systemId != "about:legacy-compat"):
            self.parser.parseError("unknown-doctype")

        if publicId is None:
            publicId = ""

        self.tree.insertDoctype(token)

        # Public identifiers are compared case-insensitively.
        if publicId != "":
            publicId = publicId.translate(asciiUpper2Lower)

        # The prefix lists below are the legacy-DOCTYPE quirks-mode table
        # from the HTML specification; do not reorder -- the `or`/`and`
        # precedence in this condition is deliberate.
        if (not correct or token["name"] != "html"
            or publicId.startswith(
                ("+//silmaril//dtd html pro v0r11 19970101//",
                 "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
                 "-//as//dtd html 3.0 aswedit + extensions//",
                 "-//ietf//dtd html 2.0 level 1//",
                 "-//ietf//dtd html 2.0 level 2//",
                 "-//ietf//dtd html 2.0 strict level 1//",
                 "-//ietf//dtd html 2.0 strict level 2//",
                 "-//ietf//dtd html 2.0 strict//",
                 "-//ietf//dtd html 2.0//",
                 "-//ietf//dtd html 2.1e//",
                 "-//ietf//dtd html 3.0//",
                 "-//ietf//dtd html 3.2 final//",
                 "-//ietf//dtd html 3.2//",
                 "-//ietf//dtd html 3//",
                 "-//ietf//dtd html level 0//",
                 "-//ietf//dtd html level 1//",
                 "-//ietf//dtd html level 2//",
                 "-//ietf//dtd html level 3//",
                 "-//ietf//dtd html strict level 0//",
                 "-//ietf//dtd html strict level 1//",
                 "-//ietf//dtd html strict level 2//",
                 "-//ietf//dtd html strict level 3//",
                 "-//ietf//dtd html strict//",
                 "-//ietf//dtd html//",
                 "-//metrius//dtd metrius presentational//",
                 "-//microsoft//dtd internet explorer 2.0 html strict//",
                 "-//microsoft//dtd internet explorer 2.0 html//",
                 "-//microsoft//dtd internet explorer 2.0 tables//",
                 "-//microsoft//dtd internet explorer 3.0 html strict//",
                 "-//microsoft//dtd internet explorer 3.0 html//",
                 "-//microsoft//dtd internet explorer 3.0 tables//",
                 "-//netscape comm. corp.//dtd html//",
                 "-//netscape comm. corp.//dtd strict html//",
                 "-//o'reilly and associates//dtd html 2.0//",
                 "-//o'reilly and associates//dtd html extended 1.0//",
                 "-//o'reilly and associates//dtd html extended relaxed 1.0//",
                 "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
                 "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
                 "-//spyglass//dtd html 2.0 extended//",
                 "-//sq//dtd html 2.0 hotmetal + extensions//",
                 "-//sun microsystems corp.//dtd hotjava html//",
                 "-//sun microsystems corp.//dtd hotjava strict html//",
                 "-//w3c//dtd html 3 1995-03-24//",
                 "-//w3c//dtd html 3.2 draft//",
                 "-//w3c//dtd html 3.2 final//",
                 "-//w3c//dtd html 3.2//",
                 "-//w3c//dtd html 3.2s draft//",
                 "-//w3c//dtd html 4.0 frameset//",
                 "-//w3c//dtd html 4.0 transitional//",
                 "-//w3c//dtd html experimental 19960712//",
                 "-//w3c//dtd html experimental 970421//",
                 "-//w3c//dtd w3 html//",
                 "-//w3o//dtd w3 html 3.0//",
                 "-//webtechs//dtd mozilla html 2.0//",
                 "-//webtechs//dtd mozilla html//"))
            or publicId in
                ("-//w3o//dtd w3 html strict 3.0//en//",
                 "-/w3c/dtd html 4.0 transitional/en",
                 "html")
            or publicId.startswith(
                ("-//w3c//dtd html 4.01 frameset//",
                 "-//w3c//dtd html 4.01 transitional//")) and
                systemId is None
                or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
            self.parser.compatMode = "quirks"
        elif (publicId.startswith(
                ("-//w3c//dtd xhtml 1.0 frameset//",
                 "-//w3c//dtd xhtml 1.0 transitional//"))
                or publicId.startswith(
                    ("-//w3c//dtd html 4.01 frameset//",
                     "-//w3c//dtd html 4.01 transitional//")) and
                systemId is not None):
            # HTML 4.01 doctypes WITH a system id are only "limited quirks".
            self.parser.compatMode = "limited quirks"

        self.parser.phase = self.parser.phases["beforeHtml"]

    def anythingElse(self):
        # Missing doctype: default to full quirks mode.
        self.parser.compatMode = "quirks"
        self.parser.phase = self.parser.phases["beforeHtml"]

    def processCharacters(self, token):
        self.parser.parseError("expected-doctype-but-got-chars")
        self.anythingElse()
        return token

    def processStartTag(self, token):
        self.parser.parseError("expected-doctype-but-got-start-tag",
                               {"name": token["name"]})
        self.anythingElse()
        return token

    def processEndTag(self, token):
        self.parser.parseError("expected-doctype-but-got-end-tag",
                               {"name": token["name"]})
        self.anythingElse()
        return token

    def processEOF(self):
        self.parser.parseError("expected-doctype-but-got-eof")
        self.anythingElse()
        return True
class BeforeHtmlPhase(Phase):
    """Phase before the root <html> element has been created."""
    # helper methods
    def insertHtmlElement(self):
        # Synthesize the root element and continue in "before head".
        self.tree.insertRoot(impliedTagToken("html", "StartTag"))
        self.parser.phase = self.parser.phases["beforeHead"]

    # other
    def processEOF(self):
        self.insertHtmlElement()
        return True

    def processComment(self, token):
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        # Whitespace here is simply dropped.
        pass

    def processCharacters(self, token):
        self.insertHtmlElement()
        return token

    def processStartTag(self, token):
        if token["name"] == "html":
            self.parser.firstStartTag = True
        self.insertHtmlElement()
        return token

    def processEndTag(self, token):
        # Only these end tags imply the root element; others are errors
        # and the token is dropped.
        if token["name"] in ("head", "body", "html", "br"):
            self.insertHtmlElement()
            return token
        self.parser.parseError("unexpected-end-tag-before-html",
                               {"name": token["name"]})
class BeforeHeadPhase(Phase):
    """Phase after the root element but before <head> has been opened."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        start_handlers = [
            ("html", self.startTagHtml),
            ("head", self.startTagHead),
        ]
        self.startTagHandler = utils.MethodDispatcher(start_handlers)
        self.startTagHandler.default = self.startTagOther

        end_handlers = [
            (("head", "body", "html", "br"), self.endTagImplyHead),
        ]
        self.endTagHandler = utils.MethodDispatcher(end_handlers)
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # EOF before <head>: behave as though <head> had been opened.
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return True

    def processSpaceCharacters(self, token):
        # Whitespace here is simply dropped.
        pass

    def processCharacters(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagHead(self, token):
        self.tree.insertElement(token)
        self.tree.headPointer = self.tree.openElements[-1]
        self.parser.phase = self.parser.phases["inHead"]

    def startTagOther(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return token

    def endTagImplyHead(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        return token

    def endTagOther(self, token):
        self.parser.parseError("end-tag-after-implied-root",
                               {"name": token["name"]})
class InHeadPhase(Phase):
    """Phase for content inside the <head> element."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("title", self.startTagTitle),
            (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
            ("script", self.startTagScript),
            (("base", "basefont", "bgsound", "command", "link"),
             self.startTagBaseLinkCommand),
            ("meta", self.startTagMeta),
            ("head", self.startTagHead)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = utils.MethodDispatcher([
            ("head", self.endTagHead),
            (("br", "html", "body"), self.endTagHtmlBodyBr)
        ])
        self.endTagHandler.default = self.endTagOther

    # the real thing
    def processEOF(self):
        self.anythingElse()
        return True

    def processCharacters(self, token):
        # Text ends the head; reprocess the token in the next phase.
        self.anythingElse()
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagHead(self, token):
        self.parser.parseError("two-heads-are-not-better-than-one")

    def startTagBaseLinkCommand(self, token):
        # Void elements: insert and immediately pop.
        self.tree.insertElement(token)
        self.tree.openElements.pop()
        token["selfClosingAcknowledged"] = True

    def startTagMeta(self, token):
        self.tree.insertElement(token)
        self.tree.openElements.pop()
        token["selfClosingAcknowledged"] = True

        attributes = token["data"]
        # A <meta> element may change the document encoding, but only
        # while the current encoding is still tentative.
        if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
            if "charset" in attributes:
                self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
            elif ("content" in attributes and
                  "http-equiv" in attributes and
                  attributes["http-equiv"].lower() == "content-type"):
                # Encoding it as UTF-8 here is a hack, as really we should pass
                # the abstract Unicode string, and just use the
                # ContentAttrParser on that, but using UTF-8 allows all chars
                # to be encoded and as a ASCII-superset works.
                data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
                parser = inputstream.ContentAttrParser(data)
                codec = parser.parse()
                self.parser.tokenizer.stream.changeEncoding(codec)

    def startTagTitle(self, token):
        # <title> content is RCDATA (entities decoded, no markup).
        self.parser.parseRCDataRawtext(token, "RCDATA")

    def startTagNoScriptNoFramesStyle(self, token):
        # Need to decide whether to implement the scripting-disabled case
        self.parser.parseRCDataRawtext(token, "RAWTEXT")

    def startTagScript(self, token):
        # Switch the tokenizer to script-data mode and park the current
        # phase so the "text" phase can restore it afterwards.
        self.tree.insertElement(token)
        self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
        self.parser.originalPhase = self.parser.phase
        self.parser.phase = self.parser.phases["text"]

    def startTagOther(self, token):
        self.anythingElse()
        return token

    def endTagHead(self, token):
        node = self.parser.tree.openElements.pop()
        assert node.name == "head", "Expected head got %s" % node.name
        self.parser.phase = self.parser.phases["afterHead"]

    def endTagHtmlBodyBr(self, token):
        self.anythingElse()
        return token

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def anythingElse(self):
        # Any unexpected token closes the head implicitly.
        self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
    """Phase between </head> and the start of <body> or <frameset>."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        start_handlers = [
            ("html", self.startTagHtml),
            ("body", self.startTagBody),
            ("frameset", self.startTagFrameset),
            (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
              "style", "title"),
             self.startTagFromHead),
            ("head", self.startTagHead),
        ]
        self.startTagHandler = utils.MethodDispatcher(start_handlers)
        self.startTagHandler.default = self.startTagOther

        end_handlers = [
            (("body", "html", "br"), self.endTagHtmlBodyBr),
        ]
        self.endTagHandler = utils.MethodDispatcher(end_handlers)
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        self.anythingElse()
        return True

    def processCharacters(self, token):
        # Text implies <body>; reprocess the token there.
        self.anythingElse()
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagBody(self, token):
        self.parser.framesetOK = False
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inBody"]

    def startTagFrameset(self, token):
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inFrameset"]

    def startTagFromHead(self, token):
        # Head-only elements appearing after </head>: reprocess them with
        # the head temporarily pushed back onto the open-element stack.
        self.parser.parseError("unexpected-start-tag-out-of-my-head",
                               {"name": token["name"]})
        self.tree.openElements.append(self.tree.headPointer)
        self.parser.phases["inHead"].processStartTag(token)
        for node in reversed(self.tree.openElements):
            if node.name == "head":
                self.tree.openElements.remove(node)
                break

    def startTagHead(self, token):
        self.parser.parseError("unexpected-start-tag", {"name": token["name"]})

    def startTagOther(self, token):
        self.anythingElse()
        return token

    def endTagHtmlBodyBr(self, token):
        self.anythingElse()
        return token

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def anythingElse(self):
        # Insert an implied <body> and move on.
        self.tree.insertElement(impliedTagToken("body", "StartTag"))
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=
token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
        def startTagMath(self, token):
            """Open a MathML element: adjust attributes, tag it with the
            MathML namespace, and honour self-closing start tags."""
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustMathMLAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["mathml"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
        def startTagSvg(self, token):
            """Open an SVG element: adjust attributes, tag it with the SVG
            namespace, and honour self-closing start tags."""
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["svg"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
        def startTagMisplaced(self, token):
            """Elements that should be children of other elements that have a
            different insertion mode; here they are ignored:
            "caption", "col", "colgroup", "frame", "frameset", "head",
            "option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
            "tr", "noscript".

            Only a parse error is emitted; the token itself is dropped.
            """
            self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
        def startTagOther(self, token):
            """Default start-tag handling: reconstruct active formatting
            elements, then insert the element normally."""
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
        def endTagP(self, token):
            """Close a <p> element ("button" scope is used per the spec)."""
            if not self.tree.elementInScope("p", variant="button"):
                # No open <p>: report an error, synthesise one, then close it.
                self.startTagCloseP(impliedTagToken("p", "StartTag"))
                self.parser.parseError("unexpected-end-tag", {"name": "p"})
                self.endTagP(impliedTagToken("p", "EndTag"))
            else:
                self.tree.generateImpliedEndTags("p")
                if self.tree.openElements[-1].name != "p":
                    self.parser.parseError("unexpected-end-tag", {"name": "p"})
                # Pop up to and including the open <p>.
                node = self.tree.openElements.pop()
                while node.name != "p":
                    node = self.tree.openElements.pop()
        def endTagBody(self, token):
            """Close <body>: report any unexpected still-open elements, then
            switch to the "afterBody" phase (nothing is popped here)."""
            if not self.tree.elementInScope("body"):
                self.parser.parseError()
                return
            elif self.tree.openElements[-1].name != "body":
                # Anything still open other than the elements below is an error.
                for node in self.tree.openElements[2:]:
                    if node.name not in frozenset(("dd", "dt", "li", "optgroup",
                                                   "option", "p", "rp", "rt",
                                                   "tbody", "td", "tfoot",
                                                   "th", "thead", "tr", "body",
                                                   "html")):
                        # Not sure this is the correct name for the parse error
                        self.parser.parseError(
                            "expected-one-end-tag-but-got-another",
                            {"expectedName": "body", "gotName": node.name})
                        break
            self.parser.phase = self.parser.phases["afterBody"]
        def endTagHtml(self, token):
            """</html> in body: act as </body>, then return the token so it is
            reprocessed in the "afterBody" phase."""
            # We repeat the test for the body end tag token being ignored here
            if self.tree.elementInScope("body"):
                self.endTagBody(impliedTagToken("body"))
                return token
        def endTagBlock(self, token):
            """Close a block-level element (address, div, pre, ...) by popping
            the stack up to and including the named element, if in scope."""
            # Put us back in the right whitespace handling mode
            if token["name"] == "pre":
                self.processSpaceCharacters = self.processSpaceCharactersNonPre
            inScope = self.tree.elementInScope(token["name"])
            if inScope:
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if inScope:
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagForm(self, token):
            """Close </form> via the form pointer rather than the stack; the
            form element is removed in place, not popped."""
            node = self.tree.formPointer
            # The form pointer is always cleared, even on error.
            self.tree.formPointer = None
            if node is None or not self.tree.elementInScope(node):
                self.parser.parseError("unexpected-end-tag",
                                       {"name": "form"})
            else:
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1] != node:
                    self.parser.parseError("end-tag-too-early-ignored",
                                           {"name": "form"})
                self.tree.openElements.remove(node)
        def endTagListItem(self, token):
            """Close a list item (</li>, </dd>, </dt>); <li> uses the "list
            item" scope variant."""
            if token["name"] == "li":
                variant = "list"
            else:
                variant = None
            if not self.tree.elementInScope(token["name"], variant=variant):
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
            else:
                # The element itself is excluded from implied end tags so we
                # can detect whether it was really the innermost one.
                self.tree.generateImpliedEndTags(exclude=token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError(
                        "end-tag-too-early",
                        {"name": token["name"]})
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagHeading(self, token):
            """Close a heading (</h1>..</h6>): any open heading element in
            scope is closed, even if its name differs from the token's."""
            for item in headingElements:
                if self.tree.elementInScope(item):
                    self.tree.generateImpliedEndTags()
                    break
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            for item in headingElements:
                if self.tree.elementInScope(item):
                    # Pop until a heading element (of any level) is removed.
                    item = self.tree.openElements.pop()
                    while item.name not in headingElements:
                        item = self.tree.openElements.pop()
                    break
        def endTagFormatting(self, token):
            """The much-feared adoption agency algorithm.

            Handles end tags for formatting elements (<a>, <b>, <i>, ...)
            that may be mis-nested, restructuring the tree by cloning and
            re-parenting nodes. Step numbering in the comments follows the
            spec revision referenced below.
            """
            # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
            # XXX Better parseError messages appreciated.
            # Step 1
            outerLoopCounter = 0
            # Step 2
            while outerLoopCounter < 8:
                # Step 3
                outerLoopCounter += 1
                # Step 4:
                # Let the formatting element be the last element in
                # the list of active formatting elements that:
                # - is between the end of the list and the last scope
                # marker in the list, if any, or the start of the list
                # otherwise, and
                # - has the same tag name as the token.
                formattingElement = self.tree.elementInActiveFormattingElements(
                    token["name"])
                if (not formattingElement or
                    (formattingElement in self.tree.openElements and
                     not self.tree.elementInScope(formattingElement.name))):
                    # If there is no such node, then abort these steps
                    # and instead act as described in the "any other
                    # end tag" entry below.
                    self.endTagOther(token)
                    return
                # Otherwise, if there is such a node, but that node is
                # not in the stack of open elements, then this is a
                # parse error; remove the element from the list, and
                # abort these steps.
                elif formattingElement not in self.tree.openElements:
                    self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
                    self.tree.activeFormattingElements.remove(formattingElement)
                    return
                # Otherwise, if there is such a node, and that node is
                # also in the stack of open elements, but the element
                # is not in scope, then this is a parse error; ignore
                # the token, and abort these steps.
                elif not self.tree.elementInScope(formattingElement.name):
                    self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
                    return
                # Otherwise, there is a formatting element and that
                # element is in the stack and is in scope. If the
                # element is not the current node, this is a parse
                # error. In any case, proceed with the algorithm as
                # written in the following steps.
                else:
                    if formattingElement != self.tree.openElements[-1]:
                        self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
                # Step 5:
                # Let the furthest block be the topmost node in the
                # stack of open elements that is lower in the stack
                # than the formatting element, and is an element in
                # the special category. There might not be one.
                afeIndex = self.tree.openElements.index(formattingElement)
                furthestBlock = None
                for element in self.tree.openElements[afeIndex:]:
                    if element.nameTuple in specialElements:
                        furthestBlock = element
                        break
                # Step 6:
                # If there is no furthest block, then the UA must
                # first pop all the nodes from the bottom of the stack
                # of open elements, from the current node up to and
                # including the formatting element, then remove the
                # formatting element from the list of active
                # formatting elements, and finally abort these steps.
                if furthestBlock is None:
                    element = self.tree.openElements.pop()
                    while element != formattingElement:
                        element = self.tree.openElements.pop()
                    self.tree.activeFormattingElements.remove(element)
                    return
                # Step 7
                commonAncestor = self.tree.openElements[afeIndex - 1]
                # Step 8:
                # The bookmark is supposed to help us identify where to reinsert
                # nodes in step 15. We have to ensure that we reinsert nodes after
                # the node before the active formatting element. Note the bookmark
                # can move in step 9.7
                bookmark = self.tree.activeFormattingElements.index(formattingElement)
                # Step 9
                lastNode = node = furthestBlock
                innerLoopCounter = 0
                index = self.tree.openElements.index(node)
                while innerLoopCounter < 3:
                    innerLoopCounter += 1
                    # Node is element before node in open elements
                    index -= 1
                    node = self.tree.openElements[index]
                    if node not in self.tree.activeFormattingElements:
                        self.tree.openElements.remove(node)
                        continue
                    # Step 9.6
                    if node == formattingElement:
                        break
                    # Step 9.7
                    if lastNode == furthestBlock:
                        bookmark = self.tree.activeFormattingElements.index(node) + 1
                    # Step 9.8
                    clone = node.cloneNode()
                    # Replace node with clone
                    self.tree.activeFormattingElements[
                        self.tree.activeFormattingElements.index(node)] = clone
                    self.tree.openElements[
                        self.tree.openElements.index(node)] = clone
                    node = clone
                    # Step 9.9
                    # Remove lastNode from its parents, if any
                    if lastNode.parent:
                        lastNode.parent.removeChild(lastNode)
                    node.appendChild(lastNode)
                    # Step 9.10
                    lastNode = node
                # Step 10
                # Foster parent lastNode if commonAncestor is a
                # table, tbody, tfoot, thead, or tr we need to foster
                # parent the lastNode
                if lastNode.parent:
                    lastNode.parent.removeChild(lastNode)
                if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
                    parent, insertBefore = self.tree.getTableMisnestedNodePosition()
                    parent.insertBefore(lastNode, insertBefore)
                else:
                    commonAncestor.appendChild(lastNode)
                # Step 11
                clone = formattingElement.cloneNode()
                # Step 12
                furthestBlock.reparentChildren(clone)
                # Step 13
                furthestBlock.appendChild(clone)
                # Step 14
                self.tree.activeFormattingElements.remove(formattingElement)
                self.tree.activeFormattingElements.insert(bookmark, clone)
                # Step 15
                self.tree.openElements.remove(formattingElement)
                self.tree.openElements.insert(
                    self.tree.openElements.index(furthestBlock) + 1, clone)
        def endTagAppletMarqueeObject(self, token):
            """Close </applet>, </marquee>, </object>: pop to the element and
            clear active formatting elements back to the last marker."""
            if self.tree.elementInScope(token["name"]):
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if self.tree.elementInScope(token["name"]):
                element = self.tree.openElements.pop()
                while element.name != token["name"]:
                    element = self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
        def endTagBr(self, token):
            """</br> is a parse error; it is treated as a <br> start tag
            (insert and immediately pop a br element)."""
            self.parser.parseError("unexpected-end-tag-treated-as",
                                   {"originalName": "br", "newName": "br element"})
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(impliedTagToken("br", "StartTag"))
            self.tree.openElements.pop()
        def endTagOther(self, token):
            """Any other end tag: walk the open-element stack from the top;
            close a matching element, but stop (ignoring the token) if a
            special-category element is found first."""
            for node in self.tree.openElements[::-1]:
                if node.name == token["name"]:
                    self.tree.generateImpliedEndTags(exclude=token["name"])
                    if self.tree.openElements[-1].name != token["name"]:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                    # Pop everything up to and including the matched node.
                    while self.tree.openElements.pop() != node:
                        pass
                    break
                else:
                    if node.nameTuple in specialElements:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                        break
    class TextPhase(Phase):
        """The "text" insertion mode, used while inside RCDATA/RAWTEXT
        elements (e.g. <script>, <style>, <textarea>): all content is
        inserted as text until the matching end tag or EOF, at which point
        the parser returns to the phase saved in ``parser.originalPhase``.
        """
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            # No start tags are valid here; see startTagOther's assert.
            self.startTagHandler = utils.MethodDispatcher([])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("script", self.endTagScript)])
            self.endTagHandler.default = self.endTagOther
        def processCharacters(self, token):
            self.tree.insertText(token["data"])
        def processEOF(self):
            # EOF inside RCDATA/RAWTEXT content is always a parse error.
            self.parser.parseError("expected-named-closing-tag-but-got-eof",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
            return True
        def startTagOther(self, token):
            assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
        def endTagScript(self, token):
            node = self.tree.openElements.pop()
            assert node.name == "script"
            self.parser.phase = self.parser.originalPhase
            # The rest of this method is all stuff that only happens if
            # document.write works
        def endTagOther(self, token):
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
    class InTablePhase(Phase):
        """The "in table" insertion mode.

        Content that does not belong directly inside <table> is either
        redirected to the appropriate sub-phase (caption, colgroup, row
        groups) or handled with "table voodoo": processed by the inBody
        phase with ``tree.insertFromTable`` set, so misplaced nodes are
        foster-parented out of the table.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("caption", self.startTagCaption),
                ("colgroup", self.startTagColgroup),
                ("col", self.startTagCol),
                (("tbody", "tfoot", "thead"), self.startTagRowGroup),
                (("td", "th", "tr"), self.startTagImplyTbody),
                ("table", self.startTagTable),
                (("style", "script"), self.startTagStyleScript),
                ("input", self.startTagInput),
                ("form", self.startTagForm)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "tbody", "td",
                  "tfoot", "th", "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods
        def clearStackToTableContext(self):
            # "clear the stack back to a table context"
            while self.tree.openElements[-1].name not in ("table", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #  {"name":  self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            # When the current node is <html> it's an innerHTML case
        # processing methods
        def processEOF(self):
            if self.tree.openElements[-1].name != "html":
                self.parser.parseError("eof-in-table")
            else:
                assert self.parser.innerHTML
            # Stop parsing
        def processSpaceCharacters(self, token):
            # Character data is buffered by the inTableText phase, which
            # decides later whether it is pure whitespace.
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processSpaceCharacters(token)
        def processCharacters(self, token):
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processCharacters(token)
        def insertText(self, token):
            # If we get here there must be at least one non-whitespace character
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processCharacters(token)
            self.tree.insertFromTable = False
        def startTagCaption(self, token):
            self.clearStackToTableContext()
            self.tree.activeFormattingElements.append(Marker)
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCaption"]
        def startTagColgroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inColumnGroup"]
        def startTagCol(self, token):
            # A bare <col> implies a <colgroup>; reprocess the token there.
            self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
            return token
        def startTagRowGroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inTableBody"]
        def startTagImplyTbody(self, token):
            # <td>/<th>/<tr> directly in a table imply a <tbody>.
            self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
            return token
        def startTagTable(self, token):
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "table", "endName": "table"})
            self.parser.phase.processEndTag(impliedTagToken("table"))
            if not self.parser.innerHTML:
                return token
        def startTagStyleScript(self, token):
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagInput(self, token):
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                # hidden inputs are allowed (as a parse error) inside tables.
                self.parser.parseError("unexpected-hidden-input-in-table")
                self.tree.insertElement(token)
                # XXX associate with form
                self.tree.openElements.pop()
            else:
                self.startTagOther(token)
        def startTagForm(self, token):
            self.parser.parseError("unexpected-form-in-table")
            if self.tree.formPointer is None:
                # Only honoured when no form is already open.
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
                self.tree.openElements.pop()
        def startTagOther(self, token):
            self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processStartTag(token)
            self.tree.insertFromTable = False
        def endTagTable(self, token):
            if self.tree.elementInScope("table", variant="table"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "table":
                    self.parser.parseError("end-tag-too-early-named",
                                           {"gotName": "table",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "table":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.parser.resetInsertionMode()
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processEndTag(token)
            self.tree.insertFromTable = False
    class InTableTextPhase(Phase):
        """The "in table text" insertion mode.

        Buffers character tokens seen inside a table. On any non-character
        token (or EOF) the buffer is flushed: pure-whitespace runs are
        inserted normally, while runs containing non-whitespace go through
        the inTable phase's foster-parenting path. Control then returns to
        ``self.originalPhase``.
        """
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.originalPhase = None
            self.characterTokens = []
        def flushCharacters(self):
            data = "".join([item["data"] for item in self.characterTokens])
            if any([item not in spaceCharacters for item in data]):
                # Non-whitespace present: foster-parent via inTable.insertText.
                token = {"type": tokenTypes["Characters"], "data": data}
                self.parser.phases["inTable"].insertText(token)
            elif data:
                self.tree.insertText(data)
            self.characterTokens = []
        def processComment(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token
        def processEOF(self):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return True
        def processCharacters(self, token):
            # U+0000 characters are dropped in table text.
            if token["data"] == "\u0000":
                return
            self.characterTokens.append(token)
        def processSpaceCharacters(self, token):
            # pretty sure we should never reach here
            self.characterTokens.append(token)
            # assert False
        def processStartTag(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token
        def processEndTag(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token
    class InCaptionPhase(Phase):
        """The "in caption" insertion mode (inside <caption>).

        Mostly defers to the inBody phase; table-structure tags implicitly
        close the caption and are then reprocessed in the table phases.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableElement)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("caption", self.endTagCaption),
                ("table", self.endTagTable),
                (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagCaption(self):
            # True when there is no <caption> in table scope (innerHTML case).
            return not self.tree.elementInScope("caption", variant="table")
        def processEOF(self):
            self.parser.phases["inBody"].processEOF()
        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)
        def startTagTableElement(self, token):
            self.parser.parseError()
            # XXX Have to duplicate logic here to find out if the tag is ignored
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token
        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def endTagCaption(self, token):
            if not self.ignoreEndTagCaption():
                # AT this code is quite similar to endTagTable in "InTable"
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "caption":
                    self.parser.parseError("expected-one-end-tag-but-got-another",
                                           {"gotName": "caption",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "caption":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inTable"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagTable(self, token):
            self.parser.parseError()
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
    class InColumnGroupPhase(Phase):
        """The "in column group" insertion mode (inside <colgroup>).

        Only <col> is valid content; anything else implicitly closes the
        colgroup and is reprocessed in the "inTable" phase.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-column
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("col", self.startTagCol)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("colgroup", self.endTagColgroup),
                ("col", self.endTagCol)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagColgroup(self):
            # Current node being <html> means the innerHTML case.
            return self.tree.openElements[-1].name == "html"
        def processEOF(self):
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML
                return
            else:
                ignoreEndTag = self.ignoreEndTagColgroup()
                self.endTagColgroup(impliedTagToken("colgroup"))
                if not ignoreEndTag:
                    return True
        def processCharacters(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def startTagCol(self, token):
            # <col> is a void element: insert and pop immediately.
            self.tree.insertElement(token)
            self.tree.openElements.pop()
        def startTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def endTagColgroup(self, token):
            if self.ignoreEndTagColgroup():
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
            else:
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTable"]
        def endTagCol(self, token):
            self.parser.parseError("no-end-tag", {"name": "col"})
        def endTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
    class InTableBodyPhase(Phase):
        """The "in table body" insertion mode (inside tbody/thead/tfoot).

        Rows are opened here; most other content is delegated to the
        "inTable" phase.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("tr", self.startTagTr),
                (("td", "th"), self.startTagTableCell),
                (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
                 self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "td", "th",
                  "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods
        def clearStackToTableBodyContext(self):
            while self.tree.openElements[-1].name not in ("tbody", "tfoot",
                                                          "thead", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #  {"name": self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML
        # the rest
        def processEOF(self):
            self.parser.phases["inTable"].processEOF()
        def processSpaceCharacters(self, token):
            return self.parser.phases["inTable"].processSpaceCharacters(token)
        def processCharacters(self, token):
            return self.parser.phases["inTable"].processCharacters(token)
        def startTagTr(self, token):
            self.clearStackToTableBodyContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inRow"]
        def startTagTableCell(self, token):
            # A cell directly in a row group implies a <tr>.
            self.parser.parseError("unexpected-cell-in-table-body",
                                   {"name": token["name"]})
            self.startTagTr(impliedTagToken("tr", "StartTag"))
            return token
        def startTagTableOther(self, token):
            # XXX AT Any ideas on how to share this with endTagTable?
            if (self.tree.elementInScope("tbody", variant="table") or
                self.tree.elementInScope("thead", variant="table") or
                    self.tree.elementInScope("tfoot", variant="table")):
                self.clearStackToTableBodyContext()
                self.endTagTableRowGroup(
                    impliedTagToken(self.tree.openElements[-1].name))
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def startTagOther(self, token):
            return self.parser.phases["inTable"].processStartTag(token)
        def endTagTableRowGroup(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.clearStackToTableBodyContext()
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTable"]
            else:
                self.parser.parseError("unexpected-end-tag-in-table-body",
                                       {"name": token["name"]})
        def endTagTable(self, token):
            if (self.tree.elementInScope("tbody", variant="table") or
                self.tree.elementInScope("thead", variant="table") or
                    self.tree.elementInScope("tfoot", variant="table")):
                self.clearStackToTableBodyContext()
                self.endTagTableRowGroup(
                    impliedTagToken(self.tree.openElements[-1].name))
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag-in-table-body",
                                   {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inTable"].processEndTag(token)
    class InRowPhase(Phase):
        """The "in row" insertion mode (inside <tr>).

        Opens cells; table-structure tags implicitly close the row, and
        most other content is delegated to the "inTable" phase.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-row
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("td", "th"), self.startTagTableCell),
                (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
                  "tr"), self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("tr", self.endTagTr),
                ("table", self.endTagTable),
                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
                (("body", "caption", "col", "colgroup", "html", "td", "th"),
                 self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods (XXX unify this with other table helper methods)
        def clearStackToTableRowContext(self):
            while self.tree.openElements[-1].name not in ("tr", "html"):
                self.parser.parseError("unexpected-implied-end-tag-in-table-row",
                                       {"name": self.tree.openElements[-1].name})
                self.tree.openElements.pop()
        def ignoreEndTagTr(self):
            # True when no <tr> is in table scope (innerHTML case).
            return not self.tree.elementInScope("tr", variant="table")
        # the rest
        def processEOF(self):
            self.parser.phases["inTable"].processEOF()
        def processSpaceCharacters(self, token):
            return self.parser.phases["inTable"].processSpaceCharacters(token)
        def processCharacters(self, token):
            return self.parser.phases["inTable"].processCharacters(token)
        def startTagTableCell(self, token):
            self.clearStackToTableRowContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCell"]
            # Marker separates the cell's formatting elements from outer ones.
            self.tree.activeFormattingElements.append(Marker)
        def startTagTableOther(self, token):
            ignoreEndTag = self.ignoreEndTagTr()
            self.endTagTr(impliedTagToken("tr"))
            # XXX how are we sure it's always ignored in the innerHTML case?
            if not ignoreEndTag:
                return token
        def startTagOther(self, token):
            return self.parser.phases["inTable"].processStartTag(token)
        def endTagTr(self, token):
            if not self.ignoreEndTagTr():
                self.clearStackToTableRowContext()
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTableBody"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagTable(self, token):
            ignoreEndTag = self.ignoreEndTagTr()
            self.endTagTr(impliedTagToken("tr"))
            # Reprocess the current tag if the tr end tag was not ignored
            # XXX how are we sure it's always ignored in the innerHTML case?
            if not ignoreEndTag:
                return token
        def endTagTableRowGroup(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.endTagTr(impliedTagToken("tr"))
                return token
            else:
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag-in-table-row",
                                   {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inTable"].processEndTag(token)
    class InCellPhase(Phase):
        """The "in cell" insertion mode (inside <td>/<th>).

        Mostly defers to inBody; table-structure tags close the current
        cell first and are then reprocessed.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                (("td", "th"), self.endTagTableCell),
                (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
                (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper
        def closeCell(self):
            # Close whichever cell element (td or th) is currently in scope.
            if self.tree.elementInScope("td", variant="table"):
                self.endTagTableCell(impliedTagToken("td"))
            elif self.tree.elementInScope("th", variant="table"):
                self.endTagTableCell(impliedTagToken("th"))
        # the rest
        def processEOF(self):
            self.parser.phases["inBody"].processEOF()
        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)
        def startTagTableOther(self, token):
            if (self.tree.elementInScope("td", variant="table") or
                    self.tree.elementInScope("th", variant="table")):
                self.closeCell()
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def endTagTableCell(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.tree.generateImpliedEndTags(token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError("unexpected-cell-end-tag",
                                           {"name": token["name"]})
                    while True:
                        node = self.tree.openElements.pop()
                        if node.name == token["name"]:
                            break
                else:
                    self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inRow"]
            else:
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagImply(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.closeCell()
                return token
            else:
                # sometimes innerHTML case
                self.parser.parseError()
        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
    # </optgroup> implicitly closes <option>
    if (self.tree.openElements[-1].name == "option" and
            self.tree.openElements[-2].name == "optgroup"):
        self.tree.openElements.pop()
    # It also closes </optgroup>
    if self.tree.openElements[-1].name == "optgroup":
        self.tree.openElements.pop()
    # But nothing else
    else:
        self.parser.parseError("unexpected-end-tag-in-select",
                               {"name": "optgroup"})
def endTagSelect(self, token):
    # Pop elements up to and including the open <select>, then let the
    # parser pick the appropriate insertion mode again.
    if self.tree.elementInScope("select", variant="select"):
        node = self.tree.openElements.pop()
        while node.name != "select":
            node = self.tree.openElements.pop()
        self.parser.resetInsertionMode()
    else:
        # innerHTML case: no select in scope means we are parsing a
        # fragment; report an error and ignore the tag.
        assert self.parser.innerHTML
        self.parser.parseError()
def endTagOther(self, token):
    # Any other end tag inside a select is a parse error and ignored.
    self.parser.parseError("unexpected-end-tag-in-select",
                           {"name": token["name"]})
class InSelectInTablePhase(Phase):
    """Insertion mode for a <select> that appears inside a table.

    Table-structure tags implicitly close the select (with a parse
    error); every other token is delegated to the "in select" phase.
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.startTagTable)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.endTagTable)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        self.parser.phases["inSelect"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inSelect"].processCharacters(token)

    def startTagTable(self, token):
        # Table markup inside the select: close the select and hand the
        # token back for reprocessing in the new insertion mode.
        self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
        self.endTagOther(impliedTagToken("select"))
        return token

    def startTagOther(self, token):
        return self.parser.phases["inSelect"].processStartTag(token)

    def endTagTable(self, token):
        self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
        # Only close the select (and reprocess the end tag) when the
        # named element is actually in table scope.
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagOther(impliedTagToken("select"))
            return token

    def endTagOther(self, token):
        return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
    """Insertion mode for content in a foreign (SVG or MathML) namespace.

    Handles the HTML "breakout" elements that force the parser back into
    the HTML namespace, and the SVG tag-name / attribute case
    adjustments required because the tokenizer lowercases everything.
    """
    # HTML start tags that break out of foreign content back into HTML.
    breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
                                  "center", "code", "dd", "div", "dl", "dt",
                                  "em", "embed", "h1", "h2", "h3",
                                  "h4", "h5", "h6", "head", "hr", "i", "img",
                                  "li", "listing", "menu", "meta", "nobr",
                                  "ol", "p", "pre", "ruby", "s", "small",
                                  "span", "strong", "strike", "sub", "sup",
                                  "table", "tt", "u", "ul", "var"])

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

    def adjustSVGTagNames(self, token):
        # SVG element names are case-sensitive; map the lowercased
        # tokenizer output back to the canonical mixed-case names.
        replacements = {"altglyph": "altGlyph",
                        "altglyphdef": "altGlyphDef",
                        "altglyphitem": "altGlyphItem",
                        "animatecolor": "animateColor",
                        "animatemotion": "animateMotion",
                        "animatetransform": "animateTransform",
                        "clippath": "clipPath",
                        "feblend": "feBlend",
                        "fecolormatrix": "feColorMatrix",
                        "fecomponenttransfer": "feComponentTransfer",
                        "fecomposite": "feComposite",
                        "feconvolvematrix": "feConvolveMatrix",
                        "fediffuselighting": "feDiffuseLighting",
                        "fedisplacementmap": "feDisplacementMap",
                        "fedistantlight": "feDistantLight",
                        "feflood": "feFlood",
                        "fefunca": "feFuncA",
                        "fefuncb": "feFuncB",
                        "fefuncg": "feFuncG",
                        "fefuncr": "feFuncR",
                        "fegaussianblur": "feGaussianBlur",
                        "feimage": "feImage",
                        "femerge": "feMerge",
                        "femergenode": "feMergeNode",
                        "femorphology": "feMorphology",
                        "feoffset": "feOffset",
                        "fepointlight": "fePointLight",
                        "fespecularlighting": "feSpecularLighting",
                        "fespotlight": "feSpotLight",
                        "fetile": "feTile",
                        "feturbulence": "feTurbulence",
                        "foreignobject": "foreignObject",
                        "glyphref": "glyphRef",
                        "lineargradient": "linearGradient",
                        "radialgradient": "radialGradient",
                        "textpath": "textPath"}
        if token["name"] in replacements:
            token["name"] = replacements[token["name"]]

    def processCharacters(self, token):
        # U+0000 is replaced with U+FFFD; any non-space character data
        # makes a later <frameset> a parse error (framesetOK cleared).
        if token["data"] == "\u0000":
            token["data"] = "\uFFFD"
        elif (self.parser.framesetOK and
              any(char not in spaceCharacters for char in token["data"])):
            self.parser.framesetOK = False
        Phase.processCharacters(self, token)

    def processStartTag(self, token):
        currentNode = self.tree.openElements[-1]
        if (token["name"] in self.breakoutElements or
            (token["name"] == "font" and
             set(token["data"].keys()) & set(["color", "face", "size"]))):
            # Breakout element (or <font> with HTML-ish attributes):
            # pop back to an HTML element or an integration point, then
            # return the token so it is reprocessed there.
            self.parser.parseError("unexpected-html-element-in-foreign-content",
                                   {"name": token["name"]})
            while (self.tree.openElements[-1].namespace !=
                   self.tree.defaultNamespace and
                   not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
                   not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
                self.tree.openElements.pop()
            return token
        else:
            # Stay in the foreign namespace: fix up names/attributes to
            # the host language's conventions before inserting.
            if currentNode.namespace == namespaces["mathml"]:
                self.parser.adjustMathMLAttributes(token)
            elif currentNode.namespace == namespaces["svg"]:
                self.adjustSVGTagNames(token)
                self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = currentNode.namespace
            self.tree.insertElement(token)
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True

    def processEndTag(self, token):
        # Walk the open-elements stack from the top looking for a
        # case-insensitive tag-name match; pop everything above and
        # including it.  An end tag matching an element in the HTML
        # namespace is handed to the current HTML phase instead.
        nodeIndex = len(self.tree.openElements) - 1
        node = self.tree.openElements[-1]
        if node.name != token["name"]:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        while True:
            if node.name.translate(asciiUpper2Lower) == token["name"]:
                # XXX this isn't in the spec but it seems necessary
                if self.parser.phase == self.parser.phases["inTableText"]:
                    self.parser.phase.flushCharacters()
                    self.parser.phase = self.parser.phase.originalPhase
                while self.tree.openElements.pop() != node:
                    assert self.tree.openElements
                new_token = None
                break
            nodeIndex -= 1
            node = self.tree.openElements[nodeIndex]
            if node.namespace != self.tree.defaultNamespace:
                continue
            else:
                new_token = self.parser.phase.processEndTag(token)
                break
        return new_token
class AfterBodyPhase(Phase):
    """Insertion mode after </body> has been seen."""
    def __init__(self,, parser, tree) if False else None  # placeholder
class InFramesetPhase(Phase):
    """Insertion mode inside a <frameset>."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("frameset", self.startTagFrameset),
            ("frame", self.startTagFrame),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("frameset", self.endTagFrameset)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # EOF with anything but the root <html> open is a parse error;
        # otherwise this must be a fragment (innerHTML) parse.
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-frameset")
        else:
            assert self.parser.innerHTML

    def processCharacters(self, token):
        # Character data inside a frameset is a parse error and ignored.
        self.parser.parseError("unexpected-char-in-frameset")

    def startTagFrameset(self, token):
        self.tree.insertElement(token)

    def startTagFrame(self, token):
        # <frame> is a void element: insert and immediately pop it.
        self.tree.insertElement(token)
        self.tree.openElements.pop()

    def startTagNoframes(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-frameset",
                               {"name": token["name"]})

    def endTagFrameset(self, token):
        if self.tree.openElements[-1].name == "html":
            # innerHTML case
            self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
        else:
            self.tree.openElements.pop()
        if (not self.parser.innerHTML and
                self.tree.openElements[-1].name != "frameset"):
            # If we're not in innerHTML mode and the current node is not a
            # "frameset" element (anymore) then switch.
            self.parser.phase = self.parser.phases["afterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-frameset",
                               {"name": token["name"]})
class AfterFramesetPhase(Phase):
    """Insertion mode after </frameset> has been seen."""
    # http://www.whatwg.org/specs/web-apps/current-work/#after3
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("html", self.endTagHtml)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processCharacters(self, token):
        # Character data here is a parse error and ignored.
        self.parser.parseError("unexpected-char-after-frameset")

    def startTagNoframes(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-frameset",
                               {"name": token["name"]})

    def endTagHtml(self, token):
        self.parser.phase = self.parser.phases["afterAfterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-frameset",
                               {"name": token["name"]})
class AfterAfterBodyPhase(Phase):
    """Insertion mode after the final </html> of a body document.

    Only comments, whitespace and a stray <html> start tag are allowed;
    anything else is a parse error that re-enters the "in body" mode.
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments at this point belong to the document itself.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class AfterAfterFramesetPhase(Phase):
    """Insertion mode after the final </html> of a frameset document.

    Unlike AfterAfterBodyPhase, unexpected tokens are reported but do
    NOT switch back to "in body" -- they are simply dropped.
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoFrames)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments at this point belong to the document itself.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagNoFrames(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def impliedTagToken(name, type="EndTag", attributes=None,
                    selfClosing=False):
    """Build a synthetic tag token of the given *type* for *name*."""
    token = {"name": name, "selfClosing": selfClosing}
    token["type"] = tokenTypes[type]
    token["data"] = attributes if attributes is not None else {}
    return token
class ParseError(Exception):
    """Error in parsed document"""
    # NOTE(review): presumably raised only when the parser runs in a
    # strict mode -- confirm against the parser's error handling.
    pass
|
shahar-stratoscale/nova | refs/heads/master | nova/api/openstack/compute/contrib/flavormanage.py | 8 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute import flavors as flavors_api
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
from nova.openstack.common.gettextutils import _
# Policy authorizer for the "compute:flavormanage" extension; raises if
# the request context is not allowed to manage flavors.
authorize = extensions.extension_authorizer('compute', 'flavormanage')
class FlavorManageController(wsgi.Controller):
    """The Flavor Lifecycle API controller for the OpenStack API."""
    _view_builder_class = flavors_view.ViewBuilder

    def __init__(self):
        super(FlavorManageController, self).__init__()

    @wsgi.action("delete")
    def _delete(self, req, id):
        # Delete the flavor identified by flavor id; 404 if not found.
        context = req.environ['nova.context']
        authorize(context)
        try:
            flavor = flavors.get_flavor_by_flavor_id(
                id, ctxt=context, read_deleted="no")
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        # Flavors are destroyed by name, not by id.
        flavors.destroy(flavor['name'])
        return webob.Response(status_int=202)

    @wsgi.action("create")
    @wsgi.serializers(xml=flavors_api.FlavorTemplate)
    def _create(self, req, body):
        # Create a flavor from the request body.  Responds 400 on a
        # malformed body or invalid values, 409 if the name/id exists.
        context = req.environ['nova.context']
        authorize(context)
        if not self.is_valid_body(body, 'flavor'):
            msg = _("Invalid request body")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        vals = body['flavor']
        name = vals.get('name')
        flavorid = vals.get('id')
        memory = vals.get('ram')
        vcpus = vals.get('vcpus')
        root_gb = vals.get('disk')
        ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
        swap = vals.get('swap', 0)
        rxtx_factor = vals.get('rxtx_factor', 1.0)
        is_public = vals.get('os-flavor-access:is_public', True)
        try:
            flavor = flavors.create(name, memory, vcpus, root_gb,
                                    ephemeral_gb=ephemeral_gb,
                                    flavorid=flavorid, swap=swap,
                                    rxtx_factor=rxtx_factor,
                                    is_public=is_public)
            # Cache the new flavor on the request for later view building.
            req.cache_db_flavor(flavor)
        except (exception.FlavorExists,
                exception.FlavorIdExists) as err:
            raise webob.exc.HTTPConflict(explanation=err.format_message())
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
        return self._view_builder.show(req, flavor)
class Flavormanage(extensions.ExtensionDescriptor):
    """Flavor create/delete API support."""
    name = "FlavorManage"
    alias = "os-flavor-manage"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_manage/api/v1.1")
    updated = "2012-01-19T00:00:00+00:00"

    def get_controller_extensions(self):
        # Attach the manage controller to the existing "flavors" resource.
        controller = FlavorManageController()
        extension = extensions.ControllerExtension(self, 'flavors', controller)
        return [extension]
|
52-41-4d/fs-master | refs/heads/master | spec/spec_base.py | 4 | import unittest
import fslib.common as fscommon
class FsTestBase(unittest.TestCase):
    """Base class for fs test cases; enables debug logging once per class."""
    @classmethod
    def setUpClass(cls):
        '''Set up logging; turn on debug messages'''
        fscommon.setup_logger(None, True)
|
doom369/FrameworkBenchmarks | refs/heads/master | frameworks/Python/turbogears/app.py | 51 | from functools import partial
from operator import attrgetter
import os
from random import randint
import sys
import json
from jinja2 import Environment, PackageLoader
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from tg import expose, TGController, AppConfig
from models.Fortune import Fortune
from models.World import World
DBDRIVER = 'mysql'
# Database host comes from the DBHOST environment variable (default
# localhost); credentials are the standard benchmark ones.
DBHOSTNAME = os.environ.get('DBHOST', 'localhost')
DATABASE_URI = '%s://benchmarkdbuser:benchmarkdbpass@%s:3306/hello_world?charset=utf8' % (DBDRIVER, DBHOSTNAME)
db_engine = create_engine(DATABASE_URI)
Session = sessionmaker(bind=db_engine)
# NOTE(review): a single module-global session is shared by all requests.
db_session = Session()
# Jinja environment; templates are loaded from the app package.
env = Environment(loader=PackageLoader("app", "templates"), autoescape=True, auto_reload=False)
def getQueryNum(queryString):
    """Parse the ``queries`` request parameter and clamp it to [1, 500].

    Any value that cannot be converted to an integer -- including None
    or other non-numeric types, which previously raised an uncaught
    TypeError -- falls back to 1.
    """
    try:
        num_queries = int(queryString)
    except (ValueError, TypeError):
        # Malformed or missing input: treat as a single query.
        return 1
    # Clamp into the inclusive [1, 500] window.
    return max(1, min(num_queries, 500))
class RootController(TGController):
    """TechEmpower benchmark endpoints (plaintext/json/db/queries/fortunes).

    Note: Python 2 code (uses ``xrange``).
    """
    @expose(content_type="text/plain")
    def plaintext(self):
        return "Hello, World!"

    @expose("json")
    def json(self):
        return {"message": "Hello, World!"}

    @expose("json")
    def db(self):
        # Single random-row lookup, serialized to a dict.
        wid = randint(1, 10000)
        world = db_session.query(World).get(wid).serialize()
        return world

    @expose("json")
    def updates(self, queries=1):
        # Read `queries` random rows, give each a new random number,
        # then commit the whole batch once.
        num_queries = getQueryNum(queries)
        worlds = []
        rp = partial(randint, 1, 10000)
        ids = [rp() for _ in xrange(num_queries)]
        # sorted id order -- presumably to keep row-lock acquisition
        # deterministic; confirm against the benchmark requirements.
        ids.sort()
        for id in ids:
            world = db_session.query(World).get(id)
            world.randomNumber = rp()
            worlds.append(world.serialize())
        db_session.commit()
        return json.dumps(worlds)

    @expose("json")
    def queries(self, queries=1):
        # Read `queries` random rows without modifying them.
        num_queries = getQueryNum(queries)
        rp = partial(randint, 1, 10000)
        get = db_session.query(World).get
        worlds = [get(rp()).serialize() for _ in xrange(num_queries)]
        return json.dumps(worlds)

    @expose()
    def fortune(self):
        # Load all fortunes, add the extra runtime fortune, sort by
        # message, and render the Jinja template.
        fortunes = db_session.query(Fortune).all()
        fortunes.append(Fortune(id=0, message="Additional fortune added at request time."))
        fortunes.sort(key=attrgetter("message"))
        template = env.get_template("fortunes.html")
        return template.render(fortunes=fortunes)
# Minimal TurboGears application with the jinja renderer enabled;
# tg_app is the underlying WSGI application.
config = AppConfig(minimal=True, root_controller=RootController())
config.renderers.append("jinja")
tg_app = config.make_wsgi_app()
def app(env, start):
    """WSGI entry point: delegate to the TG app and always close the
    module-global DB session afterwards."""
    try:
        return tg_app(env, start)
    finally:
        db_session.close()
|
sestrella/ansible | refs/heads/devel | test/units/modules/network/iosxr/test_iosxr_command.py | 54 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.iosxr import iosxr_command
from units.modules.utils import set_module_args
from .iosxr_module import TestIosxrModule, load_fixture
class TestIosxrCommandModule(TestIosxrModule):
    """Unit tests for the iosxr_command module.

    ``run_commands`` is patched so no device connection is made; command
    output comes from fixture files named after each command.
    """
    module = iosxr_command

    def setUp(self):
        super(TestIosxrCommandModule, self).setUp()
        self.mock_run_commands = patch('ansible.modules.network.iosxr.iosxr_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestIosxrCommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            # Map each command (dict or bare string) to a fixture file,
            # replacing spaces with underscores in the filename.
            module, commands = args
            output = list()
            for item in commands:
                try:
                    command = item['command']
                except Exception:
                    command = item
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file

    def test_iosxr_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Cisco IOS XR Software'))

    def test_iosxr_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Cisco IOS XR Software'))

    def test_iosxr_command_wait_for(self):
        wait_for = 'result[0] contains "Cisco IOS"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()

    def test_iosxr_command_wait_for_fails(self):
        # Unsatisfiable condition: module retries the default 10 times.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)

    def test_iosxr_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_iosxr_command_match_any(self):
        wait_for = ['result[0] contains "Cisco IOS"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_iosxr_command_match_all(self):
        wait_for = ['result[0] contains "Cisco IOS"',
                    'result[0] contains "XR Software"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_iosxr_command_match_all_failure(self):
        wait_for = ['result[0] contains "Cisco IOS"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
|
neopoly/rubyfox-server | refs/heads/master | lib/rubyfox/server/data/lib/Lib/test/zxjdbc/sptest.py | 5 |
# Jython Database Specification API 2.0
#
# $Id: sptest.py 2101 2002-05-10 16:11:41Z bzimmer $
#
# Copyright (c) 2001 brian zimmer <bzimmer@ziclix.com>
from zxtest import zxCoreTestCase
class OracleSPTest(zxCoreTestCase):
    """Stored-procedure tests against an Oracle database.

    setUp (re)creates the ``sptest`` table plus procedures/functions
    covering no-arg, IN, OUT and IN/OUT parameter styles.
    Note: Python 2 syntax (``except AssertionError, e`` and ``print``).
    """
    def setUp(self):
        zxCoreTestCase.setUp(self)
        c = self.cursor()
        try:
            try:
                c.execute("drop table sptest")
            except:
                self.db.rollback()
            try:
                c.execute("create table sptest (x varchar2(20))")
                c.execute("create or replace procedure procnone is begin insert into sptest values ('testing'); end;")
                c.execute("create or replace procedure procin (y in varchar2) is begin insert into sptest values (y); end;")
                c.execute("create or replace procedure procout (y out varchar2) is begin y := 'tested'; end;")
                c.execute("create or replace procedure procinout (y out varchar2, z in varchar2) is begin insert into sptest values (z); y := 'tested'; end;")
                c.execute("create or replace function funcnone return varchar2 is begin return 'tested'; end;")
                c.execute("create or replace function funcin (y varchar2) return varchar2 is begin return y || y; end;")
                c.execute("create or replace function funcout (y out varchar2) return varchar2 is begin y := 'tested'; return 'returned'; end;")
                self.db.commit()
            except:
                self.db.rollback()
                self.fail("procedure creation failed")
            # Fail fast if any procedure/function compiled with errors.
            self.proc_errors("PROC")
            self.proc_errors("FUNC")
        finally:
            c.close()

    def tearDown(self):
        zxCoreTestCase.tearDown(self)

    def proc_errors(self, name):
        # Assert (via user_errors) that no compilation errors exist for
        # objects whose name starts with `name`.
        c = self.cursor()
        try:
            c.execute("select * from user_errors where name like '%s%%'" % (name.upper()))
            errors = c.fetchall()
            try:
                assert not errors, "found errors"
            except AssertionError, e:
                print "printing errors:"
                for a in errors:
                    print a
                raise e
        finally:
            c.close()

    def testCursor(self):
        # A function returning a REF CURSOR should populate the Python
        # cursor with the query's result set.
        c = self.cursor()
        try:
            c.execute("insert into sptest values ('a')")
            c.execute("insert into sptest values ('b')")
            c.execute("insert into sptest values ('c')")
            c.execute("insert into sptest values ('d')")
            c.execute("insert into sptest values ('e')")
            c.execute("""
CREATE OR REPLACE PACKAGE types
AS
TYPE ref_cursor IS REF CURSOR;
END;
""")
            c.execute("""
CREATE OR REPLACE FUNCTION funccur(v_x IN VARCHAR)
RETURN types.ref_cursor
AS
funccur_cursor types.ref_cursor;
BEGIN
OPEN funccur_cursor FOR
SELECT x FROM sptest WHERE x < v_x;
RETURN funccur_cursor;
END;
""")
            self.proc_errors("funccur")
            c.callproc("funccur", ("z",))
            data = c.fetchall()
            self.assertEquals(5, len(data))
            c.callproc("funccur", ("c",))
            data = c.fetchall()
            self.assertEquals(2, len(data))
        finally:
            c.close()

    def testProcin(self):
        c = self.cursor()
        try:
            params = ["testProcin"]
            c.callproc("procin", params)
            self.assertEquals([], c.fetchall())
            c.execute("select * from sptest")
            self.assertEquals(1, len(c.fetchall()))
        finally:
            c.close()

    def testProcinout(self):
        c = self.cursor()
        try:
            params = [None, "testing"]
            c.callproc("procinout", params)
            data = c.fetchone()
            assert data is None, "data was not None"
            c.execute("select * from sptest")
            data = c.fetchone()
            self.assertEquals("testing", data[0])
            # The OUT parameter is written back into the params list.
            self.assertEquals("tested", params[0])
        finally:
            c.close()

    def testFuncnone(self):
        c = self.cursor()
        try:
            c.callproc("funcnone")
            data = c.fetchone()
            assert data is not None, "data was None"
            self.assertEquals(1, len(data))
            self.assertEquals("tested", data[0])
        finally:
            c.close()

    def testFuncin(self):
        c = self.cursor()
        try:
            params = ["testing"]
            c.callproc("funcin", params)
            self.assertEquals(1, c.rowcount)
            data = c.fetchone()
            assert data is not None, "data was None"
            self.assertEquals(1, len(data))
            self.assertEquals("testingtesting", data[0])
        finally:
            c.close()

    def testCallingWithKws(self):
        # Same as testFuncin but passing `params` as a keyword argument.
        c = self.cursor()
        try:
            params = ["testing"]
            c.callproc("funcin", params=params)
            self.assertEquals(1, c.rowcount)
            data = c.fetchone()
            assert data is not None, "data was None"
            self.assertEquals(1, len(data))
            self.assertEquals("testingtesting", data[0])
        finally:
            c.close()

    def testFuncout(self):
        c = self.cursor()
        try:
            params = [None]
            c.callproc("funcout", params)
            data = c.fetchone()
            assert data is not None, "data was None"
            self.assertEquals(1, len(data))
            self.assertEquals("returned", data[0])
            self.assertEquals("tested", params[0].strip())
        finally:
            c.close()

    def testMultipleFetch(self):
        """testing the second fetch call to a callproc() is None"""
        c = self.cursor()
        try:
            c.callproc("funcnone")
            data = c.fetchone()
            assert data is not None, "data was None"
            data = c.fetchone()
            assert data is None, "data was not None"
        finally:
            c.close()
class SQLServerSPTest(zxCoreTestCase):
    """Stored-procedure tests against a SQL Server database."""
    def testProcWithResultSet(self):
        # A procedure producing both a result set and an update count
        # should surface both via fetchall()/nextset().
        c = self.cursor()
        try:
            for a in (("table", "sptest"), ("procedure", "sp_proctest")):
                try:
                    c.execute("drop %s %s" % (a))
                except:
                    pass
            c.execute("create table sptest (a int, b varchar(32))")
            c.execute("insert into sptest values (1, 'hello')")
            c.execute("insert into sptest values (2, 'there')")
            c.execute("insert into sptest values (3, 'goodbye')")
            c.execute(""" create procedure sp_proctest (@A int) as select a, b from sptest where a <= @A """)
            self.db.commit()
            c.callproc("sp_proctest", (2,))
            data = c.fetchall()
            self.assertEquals(2, len(data))
            self.assertEquals(2, len(c.description))
            assert c.nextset() is not None, "expected an additional result set"
            data = c.fetchall()
            self.assertEquals(1, len(data))
            self.assertEquals(1, len(c.description))
        finally:
            c.close()

    # def testSalesByCategory(self):
    # c = self.cursor()
    # try:
    # c.execute("use northwind")
    # c.callproc(("northwind", "dbo", "SalesByCategory"), ["Seafood", "1998"])
    # data = c.fetchall()
    # assert data is not None, "no results from SalesByCategory"
    # assert len(data) > 0, "expected numerous results"
    # finally:
    # c.close()
|
spacy-io/spaCy | refs/heads/master | spacy/tests/matcher/test_pattern_validation.py | 2 | import pytest
from spacy.matcher import Matcher
from spacy.errors import MatchPatternError
from spacy.schemas import validate_token_pattern
# (pattern, num errors with validation, num errors identified with minimal
# checks)
# Each entry drives both test_pattern_validation (full schema validation)
# and test_minimal_pattern_validation (Matcher's built-in checks).
TEST_PATTERNS = [
    # Bad patterns flagged in all cases
    ([{"XX": "foo"}], 1, 1),
    ([{"IS_ALPHA": {"==": True}}, {"LIKE_NUM": None}], 2, 1),
    ([{"IS_PUNCT": True, "OP": "$"}], 1, 1),
    ([{"_": "foo"}], 1, 1),
    ('[{"TEXT": "foo"}, {"LOWER": "bar"}]', 1, 1),
    ([1, 2, 3], 3, 1),
    # Bad patterns flagged outside of Matcher
    ([{"_": {"foo": "bar", "baz": {"IN": "foo"}}}], 2, 0),  # prev: (1, 0)
    # Bad patterns not flagged with minimal checks
    ([{"LENGTH": "2", "TEXT": 2}, {"LOWER": "test"}], 2, 0),
    ([{"LENGTH": {"IN": [1, 2, "3"]}}, {"POS": {"IN": "VERB"}}], 4, 0),  # prev: (2, 0)
    ([{"LENGTH": {"VALUE": 5}}], 2, 0),  # prev: (1, 0)
    ([{"TEXT": {"VALUE": "foo"}}], 2, 0),  # prev: (1, 0)
    ([{"IS_DIGIT": -1}], 1, 0),
    ([{"ORTH": -1}], 1, 0),
    # Good patterns
    ([{"TEXT": "foo"}, {"LOWER": "bar"}], 0, 0),
    ([{"LEMMA": {"IN": ["love", "like"]}}, {"POS": "DET", "OP": "?"}], 0, 0),
    ([{"LIKE_NUM": True, "LENGTH": {">=": 5}}], 0, 0),
    ([{"LENGTH": 2}], 0, 0),
    ([{"LOWER": {"REGEX": "^X", "NOT_IN": ["XXX", "XY"]}}], 0, 0),
    ([{"NORM": "a"}, {"POS": {"IN": ["NOUN"]}}], 0, 0),
    ([{"_": {"foo": {"NOT_IN": ["bar", "baz"]}, "a": 5, "b": {">": 10}}}], 0, 0),
    ([{"orth": "foo"}], 0, 0),  # prev: xfail
    ([{"IS_SENT_START": True}], 0, 0),
    ([{"SENT_START": True}], 0, 0),
]
@pytest.mark.parametrize(
    "pattern", [[{"XX": "y"}, {"LENGTH": "2"}, {"TEXT": {"IN": 5}}]]
)
def test_matcher_pattern_validation(en_vocab, pattern):
    # With validate=True, adding an invalid pattern raises immediately.
    matcher = Matcher(en_vocab, validate=True)
    with pytest.raises(MatchPatternError):
        matcher.add("TEST", [pattern])
@pytest.mark.parametrize("pattern,n_errors,_", TEST_PATTERNS)
def test_pattern_validation(pattern, n_errors, _):
    # Full schema validation must report exactly the expected error count.
    errors = validate_token_pattern(pattern)
    assert len(errors) == n_errors
@pytest.mark.parametrize("pattern,n_errors,n_min_errors", TEST_PATTERNS)
def test_minimal_pattern_validation(en_vocab, pattern, n_errors, n_min_errors):
    # Without validate=True the Matcher only applies its minimal checks:
    # patterns with minimal-check errors raise, fully valid ones don't.
    matcher = Matcher(en_vocab)
    if n_min_errors > 0:
        with pytest.raises(ValueError):
            matcher.add("TEST", [pattern])
    elif n_errors == 0:
        matcher.add("TEST", [pattern])
def test_pattern_errors(en_vocab):
    matcher = Matcher(en_vocab)
    # normalize "regex" to upper like "text"
    matcher.add("TEST1", [[{"text": {"regex": "regex"}}]])
    # error if subpattern attribute isn't recognized and processed
    with pytest.raises(MatchPatternError):
        matcher.add("TEST2", [[{"TEXT": {"XX": "xx"}}]])
|
arengela/AngelaUCSFCodeAll | refs/heads/master | libsvm-3.12/tools/grid.py | 20 | #!/usr/bin/env python
import os, sys, traceback
import getpass
from threading import Thread
from subprocess import *
if(sys.hexversion < 0x03000000):
import Queue
else:
import queue as Queue
# svmtrain and gnuplot executable
is_win32 = (sys.platform == 'win32')
if not is_win32:
    svmtrain_exe = "../svm-train"
    gnuplot_exe = "/usr/bin/gnuplot"
else:
    # example for windows
    svmtrain_exe = r"..\windows\svm-train.exe"
    # svmtrain_exe = r"c:\Program Files\libsvm\windows\svm-train.exe"
    gnuplot_exe = r"c:\tmp\gnuplot\binary\pgnuplot.exe"
# global parameters and their default values: cross-validation folds and
# the log2(C) / log2(gamma) search grid (begin, end, step).
fold = 5
c_begin, c_end, c_step = -5, 15, 2
g_begin, g_end, g_step = 3, -15, -2
# NOTE(review): `global` at module level is a no-op; kept for clarity only.
global dataset_pathname, dataset_title, pass_through_string
global out_filename, png_filename
# experimental: remote workers (telnet/ssh) plus local worker count.
telnet_workers = []
ssh_workers = []
nr_local_worker = 1
# process command line options, set global parameters
def process_options(argv=sys.argv):
    """Parse command-line options into the module-level grid-search settings.

    Recognized flags: -log2c / -log2g (begin,end,step triples), -v (folds),
    -svmtrain / -gnuplot (executable paths), -out / -png (output files).
    Unrecognized options are collected and passed through to svm-train.
    The last argument must be the dataset path.

    Side effects: rebinds the globals listed below and opens a pipe to a
    gnuplot subprocess (global ``gnuplot``).  Exits with a usage message
    on bad input.
    """
    global fold
    global c_begin, c_end, c_step
    global g_begin, g_end, g_step
    global dataset_pathname, dataset_title, pass_through_string
    global svmtrain_exe, gnuplot_exe, gnuplot, out_filename, png_filename

    usage = """\
Usage: grid.py [-log2c begin,end,step] [-log2g begin,end,step] [-v fold]
[-svmtrain pathname] [-gnuplot pathname] [-out pathname] [-png pathname]
[additional parameters for svm-train] dataset"""

    if len(argv) < 2:
        print(usage)
        sys.exit(1)

    # the dataset path is always the final argument
    dataset_pathname = argv[-1]
    dataset_title = os.path.split(dataset_pathname)[1]
    out_filename = '{0}.out'.format(dataset_title)
    png_filename = '{0}.png'.format(dataset_title)
    pass_through_options = []

    i = 1
    while i < len(argv) - 1:
        if argv[i] == "-log2c":
            i = i + 1
            (c_begin,c_end,c_step) = map(float,argv[i].split(","))
        elif argv[i] == "-log2g":
            i = i + 1
            (g_begin,g_end,g_step) = map(float,argv[i].split(","))
        elif argv[i] == "-v":
            i = i + 1
            fold = argv[i]
        elif argv[i] in ('-c','-g'):
            # old option names: point users at the renamed flags
            print("Option -c and -g are renamed.")
            print(usage)
            sys.exit(1)
        elif argv[i] == '-svmtrain':
            i = i + 1
            svmtrain_exe = argv[i]
        elif argv[i] == '-gnuplot':
            i = i + 1
            gnuplot_exe = argv[i]
        elif argv[i] == '-out':
            i = i + 1
            out_filename = argv[i]
        elif argv[i] == '-png':
            i = i + 1
            png_filename = argv[i]
        else:
            # anything unrecognized is forwarded verbatim to svm-train
            pass_through_options.append(argv[i])
        i = i + 1

    pass_through_string = " ".join(pass_through_options)
    assert os.path.exists(svmtrain_exe),"svm-train executable not found"
    assert os.path.exists(gnuplot_exe),"gnuplot executable not found"
    assert os.path.exists(dataset_pathname),"dataset not found"
    # keep a pipe to gnuplot open for incremental contour redraws
    gnuplot = Popen(gnuplot_exe,stdin = PIPE).stdin
def range_f(begin, end, step):
    """Inclusive arithmetic progression; unlike range(), works with floats.

    Walks from begin toward end by step (positive or negative), keeping the
    endpoint when it is hit exactly.
    """
    values = []
    current = begin
    # stop once the current value has passed `end` in the step's direction
    while not (step > 0 and current > end) and not (step < 0 and current < end):
        values.append(current)
        current = current + step
    return values
def permute_sequence(seq):
    """Reorder seq for coarse-to-fine coverage: midpoint first, then the
    recursively permuted left and right halves interleaved."""
    if len(seq) <= 1:
        return seq
    mid = len(seq) // 2
    left_part = permute_sequence(seq[:mid])
    right_part = permute_sequence(seq[mid + 1:])
    ordered = [seq[mid]]
    # interleave: one element from the left half, then one from the right
    k = 0
    while k < len(left_part) or k < len(right_part):
        if k < len(left_part):
            ordered.append(left_part[k])
        if k < len(right_part):
            ordered.append(right_part[k])
        k += 1
    return ordered
def redraw(db,best_param,tofile=False):
    """Redraw the gnuplot cross-validation accuracy contour.

    db         -- list of (log2c, log2g, rate) tuples gathered so far
    best_param -- [best_log2c, best_log2g, best_rate], shown in the label
    tofile     -- when True, render to png_filename instead of the screen

    Writes gnuplot commands to the module-level ``gnuplot`` pipe opened by
    process_options().  Does nothing until results vary on both axes.
    """
    if len(db) == 0: return
    # contour levels start a few points below the best rate seen so far
    begin_level = round(max(x[2] for x in db)) - 3
    step_size = 0.5
    best_log2c,best_log2g,best_rate = best_param
    # if newly obtained c, g, or cv values are the same,
    # then stop redrawing the contour.
    if all(x[0] == db[0][0] for x in db): return
    if all(x[1] == db[0][1] for x in db): return
    if all(x[2] == db[0][2] for x in db): return

    # choose the output terminal: PNG file, or the platform's screen terminal
    if tofile:
        gnuplot.write(b"set term png transparent small linewidth 2 medium enhanced\n")
        gnuplot.write("set output \"{0}\"\n".format(png_filename.replace('\\','\\\\')).encode())
        #gnuplot.write(b"set term postscript color solid\n")
        #gnuplot.write("set output \"{0}.ps\"\n".format(dataset_title).encode().encode())
    elif is_win32:
        gnuplot.write(b"set term windows\n")
    else:
        gnuplot.write( b"set term x11\n")
    gnuplot.write(b"set xlabel \"log2(C)\"\n")
    gnuplot.write(b"set ylabel \"log2(gamma)\"\n")
    gnuplot.write("set xrange [{0}:{1}]\n".format(c_begin,c_end).encode())
    gnuplot.write("set yrange [{0}:{1}]\n".format(g_begin,g_end).encode())
    gnuplot.write(b"set contour\n")
    gnuplot.write("set cntrparam levels incremental {0},{1},100\n".format(begin_level,step_size).encode())
    gnuplot.write(b"unset surface\n")
    gnuplot.write(b"unset ztics\n")
    gnuplot.write(b"set view 0,0\n")
    gnuplot.write("set title \"{0}\"\n".format(dataset_title).encode())
    gnuplot.write(b"unset label\n")
    gnuplot.write("set label \"Best log2(C) = {0} log2(gamma) = {1} accuracy = {2}%\" \
at screen 0.5,0.85 center\n". \
format(best_log2c, best_log2g, best_rate).encode())
    gnuplot.write("set label \"C = {0} gamma = {1}\""
                  " at screen 0.5,0.8 center\n".format(2**best_log2c, 2**best_log2g).encode())
    gnuplot.write(b"set key at screen 0.9,0.9\n")
    gnuplot.write(b"splot \"-\" with lines\n")

    # gnuplot's splot expects data grouped by C (blank line between groups)
    db.sort(key = lambda x:(x[0], -x[1]))

    prevc = db[0][0]
    for line in db:
        if prevc != line[0]:
            gnuplot.write(b"\n")
            prevc = line[0]
        gnuplot.write("{0[0]} {0[1]} {0[2]}\n".format(line).encode())
    gnuplot.write(b"e\n")
    gnuplot.write(b"\n") # force gnuplot back to prompt when term set failure
    gnuplot.flush()
def calculate_jobs():
    """Build the list of (log2c, log2g) evaluation lines.

    The two axes are refined alternately so that coarse coverage of the whole
    grid appears early and finer resolution is filled in progressively.
    """
    c_seq = permute_sequence(range_f(c_begin, c_end, c_step))
    g_seq = permute_sequence(range_f(g_begin, g_end, g_step))
    nr_c = float(len(c_seq))
    nr_g = float(len(g_seq))
    jobs = []
    i = j = 0
    while i < nr_c or j < nr_g:
        if i / nr_c < j / nr_g:
            # C axis is lagging: add a line that refines C resolution
            jobs.append([(c_seq[i], g_seq[k]) for k in range(j)])
            i += 1
        else:
            # gamma axis is lagging: add a line that refines g resolution
            jobs.append([(c_seq[k], g_seq[j]) for k in range(i)])
            j += 1
    return jobs
class WorkerStopToken: # used to notify the worker to stop
    """Sentinel placed on the job queue to tell workers to shut down."""
    pass
class Worker(Thread):
    """Base class for grid-search worker threads.

    Pulls (log2c, log2g) jobs from job_queue, evaluates each via run_one()
    (implemented by subclasses) and pushes (name, log2c, log2g, rate) onto
    result_queue.  A WorkerStopToken on the queue ends the loop.
    """
    def __init__(self, name, job_queue, result_queue):
        Thread.__init__(self)
        self.name = name
        self.job_queue = job_queue
        self.result_queue = result_queue

    def run(self):
        while True:
            log2c, log2g = self.job_queue.get()
            if log2c is WorkerStopToken:
                # leave the token in place so sibling workers also stop
                self.job_queue.put((log2c, log2g))
                # print('worker {0} stop.'.format(self.name))
                break
            try:
                rate = self.run_one(2.0 ** log2c, 2.0 ** log2g)
                if rate is None:
                    raise RuntimeError("get no rate")
            except:
                # we failed: re-queue the job for another worker and quit
                traceback.print_exception(*sys.exc_info())
                self.job_queue.put((log2c, log2g))
                print('worker {0} quit.'.format(self.name))
                break
            else:
                self.result_queue.put((self.name, log2c, log2g, rate))
class LocalWorker(Worker):
    """Worker that runs svm-train as a local subprocess."""
    def run_one(self, c, g):
        cmdline = '{0} -c {1} -g {2} -v {3} {4} {5}'.format \
            (svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname)
        pipe = Popen(cmdline, shell=True, stdout=PIPE).stdout
        for raw in pipe.readlines():
            # parse the "Cross Validation Accuracy = NN%" line; strip the '%'
            if "Cross" in str(raw):
                return float(raw.split()[-1][:-1])
class SSHWorker(Worker):
    """Worker that runs svm-train on a remote host via ssh (shared cwd)."""
    def __init__(self, name, job_queue, result_queue, host):
        Worker.__init__(self, name, job_queue, result_queue)
        self.host = host
        self.cwd = os.getcwd()
    def run_one(self, c, g):
        cmdline = 'ssh -x {0} "cd {1}; {2} -c {3} -g {4} -v {5} {6} {7}"'.format \
            (self.host,self.cwd, \
             svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname)
        pipe = Popen(cmdline, shell=True, stdout=PIPE).stdout
        for raw in pipe.readlines():
            # parse the "Cross Validation Accuracy = NN%" line; strip the '%'
            if "Cross" in str(raw):
                return float(raw.split()[-1][:-1])
class TelnetWorker(Worker):
    """Worker that drives svm-train on a remote host over telnet.

    NOTE(review): reads/writes use str; Python 3's telnetlib expects bytes,
    so this path presumably only works under Python 2 -- verify before use.
    """
    def __init__(self,name,job_queue,result_queue,host,username,password):
        Worker.__init__(self,name,job_queue,result_queue)
        self.host = host
        self.username = username
        self.password = password
    def run(self):
        # log in, change to the shared working directory, run jobs, log out
        import telnetlib
        self.tn = tn = telnetlib.Telnet(self.host)
        tn.read_until("login: ")
        tn.write(self.username + "\n")
        tn.read_until("Password: ")
        tn.write(self.password + "\n")

        # XXX: how to know whether login is successful?
        tn.read_until(self.username)
        #
        print('login ok', self.host)
        tn.write("cd "+os.getcwd()+"\n")
        Worker.run(self)
        tn.write("exit\n")
    def run_one(self,c,g):
        cmdline = '{0} -c {1} -g {2} -v {3} {4} {5}'.format \
            (svmtrain_exe,c,g,fold,pass_through_string,dataset_pathname)
        result = self.tn.write(cmdline+'\n')
        # wait for the cross-validation summary line, then parse the rate
        (idx,matchm,output) = self.tn.expect(['Cross.*\n'])
        for line in output.split('\n'):
            if str(line).find("Cross") != -1:
                return float(line.split()[-1][0:-1])
def main():
    """Drive the grid search: parse options, fan jobs out to worker threads,
    gather cross-validation rates, track the best (C, gamma) and keep the
    gnuplot contour updated.  Results are also appended to out_filename.
    """
    # set parameters
    process_options()

    # put jobs in queue
    jobs = calculate_jobs()
    job_queue = Queue.Queue(0)
    result_queue = Queue.Queue(0)
    for line in jobs:
        for (c,g) in line:
            job_queue.put((c,g))

    # hack the queue to become a stack --
    # this is important when some thread
    # failed and re-put a job. It we still
    # use FIFO, the job will be put
    # into the end of the queue, and the graph
    # will only be updated in the end
    job_queue._put = job_queue.queue.appendleft

    # fire telnet workers
    if telnet_workers:
        nr_telnet_worker = len(telnet_workers)
        username = getpass.getuser()
        password = getpass.getpass()
        for host in telnet_workers:
            TelnetWorker(host,job_queue,result_queue,
                         host,username,password).start()

    # fire ssh workers
    if ssh_workers:
        for host in ssh_workers:
            SSHWorker(host,job_queue,result_queue,host).start()

    # fire local workers
    for i in range(nr_local_worker):
        LocalWorker('local',job_queue,result_queue).start()

    # gather results
    done_jobs = {}
    result_file = open(out_filename, 'w')
    db = []
    best_rate = -1
    best_c1,best_g1 = None,None

    for line in jobs:
        for (c,g) in line:
            # block until this grid point's rate has arrived (results may
            # come back in any order from the worker pool)
            while (c, g) not in done_jobs:
                (worker,c1,g1,rate) = result_queue.get()
                done_jobs[(c1,g1)] = rate
                result_file.write('{0} {1} {2}\n'.format(c1,g1,rate))
                result_file.flush()
                # prefer higher rate; break ties toward smaller C at same gamma
                if (rate > best_rate) or (rate==best_rate and g1==best_g1 and c1<best_c1):
                    best_rate = rate
                    best_c1,best_g1=c1,g1
                    best_c = 2.0**c1
                    best_g = 2.0**g1
                print("[{0}] {1} {2} {3} (best c={4}, g={5}, rate={6})".format \
                    (worker,c1,g1,rate, best_c, best_g, best_rate))
            db.append((c,g,done_jobs[(c,g)]))
        # refresh the on-screen contour and the PNG after each completed line
        redraw(db,[best_c1, best_g1, best_rate])
        redraw(db,[best_c1, best_g1, best_rate],True)

    job_queue.put((WorkerStopToken,None))
    print("{0} {1} {2}".format(best_c, best_g, best_rate))
main()
|
ropik/chromium | refs/heads/master | chrome/PRESUBMIT.py | 14 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting chrome/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
# Only C++ implementation and header files are linted.
INCLUDE_CPP_FILES_ONLY = (
  r'.*\.cc$', r'.*\.h$'
)

# Path regexes excluded from cpplint; each entry notes why it is skipped.
EXCLUDE = (
  # Objective C confuses everything.
  r'.*cocoa.*',
  r'.*_mac\.(cc|h)$',
  r'.*_mac_.*',
  # All the messages files do weird multiple include trickery
  r'.*_messages_internal\.h$',
  r'render_messages.h$',
  # Autogenerated window resources files are off limits
  r'.*resource.h$',
  # GTK macros in C-ish header code cause false positives
  r'gtk_.*\.h$',
  # Header trickery
  r'.*-inl\.h$',
  # Templates
  r'sigslotrepeater\.h$',
  # GCC attribute trickery
  r'sel_main\.cc$',
  # Mozilla code
  r'mork_reader\.h$',
  r'mork_reader\.cc$',
  r'nss_decryptor_linux\.cc$',
  # Has safe printf usage that cpplint complains about
  r'safe_browsing_util\.cc$',
  # Too much math on one line?
  r'bloom_filter\.cc$',
  # Bogus ifdef tricks
  r'renderer_webkitplatformsupport_impl\.cc$',
  # Lines > 100 chars
  r'gcapi\.cc$',
)
def _CheckChangeLintsClean(input_api, output_api):
  """Makes sure that the chrome/ code is cpplint clean."""
  black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
  # source filter: C++ files only, minus the EXCLUDE list
  def sources(affected_file):
    return input_api.FilterSourceFile(
        affected_file, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
  return input_api.canned_checks.CheckChangeLintsClean(
      input_api, output_api, sources)
def _CheckNoContentUnitTestsInChrome(input_api, output_api):
  """Makes sure that no unit tests from content/ are included in unit_tests."""
  problems = []
  for affected_file in input_api.AffectedFiles():
    # only chrome_tests.gypi can introduce the problem
    if not affected_file.LocalPath().endswith('chrome_tests.gypi'):
      continue
    for _, line in affected_file.ChangedContents():
      match = re.search(r"'(.*\/content\/.*unittest.*)'", line)
      if match:
        problems.append(match.group(1))
  if not problems:
    return []
  return [output_api.PresubmitPromptWarning(
      'Unit tests located in content/ should be added to the ' +
      'content_tests.gypi:content_unittests target.',
      items=problems)]
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  # currently just the content/ unit-test placement check
  return list(_CheckNoContentUnitTestsInChrome(input_api, output_api))
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point for upload: common checks plus cpplint."""
  return (_CommonChecks(input_api, output_api) +
          _CheckChangeLintsClean(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point for commit: common checks only."""
  return _CommonChecks(input_api, output_api)
|
KyleAMoore/KanjiNani | refs/heads/master | Android/.buildozer/android/platform/build/build/python-installs/KanjiNani/usr/local/share/kivy-examples/widgets/colorusage.py | 21 | from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
Builder.load_string("""
#:import hex kivy.utils.get_color_from_hex
<Root>:
cols: 2
canvas:
Color:
rgba: 1, 1, 1, 1
Rectangle:
pos: self.pos
size: self.size
Label:
canvas.before:
Color:
rgb: 39/255., 174/255., 96/255.
Rectangle:
pos: self.pos
size: self.size
text: "rgb: 39/255., 174/255., 96/255."
Label:
canvas.before:
Color:
rgba: 39/255., 174/255., 96/255., 1
Rectangle:
pos: self.pos
size: self.size
text: "rgba: 39/255., 174/255., 96/255., 1"
Label:
canvas.before:
Color:
hsv: 145/360., 77.6/100, 68.2/100
Rectangle:
pos: self.pos
size: self.size
text: "hsv: 145/360., 77.6/100, 68.2/100"
Label:
canvas.before:
Color:
rgba: hex('#27ae60')
Rectangle:
pos: self.pos
size: self.size
text: "rgba: hex('#27ae60')"
""")
class Root(GridLayout):
    """Layout class targeted by the <Root> kv rule loaded above."""
    pass
class ColorusageApp(App):
    """App whose root widget demonstrates the color-specification styles."""
    def build(self):
        # the kv rule for Root supplies all content
        return Root()
if __name__ == "__main__":
ColorusageApp().run()
|
MuSystemsAnalysis/craigslist_area_search | refs/heads/master | checkPage.py | 1 | #!/usr/bin/python
# coding: utf-8
import urllib.request
import re
def thereAreResults(searchUrl):
    """Return True if the Craigslist search page at searchUrl has results.

    Fetches the page once and looks for the literal marker Craigslist embeds
    when a search comes up empty: returns False when b'no results' is present,
    True otherwise.  Raises urllib.error.URLError on network failure.
    """
    page = urllib.request.urlopen(searchUrl).read()
    # The original tested the same condition twice with complementary `if`s
    # via re.findall; a single substring test is equivalent and simpler.
    return b'no results' not in page
if __name__=="__main__":
argh = 'http://elmira.craigslist.org/search/sss?sort=rel&query=gimp%20suits'
thereAreResults( argh )
|
errx/django | refs/heads/master | tests/utils_tests/test_safestring.py | 29 | from __future__ import unicode_literals
from django.template import Template, Context
from django.test import TestCase
from django.utils.encoding import force_text, force_bytes
from django.utils.functional import lazy
from django.utils.safestring import mark_safe, mark_for_escaping, SafeData, EscapeData
from django.utils import six
# Lazily-evaluated text/bytes factories used below to exercise how
# mark_safe / mark_for_escaping handle lazy objects.
lazystr = lazy(force_text, six.text_type)
lazybytes = lazy(force_bytes, bytes)
class SafeStringTest(TestCase):
    """Behaviour of mark_safe / mark_for_escaping on plain and lazy values."""

    def assertRenderEqual(self, tpl, expected, **context):
        # Render tpl with the given context and compare the output.
        rendered = Template(tpl).render(Context(context))
        self.assertEqual(rendered, expected)

    def test_mark_safe(self):
        safe = mark_safe('a&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=safe)
        self.assertRenderEqual('{{ s|force_escape }}', 'a&b', s=safe)

    def test_mark_safe_lazy(self):
        lazy_text = lazystr('a&b')
        lazy_bytes = lazybytes(b'a&b')
        self.assertIsInstance(mark_safe(lazy_text), SafeData)
        self.assertIsInstance(mark_safe(lazy_bytes), SafeData)
        self.assertRenderEqual('{{ s }}', 'a&b', s=mark_safe(lazy_text))

    def test_mark_for_escaping(self):
        escaped = mark_for_escaping('a&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=escaped)
        self.assertRenderEqual('{{ s }}', 'a&b', s=mark_for_escaping(escaped))

    def test_mark_for_escaping_lazy(self):
        lazy_text = lazystr('a&b')
        lazy_bytes = lazybytes(b'a&b')
        self.assertIsInstance(mark_for_escaping(lazy_text), EscapeData)
        self.assertIsInstance(mark_for_escaping(lazy_bytes), EscapeData)
        self.assertRenderEqual('{% autoescape off %}{{ s }}{% endautoescape %}',
                               'a&b', s=mark_for_escaping(lazy_text))

    def test_html(self):
        s = '<h1>interop</h1>'
        self.assertEqual(s, mark_safe(s).__html__())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.