repo_name | ref | path | copies | content |
|---|---|---|---|---|
westinedu/wrgroups | refs/heads/master | django/contrib/sitemaps/tests/urls.py | 233 | from datetime import datetime
from django.conf.urls.defaults import *
from django.contrib.sitemaps import Sitemap, GenericSitemap, FlatPageSitemap
from django.contrib.auth.models import User
class SimpleSitemap(Sitemap):
changefreq = "never"
priority = 0.5
location = '/location/'
lastmod = datetime.now()
def items(self):
return [object()]
simple_sitemaps = {
'simple': SimpleSitemap,
}
generic_sitemaps = {
'generic': GenericSitemap({
'queryset': User.objects.all()
}),
}
flatpage_sitemaps = {
'flatpages': FlatPageSitemap,
}
urlpatterns = patterns('django.contrib.sitemaps.views',
(r'^simple/index\.xml$', 'index', {'sitemaps': simple_sitemaps}),
(r'^simple/custom-index\.xml$', 'index', {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap_index.xml'}),
(r'^simple/sitemap-(?P<section>.+)\.xml$', 'sitemap', {'sitemaps': simple_sitemaps}),
(r'^simple/sitemap\.xml$', 'sitemap', {'sitemaps': simple_sitemaps}),
(r'^simple/custom-sitemap\.xml$', 'sitemap', {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap.xml'}),
(r'^generic/sitemap\.xml$', 'sitemap', {'sitemaps': generic_sitemaps}),
(r'^flatpages/sitemap\.xml$', 'sitemap', {'sitemaps': flatpage_sitemaps}),
)
|
guillermooo/dart-sublime-bundle | refs/heads/master | execute.py | 3 | # Copyright (c) 2014, Guillermo López-Anglada. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
'''Our own ST `exec` command.
Mostly lifted from Default.exec.py
'''
import sublime
import sublime_plugin
import os
import functools
import time
from Default.exec import ProcessListener
from Default.exec import AsyncProcess
from Dart.sublime_plugin_lib.sublime import after
from Dart.sublime_plugin_lib.panels import OutputPanel
class DartExecCommand(sublime_plugin.WindowCommand, ProcessListener):
def run(self,
cmd=None,
shell_cmd=None,
file_regex="",
line_regex="",
working_dir="",
encoding="utf-8",
env={},
quiet=False,
kill=False,
word_wrap=True,
syntax='',
preamble='',
panel_name='dart.out',
# Catches "path" and "shell"
**kwargs):
if kill:
if hasattr(self, 'proc') and self.proc:
self.proc.kill()
self.proc = None
self.append_string(None, "[Cancelled]")
return
# TODO(guillermooo): We cannot have multiple processes running at the
# same time, or processes that use separate output panels.
if not hasattr(self, 'out_panel'):
# Try not to call get_output_panel until the regexes are assigned
self.out_panel = OutputPanel(panel_name)
# Default to the current file's directory if no working directory was given
if (not working_dir and
self.window.active_view() and
self.window.active_view().file_name()):
working_dir = os.path.dirname(
self.window.active_view().file_name())
self.out_panel.set('result_file_regex', file_regex)
self.out_panel.set('result_line_regex', line_regex)
self.out_panel.set('result_base_dir', working_dir)
self.out_panel.set('word_wrap', word_wrap)
self.out_panel.set('line_numbers', False)
self.out_panel.set('gutter', False)
self.out_panel.set('rulers', [])
self.out_panel.set('scroll_past_end', False)
self.out_panel.view.assign_syntax(syntax)
self.out_panel.set('color_scheme', '')
self.encoding = encoding
self.quiet = quiet
self.proc = None
if not self.quiet:
if shell_cmd:
print("Running " + shell_cmd)
else:
print("Running " + " ".join(cmd))
sublime.status_message("Building")
if preamble:
self.append_string(self.proc, preamble)
show_panel_on_build = sublime.load_settings(
"Dart - Plugin Settings.sublime-settings").get("show_panel_on_build", True)
if show_panel_on_build:
self.out_panel.show()
merged_env = env.copy()
if self.window.active_view():
user_env = self.window.active_view().settings().get('build_env')
if user_env:
merged_env.update(user_env)
# Change to the working dir, rather than spawning the process with it,
# so that emitted working dir relative path names make sense
if working_dir:
os.chdir(working_dir)
self.debug_text = ""
if shell_cmd:
self.debug_text += "[shell_cmd: " + shell_cmd + "]\n"
else:
self.debug_text += "[cmd: " + str(cmd) + "]\n"
self.debug_text += "[dir: " + str(os.getcwd()) + "]\n"
if "PATH" in merged_env:
self.debug_text += "[path: " + str(merged_env["PATH"]) + "]"
else:
self.debug_text += "[path: " + str(os.environ["PATH"]) + "]"
try:
# Forward kwargs to AsyncProcess
self.proc = AsyncProcess(cmd, shell_cmd, merged_env, self,
**kwargs)
except Exception as e:
self.append_string(None, str(e) + "\n")
self.append_string(None, self.debug_text + "\n")
if not self.quiet:
self.append_string(None, "[Finished]")
def append_data(self, proc, data):
if proc != self.proc:
# a second call to exec has been made before the first one
# finished, ignore it instead of intermingling the output.
if proc:
proc.kill()
return
try:
str_ = data.decode(self.encoding)
except UnicodeDecodeError:
str_ = "[Decode error - output not " + self.encoding + "]\n"
proc = None
return
# Normalize newlines, Sublime Text always uses a single \n separator
# in memory.
str_ = str_.replace('\r\n', '\n').replace('\r', '\n')
self.out_panel.write(str_)
def append_string(self, proc, str):
self.append_data(proc, str.encode(self.encoding))
def finish(self, proc):
if not self.quiet:
elapsed = time.time() - proc.start_time
exit_code = proc.exit_code()
if (exit_code == 0) or (exit_code is None):
self.append_string(proc, "[Finished in %.1fs]" % (elapsed))
else:
self.append_string(proc,
"[Finished in %.1fs with exit code %d]\n" %
(elapsed, exit_code))
self.append_string(proc, self.debug_text)
if proc != self.proc:
return
# XXX: What's this for?
errs = self.out_panel.view.find_all_results()
if len(errs) == 0:
sublime.status_message("Build finished")
else:
sublime.status_message(("Build finished with %d errors") % len(errs))
def on_data(self, proc, data):
after(0, functools.partial(self.append_data, proc, data))
def on_finished(self, proc):
after(0, functools.partial(self.finish, proc))
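# Usage sketch (assumes the standard Sublime Text command API; the argument
# values below are hypothetical):
#
#   window.run_command('dart_exec', {
#       'shell_cmd': 'pub get',
#       'working_dir': '/path/to/project',
#       'preamble': 'Fetching dependencies...\n',
#   })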
|
saukrIppl/seahub | refs/heads/master | thirdpart/openpyxl-2.3.0-py2.7.egg/openpyxl/writer/etree_worksheet.py | 13 | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from operator import itemgetter
from openpyxl.compat import safe_string
from openpyxl.xml.functions import xmlfile, Element, SubElement
def get_rows_to_write(worksheet):
"""Return all rows, and any cells that they contain"""
# order cells by row
rows = {}
for (row, col), cell in worksheet._cells.items():
rows.setdefault(row, []).append((col, cell))
# add empty rows if styling has been applied
for row_idx in worksheet.row_dimensions:
if row_idx not in rows:
rows[row_idx] = []
return sorted(rows.items())
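# Illustrative result shape for a hypothetical worksheet holding only cells
# A1 and B3, as row-sorted pairs of (row_idx, [(col_idx, cell), ...]):
#   [(1, [(1, <Cell A1>)]), (3, [(2, <Cell B3>)])]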
def write_rows(xf, worksheet):
"""Write worksheet data to xml."""
all_rows = get_rows_to_write(worksheet)
dims = worksheet.row_dimensions
max_column = worksheet.max_column
with xf.element("sheetData"):
for row_idx, row in all_rows:
attrs = {'r': '%d' % row_idx, 'spans': '1:%d' % max_column}
if row_idx in dims:
row_dimension = dims[row_idx]
attrs.update(dict(row_dimension))
with xf.element("row", attrs):
for col, cell in sorted(row, key=itemgetter(0)):
if cell.value is None and not cell.has_style:
continue
el = write_cell(worksheet, cell, cell.has_style)
xf.write(el)
def write_cell(worksheet, cell, styled=None):
coordinate = cell.coordinate
attributes = {'r': coordinate}
if styled:
attributes['s'] = '%d' % cell.style_id
if cell.data_type != 'f':
attributes['t'] = cell.data_type
value = cell._value
el = Element("c", attributes)
if value is None or value == "":
return el
if cell.data_type == 'f':
shared_formula = worksheet.formula_attributes.get(coordinate, {})
formula = SubElement(el, 'f', shared_formula)
if value is not None:
formula.text = value[1:]
value = None
if cell.data_type == 's':
value = worksheet.parent.shared_strings.add(value)
cell_content = SubElement(el, 'v')
if value is not None:
cell_content.text = safe_string(value)
return el
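# For orientation, write_cell produces elements like these (illustrative):
#   <c r="A1" t="n" s="1"><v>42</v></c>   numeric cell with style id 1
#   <c r="B2" t="s"><v>0</v></c>          shared-string cell (index into the table)
#   <c r="C3"><f>SUM(A1:B2)</f></c>       formula cell (leading '=' stripped, no 't')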
|
mark-ignacio/phantomjs | refs/heads/master | src/breakpad/src/tools/gyp/test/copies/gyptest-all.py | 137 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies using an explicit build target of 'all'.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('copies.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('copies.gyp', test.ALL, chdir='relocate/src')
test.must_match(['relocate', 'src', 'copies-out', 'file1'], 'file1 contents\n')
test.built_file_must_match('copies-out/file2',
'file2 contents\n',
chdir='relocate/src')
test.pass_test()
|
sergiohgz/incubator-airflow | refs/heads/master | airflow/migrations/versions/1b38cef5b76e_add_dagrun.py | 9 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add dagrun
Revision ID: 1b38cef5b76e
Revises: 502898887f84
Create Date: 2015-10-27 08:31:48.475140
"""
# revision identifiers, used by Alembic.
revision = '1b38cef5b76e'
down_revision = '502898887f84'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('dag_run',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('dag_id', sa.String(length=250), nullable=True),
sa.Column('execution_date', sa.DateTime(), nullable=True),
sa.Column('state', sa.String(length=50), nullable=True),
sa.Column('run_id', sa.String(length=250), nullable=True),
sa.Column('external_trigger', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('dag_id', 'execution_date'),
sa.UniqueConstraint('dag_id', 'run_id'),
)
def downgrade():
op.drop_table('dag_run')
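# For reference, upgrade() emits DDL roughly equivalent to the following
# (a dialect-dependent sketch, not part of the migration itself):
#
#   CREATE TABLE dag_run (
#       id INTEGER NOT NULL,
#       dag_id VARCHAR(250),
#       execution_date DATETIME,
#       state VARCHAR(50),
#       run_id VARCHAR(250),
#       external_trigger BOOLEAN,
#       PRIMARY KEY (id),
#       UNIQUE (dag_id, execution_date),
#       UNIQUE (dag_id, run_id)
#   )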
|
StrikeForceZero/PJSip-CSharp | refs/heads/master | tests/pjsua/scripts-sendto/301_srtp0_recv_savp.py | 42 | # $Id: 301_srtp0_recv_savp.py 3713 2011-08-18 18:11:08Z nanang $
import inc_sip as sip
import inc_sdp as sdp
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=tester
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/SAVP 0 101
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:WnD7c1ksDGs+dIefCEo8omPg4uO8DYIinNGL5yxQ
a=crypto:2 AES_CM_128_HMAC_SHA1_32 inline:t0r0/apkukU7JjjfR0mY8GEimBq4OiPEm9eKSFOx
"""
args = "--null-audio --auto-answer 200 --max-calls 1 --use-srtp 0 --srtp-secure 0"
include = []
exclude = []
sendto_cfg = sip.SendtoCfg( "Callee has SRTP disabled but receives RTP/SAVP, should reject the call",
pjsua_args=args, sdp=sdp, resp_code=488,
resp_inc=include, resp_exc=exclude)
|
imply/chuu | refs/heads/master | third_party/pexpect/ANSI.py | 171 | """This implements an ANSI (VT100) terminal emulator as a subclass of screen.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# references:
# http://en.wikipedia.org/wiki/ANSI_escape_code
# http://www.retards.org/terminals/vt102.html
# http://vt100.net/docs/vt102-ug/contents.html
# http://vt100.net/docs/vt220-rm/
# http://www.termsys.demon.co.uk/vtansi.htm
import screen
import FSM
import copy
import string
#
# The 'Do.*' functions are helper functions for the ANSI class.
#
def DoEmit (fsm):
screen = fsm.memory[0]
screen.write_ch(fsm.input_symbol)
def DoStartNumber (fsm):
fsm.memory.append (fsm.input_symbol)
def DoBuildNumber (fsm):
ns = fsm.memory.pop()
ns = ns + fsm.input_symbol
fsm.memory.append (ns)
def DoBackOne (fsm):
screen = fsm.memory[0]
screen.cursor_back ()
def DoBack (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_back (count)
def DoDownOne (fsm):
screen = fsm.memory[0]
screen.cursor_down ()
def DoDown (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_down (count)
def DoForwardOne (fsm):
screen = fsm.memory[0]
screen.cursor_forward ()
def DoForward (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_forward (count)
def DoUpReverse (fsm):
screen = fsm.memory[0]
screen.cursor_up_reverse()
def DoUpOne (fsm):
screen = fsm.memory[0]
screen.cursor_up ()
def DoUp (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_up (count)
def DoHome (fsm):
c = int(fsm.memory.pop())
r = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_home (r,c)
def DoHomeOrigin (fsm):
c = 1
r = 1
screen = fsm.memory[0]
screen.cursor_home (r,c)
def DoEraseDown (fsm):
screen = fsm.memory[0]
screen.erase_down()
def DoErase (fsm):
arg = int(fsm.memory.pop())
screen = fsm.memory[0]
if arg == 0:
screen.erase_down()
elif arg == 1:
screen.erase_up()
elif arg == 2:
screen.erase_screen()
def DoEraseEndOfLine (fsm):
screen = fsm.memory[0]
screen.erase_end_of_line()
def DoEraseLine (fsm):
arg = int(fsm.memory.pop())
screen = fsm.memory[0]
if arg == 0:
screen.erase_end_of_line()
elif arg == 1:
screen.erase_start_of_line()
elif arg == 2:
screen.erase_line()
def DoEnableScroll (fsm):
screen = fsm.memory[0]
screen.scroll_screen()
def DoCursorSave (fsm):
screen = fsm.memory[0]
screen.cursor_save_attrs()
def DoCursorRestore (fsm):
screen = fsm.memory[0]
screen.cursor_restore_attrs()
def DoScrollRegion (fsm):
screen = fsm.memory[0]
r2 = int(fsm.memory.pop())
r1 = int(fsm.memory.pop())
screen.scroll_screen_rows (r1,r2)
def DoMode (fsm):
screen = fsm.memory[0]
mode = fsm.memory.pop() # Should be 4
# screen.setReplaceMode ()
def DoLog (fsm):
screen = fsm.memory[0]
fsm.memory = [screen]
fout = open ('log', 'a')
fout.write (fsm.input_symbol + ',' + fsm.current_state + '\n')
fout.close()
class term (screen.screen):
"""This class is an abstract, generic terminal.
This does nothing. This is a placeholder that
provides a common base class for other terminals
such as an ANSI terminal. """
def __init__ (self, r=24, c=80):
screen.screen.__init__(self, r,c)
class ANSI (term):
"""This class implements an ANSI (VT100) terminal.
It is a stream filter that recognizes ANSI terminal
escape sequences and maintains the state of a screen object. """
def __init__ (self, r=24,c=80):
term.__init__(self,r,c)
#self.screen = screen (24,80)
self.state = FSM.FSM ('INIT',[self])
self.state.set_default_transition (DoLog, 'INIT')
self.state.add_transition_any ('INIT', DoEmit, 'INIT')
self.state.add_transition ('\x1b', 'INIT', None, 'ESC')
self.state.add_transition_any ('ESC', DoLog, 'INIT')
self.state.add_transition ('(', 'ESC', None, 'G0SCS')
self.state.add_transition (')', 'ESC', None, 'G1SCS')
self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT')
self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT')
self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT')
self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT')
self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT')
self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT')
self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT')
self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad.
self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND')
self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT')
self.state.add_transition ('[', 'ESC', None, 'ELB')
# ELB means Escape Left Bracket. That is ^[[
self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT')
self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT')
self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT')
self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT')
self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT')
self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT')
self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT')
self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT')
self.state.add_transition ('m', 'ELB', None, 'INIT')
self.state.add_transition ('?', 'ELB', None, 'MODECRAP')
self.state.add_transition_list (string.digits, 'ELB', DoStartNumber, 'NUMBER_1')
self.state.add_transition_list (string.digits, 'NUMBER_1', DoBuildNumber, 'NUMBER_1')
self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT')
self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT')
self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT')
self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT')
self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT')
self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT')
self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT')
### It gets worse... the 'm' code can have infinite number of
### number;number;number before it. I've never seen more than two,
### but the specs say it's allowed. crap!
self.state.add_transition ('m', 'NUMBER_1', None, 'INIT')
### LED control. Same implementation problem as 'm' code.
self.state.add_transition ('q', 'NUMBER_1', None, 'INIT')
# \E[?47h switch to alternate screen
# \E[?47l restores to normal screen from alternate screen.
self.state.add_transition_list (string.digits, 'MODECRAP', DoStartNumber, 'MODECRAP_NUM')
self.state.add_transition_list (string.digits, 'MODECRAP_NUM', DoBuildNumber, 'MODECRAP_NUM')
self.state.add_transition ('l', 'MODECRAP_NUM', None, 'INIT')
self.state.add_transition ('h', 'MODECRAP_NUM', None, 'INIT')
#RM Reset Mode Esc [ Ps l none
self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON')
self.state.add_transition_any ('SEMICOLON', DoLog, 'INIT')
self.state.add_transition_list (string.digits, 'SEMICOLON', DoStartNumber, 'NUMBER_2')
self.state.add_transition_list (string.digits, 'NUMBER_2', DoBuildNumber, 'NUMBER_2')
self.state.add_transition_any ('NUMBER_2', DoLog, 'INIT')
self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT')
self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT')
self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT')
### It gets worse... the 'm' code can have infinite number of
### number;number;number before it. I've never seen more than two,
### but the specs say it's allowed. crap!
self.state.add_transition ('m', 'NUMBER_2', None, 'INIT')
### LED control. Same problem as 'm' code.
self.state.add_transition ('q', 'NUMBER_2', None, 'INIT')
self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')
# Create a state for 'q' and 'm' which allows an infinite number of ignored numbers
self.state.add_transition_any ('SEMICOLON_X', DoLog, 'INIT')
self.state.add_transition_list (string.digits, 'SEMICOLON_X', None, 'NUMBER_X')
self.state.add_transition_any ('NUMBER_X', DoLog, 'INIT')
self.state.add_transition ('m', 'NUMBER_X', None, 'INIT')
self.state.add_transition ('q', 'NUMBER_X', None, 'INIT')
self.state.add_transition (';', 'NUMBER_X', None, 'SEMICOLON_X')
def process (self, c):
self.state.process(c)
def process_list (self, l):
self.write(l)
def write (self, s):
for c in s:
self.process(c)
def flush (self):
pass
def write_ch (self, ch):
"""This puts a character at the current cursor position. The cursor
position is moved forward with wrap-around, but no scrolling is done if
the cursor hits the lower-right corner of the screen. """
# '\r' produces a call to cr(); '\n' produces a call to crlf().
ch = ch[0]
if ch == '\r':
self.cr()
return
if ch == '\n':
self.crlf()
return
if ch == chr(screen.BS):
self.cursor_back()
return
if ch not in string.printable:
fout = open ('log', 'a')
fout.write ('Nonprint: ' + str(ord(ch)) + '\n')
fout.close()
return
self.put_abs(self.cur_r, self.cur_c, ch)
old_r = self.cur_r
old_c = self.cur_c
self.cursor_forward()
if old_c == self.cur_c:
self.cursor_down()
if old_r != self.cur_r:
self.cursor_home (self.cur_r, 1)
else:
self.scroll_up ()
self.cursor_home (self.cur_r, 1)
self.erase_line()
# def test (self):
#
# import sys
# write_text = 'I\'ve got a ferret sticking up my nose.\n' + \
# '(He\'s got a ferret sticking up his nose.)\n' + \
# 'How it got there I can\'t tell\n' + \
# 'But now it\'s there it hurts like hell\n' + \
# 'And what is more it radically affects my sense of smell.\n' + \
# '(His sense of smell.)\n' + \
# 'I can see a bare-bottomed mandril.\n' + \
# '(Slyly eyeing his other nostril.)\n' + \
# 'If it jumps inside there too I really don\'t know what to do\n' + \
# 'I\'ll be the proud posessor of a kind of nasal zoo.\n' + \
# '(A nasal zoo.)\n' + \
# 'I\'ve got a ferret sticking up my nose.\n' + \
# '(And what is worst of all it constantly explodes.)\n' + \
# '"Ferrets don\'t explode," you say\n' + \
# 'But it happened nine times yesterday\n' + \
# 'And I should know for each time I was standing in the way.\n' + \
# 'I\'ve got a ferret sticking up my nose.\n' + \
# '(He\'s got a ferret sticking up his nose.)\n' + \
# 'How it got there I can\'t tell\n' + \
# 'But now it\'s there it hurts like hell\n' + \
# 'And what is more it radically affects my sense of smell.\n' + \
# '(His sense of smell.)'
# self.fill('.')
# self.cursor_home()
# for c in write_text:
# self.write_ch (c)
# print str(self)
#
#if __name__ == '__main__':
# t = ANSI(6,65)
# t.test()
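# A minimal usage sketch (illustrative): feed text plus an escape sequence
# through the FSM, then render the screen buffer.
#
# term = ANSI(24, 80)
# term.write('hello\x1b[2;5Hworld') # plain text, then cursor to row 2, col 5
# print str(term)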
|
gwu-libraries/sfm-flickr-harvester | refs/heads/master | flickr_exporter.py | 1 | from sfmutils.exporter import BaseExporter, BaseTable
from flickr_warc_iter import FlickrWarcIter, TYPE_FLICKR_PHOTO
import logging
import time
from dateutil.parser import parse as date_parse
from dateutil.tz import tzutc
log = logging.getLogger(__name__)
QUEUE = "flickr_exporter"
ROUTING_KEY = "export.start.flickr.flickr_user"
class FlickrPhotoTable(BaseTable):
"""
PETL Table for Flickr photos.
"""
def __init__(self, warc_paths, dedupe, item_date_start, item_date_end, seed_uids, segment_row_size=None):
BaseTable.__init__(self, warc_paths, dedupe, item_date_start, item_date_end, seed_uids, FlickrWarcIter,
segment_row_size=segment_row_size, limit_item_types=[TYPE_FLICKR_PHOTO])
def _header_row(self):
return ("photo_id", "date_posted", "date_taken", "license", "safety_level", "original_format", "owner_nsid",
"owner_username", "title", "description", "media", "photopage")
def _row(self, item):
photopage_url = None
for url in item["urls"]["url"]:
if url["type"] == "photopage":
photopage_url = url["_content"]
return (item["id"],
# date posted is gmt epoch time, convert it to the same format as date taken
# details at https://www.flickr.com/services/api/misc.dates.html
date_parse(time.strftime("%Y-%m-%d %H:%M:%S",
time.gmtime(float(item["dates"]["posted"])))).replace(tzinfo=tzutc()),
date_parse(item["dates"]["taken"]), item["license"], item["safety_level"],
item.get("originalformat"), item["owner"]["nsid"], item["owner"]["username"],
item["title"]["_content"].replace('\n', ' '),
item["description"]["_content"].replace('\n', ' '), item["media"], photopage_url)
def id_field(self):
return "photo_id"
class FlickrExporter(BaseExporter):
def __init__(self, api_base_url, working_path, mq_config=None, warc_base_path=None):
BaseExporter.__init__(self, api_base_url, FlickrWarcIter, FlickrPhotoTable, working_path,
mq_config=mq_config, warc_base_path=warc_base_path, limit_item_types=[TYPE_FLICKR_PHOTO])
if __name__ == "__main__":
FlickrExporter.main(FlickrExporter, QUEUE, [ROUTING_KEY])
|
tonnosf/mondkalender | refs/heads/master | node_modules/protractor/node_modules/accessibility-developer-tools/scripts/parse_aria_schemas.py | 381 | import json
import re
import urllib
import xml.etree.ElementTree as ET
def parse_attributes():
schema = urllib.urlopen('http://www.w3.org/MarkUp/SCHEMA/aria-attributes-1.xsd')
tree = ET.parse(schema)
for node in tree.iter():
node.tag = re.sub(r'{.*}', r'', node.tag)
type_map = {
'states': 'state',
'props': 'property'
}
properties = {}
groups = tree.getroot().findall('attributeGroup')
print groups
for group in groups:
print(group.get('name'))
name_match = re.match(r'ARIA\.(\w+)\.attrib', group.get('name'))
if not name_match:
continue
group_type = name_match.group(1)
print group_type
if group_type not in type_map:
continue
type = type_map[group_type]
for child in group:
name = re.sub(r'aria-', r'', child.attrib['name'])
property = {}
property['type'] = type
if 'type' in child.attrib:
valueType = re.sub(r'xs:', r'', child.attrib['type'])
if valueType == 'IDREF':
property['valueType'] = 'idref'
elif valueType == 'IDREFS':
property['valueType'] = 'idref_list'
else:
property['valueType'] = valueType
else:
type_spec = child.findall('simpleType')[0]
restriction_spec = type_spec.findall('restriction')[0]
base = restriction_spec.attrib['base']
if base == 'xs:NMTOKENS':
property['valueType'] = 'token_list'
elif base == 'xs:NMTOKEN':
property['valueType'] = 'token'
else:
raise Exception('Unknown value type: %s' % base)
values = []
for value_type in restriction_spec:
values.append(value_type.get('value'))
property['values'] = values
if 'default' in child.attrib:
property['defaultValue'] = child.attrib['default']
properties[name] = property
return json.dumps(properties, sort_keys=True, indent=4, separators=(',', ': '))
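# For a single attribute, the generated JSON looks roughly like this
# (illustrative, based on the ARIA 1.0 attribute schema):
#
# "live": {
#     "defaultValue": "off",
#     "type": "property",
#     "values": ["off", "polite", "assertive"],
#     "valueType": "token"
# }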
if __name__ == "__main__":
attributes_json = parse_attributes()
constants_file = open('src/js/Constants.js', 'r')
new_constants_file = open('src/js/Constants.new.js', 'w')
in_autogen_block = False
for line in constants_file:
if not in_autogen_block:
new_constants_file.write('%s' % line)
if re.match(r'// BEGIN ARIA_PROPERTIES_AUTOGENERATED', line):
in_autogen_block = True
if re.match(r'// END ARIA_PROPERTIES_AUTOGENERATED', line):
break
new_constants_file.write('/** @type {Object.<string, Object>} */\n')
new_constants_file.write('axs.constants.ARIA_PROPERTIES = %s;\n' % attributes_json)
new_constants_file.write('// END ARIA_PROPERTIES_AUTOGENERATED\n')
for line in constants_file:
new_constants_file.write('%s' % line)
|
chokribr/invenio | refs/heads/master | invenio/legacy/bibrank/selfcites_task.py | 4 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""
Self citations task
Stores self-citations in a table for quick access
Examples:
(run a daemon job)
bibrank -w selfcites
(run on a set of records)
selfcites -i 1-20
(run on a collection)
selfcites -c "Reports"
This task handles the self-citations computation
It is run on modified records so that it can update the tables used for
displaying info in the citesummary format
"""
import sys
import ConfigParser
import time
from datetime import datetime
from invenio.ext.cache import cache
from invenio.legacy.dbquery import serialize_via_marshal
from intbitset import intbitset
from invenio.config import CFG_ETCDIR
from invenio.legacy.bibsched.bibtask import \
task_get_option, write_message, \
task_sleep_now_if_required, \
task_update_progress
from invenio.legacy.dbquery import run_sql
from invenio.legacy.bibrank.selfcites_indexer import update_self_cites_tables, \
compute_friends_self_citations, \
compute_simple_self_citations, \
get_authors_tags
from invenio.legacy.bibrank.citation_searcher import get_refers_to
from invenio.legacy.bibrank.citation_indexer import get_bibrankmethod_lastupdate
from invenio.legacy.bibrank.tag_based_indexer import intoDB, fromDB
from invenio.modules.ranker.registry import configuration
def compute_and_store_self_citations(recid, tags, citations_fun, selfcites_dic,
verbose=False):
"""Compute and store self-cites in a table
Args:
- recid
- tags: used when bibauthorid is deactivated see get_author_tags()
in bibrank_selfcites_indexer
"""
assert recid
if verbose:
write_message("* processing %s" % recid)
references = get_refers_to(recid)
recids_to_check = set([recid]) | set(references)
placeholders = ','.join('%s' for r in recids_to_check)
rec_row = run_sql("SELECT MAX(`modification_date`) FROM `bibrec`"
" WHERE `id` IN (%s)" % placeholders, recids_to_check)
try:
rec_timestamp = rec_row[0]
except IndexError:
write_message("record not found")
return
cached_citations_row = run_sql("SELECT `count` FROM `rnkSELFCITES`"
" WHERE `last_updated` >= %s"
" AND `id_bibrec` = %s", (rec_timestamp[0], recid))
if cached_citations_row and cached_citations_row[0][0]:
if verbose:
write_message("%s found (cached)" % cached_citations_row[0])
else:
cites = citations_fun(recid, tags)
selfcites_dic[recid] = len(cites)
replace_cites(recid, cites)
sql = """REPLACE INTO rnkSELFCITES (`id_bibrec`, `count`, `references`,
`last_updated`) VALUES (%s, %s, %s, NOW())"""
references_string = ','.join(str(r) for r in references)
run_sql(sql, (recid, len(cites), references_string))
if verbose:
write_message("%s found" % len(cites))
def replace_cites(recid, new_cites):
"""Update database with new citations set
Given a set of self citations:
* stores the new ones in the database
* removes the old ones from the database
"""
old_cites = set(row[0] for row in run_sql("""SELECT citer
FROM rnkSELFCITEDICT
WHERE citee = %s""", [recid]))
cites_to_add = new_cites - old_cites
cites_to_delete = old_cites - new_cites
for cit in cites_to_add:
write_message('adding cite %s %s' % (recid, cit), verbose=1)
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
run_sql("""INSERT INTO rnkSELFCITEDICT (citee, citer, last_updated)
VALUES (%s, %s, %s)""", (recid, cit, now))
for cit in cites_to_delete:
write_message('deleting cite %s %s' % (recid, cit), verbose=1)
run_sql("""DELETE FROM rnkSELFCITEDICT
WHERE citee = %s and citer = %s""", (recid, cit))
def rebuild_tables(rank_method_code, config):
"""Rebuild the tables from scratch
Called by bibrank -w selfcites -R
"""
task_update_progress('emptying tables')
empty_self_cites_tables()
task_update_progress('filling tables')
fill_self_cites_tables(rank_method_code, config)
return True
def fetch_index_update():
"""Fetch last runtime of given task"""
end_date = get_bibrankmethod_lastupdate('citation')
return end_date
def fetch_records(start_date, end_date):
"""Filter records not indexed out of recids
"""
sql = """SELECT `id` FROM `bibrec`
WHERE `modification_date` <= %s
AND `modification_date` > %s"""
records = run_sql(sql, (end_date, start_date))
return intbitset(records)
def fetch_concerned_records(name, ids_param):
"""Fetch records that have been updated since the last run of the daemon"""
if ids_param:
recids = intbitset()
for first, last in ids_param:
recids += range(first, last+1)
end_date = None
else:
start_date = get_bibrankmethod_lastupdate(name)
end_date = fetch_index_update()
recids = fetch_records(start_date, end_date)
return recids, end_date
def store_last_updated(name, date):
"""Updates method last run date"""
run_sql("UPDATE rnkMETHOD SET last_updated=%s WHERE name=%s", (date, name))
def read_configuration(rank_method_code):
"""Load the config file from disk and parse it."""
filename = configuration.get(rank_method_code + '.cfg', '')
config = ConfigParser.ConfigParser()
try:
config.readfp(open(filename))
except StandardError:
write_message("Cannot find configuration file: %s" % filename, sys.stderr)
raise
return config
def process_updates(rank_method_code):
"""
This is what gets executed first when the task is started.
It handles the --rebuild option. If that option is not specified
we fall back to the process_one()
"""
write_message("Running rank method: %s" % rank_method_code, verbose=0)
selfcites_config = read_configuration(rank_method_code)
config = {
'algorithm': selfcites_config.get(rank_method_code, "algorithm"),
'friends_threshold': selfcites_config.get(rank_method_code, "friends_threshold")
}
quick = task_get_option("quick") != "no"
if not quick:
return rebuild_tables(rank_method_code, config)
tags = get_authors_tags()
recids, end_date = fetch_concerned_records(rank_method_code,
task_get_option("id"))
citations_fun = get_citations_fun(config['algorithm'])
weights = fromDB(rank_method_code)
write_message("recids %s" % str(recids))
total = len(recids)
for count, recid in enumerate(recids):
task_sleep_now_if_required(can_stop_too=True)
msg = "Extracting for %s (%d/%d)" % (recid, count + 1, total)
task_update_progress(msg)
write_message(msg)
process_one(recid, tags, citations_fun, weights)
intoDB(weights, end_date, rank_method_code)
store_weights_cache(weights)
write_message("Complete")
return True
def get_citations_fun(algorithm):
"""Returns the computation function given the algorithm name"""
if algorithm == 'friends':
citations_fun = compute_friends_self_citations
else:
citations_fun = compute_simple_self_citations
return citations_fun
def process_one(recid, tags, citations_fun, selfcites_dic):
"""Self-cites core func, executed on each recid"""
# First update this record then all its references
compute_and_store_self_citations(recid, tags, citations_fun, selfcites_dic)
references = get_refers_to(recid)
for recordid in references:
compute_and_store_self_citations(recordid,
tags,
citations_fun,
selfcites_dic)
def empty_self_cites_tables():
"""
This will empty all the self-cites tables
The purpose is to rebuild the tables from scratch in case there is a problem
with them: inconsistencies, corruption, ...
"""
run_sql('TRUNCATE rnkSELFCITES')
run_sql('TRUNCATE rnkEXTENDEDAUTHORS')
run_sql('TRUNCATE rnkRECORDSCACHE')
def fill_self_cites_tables(rank_method_code, config):
"""
This will fill the self-cites tables with data
The purpose of this function is to fill these tables on a website that
never ran the self-cites daemon
This is an optimization for running on empty tables; the result is
expected to match what compute_and_store_self_citations produces.
"""
begin_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
algorithm = config['algorithm']
tags = get_authors_tags()
selfcites_dic = {}
all_ids = intbitset(run_sql('SELECT id FROM bibrec ORDER BY id'))
citations_fun = get_citations_fun(algorithm)
write_message('using %s' % citations_fun.__name__)
if algorithm == 'friends':
# We only need this table for the friends algorithm and similar ones
# Fill intermediary tables
for index, recid in enumerate(all_ids):
if index % 1000 == 0:
msg = 'intermediate %d/%d' % (index, len(all_ids))
task_update_progress(msg)
write_message(msg)
task_sleep_now_if_required()
update_self_cites_tables(recid, config, tags)
# Fill self-cites table
for index, recid in enumerate(all_ids):
if index % 1000 == 0:
msg = 'final %d/%d' % (index, len(all_ids))
task_update_progress(msg)
write_message(msg)
task_sleep_now_if_required()
compute_and_store_self_citations(recid,
tags,
citations_fun,
selfcites_dic)
intoDB(selfcites_dic, begin_date, rank_method_code)
store_weights_cache(selfcites_dic)
def store_weights_cache(weights):
"""Store into key/value store"""
cache.set('selfcites_weights', serialize_via_marshal(weights))
|
nabilbendafi/script.module.pydevd | refs/heads/master | lib/pydevd_psyco_stub.py | 64 | '''
Psyco stub: should implement all the external API from psyco.
'''
def proxy(func, *args, **kwargs):
return func
def bind(func, *args, **kwargs):
return func
def unbind(func, *args, **kwargs):
return func
def unproxy(func, *args, **kwargs):
return func
def full(*args, **kwargs):
pass
def log(*args, **kwargs):
pass
def runonly(*args, **kwargs):
pass
def background(*args, **kwargs):
pass
def cannotcompile(*args, **kwargs):
pass
def profile(*args, **kwargs):
pass
def stop(*args, **kwargs):
pass
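# Usage sketch: code written against the real psyco API runs unchanged with
# this stub, e.g. (some_func is hypothetical):
#
# import psyco
# psyco.full()                       # no-op here
# fast_func = psyco.proxy(some_func) # simply returns some_func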
|
Starefossen/docker-tutorial | refs/heads/master | manage.py | 43 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webapp.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
Grogdor/CouchPotatoServer | refs/heads/master | libs/chardet/sbcharsetprober.py | 2926 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
def __init__(self, model, reversed=False, nameProber=None):
CharSetProber.__init__(self)
self._mModel = model
# TRUE if we need to reverse every pair in the model lookup
self._mReversed = reversed
# Optional auxiliary prober for name decision
self._mNameProber = nameProber
self.reset()
def reset(self):
CharSetProber.reset(self)
# char order of last character
self._mLastOrder = 255
self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
self._mTotalSeqs = 0
self._mTotalChar = 0
# characters that fall in our sampling range
self._mFreqChar = 0
def get_charset_name(self):
if self._mNameProber:
return self._mNameProber.get_charset_name()
else:
return self._mModel['charsetName']
def feed(self, aBuf):
if not self._mModel['keepEnglishLetter']:
aBuf = self.filter_without_english_letters(aBuf)
aLen = len(aBuf)
if not aLen:
return self.get_state()
for c in aBuf:
order = self._mModel['charToOrderMap'][wrap_ord(c)]
if order < SYMBOL_CAT_ORDER:
self._mTotalChar += 1
if order < SAMPLE_SIZE:
self._mFreqChar += 1
if self._mLastOrder < SAMPLE_SIZE:
self._mTotalSeqs += 1
if not self._mReversed:
i = (self._mLastOrder * SAMPLE_SIZE) + order
model = self._mModel['precedenceMatrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * SAMPLE_SIZE) + self._mLastOrder
model = self._mModel['precedenceMatrix'][i]
self._mSeqCounters[model] += 1
self._mLastOrder = order
if self.get_state() == constants.eDetecting:
if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
cf = self.get_confidence()
if cf > POSITIVE_SHORTCUT_THRESHOLD:
if constants._debug:
sys.stderr.write('%s confidence = %s, we have a '
'winner\n' %
(self._mModel['charsetName'], cf))
self._mState = constants.eFoundIt
elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
if constants._debug:
sys.stderr.write('%s confidence = %s, below negative '
'shortcut threshold %s\n' %
(self._mModel['charsetName'], cf,
NEGATIVE_SHORTCUT_THRESHOLD))
self._mState = constants.eNotMe
return self.get_state()
def get_confidence(self):
r = 0.01
if self._mTotalSeqs > 0:
r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
/ self._mModel['mTypicalPositiveRatio'])
r = r * self._mFreqChar / self._mTotalChar
if r >= 1.0:
r = 0.99
return r
|
javierder/dogestart.me | refs/heads/master | django/contrib/sitemaps/tests/test_http.py | 109 | from __future__ import unicode_literals
import os
from datetime import date
from django.conf import settings
from django.contrib.sitemaps import Sitemap, GenericSitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from django.utils.unittest import skipUnless
from django.utils.formats import localize
from django.utils._os import upath
from django.utils.translation import activate, deactivate
from .base import TestModel, SitemapTestsBase
class HTTPSitemapTests(SitemapTestsBase):
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
response = self.client.get('/simple/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
response = self.client.get('/simple/custom-index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_simple_sitemap_section(self):
"A simple sitemap section can be rendered"
response = self.client.get('/simple/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@override_settings(
TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
)
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
response = self.client.get('/simple/custom-sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
@override_settings(USE_L10N=True)
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
activate('fr')
self.assertEqual('0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
deactivate()
def test_requestsite_sitemap(self):
# Make sure hitting the flatpages sitemap without the sites framework
# installed doesn't raise an exception
Site._meta.installed = False
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
@skipUnless("django.contrib.sites" in settings.INSTALLED_APPS,
"django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
Site._meta.installed = False
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_item(self):
"""
Check to make sure that the raw item is included with each
Sitemap.get_url() url result.
"""
test_sitemap = GenericSitemap({'queryset': TestModel.objects.all()})
def is_testmodel(url):
return isinstance(url['item'], TestModel)
item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls()))
self.assertTrue(item_in_url_info)
def test_cached_sitemap_index(self):
"""
Check that a cached sitemap index can be rendered (#2713).
"""
response = self.client.get('/cached/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
def test_x_robots_sitemap(self):
response = self.client.get('/simple/index.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
response = self.client.get('/simple/sitemap.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
|
Peetz0r/micropython-esp32 | refs/heads/dev-bluetooth-plus-deepsleep | tests/basics/dict_clear.py | 118 | d = {1: 2, 3: 4}
print(len(d))
d.clear()
print(d)
d[2] = 42
print(d)
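# Expected output:
# 2
# {}
# {2: 42}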
|
JeromeO/Pyromaths | refs/heads/master | src/pyromaths/ex/sixiemes/droites.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Pyromaths
# A Python program that generates typical maths exercise sheets at French
# middle-school level, together with their LaTeX answer keys.
# Copyright (C) 2006 -- Jérôme Ortais (jerome.ortais@pyromaths.org)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import random
import math
def nodesep(ligne):
"""
Sets the nodesep values: 0 for an endpoint, -6 to extend the line beyond the point
@param ligne: 'une droite' (line), 'une demi-droite' (ray) or 'un segment' (segment)
@type ligne: string
"""
if ligne == 'une droite':
retour = ['-6', '-6']
elif ligne == 'une demi-droite':
retour = ['0', '-6']
else:
retour = ['0', '0']
return retour
def choix_points(n):
"""
Picks n points among A, B, C, ..., Z
@param n: number of points to pick
@type n: integer
"""
points = [chr(i + 65) for i in range(26)]
liste = []
for i in range(n):
liste.append(points.pop(random.randrange(len(points))))
return liste
def choix_ligne(n):
"""
Returns n propositions among line, segment and ray
@param n: number of propositions
@type n: integer
"""
lignes = ['une droite', 'une demi-droite', 'un segment']
(liste_lignes, retour) = ([], [])
for dummy in range((n - 1) // len(lignes) + 1):
liste_lignes.extend(lignes)
for dummy in range(n):
retour.append(liste_lignes.pop(random.randrange(len(liste_lignes))))
return retour
def symboles(ligne):
"""
Returns the pair (), [] or [) matching the type of line
@param ligne: line, ray or segment
@type ligne: string
"""
if ligne == 'une droite':
retour = ['(', ')']
elif ligne == 'une demi-droite':
retour = ['[', ')']
else:
retour = ['[', ']']
return retour
def prepare_tuple(lpoints, ligne):
"""
Prepares two tuples used to display the question and the solution
@param lpoints: the points of the figure
@type lpoints: list of letters
@param ligne: line, ray or segment
@type ligne: string
"""
(retour_exo, retour_sol) = ([], [])
# pick the two points used to draw the line:
templist = [i for i in range(len(lpoints))]
deuxpoints = []
for i in range(2):
deuxpoints.append(lpoints[templist.pop(random.randrange(len(templist)))])
# pick the symbols matching the type of line:
lsymboles = symboles(ligne)
retour_sol.append(lsymboles[0])
retour_sol.extend(deuxpoints)
retour_sol.append(lsymboles[1])
retour_sol.append(ligne)
# pick which parts are blanked out in the exercise:
alea = random.randrange(3)
if alea > 1:
retour_exo = ['\\ldots', '\\ldots', '\\ldots', '\\ldots',
'\\dotfill']
elif alea > 0:
retour_exo = ['\\ldots']
retour_exo.extend(retour_sol[1:3])
retour_exo.extend(['\\ldots', retour_sol[4]])
else:
retour_exo = retour_sol[:4]
retour_exo.append('\\dotfill')
return (tuple(retour_exo), tuple(retour_sol))
def tex_figure(liste, lpoints, points_coord, nodesep=0):
"""
Writes to a tex file the construction of 3 points and, optionally,
a line, a ray or a segment.
@param liste: list of exercises or answers
@type liste: list
@param lpoints: list of 3 points
@type lpoints: list of 3 strings
@param nodesep: list of the pstricks overshoot values
@type nodesep: list of 2 strings
"""
liste.append('\\begin{pspicture*}(-0.5,0.2)(4.5,2.2)')
liste.append('\\psset{PointSymbol=x}')
liste.append('\\pstGeonode[PosAngle=90](0.5,%s){%s}(2,%s){%s}(3.5,%s){%s}' %
points_coord)
if nodesep:
liste.append('\\pstLineAB[nodesepA=%s, nodesepB=%s]{%s}{%s}' %
tuple(nodesep))
liste.append('\\end{pspicture*}\\tabularnewline')
def coord_points(lpoints):
"""Définit les ordonnées de trois points nommés dont les noms sont dans lpoints"""
ordonnees = [random.randrange(5, 16) / 10. for i in range(3)]
while abs(2 * ordonnees[1] - ordonnees[0] - ordonnees[2]) < .5:
ordonnees = [random.randrange(5, 16) / 10. for i in range(3)]
random.shuffle(ordonnees)
for i in range(3):
ordonnees.insert(2 * i + 1, lpoints[i])
return tuple(ordonnees)
def tex_ligne_tableau(exo, cor, ligne):
"""
Writes one table row to a tex file
@param exo: exercises file
@type exo: file
@param cor: answers file
@type cor: file
@param ligne: line, ray or segment
@type ligne: string
"""
lpoints = choix_points(3)
(exer, solution) = prepare_tuple(lpoints, ligne)
exo.append('$%s %s%s %s$ est %s &' %
exer)
cor.append('$%s %s%s %s$ est %s &' %
solution)
lnodesep = nodesep(ligne)
lnodesep.extend(solution[1:3])
points_coord = coord_points(lpoints)
if exer != ('\\ldots', '\\ldots', '\\ldots', '\\ldots', '\\dotfill'):
tex_figure(exo, lpoints, points_coord)
else:
tex_figure(exo, lpoints, points_coord, lnodesep)
tex_figure(cor, lpoints, points_coord, lnodesep)
exo.append('\\hline')
cor.append('\\hline')
def Droites():
"""
Writes the 5 rows of the table and returns (exo, cor), the lists of
exercise lines and answer lines
"""
exo = ["\\exercice", u"Compléter les pointillés et les figures :\\par",
'\\renewcommand{\\tabularxcolumn}[1]{m{#1}}',
'\\begin{tabularx}{\\linewidth}{|X|>{\\centering}m{5cm}|}',
'\\hline',
u'\\textbf{phrase} & \\textbf{Figure} \\tabularnewline \\hline']
cor = ["\\exercice*", u"Compléter les pointillés et les figures :\\par",
'\\renewcommand{\\tabularxcolumn}[1]{m{#1}}',
'\\begin{tabularx}{\\linewidth}{|X|>{\\centering}m{5cm}|}',
'\\hline',
u'\\textbf{Phrase} & \\textbf{Figure} \\tabularnewline \\hline']
line = choix_ligne(5)
for i in range(5):
tex_ligne_tableau(exo, cor, line[i])
exo.append('\\end{tabularx}')
cor.append('\\end{tabularx}')
return (exo, cor)
Droites.description = u'Droites, demi-droites, segments'
#------------------------------------------------------------------------------
# Parallels and perpendiculars
#------------------------------------------------------------------------------
# perp. to (ac) through b => val2=('a', 'c', 'a', 'c', 'b', 'b', 'b', 'a')
# para. to (ab) through d => val2=('a', 'b', 'a', 'b', 'd', 'd')
def fig_perp(points, coor, solution=0, per=[], par=[]):
val_enonce = (
points[0], points[1], coor[0], coor[1], coor[2], coor[3],
points[2], points[3], coor[4], coor[5], coor[6], coor[7],)
pts = ('a', 'b', 'c', 'd')
text = \
""" \\begin{pspicture*}(-4,-4)(4,4)
\psset{PointSymbol=x}
\pstGeonode[PointName={%s,%s}](%s;%s){a}(%s;%s){b}
\pstGeonode[PointName={%s,%s}](%s; %s){c}(%s; %s){d}""" % \
val_enonce
if solution:
val_soluce = (
pts[per[0]], pts[per[1]], pts[per[0]], pts[per[1]],
pts[per[2]], pts[per[2]], pts[per[2]], pts[per[0]],
pts[par[0]], pts[par[1]], pts[par[0]], pts[par[1]],
pts[par[2]], pts[par[2]])
text = text + \
""" \pstLineAB[nodesep=-4, linecolor=DarkBlue]{%s}{%s}
\pstProjection[PointName=none]{%s}{%s}{%s}[e]\pstLineAB[nodesep=-7, linecolor=DarkBlue]{%s}{e}
\pstRightAngle[, linecolor=DarkBlue]{%s}{e}{%s}
\pstLineAB[nodesep=-4, linecolor=DarkRed]{%s}{%s}
\pstTranslation[PointName=none,PointSymbol=none]{%s}{%s}{%s}[f]
\pstLineAB[nodesep=-7, linecolor=DarkRed]{%s}{f}
\end{pspicture*}""" % \
val_soluce
return text
def noms_sommets(nb): # returns nb vertex names
(listenb, listepts) = ([], [])
for i in range(26):
listenb.append(i + 65)
for i in range(nb):
listepts.append(chr(listenb.pop(random.randrange(26 - i))))
listepts.sort()
return tuple(listepts)
def cree_coordonnees(longueur=3):
from math import floor
alpha = random.randrange(180)
k0 = random.randrange(50, 100) / 100.0
a0 = alpha + random.randrange(30, 120)
k1 = random.randrange(50, 100) / 100.0
a1 = alpha + random.randrange(210, 300)
return (longueur, alpha, longueur, alpha + 180, floor((k0 * 10) * longueur) /
10.0, a0, floor((k1 * 10) * longueur) / 10.0, a1)
def enonce_perp(exo, cor):
coor = cree_coordonnees(3)
noms = noms_sommets(4)
par, per = [], []
lval = [0, 1, 2, 3]
for dummy in range(3):
par.append(lval.pop(random.randrange(len(lval))))
while per == [] or (par[0], par[1]) == (per[0], per[1]) or \
(par[0], par[1]) == (per[1], per[0]) :
lval = [0, 1, 2, 3]
per = []
for dummy in range(3):
per.append(lval.pop(random.randrange(len(lval))))
exo.append(fig_perp(noms, coor))
cor.append(fig_perp(noms, coor, 1, per, par))
exo.append('\end{pspicture*}\\par\n\\begin{enumerate}')
cor.append('\\par\n\\begin{enumerate}')
s_per = u"\\item Tracer la droite perpendiculaire à la droite $(%s%s)$ passant par $%s$"
s_par = u"\\item Tracer la droite parallèle à la droite $(%s%s)$ passant par $%s$"
s_per = s_per % (noms[per[0]], noms[per[1]], noms[per[2]])
s_par = s_par % (noms[par[0]], noms[par[1]], noms[par[2]])
if random.randrange(2):
exo.append(s_par)
cor.append(s_par)
exo.append(s_per)
cor.append(s_per)
else:
exo.append(s_per)
cor.append(s_per)
exo.append(s_par)
cor.append(s_par)
exo.append('\\end{enumerate}')
cor.append('\\end{enumerate}')
def Perpendiculaires():
exo = ["\\exercice", u"Réaliser les figures suivantes :\\par", '\\begin{multicols}{2}']
cor = ["\\exercice*", u"Réaliser les figures suivantes :\\par", '\\begin{multicols}{2}']
enonce_perp(exo, cor)
exo.append('\\columnbreak')
cor.append('\\columnbreak')
enonce_perp(exo, cor)
exo.append('\\end{multicols}')
cor.append('\\end{multicols}')
return (exo, cor)
Perpendiculaires.description = u'Droites perpendiculaires et parallèles'
#------------------------------------------------------------------------------
# Properties
#------------------------------------------------------------------------------
def fonction(angle, xa, ya, dist=0, droite='par'):
"""
Returns a function string to be used with psplot.
@param angle: between 1 and 89° or 91 and 179°; angle between the line and the x-axis
@type angle:
@param xa: x-coordinate of a point on the line
@type xa:
@param ya: y-coordinate of a point on the line
@type ya:
@param dist: distance between the origin and the line
@type dist:
@param droite: 'par' for a parallel line, 'per' for a perpendicular one
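Example (a sketch; exact decimals depend on floating-point rounding):
fonction(45, 0, 1) returns '{x 0.999 mul 1.0 add}'.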
"""
angle_rad = (angle * math.pi) / 180
if droite == 'par':
coef = math.floor(math.tan(angle_rad) * 1000) / 1000.0
ord_or = math.floor(((ya - xa * math.tan(angle_rad)) - dist /
math.cos(angle_rad)) * 1000) / 1000.0
return '{x %s mul %s add}' % (coef, ord_or)
else:
coef = math.floor(-1000 / math.tan(angle_rad)) / 1000.0
return '{x %s mul}' % coef
def PointInter(angle, xa, ya, dist=0):
# intersection of the line with its perpendicular through the origin,
# returned as PSTricks options plus coordinates
angle_rad = (angle * math.pi) / 180
coef1 = math.floor(math.tan(angle_rad) * 1000) / 1000.0
ord_or1 = math.floor(((ya - xa * math.tan(angle_rad)) - dist / math.cos(angle_rad)) *
1000) / 1000.0
coef2 = math.floor(-1000 / math.tan(angle_rad)) / 1000.0
x = ord_or1 / (coef2 - coef1)
y = x * coef2
return ',PosAngle=%s](%s,%s)' % (45 + angle, math.floor(x * 1000) /
1000.0, math.floor(y * 1000) / 1000.0)
def Points(angle, xa, ya, dist=0):
# up to four points on the line, near the edges of the drawing frame,
# used to anchor the labelled points
angle_rad = (angle * math.pi) / 180
coef = math.floor(math.tan(angle_rad) * 1000) / 1000.0
ord_or = math.floor(((ya - xa * math.tan(angle_rad)) - dist / math.cos(angle_rad)) *
1000) / 1000.0
lpos = []
if -1.5 < -2 * coef + ord_or < 1.5:
x = -1.5
y = math.floor((x * coef + ord_or) * 1000) / 1000.0
lpos.append('(%s,%s)' % (x, y))
if -1.5 < 2 * coef + ord_or < 1.5:
x = 1.5
y = math.floor((x * coef + ord_or) * 1000) / 1000.0
lpos.append('(%s,%s)' % (x, y))
if -2.1 < (1.5 - ya + dist / math.cos(angle_rad) + xa * math.tan(angle_rad)) / \
math.tan(angle_rad) < 2.1:
y = 1.1
x = math.floor(((y - ya + dist / math.cos(angle_rad) + xa * math.tan(angle_rad)) /
math.tan(angle_rad)) * 1000) / 1000.0
lpos.append('(%s,%s)' % (x, y))
if -2.1 < (-1.5 - ya + dist / math.cos(angle_rad) + xa * math.tan(angle_rad)) / \
math.tan(angle_rad) < 2.1:
y = -1.1
x = math.floor(((y - ya + dist / math.cos(angle_rad) + xa * math.tan(angle_rad)) /
math.tan(angle_rad)) * 1000) / 1000.0
lpos.append('(%s,%s)' % (x, y))
return lpos
def figure(angle, xa, ya, dist, lpoints, noms, par_per, dist2=0):
"""
@param angle:
@param xa:
@param ya:
@param dist:
@param lpoints:
@param noms: 1: name the lines with two points, e.g. (AB)
0: name the lines (d_1), (d_2), ...
@param par_per: 1: parallel + perpendicular lines
2: three parallel lines
3: two perpendicular lines
@return: a list of PSTricks source lines
"""
ltxt = []
ltxt.append('\\begin{pspicture*}[shift=-1.5](-2.1,-1.6)(2.1,1.6)')
ltxt.append('\\footnotesize')
if par_per < 3:
ltxt.append('\\psplot[linewidth=1.5\\pslinewidth]{-2.1}{2.1}%s' %
fonction(angle, xa, ya))
ltxt.append('\\psplot[linewidth=1.5\\pslinewidth]{-2.1}{2.1}%s' %
fonction(angle, xa, ya, dist))
else:
ltxt.append('\\psplot{-2.1}{2.1}%s' % fonction(angle, xa, ya))
ltxt.append('\\psplot{-2.1}{2.1}%s' % fonction(angle, xa, ya,
dist))
if par_per == 2:
ltxt.append('\\psplot[linewidth=1.5\\pslinewidth]{-2.1}{2.1}%s' %
fonction(angle, xa, ya, dist2))
else:
ltxt.append('\\psplot{-2.1}{2.1}%s' % fonction(angle, xa, ya,
droite='per'))
if noms: # name the lines with two points
if par_per != 2: # two intersection points
ltxt.append('\\pstGeonode[PointSymbol={none,x},PointName={%s,%s} %s{i1}%s{a1}' %
(lpoints[0], lpoints[1], PointInter(angle, xa,
ya), Points(angle, xa, ya)[0]))
ltxt.append('\\pstGeonode[PointSymbol={none,x},PointName={%s,%s} %s{i2}%s{b1}' %
(lpoints[2], lpoints[3], PointInter(angle, xa,
ya, dist), Points(angle, xa, ya, dist)[0]))
ltxt.append('\\pstGeonode[PointSymbol=none,PointName=none]%s{c1}%s{c2}' %
(Points(angle + 90, 0, 0)[0], Points(angle + 90,
0, 0)[1]))
else:
# no intersection point
pts = Points(angle, xa, ya)
ltxt.append('\\pstGeonode[PointSymbol=x,PosAngle=%s,PointName={%s,%s}]%s{a1}%s{a2}' %
(angle + 45, lpoints[0], lpoints[1], pts[0], pts[1]))
pts = Points(angle, xa, ya, dist)
ltxt.append('\\pstGeonode[PointSymbol=x,PosAngle=%s,PointName={%s,%s}]%s{b1}%s{b2}' %
(angle - (45.0 * dist) / abs(dist), lpoints[2],
lpoints[3], pts[0], pts[1]))
pts = Points(angle, xa, ya, dist2)
ltxt.append('\\pstGeonode[PointSymbol=x,PosAngle=%s,PointName={%s,%s}]%s{c1}%s{c2}' %
(angle - (45.0 * dist2) / abs(dist2), lpoints[4],
lpoints[5], pts[0], pts[1]))
else:
# name the lines (d_1), ...
if par_per != 2: # two intersection points
ltxt.append('\\pstGeonode[PointSymbol=none,PointName={none,%s} %s{i1}%s{a1}' %
(lpoints[0], PointInter(angle, xa, ya), Points(angle,
xa, ya)[0]))
ltxt.append('\\pstGeonode[PointSymbol=none,PointName={none,%s} %s{i2}%s{b1}' %
(lpoints[1], PointInter(angle, xa, ya, dist),
Points(angle, xa, ya, dist)[0]))
ltxt.append('\\pstGeonode[PointSymbol=none,PointName={none,%s}]%s{c1}%s{c2}' %
(lpoints[2], Points(angle + 90, 0, 0)[0], Points(angle +
90, 0, 0)[1]))
else:
pts = Points(angle, xa, ya)
ltxt.append('\\pstGeonode[PointSymbol=none,PosAngle=%s,PointName={%s,none}]%s{a1}%s{a2}' %
(angle + 45, lpoints[0], pts[0], pts[1]))
pts = Points(angle, xa, ya, dist)
ltxt.append('\\pstGeonode[PointSymbol=none,PosAngle=%s,PointName={%s,none}]%s{b1}%s{b2}' %
(angle - (45.0 * dist) / abs(dist), lpoints[1],
pts[0], pts[1]))
# FIXME: list index out of range (Points may return fewer positions than needed here)
pts = Points(angle, xa, ya, dist2)
ltxt.append('\\pstGeonode[PointSymbol=none,PosAngle=%s,PointName={%s,none}]%s{c1}%s{c2}' %
(angle - (45.0 * dist2) / abs(dist2), lpoints[2],
pts[0], pts[1]))
if par_per != 2:
if angle < 90:
ltxt.append('\\pstRightAngle[RightAngleSize=.2]{c1}{i1}{a1}')
else:
ltxt.append('\\pstRightAngle[RightAngleSize=.2]{c2}{i1}{a1}')
if par_per == 3:
if angle < 90:
ltxt.append('\\pstRightAngle[RightAngleSize=.2]{c1}{i2}{b1}')
else:
ltxt.append('\\pstRightAngle[RightAngleSize=.2]{c2}{i2}{b1}')
ltxt.append('\\end{pspicture*}')
return ltxt
def valeurs_figures(par_per):
noms = random.randrange(2)
if noms:
lpoints = noms_sommets(6)
else:
lindices = [1, 2, 3]
lpoints = []
for dummy in range(3):
lpoints.append('(d_%s)' % lindices.pop(random.randrange(len(lindices))))
angle = random.randrange(1, 90) + 90 * random.randrange(2)
xa = random.randrange(-5, 5) / 10.0
ya = random.randrange(-3, 3) / 10.0
if random.randrange(2):
dist = random.randrange(4, 9) / 10.0
else:
dist = -random.randrange(4, 9) / 10.0
if par_per == 2:
if dist > 0:
dist2 = -random.randrange(4, 9) / 10.0
else:
dist2 = random.randrange(4, 9) / 10.0
return (angle, xa, ya, dist, lpoints, noms, dist2)
else:
return (angle, xa, ya, dist, lpoints, noms)
def enonce_prop(exo, cor):
exo.append('\\renewcommand{\\tabularxcolumn}[1]{m{#1}}')
exo.append('\\begin{tabularx}{\\textwidth}[t]{|m{3cm}|m{4cm}|X|m{3cm}|}')
exo.append('\\hline')
exo.append(u'\\multicolumn{1}{|c|}{\\bf Données} & \\multicolumn{1}{|c|}{\\bf Figure codée}')
exo.append(u'& \\multicolumn{1}{|c|}{\\bf Propriété} & \\multicolumn{1}{|c|}{\\bf Conclusion}\\\\')
cor.append('\\renewcommand{\\tabularxcolumn}[1]{m{#1}}')
cor.append('\\begin{tabularx}{\\textwidth}[t]{|m{3cm}|m{4cm}|X|m{3cm}|}')
cor.append('\\hline')
cor.append(u'\\multicolumn{1}{|c|}{\\bf Données} & \\multicolumn{1}{|c|}{\\bf Figure codée}')
cor.append(u'& \\multicolumn{1}{|c|}{\\bf Propriété} & \\multicolumn{1}{|c|}{\\bf Conclusion}\\\\')
ltypes = [1, 2, 3]
lexos = []
for i in range(3):
lexos.append(ltypes.pop(random.randrange(len(ltypes))))
for i in range(3):
exo.append('\\hline')
cor.append('\\hline')
v = valeurs_figures(lexos[i])
if lexos[i] == 2:
if v[5]: # names of the form (AB): add parentheses
exo.append('''$(%s%s)//(%s%s)$\\par et\\par $(%s%s)//(%s%s)$ &
\\begin{pspicture*}[shift=-1.5](-2.1,-1.6)(2.1,1.6)
\\end{pspicture*}
& & \\\\''' %
(v[4][0], v[4][1], v[4][2], v[4][3], v[4][0], v[4][1],
v[4][4], v[4][5]))
cor.append('$(%s%s)//(%s%s)$\\par et\\par $(%s%s)//(%s%s)$ & ' %
(v[4][0], v[4][1], v[4][2], v[4][3], v[4][0], v[4][1],
v[4][4], v[4][5]))
else:
exo.append('''$%s//%s$\\par et\\par $%s//%s$ &
\\begin{pspicture*}[shift=-1.5](-2.1,-1.6)(2.1,1.6)
\\end{pspicture*}
& & \\\\''' %
(v[4][0], v[4][1], v[4][0], v[4][2]))
cor.append('$%s//%s$\\par et\\par $%s//%s$ & ' % (v[4][0],
v[4][1], v[4][0], v[4][2]))
cor.append('%s & ' % ('\n').join(figure(v[0], v[1], v[2],
v[3], v[4], v[5], lexos[i], v[6]))) # FIXME: index out of range in figure
cor.append(u'Si deux droites sont parallèles, alors toute parallèle à l\'une est parallèle à l\'autre. &')
if v[5]:
cor.append('$(%s%s)//(%s%s)$ \\\\\n \\hline' % (v[4][2],
v[4][3], v[4][4], v[4][5]))
else:
cor.append('$%s//%s$ \\\\\n \\hline' % (v[4][1], v[4][2]))
else:
fig = random.randrange(2)
if lexos[i] == 1:
if v[5]:
if not fig:
exo.append('''$(%s%s)//(%s%s)$\\par et\\par $(%s%s)\\perp(%s%s)$ &
\\begin{pspicture*}[shift=-1.5](-2.1,-1.6)(2.1,1.6)
\\end{pspicture*}
& & \\\\''' %
(v[4][0], v[4][1], v[4][2], v[4][3], v[4][0],
v[4][1], v[4][0], v[4][2]))
cor.append('$(%s%s)//(%s%s)$\\par et\\par $(%s%s)\\perp(%s%s)$ &' %
(v[4][0], v[4][1], v[4][2], v[4][3], v[4][0],
v[4][1], v[4][0], v[4][2]))
else:
if not fig:
exo.append('''$%s//%s$\\par et\\par $%s\perp%s$ &
\\begin{pspicture*}[shift=-1.5](-2.1,-1.6)(2.1,1.6)
\\end{pspicture*}
& & \\\\''' %
(v[4][0], v[4][1], v[4][0], v[4][2]))
cor.append('$%s//%s$\\par et\\par $%s\perp%s$ &' %
(v[4][0], v[4][1], v[4][0], v[4][2]))
if fig:
exo.append('& %s & & \\\\' % ('\n').join(figure(v[0],
v[1], v[2], v[3], v[4], v[5], lexos[i])))
cor.append('%s & ' % ('\n').join(figure(v[0], v[1], v[2],
v[3], v[4], v[5], lexos[i])))
cor.append(u'Si deux droites sont parallèles, alors toute perpendiculaire à l\'une est perpendiculaire à l\'autre. &')
if v[5]:
cor.append('$(%s%s)\\perp(%s%s)$ \\\\\n \\hline' %
(v[4][2], v[4][3], v[4][0], v[4][2]))
else:
cor.append('$%s\perp%s$ \\\\\n \\hline' % (v[4][1],
v[4][2]))
else:
if v[5]:
if not fig:
exo.append('''$(%s%s)\\perp(%s%s)$\\par et\\par $(%s%s)\\perp(%s%s)$ &
\\begin{pspicture*}[shift=-1.5](-2.1,-1.6)(2.1,1.6)
\\end{pspicture*}
& & \\\\''' %
(v[4][0], v[4][1], v[4][0], v[4][2], v[4][2],
v[4][3], v[4][0], v[4][2]))
cor.append('$(%s%s)\\perp(%s%s)$\\par et\\par $(%s%s)\\perp(%s%s)$ &' %
(v[4][0], v[4][1], v[4][0], v[4][2], v[4][2],
v[4][3], v[4][0], v[4][2]))
else:
if not fig:
exo.append('''$%s\\perp%s$\\par et\\par $%s\perp%s$ &
\\begin{pspicture*}[shift=-1.5](-2.1,-1.6)(2.1,1.6)
\\end{pspicture*}
& & \\\\''' %
(v[4][0], v[4][2], v[4][1], v[4][2]))
cor.append('$%s\\perp%s$\\par et\\par $%s\perp%s$ &' %
(v[4][0], v[4][2], v[4][1], v[4][2]))
if fig:
exo.append('& %s & & \\\\' % ('\n').join(figure(v[0],
v[1], v[2], v[3], v[4], v[5], lexos[i])))
cor.append('%s &' % ('\n').join(figure(v[0], v[1], v[2],
v[3], v[4], v[5], lexos[i])))
cor.append(u'Si deux droites sont perpendiculaires à une même troisième alors elles sont parallèles entre elles. &')
if v[5]:
cor.append('$(%s%s)//(%s%s)$ \\\\\n \\hline' % (v[4][0],
v[4][1], v[4][2], v[4][3]))
else:
cor.append('$%s//%s$ \\\\\n \\hline' % (v[4][0],
v[4][1]))
exo.append('''\\hline
\\end{tabularx}
''')
cor.append('\\end{tabularx}')
def Proprietes():
exo = ["\\exercice", u"Compléter le tableau suivant :\\par Les droites en gras sont parallèles.\\par """]
cor = ["\\exercice*", u"Compléter le tableau suivant :\\par Les droites en gras sont parallèles.\\par """]
enonce_prop(exo, cor)
return (exo, cor)
Proprietes.description = u'Propriétés sur les droites'
|
undertherain/benchmarker | refs/heads/master | benchmarker/util/abstractprocess.py | 1 | import subprocess
import tempfile
class Process(object):
"""Runs a command locally, over ssh, or via mpirun, capturing stdout/stderr."""
def __init__(self, ptype="local", host="localhost", command=["hostname"]):
self.proc = None
self.type = ptype
self.host = host
self.command = command
self.__start()
def __start(self):
self.out_file = tempfile.TemporaryFile()
self.err_file = tempfile.TemporaryFile()
runner = []
if self.type == "local":
pass
if self.type == "ssh":
runner = ["ssh", self.host, "-C"]
if self.type == "mpi":
runner = ["mpirun", "-host", self.host]
self.command = runner + self.command
self.proc = subprocess.Popen(self.command, stdout=self.out_file, stderr=self.err_file)
def get_output(self):
if self.proc is None:
return None
self.proc.wait()
self.out_file.seek(0)
out = self.out_file.read().decode()
self.err_file.seek(0)
err = self.err_file.read().decode()
self.out_file.close()
self.err_file.close()
return {"returncode": self.proc.returncode, "out": out, "err": err}
|
moijes12/treeherder | refs/heads/master | tests/webapp/api/test_utils.py | 13 | from treeherder.webapp.api.utils import UrlQueryFilter
def test_single_filter():
input = {
"name": "john",
"age__gte": 30,
"weight__lt": 80,
"gender__in": "male,female"
}
expected = {
'name': set([('=', 'john')]),
'age': set([('>=', 30)]),
'weight': set([('<', 80)]),
'gender': set([('IN', ("male", "female"))])
}
filter = UrlQueryFilter(input)
actual = filter.conditions
for k in expected:
assert actual[k] == expected[k]
def test_multiple_filters():
input = {
"name": "john",
"age__gte": 30,
"age__lt": 80,
}
expected = {
'name': set([('=', 'john')]),
'age': set([('>=', 30), ('<', 80)]),
}
filter = UrlQueryFilter(input)
actual = filter.conditions
for k in expected:
assert actual[k] == expected[k]
def test_get_multiple_value():
input = {
"name": "john",
"age__gte": 30,
"age__lt": 80,
}
expected = set([('>=', 30), ('<', 80)])
filter = UrlQueryFilter(input)
actual = filter.get("age")
assert actual == expected
def test_get_single_value():
input = {
"name": "john",
"age__gte": 30,
"age__lt": 80,
}
expected = "john"
filter = UrlQueryFilter(input)
actual = filter.get("name")
assert actual == expected
def test_get_default_value():
input = {
"name": "john",
"age__gte": 30,
"age__lt": 80,
}
expected = "bar"
filter = UrlQueryFilter(input)
actual = filter.get("foo", "bar")
assert expected == actual
|
Menooker/gem5_pcm | refs/heads/master | src/dev/mips/Malta.py | 45 | # Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
from m5.params import *
from m5.proxy import *
from BadDevice import BadDevice
from Device import BasicPioDevice
from Pci import PciConfigAll
from Platform import Platform
from Uart import Uart8250
class MaltaCChip(BasicPioDevice):
type = 'MaltaCChip'
cxx_header = "dev/mips/malta_cchip.hh"
malta = Param.Malta(Parent.any, "Malta")
class MaltaIO(BasicPioDevice):
type = 'MaltaIO'
cxx_header = "dev/mips/malta_io.hh"
time = Param.Time('01/01/2009',
"System time to use (0 for actual time, default is 1/1/06)")
year_is_bcd = Param.Bool(False,
"The RTC should interpret the year as a BCD value")
malta = Param.Malta(Parent.any, "Malta")
frequency = Param.Frequency('1024Hz', "frequency of interrupts")
class MaltaPChip(BasicPioDevice):
type = 'MaltaPChip'
cxx_header = "dev/mips/malta_pchip.hh"
malta = Param.Malta(Parent.any, "Malta")
class Malta(Platform):
type = 'Malta'
cxx_header = "dev/mips/malta.hh"
system = Param.System(Parent.any, "system")
cchip = MaltaCChip(pio_addr=0x801a0000000)
io = MaltaIO(pio_addr=0x801fc000000)
uart = Uart8250(pio_addr=0xBFD003F8)
# Attach I/O devices to specified bus object. Can't do this
# earlier, since the bus object itself is typically defined at the
# System level.
def attachIO(self, bus):
self.cchip.pio = bus.master
self.io.pio = bus.master
self.uart.pio = bus.master
|
KhronosGroup/COLLADA-CTS | refs/heads/master | StandardDataSets/1_5/collada/library_geometries/geometry/mesh/vertices/input/position_texcoord_color/position_texcoord_color.py | 4 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should not crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
# if baseline fails, no point in further checking
if (self.status_baseline == False):
self.status_superior = self.status_baseline
return self.status_superior
# Compare the rendered images between import and export
# Then compare images against reference test to check for non-equivalence
if ( self.__assistant.CompareRenderedImages(context) ):
self.__assistant.CompareImagesAgainst(context, "_reference_position_texcoord_color")
self.status_superior = self.__assistant.DeferJudgement(context)
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
|
ckundo/nvda | refs/heads/master | source/baseObject.py | 2 | #baseObject.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2007 NVDA Contributors <http://www.nvda-project.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Contains the base classes that many of NVDA's classes such as NVDAObjects, virtualBuffers, appModules, synthDrivers inherit from. These base classes provide such things as auto properties, and methods and properties for scripting and key binding.
"""
import weakref
from logHandler import log
class Getter(object):
def __init__(self,fget):
self.fget=fget
def __get__(self,instance,owner):
if not instance:
return self
return self.fget(instance)
def setter(self,func):
return property(fget=self.fget,fset=func)
def deleter(self,func):
return property(fget=self.fget,fdel=func)
class CachingGetter(Getter):
def __get__(self, instance, owner):
if not instance:
return self
return instance._getPropertyViaCache(self.fget)
class AutoPropertyType(type):
def __init__(self,name,bases,dict):
super(AutoPropertyType,self).__init__(name,bases,dict)
cacheByDefault=False
try:
cacheByDefault=dict["cachePropertiesByDefault"]
except KeyError:
cacheByDefault=any(getattr(base, "cachePropertiesByDefault", False) for base in bases)
props=(x[5:] for x in dict.keys() if x[0:5] in ('_get_','_set_','_del_'))
for x in props:
g=dict.get('_get_%s'%x,None)
s=dict.get('_set_%s'%x,None)
d=dict.get('_del_%s'%x,None)
if x in dict:
methodsString=",".join([str(i) for i in g,s,d if i])
raise TypeError("%s is already a class attribute, cannot create descriptor with methods %s"%(x,methodsString))
if not g:
# There's a setter or deleter, but no getter.
# This means it could be in one of the base classes.
for base in bases:
g = getattr(base,'_get_%s'%x,None)
if g:
break
cache=dict.get('_cache_%s'%x,None)
if cache is None:
# The cache setting hasn't been specified in this class, but it could be in one of the bases.
for base in bases:
cache = getattr(base,'_cache_%s'%x,None)
if cache is not None:
break
else:
cache=cacheByDefault
if g and not s and not d:
setattr(self,x,(CachingGetter if cache else Getter)(g))
else:
setattr(self,x,property(fget=g,fset=s,fdel=d))
class AutoPropertyObject(object):
"""A class that dynamicly supports properties, by looking up _get_* and _set_* methods at runtime.
_get_x will make property x with a getter (you can get its value).
_set_x will make a property x with a setter (you can set its value).
If there is a _get_x but no _set_x then setting x will override the property completely.
Properties can also be cached for the duration of one core pump cycle.
This is useful if the same property is likely to be fetched multiple times in one cycle. For example, several NVDAObject properties are fetched by both braille and speech.
Setting _cache_x to C{True} specifies that x should be cached. Setting it to C{False} specifies that it should not be cached.
If _cache_x is not set, L{cachePropertiesByDefault} is used.
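For example (an illustrative sketch, not code taken from NVDA)::
    class Foo(AutoPropertyObject):
        def _get_bar(self):
            return 42
    # Foo().bar returns 42 via the dynamically created property.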
"""
__metaclass__=AutoPropertyType
#: Tracks the instances of this class; used by L{invalidateCaches}.
#: @type: weakref.WeakKeyDictionary
__instances=weakref.WeakKeyDictionary()
#: Specifies whether properties are cached by default;
#: can be overridden for individual properties by setting _cache_propertyName.
#: @type: bool
cachePropertiesByDefault = False
def __init__(self):
#: Maps properties to cached values.
#: @type: dict
self._propertyCache={}
self.__instances[self]=None
def _getPropertyViaCache(self,getterMethod=None):
if not getterMethod:
raise ValueError("getterMethod is None")
try:
val=self._propertyCache[getterMethod]
except KeyError:
val=getterMethod(self)
self._propertyCache[getterMethod]=val
return val
def invalidateCache(self):
self._propertyCache.clear()
@classmethod
def invalidateCaches(cls):
"""Invalidate the caches for all current instances.
"""
# We use keys() here instead of iterkeys(), as invalidating the cache on an object may cause instances to disappear,
# which would in turn cause an exception due to the dictionary changing size during iteration.
for instance in cls.__instances.keys():
instance.invalidateCache()
class ScriptableObject(AutoPropertyObject):
"""A class that implements NVDA's scripting interface.
Input gestures are bound to scripts such that the script will be executed when the appropriate input gesture is received.
Scripts are methods named with a prefix of C{script_}; e.g. C{script_foo}.
They accept an L{inputCore.InputGesture} as their single argument.
Gesture bindings can be specified on the class by creating a C{__gestures} dict which maps gesture identifiers to script names.
They can also be bound on an instance using the L{bindGesture} method.
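For example (an illustrative sketch, not code taken from NVDA)::
    class MyObject(ScriptableObject):
        def script_sayHello(self, gesture):
            ui.message("hello")
        __gestures = {"kb:NVDA+h": "sayHello"}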
"""
def __init__(self):
#: Maps input gestures to script functions.
#: @type: dict
self._gestureMap = {}
# Bind gestures specified on the class.
for cls in self.__class__.__mro__:
try:
self.bindGestures(getattr(cls, "_%s__gestures" % cls.__name__))
except AttributeError:
pass
super(ScriptableObject, self).__init__()
def bindGesture(self, gestureIdentifier, scriptName):
"""Bind an input gesture to a script.
@param gestureIdentifier: The identifier of the input gesture.
@type gestureIdentifier: str
@param scriptName: The name of the script, which is the name of the method excluding the C{script_} prefix.
@type scriptName: str
@raise LookupError: If there is no script with the provided name.
"""
# Don't store the instance method, as this causes a circular reference
# and instance methods are meant to be generated on retrieval anyway.
func = getattr(self.__class__, "script_%s" % scriptName, None)
if not func:
raise LookupError("No such script: %s" % scriptName)
# Import late to avoid circular import.
import inputCore
self._gestureMap[inputCore.normalizeGestureIdentifier(gestureIdentifier)] = func
def clearGestureBindings(self):
"""Remove all input gesture bindings from this object.
"""
self._gestureMap.clear()
def bindGestures(self, gestureMap):
"""Bind multiple input gestures to scripts.
This is a convenience method which simply calls L{bindGesture} for each gesture and script pair, logging any errors.
@param gestureMap: A mapping of gesture identifiers to script names.
@type gestureMap: dict of str to str
"""
for gestureIdentifier, scriptName in gestureMap.iteritems():
try:
self.bindGesture(gestureIdentifier, scriptName)
except LookupError:
log.error("Error binding script %s in %r" % (scriptName, self))
def getScript(self,gesture):
"""Retrieve the script bound to a given gesture.
@param gesture: The input gesture in question.
@type gesture: L{inputCore.InputGesture}
@return: The script function or C{None} if none was found.
@rtype: script function
"""
for identifier in gesture.identifiers:
try:
# Convert to instance method.
return self._gestureMap[identifier].__get__(self, self.__class__)
except KeyError:
continue
else:
return None
#: A value for sleepMode which indicates that NVDA should fully sleep for this object;
#: i.e. braille and speech via NVDA controller client is disabled and the user cannot disable sleep mode.
SLEEP_FULL = "full"
|
percy-g2/Novathor_xperia_u8500 | refs/heads/master | 6.2.A.1.100/external/webkit/Tools/Scripts/webkitpy/common/checkout/diff_parser.py | 15 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebKit's Python module for interacting with patches."""
import logging
import re
_log = logging.getLogger("webkitpy.common.checkout.diff_parser")
# FIXME: This is broken. We should compile our regexps up-front
# instead of using a custom cache.
_regexp_compile_cache = {}
# FIXME: This function should be removed.
def match(pattern, string):
"""Matches the string with the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = re.compile(pattern)
return _regexp_compile_cache[pattern].match(string)
# FIXME: This belongs on DiffParser (e.g. as to_svn_diff()).
def git_diff_to_svn_diff(line):
"""Converts a git formatted diff line to a svn formatted line.
Args:
line: A string representing a line of the diff.
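For example, "diff --git a/foo.py b/foo.py" becomes "Index: foo.py"
followed by a newline (a sketch of the first conversion pattern below).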
"""
# FIXME: This list should be a class member on DiffParser.
# These regexp patterns should be compiled once instead of every time.
conversion_patterns = (("^diff --git \w/(.+) \w/(?P<FilePath>.+)", lambda matched: "Index: " + matched.group('FilePath') + "\n"),
("^new file.*", lambda matched: "\n"),
("^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}", lambda matched: "===================================================================\n"),
("^--- \w/(?P<FilePath>.+)", lambda matched: "--- " + matched.group('FilePath') + "\n"),
("^\+\+\+ \w/(?P<FilePath>.+)", lambda matched: "+++ " + matched.group('FilePath') + "\n"))
for pattern, conversion in conversion_patterns:
matched = match(pattern, line)
if matched:
return conversion(matched)
return line
# FIXME: This method belongs on DiffParser
def get_diff_converter(first_diff_line):
"""Gets a converter function of diff lines.
Args:
first_diff_line: The first filename line of a diff file.
If this line is git formatted, we'll return a
converter from git to SVN.
"""
if match(r"^diff --git \w/", first_diff_line):
return git_diff_to_svn_diff
return lambda input: input
_INITIAL_STATE = 1
_DECLARED_FILE_PATH = 2
_PROCESSING_CHUNK = 3
class DiffFile(object):
"""Contains the information for one file in a patch.
The field "lines" is a list which contains tuples in this format:
(deleted_line_number, new_line_number, line_string)
If deleted_line_number is zero, it means this line is newly added.
If new_line_number is zero, it means this line is deleted.
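For example, (0, 42, "foo") describes the line "foo" newly added as
line 42, and (42, 0, "foo") the same line deleted from line 42.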
"""
# FIXME: Tuples generally grow into classes. We should consider
# adding a DiffLine object.
def added_or_modified_line_numbers(self):
# This logic was moved from patchreader.py, but may not be
# the right API for this object long-term.
return [line[1] for line in self.lines if not line[0]]
def __init__(self, filename):
self.filename = filename
self.lines = []
def add_new_line(self, line_number, line):
self.lines.append((0, line_number, line))
def add_deleted_line(self, line_number, line):
self.lines.append((line_number, 0, line))
def add_unchanged_line(self, deleted_line_number, new_line_number, line):
self.lines.append((deleted_line_number, new_line_number, line))
# If this is going to be called DiffParser, it should be a re-useable parser.
# Otherwise we should rename it to ParsedDiff or just Diff.
class DiffParser(object):
"""A parser for a patch file.
The field "files" is a dict whose key is the filename and value is
a DiffFile object.
"""
def __init__(self, diff_input):
"""Parses a diff.
Args:
diff_input: An iterable object.
"""
self.files = self._parse_into_diff_files(diff_input)
# FIXME: This function is way too long and needs to be broken up.
def _parse_into_diff_files(self, diff_input):
files = {}
state = _INITIAL_STATE
current_file = None
old_diff_line = None
new_diff_line = None
for line in diff_input:
line = line.rstrip("\n")
if state == _INITIAL_STATE:
transform_line = get_diff_converter(line)
line = transform_line(line)
file_declaration = match(r"^Index: (?P<FilePath>.+)", line)
if file_declaration:
filename = file_declaration.group('FilePath')
current_file = DiffFile(filename)
files[filename] = current_file
state = _DECLARED_FILE_PATH
continue
lines_changed = match(r"^@@ -(?P<OldStartLine>\d+)(,\d+)? \+(?P<NewStartLine>\d+)(,\d+)? @@", line)
if lines_changed:
if state != _DECLARED_FILE_PATH and state != _PROCESSING_CHUNK:
_log.error('Unexpected line change without file path '
'declaration: %r' % line)
old_diff_line = int(lines_changed.group('OldStartLine'))
new_diff_line = int(lines_changed.group('NewStartLine'))
state = _PROCESSING_CHUNK
continue
if state == _PROCESSING_CHUNK:
if line.startswith('+'):
current_file.add_new_line(new_diff_line, line[1:])
new_diff_line += 1
elif line.startswith('-'):
current_file.add_deleted_line(old_diff_line, line[1:])
old_diff_line += 1
elif line.startswith(' '):
current_file.add_unchanged_line(old_diff_line, new_diff_line, line[1:])
old_diff_line += 1
new_diff_line += 1
elif line == '\\ No newline at end of file':
# Nothing to do. We may still have some added lines.
pass
else:
_log.error('Unexpected diff format when parsing a '
'chunk: %r' % line)
return files
|
CodeforHawaii/froide | refs/heads/master | froide/publicbody/migrations/0002_auto_20151127_1754.py | 3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('publicbody', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='jurisdiction',
options={'ordering': ('rank', 'name'), 'verbose_name': 'Jurisdiction', 'verbose_name_plural': 'Jurisdictions'},
),
migrations.AddField(
model_name='publicbody',
name='file_index',
field=models.CharField(max_length=1024, blank=True),
),
migrations.AddField(
model_name='publicbody',
name='org_chart',
field=models.CharField(max_length=1024, blank=True),
),
]
|
Honry/crosswalk-test-suite | refs/heads/master | webapi/tct-csp-w3c-tests/csp-py/csp_object-src_cross-origin_multi_allowed_two-manual.py | 30 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "object-src " + url2 + " " + url1
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_object-src_cross-origin_allowed_two</title>
<link rel="author" title="Intel" href="http://www.intel.com/"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#object-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is a filled green square.</p>
<object data='""" + url1 + """/tests/csp/support/green-100x100.png'/>
</body>
</html> """
|
dankilman/clue | refs/heads/master | clue/tests/test_install.py | 1 | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import sh
from clue import tests
class TestInstall(tests.BaseTest):
def test_default(self):
try:
self.clue.env.create(repos_dir=self.repos_dir)
self.clue.apply()
except sh.ErrorReturnCode as e:
self.fail(e.stdout)
|
drjeep/django | refs/heads/master | django/contrib/auth/migrations/0003_alter_user_email_max_length.py | 586 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0002_alter_permission_name_max_length'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
),
]
|
gaziel/heekscnc | refs/heads/master | pycnc/wxPictureWindow.py | 25 | import wx
class PictureWindow(wx.Window):
def __init__(self, parent, size = None, bitmap = None):# use either size or bitmap
if bitmap != None:
wx.Window.__init__(self, parent, size = wx.Size(bitmap.GetWidth(), bitmap.GetHeight()))
elif size != None:
wx.Window.__init__(self, parent, size = size)
else:
wx.Window.__init__(self, parent)
self.bitmap = bitmap
self.Bind(wx.EVT_PAINT, self.OnPaint)
def OnPaint(self, event):
dc = wx.PaintDC(self)
if self.bitmap != None:
dc.DrawBitmap(self.bitmap, 0, 0, False)
def SetPicture(self, bitmap):
self.bitmap = bitmap
self.Refresh()
def SetPictureBitmap(self, bitmap, filepath, image_type):
if bitmap == None:
bitmap = wx.BitmapFromImage(wx.Image(filepath, image_type))
self.SetPicture(bitmap)
|
yjmade/odoo | refs/heads/8.0 | addons/base_vat/__openerp__.py | 52 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'VAT Number Validation',
'version': '1.0',
'category': 'Hidden/Dependency',
'description': """
VAT validation for Partner's VAT numbers.
=========================================
After installing this module, values entered in the VAT field of Partners will
be validated for all supported countries. The country is inferred from the
2-letter country code that prefixes the VAT number, e.g. ``BE0477472701``
will be validated using the Belgian rules.
There are two different levels of VAT number validation:
--------------------------------------------------------
* By default, a simple off-line check is performed using the known validation
rules for the country, usually a simple check digit. This is quick and
always available, but allows numbers that are perhaps not truly allocated,
or not valid anymore.
* When the "VAT VIES Check" option is enabled (in the configuration of the user's
Company), VAT numbers will be instead submitted to the online EU VIES
database, which will truly verify that the number is valid and currently
allocated to a EU company. This is a little bit slower than the simple
off-line check, requires an Internet connection, and may not be available
all the time. If the service is not available or does not support the
requested country (e.g. for non-EU countries), a simple check will be performed
instead.
Supported countries currently include EU countries, and a few non-EU countries
such as Chile, Colombia, Mexico, Norway or Russia. For unsupported countries,
only the country code will be validated.
""",
'author': 'OpenERP SA',
'depends': ['account'],
'website': 'https://www.odoo.com/page/accounting',
'data': ['base_vat_view.xml'],
'installable': True,
'auto_install': False,
'images': ['images/1_partner_vat.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mairob/Trafficsign-classification-in-TensorFlow | refs/heads/master | Code/TrafficSign_TF_Network_4.py | 1 | import numpy as np
import tensorflow as tf
#Note: this one should give you around 99.1% on an augmented GTSRB
image_size = 48 #width = height
col_channels = 3 #RGB
def initVariable(name, shape):
"""
Initialize weights and biases based on http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
"""
return tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
def conv2d(x, W):
"""
Basic convolution.
"""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""
Basic pooling
"""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
#Note: tf.name_scope is used for structuring the graph
#Note: dropout should be around 35...50%
#Note: learningrate should be around 1e-4....1e-6
with tf.name_scope('Network'):
with tf.name_scope('input'):
x_image = tf.placeholder(tf.float32, [None, image_size, image_size, col_channels], name='Images_raw')
y_raw = tf.placeholder(tf.int32, [None] ,name='Labels_raw')
y_= tf.one_hot(indices=y_raw, depth=43 , name='Labels_oneHot')
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
with tf.name_scope('learningrate'):
learningrate = tf.placeholder(tf.float32)
with tf.name_scope('Layer1'):
W_conv1 = initVariable("W1", [5, 5, 3, 96])
b_conv1 = initVariable("B1", [96])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(tf.nn.local_response_normalization(h_conv1)) #resulting feature maps = 24x24 Pixel
with tf.name_scope('Layer2'):
W_conv2 = initVariable("W2",[3, 3, 96, 96])
b_conv2 = initVariable("B2",[96])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(tf.nn.local_response_normalization(h_conv2)) #resulting feature maps = 12x12 Pixel
with tf.name_scope('Layer3'):
W_conv3 = initVariable("W3",[3, 3, 96, 48])
b_conv3 = initVariable("B3",[48])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_conv3_flat = tf.reshape(h_conv3, [-1, 12*12*48])
with tf.name_scope('Layer4'):
W_conv4 = initVariable("W4",[3, 3, 48, 48])
b_conv4 = initVariable("B4",[48])
h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4) + b_conv4)
with tf.name_scope('Layer5'):
W_conv5 = initVariable("W5",[3, 3, 48, 48])
b_conv5= initVariable("B5",[48])
h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5) + b_conv5)
h_pool5 = max_pool_2x2(tf.nn.local_response_normalization(h_conv5)) #resulting feature maps = 6x6 Pixel
with tf.name_scope('Layer6'):
W_conv6 = initVariable("W6",[2, 2, 48, 48])
b_conv6 = initVariable("B6",[48])
h_conv6 = tf.nn.relu(conv2d(h_pool5, W_conv6) + b_conv6)
h_pool6= max_pool_2x2(tf.nn.local_response_normalization(h_conv6)) #resulting feature maps = 3x3 Pixel
h_pool6_flat = tf.reshape(h_pool6, [-1, 3*3*48])
with tf.name_scope('Concat'):
ccat = tf.concat([h_conv3_flat, h_pool6_flat], 1) # 12*12*48 + 3*3*48 = 7344 features
W_ccat = initVariable("Wccat",[7344 , 2048]) #7344 = 12*12*48 +3*3*48
b_ccat = initVariable("Bccat",[2048])
h_ccat = tf.nn.relu(tf.matmul(ccat, W_ccat) + b_ccat)
h_ccat_drop = tf.nn.dropout(h_ccat, keep_prob)
with tf.name_scope('Fully1'):
W_fc1 = initVariable("Wfc1",[3*3*48 , 1024])
b_fc1 = initVariable("Bfc1",[1024])
h_fc1 = tf.nn.relu(tf.matmul(h_pool6_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
with tf.name_scope('Fully2'):
fc2_ccat = tf.concat([h_ccat_drop, h_fc1_drop], 1)
W_fc2 = initVariable("Wfc2",[3072 , 768])
b_fc2 = initVariable("Bfc2",[768])
h_fc2 = tf.nn.relu(tf.matmul(fc2_ccat, W_fc2) + b_fc2)
h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
with tf.name_scope('OutputLayer'):
W_out = initVariable("Wout",[768, 43])
b_out = initVariable("Bout",[43])
with tf.name_scope("softmax"):
y_conv=tf.nn.softmax(tf.matmul(h_fc2_drop, W_out) + b_out)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(learningrate).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
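# A minimal sketch of how this graph might be driven (hypothetical helper;
# `session`, `images` shaped [batch, 48, 48, 3] and `labels` shaped [batch]
# are assumed to come from the caller; they are not defined in this file):
def run_training_step(session, images, labels, dropout_keep=0.5, lr=1e-4):
    """Runs one optimizer step and returns the current batch accuracy."""
    _, acc = session.run([train_step, accuracy],
                         feed_dict={x_image: images,
                                    y_raw: labels,
                                    keep_prob: dropout_keep,
                                    learningrate: lr})
    return acc  # use keep_prob=1.0 instead when evaluating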
|
jestapinski/oppia | refs/heads/develop | core/domain/collection_domain_test.py | 6 | # coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for collection domain objects and methods defined on them."""
from core.domain import collection_domain
from core.domain import collection_services
from core.tests import test_utils
import feconf
import utils
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with the YAML generation
# methods tested below.
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = ("""category: A category
language_code: en
nodes:
- acquired_skills:
- Skill0a
- Skill0b
exploration_id: an_exploration_id
prerequisite_skills: []
objective: An objective
schema_version: %d
tags: []
title: A title
""") % (feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
class CollectionDomainUnitTests(test_utils.GenericTestBase):
"""Test the collection domain object."""
COLLECTION_ID = 'collection_id'
EXPLORATION_ID = 'exp_id_0'
def setUp(self):
super(CollectionDomainUnitTests, self).setUp()
self.save_new_valid_collection(
self.COLLECTION_ID, 'user@example.com', title='Title',
category='Category', objective='Objective',
exploration_id=self.EXPLORATION_ID)
self.collection = collection_services.get_collection_by_id(
self.COLLECTION_ID)
def _assert_validation_error(self, expected_error_substring):
"""Checks that the collection passes strict validation."""
with self.assertRaisesRegexp(
utils.ValidationError, expected_error_substring):
self.collection.validate()
def test_initial_validation(self):
"""Test validating a new, valid collection."""
self.collection.validate()
def test_title_validation(self):
self.collection.title = 0
self._assert_validation_error('Expected title to be a string')
def test_category_validation(self):
self.collection.category = 0
self._assert_validation_error('Expected category to be a string')
def test_objective_validation(self):
self.collection.objective = ''
self._assert_validation_error('objective must be specified')
self.collection.objective = 0
self._assert_validation_error('Expected objective to be a string')
def test_language_code_validation(self):
self.collection.language_code = ''
self._assert_validation_error('language must be specified')
self.collection.language_code = 0
self._assert_validation_error('Expected language code to be a string')
self.collection.language_code = 'xz'
self._assert_validation_error('Invalid language code')
def test_tags_validation(self):
self.collection.tags = 'abc'
self._assert_validation_error('Expected tags to be a list')
self.collection.tags = [2, 3]
self._assert_validation_error('Expected each tag to be a string')
self.collection.tags = ['', 'tag']
self._assert_validation_error('Tags should be non-empty')
self.collection.tags = ['234']
self._assert_validation_error(
'Tags should only contain lowercase letters and spaces')
self.collection.tags = [' abc']
self._assert_validation_error(
'Tags should not start or end with whitespace')
self.collection.tags = ['abc def']
self._assert_validation_error(
'Adjacent whitespace in tags should be collapsed')
self.collection.tags = ['abc', 'abc']
self._assert_validation_error(
'Expected tags to be unique, but found duplicates')
def test_schema_version_validation(self):
self.collection.schema_version = 'some_schema_version'
self._assert_validation_error('Expected schema version to be an int')
self.collection.schema_version = 100
self._assert_validation_error(
'Expected schema version to be %s' %
feconf.CURRENT_COLLECTION_SCHEMA_VERSION)
def test_nodes_validation(self):
self.collection.nodes = {}
self._assert_validation_error('Expected nodes to be a list')
self.collection.nodes = [
collection_domain.CollectionNode.from_dict({
'exploration_id': '0',
'prerequisite_skills': [],
'acquired_skills': ['skill0a']
}),
collection_domain.CollectionNode.from_dict({
'exploration_id': '0',
'prerequisite_skills': ['skill0a'],
'acquired_skills': ['skill0b']
})
]
self._assert_validation_error(
'There are explorations referenced in the collection more than '
'once.')
def test_initial_explorations_validation(self):
# Having no collection nodes is fine for non-strict validation.
self.collection.nodes = []
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'Expected to have at least 1 exploration in the collection.')
# If the collection has exactly one exploration and that exploration
# has prerequisite skills, then the collection should fail validation.
self.collection.add_node('exp_id_1')
self.save_new_valid_exploration(
'exp_id_1', 'user@example.com', end_state_name='End')
collection_node1 = self.collection.get_node('exp_id_1')
collection_node1.update_prerequisite_skills(['skill1a'])
self._assert_validation_error(
'Expected to have at least 1 exploration with no prerequisite '
'skills.')
def test_metadata_validation(self):
self.collection.title = ''
self.collection.objective = ''
self.collection.category = ''
self.collection.nodes = []
self.collection.add_node('exp_id_1')
# Having no title is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'A title must be specified for the collection.')
self.collection.title = 'A title'
# Having no objective is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'An objective must be specified for the collection.')
self.collection.objective = 'An objective'
# Having no category is fine for non-strict validation.
self.collection.validate(strict=False)
# But it's not okay for strict validation.
self._assert_validation_error(
'A category must be specified for the collection.')
self.collection.category = 'A category'
# Now the collection passes both strict and non-strict validation.
self.collection.validate(strict=False)
self.collection.validate(strict=True)
def test_collection_completability_validation(self):
# Add another exploration, but make it impossible to reach exp_id_1.
self.collection.add_node('exp_id_1')
collection_node1 = self.collection.get_node('exp_id_1')
collection_node1.update_prerequisite_skills(['skill0a'])
self._assert_validation_error(
'Some explorations are unreachable from the initial explorations')
# Connecting the two explorations should lead to clean validation.
collection_node0 = self.collection.get_node('exp_id_0')
collection_node0.update_acquired_skills(['skill0a'])
self.collection.validate()
def test_collection_node_exploration_id_validation(self):
# Validate CollectionNode's exploration_id.
collection_node0 = self.collection.get_node('exp_id_0')
collection_node0.exploration_id = 2
self._assert_validation_error('Expected exploration ID to be a string')
def test_collection_node_prerequisite_skills_validation(self):
collection_node0 = self.collection.get_node('exp_id_0')
collection_node0.prerequisite_skills = {}
self._assert_validation_error(
'Expected prerequisite_skills to be a list')
collection_node0.prerequisite_skills = ['skill0a', 'skill0a']
self._assert_validation_error(
'The prerequisite_skills list has duplicate entries')
collection_node0.prerequisite_skills = ['skill0a', 2]
self._assert_validation_error(
'Expected all prerequisite skills to be strings')
def test_collection_node_acquired_skills_validation(self):
collection_node0 = self.collection.get_node('exp_id_0')
collection_node0.acquired_skills = {}
self._assert_validation_error('Expected acquired_skills to be a list')
collection_node0.acquired_skills = ['skill0a', 'skill0a']
self._assert_validation_error(
'The acquired_skills list has duplicate entries')
collection_node0.acquired_skills = ['skill0a', 2]
self._assert_validation_error(
'Expected all acquired skills to be strings')
def test_collection_node_skills_validation(self):
collection_node0 = self.collection.get_node('exp_id_0')
# Ensure prerequisite_skills and acquired_skills do not overlap.
collection_node0.prerequisite_skills = [
'skill0a', 'skill0b', 'skill0c']
collection_node0.acquired_skills = [
'skill0z', 'skill0b', 'skill0c', 'skill0d']
self._assert_validation_error(
'There are some skills which are both required for exploration '
'exp_id_0 and acquired after playing it: [skill0b, skill0c]')
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = collection_domain.Collection.create_default_collection('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = collection_domain.Collection.create_default_collection('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = collection_domain.Collection.create_default_collection(
'abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_collection_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
collection.
"""
self.save_new_valid_exploration(
'0', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.create_default_collection(
'0', title='title', category='category', objective='objective')
collection_dict = collection.to_dict()
collection_from_dict = collection_domain.Collection.from_dict(
collection_dict)
self.assertEqual(collection_from_dict.to_dict(), collection_dict)
def test_add_delete_node(self):
"""Test that add_node and delete_node fail in the correct situations.
"""
collection = collection_domain.Collection.create_default_collection(
'0')
self.assertEqual(len(collection.nodes), 0)
collection.add_node('test_exp')
self.assertEqual(len(collection.nodes), 1)
with self.assertRaisesRegexp(
ValueError,
'Exploration is already part of this collection: test_exp'
):
collection.add_node('test_exp')
collection.add_node('another_exp')
self.assertEqual(len(collection.nodes), 2)
collection.delete_node('another_exp')
self.assertEqual(len(collection.nodes), 1)
with self.assertRaisesRegexp(
ValueError,
'Exploration is not part of this collection: another_exp'
):
collection.delete_node('another_exp')
collection.delete_node('test_exp')
self.assertEqual(len(collection.nodes), 0)
def test_skills_property(self):
collection = collection_domain.Collection.create_default_collection(
'0')
self.assertEqual(collection.skills, [])
collection.add_node('exp_id_0')
collection.add_node('exp_id_1')
collection.get_node('exp_id_0').update_acquired_skills(
['skill0a'])
collection.get_node('exp_id_1').update_prerequisite_skills(
['skill0a'])
collection.get_node('exp_id_1').update_acquired_skills(
['skill1b', 'skill1c'])
self.assertEqual(collection.skills, ['skill0a', 'skill1b', 'skill1c'])
# Skills should be unique, even if they are duplicated across multiple
# acquired and prerequisite skill lists.
collection.add_node('exp_id_2')
collection.get_node('exp_id_2').update_acquired_skills(
['skill0a', 'skill1c'])
self.assertEqual(collection.skills, ['skill0a', 'skill1b', 'skill1c'])
class ExplorationGraphUnitTests(test_utils.GenericTestBase):
"""Test the skill graph structure within a collection."""
def test_initial_explorations(self):
"""Any exploration without prerequisites should be an initial
exploration.
"""
collection = collection_domain.Collection.create_default_collection(
'collection_id')
# If there are no explorations in the collection, there can be no
# initial explorations.
self.assertEqual(collection.nodes, [])
self.assertEqual(collection.init_exploration_ids, [])
# A freshly added exploration will be an initial one.
collection.add_node('exp_id_0')
self.assertEqual(collection.init_exploration_ids, ['exp_id_0'])
# Having prerequisites will make an exploration no longer initial.
collection.add_node('exp_id_1')
self.assertEqual(len(collection.nodes), 2)
collection.get_node('exp_id_1').update_prerequisite_skills(
['skill0a'])
self.assertEqual(collection.init_exploration_ids, ['exp_id_0'])
# There may be multiple initial explorations.
collection.add_node('exp_id_2')
self.assertEqual(
collection.init_exploration_ids, ['exp_id_0', 'exp_id_2'])
def test_next_explorations(self):
"""Explorations should be suggested based on prerequisite and
acquired skills, as well as which explorations have already been played
in the collection.
"""
collection = collection_domain.Collection.create_default_collection(
'collection_id')
# There should be no next explorations for an empty collection.
self.assertEqual(collection.get_next_exploration_ids([]), [])
# If a new exploration is added, the next exploration IDs should be the
# same as the initial explorations.
collection.add_node('exp_id_1')
self.assertEqual(collection.get_next_exploration_ids([]), ['exp_id_1'])
self.assertEqual(
collection.init_exploration_ids,
collection.get_next_exploration_ids([]))
# Completing the only exploration of the collection should lead to no
# available explorations thereafter. This test is done without any
# prerequisite or acquired skill lists.
self.assertEqual(collection.get_next_exploration_ids(['exp_id_1']), [])
# If the only exploration in the collection has a prerequisite skill,
# there are no explorations left to do.
collection_node1 = collection.get_node('exp_id_1')
collection_node1.update_prerequisite_skills(['skill0a'])
self.assertEqual(collection.get_next_exploration_ids([]), [])
# If another exploration has been added with a prerequisite that is the
# same as an acquired skill of another exploration and the exploration
# giving that skill is completed, then the first exploration should be
# the next one to complete.
collection.add_node('exp_id_2')
collection_node2 = collection.get_node('exp_id_2')
collection_node1.update_acquired_skills(['skill1b'])
collection_node2.update_prerequisite_skills(['skill1b'])
self.assertEqual(collection.get_next_exploration_ids([]), [])
self.assertEqual(collection.get_next_exploration_ids(
['exp_id_1']), ['exp_id_2'])
        # If another exploration with no prerequisites is added, the learner
        # can reach exp_id_1 through it. exp_id_2 should not be suggested
        # until exp_id_1 has been completed.
collection.add_node('exp_id_0')
collection_node0 = collection.get_node('exp_id_0')
collection_node0.update_acquired_skills(['skill0a'])
self.assertEqual(
collection.get_next_exploration_ids([]), ['exp_id_0'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0']), ['exp_id_1'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0', 'exp_id_1']),
['exp_id_2'])
# There may be multiple branches of initial suggested explorations.
collection.add_node('exp_id_3')
self.assertEqual(
collection.get_next_exploration_ids([]), ['exp_id_0', 'exp_id_3'])
# There may also be multiple suggested explorations at other points,
# depending on which explorations the learner has completed.
collection_node3 = collection.get_node('exp_id_3')
collection_node3.update_prerequisite_skills(['skill0c'])
collection_node0.update_acquired_skills(['skill0a', 'skill0c'])
self.assertEqual(
collection.get_next_exploration_ids([]), ['exp_id_0'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0']),
['exp_id_1', 'exp_id_3'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0', 'exp_id_3']),
['exp_id_1'])
self.assertEqual(
collection.get_next_exploration_ids(['exp_id_0', 'exp_id_1']),
['exp_id_2', 'exp_id_3'])
self.assertEqual(
collection.get_next_exploration_ids(
['exp_id_0', 'exp_id_1', 'exp_id_2']), ['exp_id_3'])
# If all explorations have been completed, none should be suggested.
self.assertEqual(
collection.get_next_exploration_ids(
['exp_id_0', 'exp_id_1', 'exp_id_2', 'exp_id_3']), [])
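    # Added summary (not part of the original test): the skill graph built
    # above is
    #   exp_id_0 --skill0a--> exp_id_1 --skill1b--> exp_id_2
    #   exp_id_0 --skill0c--> exp_id_3
    # and get_next_exploration_ids() returns the frontier of nodes whose
    # prerequisites are all covered by skills acquired from completed nodes.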
def test_next_explorations_in_sequence(self):
collection = collection_domain.Collection.create_default_collection(
'collection_id')
exploration_id = 'exp_id_0'
collection.add_node(exploration_id)
# Completing the only exploration of the collection should lead to no
# available explorations thereafter.
self.assertEqual(
collection.get_next_exploration_ids_in_sequence(exploration_id), [])
# If the current exploration has no acquired skills, a list of all
# explorations with no prerequisite skills should be returned.
collection.add_node('exp_id_1')
collection.add_node('exp_id_2')
self.assertEqual(
collection.get_next_exploration_ids_in_sequence(exploration_id),
['exp_id_1', 'exp_id_2'])
# If only one exploration in the collection has a prerequisite skill
# that is included in the user's learned skills, only that exploration
# should be returned.
collection_node0 = collection.get_node('exp_id_0')
collection_node1 = collection.get_node('exp_id_1')
collection_node0.update_acquired_skills(['skill1a'])
collection_node1.update_prerequisite_skills(['skill1a'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence(exploration_id),
['exp_id_1'])
        # Given a chain of explorations in a collection where each
# exploration's acquired skills are the following exploration's
# prerequisite skills, each exploration should return the following
# exploration as a recommendation. The last exploration should
# return an empty list.
collection.add_node('exp_id_3')
collection_node2 = collection.get_node('exp_id_2')
collection_node3 = collection.get_node('exp_id_3')
collection_node1.update_acquired_skills(['skill2a'])
collection_node2.update_acquired_skills(['skill3a'])
collection_node0.update_prerequisite_skills([])
collection_node2.update_prerequisite_skills(['skill2a'])
collection_node3.update_prerequisite_skills(['skill3a'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence('exp_id_0'),
['exp_id_1'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence('exp_id_1'),
['exp_id_2'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence('exp_id_2'),
['exp_id_3'])
self.assertEqual(
collection.get_next_exploration_ids_in_sequence('exp_id_3'),
[])
def test_next_explorations_with_invalid_exploration_ids(self):
collection = collection_domain.Collection.create_default_collection(
'collection_id')
collection.add_node('exp_id_1')
# There should be one suggested exploration to complete by default.
self.assertEqual(collection.get_next_exploration_ids([]), ['exp_id_1'])
# If an invalid exploration ID is passed to get_next_exploration_ids(),
# it should be ignored. This tests the situation where an exploration
# is deleted from a collection after being completed by a user.
self.assertEqual(
collection.get_next_exploration_ids(['fake_exp_id']), ['exp_id_1'])
class YamlCreationUnitTests(test_utils.GenericTestBase):
"""Test creation of collections from YAML files."""
COLLECTION_ID = 'a_collection_id'
EXPLORATION_ID = 'an_exploration_id'
def test_yaml_import_and_export(self):
"""Test the from_yaml() and to_yaml() methods."""
self.save_new_valid_exploration(
self.EXPLORATION_ID, 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.create_default_collection(
self.COLLECTION_ID, title='A title', category='A category',
objective='An objective')
collection.add_node(self.EXPLORATION_ID)
self.assertEqual(len(collection.nodes), 1)
collection_node = collection.get_node(self.EXPLORATION_ID)
collection_node.update_acquired_skills(['Skill0a', 'Skill0b'])
collection.validate()
yaml_content = collection.to_yaml()
self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)
collection2 = collection_domain.Collection.from_yaml(
'collection2', yaml_content)
self.assertEqual(len(collection2.nodes), 1)
yaml_content_2 = collection2.to_yaml()
self.assertEqual(yaml_content_2, yaml_content)
# Should not be able to create a collection from no YAML content.
with self.assertRaises(Exception):
collection_domain.Collection.from_yaml('collection3', None)
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = ("""category: A category
nodes:
- acquired_skills:
- Skill1
- Skill2
exploration_id: Exp1
prerequisite_skills: []
objective: ''
schema_version: 1
title: A title
""")
YAML_CONTENT_V2 = ("""category: A category
language_code: en
nodes:
- acquired_skills:
- Skill1
- Skill2
exploration_id: Exp1
prerequisite_skills: []
objective: ''
schema_version: 2
tags: []
title: A title
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V1
_LATEST_YAML_CONTENT = YAML_CONTENT_V2
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V1)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
self.save_new_valid_exploration(
'Exp1', 'user@example.com', end_state_name='End')
collection = collection_domain.Collection.from_yaml(
'cid', self.YAML_CONTENT_V2)
self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)
|
hgomersall/myhdl | refs/heads/master | myhdl/test/conversion/general/test_adapter.py | 2 | from __future__ import absolute_import
import myhdl
from myhdl import *
@block
def adapter(o_err, i_err, o_spec, i_spec):
nomatch = Signal(bool(0))
other = Signal(bool(0))
o_err_bits = []
for s in o_spec:
if s == 'other':
o_err_bits.append(other)
elif s == 'nomatch':
o_err_bits.append(nomatch)
else:
bit = i_err(i_spec[s])
o_err_bits.append(bit)
o_err_vec = ConcatSignal(*o_err_bits)
other_bits = []
for s, i in i_spec.items():
if s in o_spec:
continue
bit = i_err(i)
other_bits.append(bit)
other_vec = ConcatSignal(*other_bits)
@always_comb
def assign():
nomatch.next = 0
other.next = (other_vec != 0)
o_err.next = o_err_vec
return assign
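# Added commentary (not in the original file): a worked mapping for the specs
# used in bench_adapter below. With o_spec = ('c', 'a', 'other', 'nomatch')
# and i_spec = {'a': 1, 'b': 2, 'c': 0, 'd': 3, 'e': 4, 'f': 5}, ConcatSignal
# treats its first argument as the MSB, so o_err[3] follows i_err(0) ('c'),
# o_err[2] follows i_err(1) ('a'), o_err[1] ORs the unlisted bits i_err(2..5),
# and o_err[0] ('nomatch') is held at 0, exactly what the stimulus asserts.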
@block
def bench_adapter(hdl=None):
o_spec = ('c', 'a', 'other', 'nomatch')
i_spec = { 'a' : 1, 'b' : 2, 'c' : 0, 'd' : 3, 'e' : 4, 'f' : 5, }
o_err = Signal(intbv(0)[4:])
i_err = Signal(intbv(0)[6:])
if hdl:
dut = adapter(o_err, i_err, o_spec, i_spec).convert(hdl=hdl)
else:
dut = adapter(o_err, i_err, o_spec, i_spec)
N = 2**len(i_err)
@instance
def stimulus():
for i in range(N):
i_err.next = i
yield delay(10)
assert o_err[0] == 0
assert o_err[1] == (i_err[2] | i_err[3] | i_err[4] | i_err[5])
assert o_err[2] == i_err[1]
assert o_err[3] == i_err[0]
print(o_err)
return dut, stimulus
def test_adapter():
assert bench_adapter().verify_convert() == 0
bench_adapter('Verilog')
bench_adapter('VHDL')
|
bencmbrook/home-assistant | refs/heads/master | tests/components/test_demo.py | 7 | """
tests.test_component_demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests demo component.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.demo as demo
from tests.common import mock_http_component
class TestDemo(unittest.TestCase):
""" Test the demo module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
mock_http_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_if_demo_state_shows_by_default(self):
""" Test if demo state shows if we give no configuration. """
demo.setup(self.hass, {demo.DOMAIN: {}})
self.assertIsNotNone(self.hass.states.get('a.Demo_Mode'))
def test_hiding_demo_state(self):
""" Test if you can hide the demo card. """
demo.setup(self.hass, {demo.DOMAIN: {'hide_demo_state': 1}})
self.assertIsNone(self.hass.states.get('a.Demo_Mode'))
|
keedio/hue | refs/heads/master | desktop/core/ext-py/PyYAML-3.09/tests/lib/test_structure.py | 60 |
import yaml, canonical
import pprint
def _convert_structure(loader):
if loader.check_event(yaml.ScalarEvent):
event = loader.get_event()
if event.tag or event.anchor or event.value:
return True
else:
return None
elif loader.check_event(yaml.SequenceStartEvent):
loader.get_event()
sequence = []
while not loader.check_event(yaml.SequenceEndEvent):
sequence.append(_convert_structure(loader))
loader.get_event()
return sequence
elif loader.check_event(yaml.MappingStartEvent):
loader.get_event()
mapping = []
while not loader.check_event(yaml.MappingEndEvent):
key = _convert_structure(loader)
value = _convert_structure(loader)
mapping.append((key, value))
loader.get_event()
return mapping
elif loader.check_event(yaml.AliasEvent):
loader.get_event()
return '*'
else:
loader.get_event()
return '?'
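# Added commentary (not in the original file): _convert_structure above
# reduces a parser event stream to a comparable skeleton. A scalar becomes
# True (if it carries a tag, anchor or value) or None, a sequence becomes a
# list, a mapping becomes a list of (key, value) pairs, an alias becomes '*',
# and any other event becomes '?'.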
def test_structure(data_filename, structure_filename, verbose=False):
nodes1 = []
nodes2 = eval(open(structure_filename, 'rb').read())
try:
loader = yaml.Loader(open(data_filename, 'rb'))
while loader.check_event():
if loader.check_event(yaml.StreamStartEvent, yaml.StreamEndEvent,
yaml.DocumentStartEvent, yaml.DocumentEndEvent):
loader.get_event()
continue
nodes1.append(_convert_structure(loader))
if len(nodes1) == 1:
nodes1 = nodes1[0]
assert nodes1 == nodes2, (nodes1, nodes2)
finally:
if verbose:
print "NODES1:"
pprint.pprint(nodes1)
print "NODES2:"
pprint.pprint(nodes2)
test_structure.unittest = ['.data', '.structure']
def _compare_events(events1, events2, full=False):
assert len(events1) == len(events2), (len(events1), len(events2))
for event1, event2 in zip(events1, events2):
assert event1.__class__ == event2.__class__, (event1, event2)
if isinstance(event1, yaml.AliasEvent) and full:
assert event1.anchor == event2.anchor, (event1, event2)
if isinstance(event1, (yaml.ScalarEvent, yaml.CollectionStartEvent)):
if (event1.tag not in [None, u'!'] and event2.tag not in [None, u'!']) or full:
assert event1.tag == event2.tag, (event1, event2)
if isinstance(event1, yaml.ScalarEvent):
assert event1.value == event2.value, (event1, event2)
def test_parser(data_filename, canonical_filename, verbose=False):
events1 = None
events2 = None
try:
events1 = list(yaml.parse(open(data_filename, 'rb')))
events2 = list(yaml.canonical_parse(open(canonical_filename, 'rb')))
_compare_events(events1, events2)
finally:
if verbose:
print "EVENTS1:"
pprint.pprint(events1)
print "EVENTS2:"
pprint.pprint(events2)
test_parser.unittest = ['.data', '.canonical']
def test_parser_on_canonical(canonical_filename, verbose=False):
events1 = None
events2 = None
try:
events1 = list(yaml.parse(open(canonical_filename, 'rb')))
events2 = list(yaml.canonical_parse(open(canonical_filename, 'rb')))
_compare_events(events1, events2, full=True)
finally:
if verbose:
print "EVENTS1:"
pprint.pprint(events1)
print "EVENTS2:"
pprint.pprint(events2)
test_parser_on_canonical.unittest = ['.canonical']
def _compare_nodes(node1, node2):
assert node1.__class__ == node2.__class__, (node1, node2)
assert node1.tag == node2.tag, (node1, node2)
if isinstance(node1, yaml.ScalarNode):
assert node1.value == node2.value, (node1, node2)
else:
assert len(node1.value) == len(node2.value), (node1, node2)
for item1, item2 in zip(node1.value, node2.value):
if not isinstance(item1, tuple):
item1 = (item1,)
item2 = (item2,)
for subnode1, subnode2 in zip(item1, item2):
_compare_nodes(subnode1, subnode2)
def test_composer(data_filename, canonical_filename, verbose=False):
nodes1 = None
nodes2 = None
try:
nodes1 = list(yaml.compose_all(open(data_filename, 'rb')))
nodes2 = list(yaml.canonical_compose_all(open(canonical_filename, 'rb')))
assert len(nodes1) == len(nodes2), (len(nodes1), len(nodes2))
for node1, node2 in zip(nodes1, nodes2):
_compare_nodes(node1, node2)
finally:
if verbose:
print "NODES1:"
pprint.pprint(nodes1)
print "NODES2:"
pprint.pprint(nodes2)
test_composer.unittest = ['.data', '.canonical']
def _make_loader():
global MyLoader
class MyLoader(yaml.Loader):
def construct_sequence(self, node):
return tuple(yaml.Loader.construct_sequence(self, node))
def construct_mapping(self, node):
pairs = self.construct_pairs(node)
pairs.sort()
return pairs
def construct_undefined(self, node):
return self.construct_scalar(node)
MyLoader.add_constructor(u'tag:yaml.org,2002:map', MyLoader.construct_mapping)
MyLoader.add_constructor(None, MyLoader.construct_undefined)
def _make_canonical_loader():
global MyCanonicalLoader
class MyCanonicalLoader(yaml.CanonicalLoader):
def construct_sequence(self, node):
return tuple(yaml.CanonicalLoader.construct_sequence(self, node))
def construct_mapping(self, node):
pairs = self.construct_pairs(node)
pairs.sort()
return pairs
def construct_undefined(self, node):
return self.construct_scalar(node)
MyCanonicalLoader.add_constructor(u'tag:yaml.org,2002:map', MyCanonicalLoader.construct_mapping)
MyCanonicalLoader.add_constructor(None, MyCanonicalLoader.construct_undefined)
def test_constructor(data_filename, canonical_filename, verbose=False):
_make_loader()
_make_canonical_loader()
native1 = None
native2 = None
try:
native1 = list(yaml.load_all(open(data_filename, 'rb'), Loader=MyLoader))
native2 = list(yaml.load_all(open(canonical_filename, 'rb'), Loader=MyCanonicalLoader))
assert native1 == native2, (native1, native2)
finally:
if verbose:
print "NATIVE1:"
pprint.pprint(native1)
print "NATIVE2:"
pprint.pprint(native2)
test_constructor.unittest = ['.data', '.canonical']
if __name__ == '__main__':
import test_appliance
test_appliance.run(globals())
|
sammyshj/gci | refs/heads/master | modules/geopy/geocoders/bing.py | 46 | try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
from urllib import urlencode
from urllib2 import urlopen
from geopy.geocoders.base import Geocoder
from geopy.util import logger, decode_page, join_filter
class Bing(Geocoder):
"""Geocoder using the Bing Maps API."""
def __init__(self, api_key, format_string='%s', output_format=None):
"""Initialize a customized Bing geocoder with location-specific
address information and your Bing Maps API key.
``api_key`` should be a valid Bing Maps API key.
``format_string`` is a string containing '%s' where the string to
geocode should be interpolated before querying the geocoder.
For example: '%s, Mountain View, CA'. The default is just '%s'.
``output_format`` (DEPRECATED) is ignored
"""
        if output_format is not None:
from warnings import warn
warn('geopy.geocoders.bing.Bing: The `output_format` parameter is deprecated '+
'and ignored.', DeprecationWarning)
self.api_key = api_key
self.format_string = format_string
self.url = "http://dev.virtualearth.net/REST/v1/Locations?%s"
def geocode(self, string, exactly_one=True):
if isinstance(string, unicode):
string = string.encode('utf-8')
params = {'query': self.format_string % string,
'key': self.api_key
}
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one)
def geocode_url(self, url, exactly_one=True):
logger.debug("Fetching %s..." % url)
page = urlopen(url)
return self.parse_json(page, exactly_one)
def parse_json(self, page, exactly_one=True):
"""Parse a location name, latitude, and longitude from an JSON response."""
if not isinstance(page, basestring):
page = decode_page(page)
doc = json.loads(page)
resources = doc['resourceSets'][0]['resources']
if exactly_one and len(resources) != 1:
raise ValueError("Didn't find exactly one resource! " \
"(Found %d.)" % len(resources))
def parse_resource(resource):
stripchars = ", \n"
a = resource['address']
address = a.get('addressLine', '').strip(stripchars)
city = a.get('locality', '').strip(stripchars)
state = a.get('adminDistrict', '').strip(stripchars)
zipcode = a.get('postalCode', '').strip(stripchars)
country = a.get('countryRegion', '').strip(stripchars)
city_state = join_filter(", ", [city, state])
place = join_filter(" ", [city_state, zipcode])
location = join_filter(", ", [address, place, country])
latitude = resource['point']['coordinates'][0] or None
longitude = resource['point']['coordinates'][1] or None
if latitude and longitude:
latitude = float(latitude)
longitude = float(longitude)
return (location, (latitude, longitude))
if exactly_one:
return parse_resource(resources[0])
else:
return [parse_resource(resource) for resource in resources]
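# Minimal usage sketch (added for illustration; the key and the address are
# placeholders, not part of the original module). With exactly_one=True,
# geocode() returns the (location, (latitude, longitude)) pair built above:
#
# geocoder = Bing('YOUR_BING_MAPS_KEY')
# place, (latitude, longitude) = geocoder.geocode('1 Microsoft Way, Redmond, WA')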
|
kushalbhola/MyStuff | refs/heads/master | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexing/multiindex/test_datetime.py | 3 | from datetime import datetime
import numpy as np
from pandas import Index, Period, Series, period_range
def test_multiindex_period_datetime():
# GH4861, using datetime in period of multiindex raises exception
idx1 = Index(["a", "a", "a", "b", "b"])
idx2 = period_range("2012-01", periods=len(idx1), freq="M")
s = Series(np.random.randn(len(idx1)), [idx1, idx2])
# try Period as index
expected = s.iloc[0]
result = s.loc["a", Period("2012-01")]
assert result == expected
# try datetime as index
result = s.loc["a", datetime(2012, 1, 1)]
assert result == expected
|
QudevETH/PycQED_py3 | refs/heads/qudev_master | examples/pulse_scheme_example.py | 2 | import matplotlib.pyplot as plt
import pycqed.utilities.pulse_scheme as pls
import numpy as np
# Simple example of how to use the pulse_scheme module
cm = 1 / 2.54  # cm-to-inch conversion (matplotlib figure sizes are in inches)
fig, ax = pls.new_pulse_fig((7*cm, 3*cm))
# Plot pulses
p1 = pls.mwPulse(ax, 0, width=1.5, label='$X_{\\pi/2}$')
p2 = pls.ramZPulse(ax, p1, width=2.5, sep=1.5)
p3 = pls.mwPulse(ax, p2 + 0.5, width=1.5, phase=np.pi/2, label='$Y_{\\pi/2}$')
# Add some arrows and labeling
pls.interval(ax, p1, p1 + 1.5, height=1.7, label='$T_\\mathsf{p}$')
pls.interval(ax, p1, p2 + 0.5, height=-.6, labelHeight=-0.5, label='$\\tau$',
vlines=False)
# Adjust plot range to fit the whole figure
ax.set_ylim(-1.2, 2.5)
plt.show()
# Two-qubit pulse scheme (Grover's algorithm)
fig = plt.figure(figsize=(9*cm, 5*cm))
labHeight = 1.25
ax1 = pls.new_pulse_subplot(fig, 211)
p1 = pls.mwPulse(ax1, 0, label='$G_0$', labelHeight=labHeight)
p2 = pls.fluxPulse(ax1, p1, label='CZ')
p3 = pls.mwPulse(ax1, p2, label='$Y_{\\pi/2}$', labelHeight=labHeight)
p4 = pls.fluxPulse(ax1, p3, label='CZ')
p5 = pls.mwPulse(ax1, p4, label='$Y_{\\pi/2}$', labelHeight=labHeight)
ax1.text(-.5, 0, '$Q_0$', va='center', ha='right')
ax2 = pls.new_pulse_subplot(fig, 212, sharex=ax1, sharey=ax1)
pls.mwPulse(ax2, 0, label='$G_1$', labelHeight=labHeight)
pls.mwPulse(ax2, p2, label='$Y_{\\pi/2}$', labelHeight=labHeight)
pls.mwPulse(ax2, p4, label='$Y_{\\pi/2}$', labelHeight=labHeight)
ax2.text(-.5, 0, '$Q_1$', va='center', ha='right')
fig.subplots_adjust(left=.07, top=.9, hspace=.1)
plt.show()
|
ddsc/ddsc-core | refs/heads/master | ddsc_core/migrations/0017_auto__add_field_ipaddress_label2.py | 1 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'IPAddress.label2'
db.add_column(u'ddsc_core_ipaddress', 'label2',
self.gf('django.db.models.fields.GenericIPAddressField')(default='127.0.0.1', max_length=39),
keep_default=False)
def backwards(self, orm):
# Deleting field 'IPAddress.label2'
db.delete_column(u'ddsc_core_ipaddress', 'label2')
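    # Added note (not in the original migration): forwards() adds the new
    # NOT NULL column with default='127.0.0.1' so existing rows are
    # backfilled, and keep_default=False then drops that default from the
    # schema once the rows have been filled.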
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ddsc_core.compartment': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Compartment'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'})
},
u'ddsc_core.folder': {
'Meta': {'object_name': 'Folder'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.ipaddress': {
'Meta': {'object_name': 'IPAddress'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'label2': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'ddsc_core.location': {
'Meta': {'object_name': 'Location'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'geometry_precision': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'point_geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'dim': '3', 'null': 'True', 'blank': 'True'}),
'real_geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'dim': '3', 'null': 'True', 'blank': 'True'})
},
u'ddsc_core.measuringdevice': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringDevice'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.measuringmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titel': ('django.db.models.fields.CharField', [], {'max_length': '600', 'null': 'True'})
},
u'ddsc_core.parameter': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Parameter'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'cas_number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sikb_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True'})
},
u'ddsc_core.processingmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ProcessingMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.referenceframe': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ReferenceFrame'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.timeseries': {
'Meta': {'object_name': 'Timeseries'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'compartment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Compartment']", 'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'timeseries'", 'symmetrical': 'False', 'to': "orm['lizard_security.DataSet']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
'first_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_value_number': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'latest_value_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'latest_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'timeseries'", 'null': 'True', 'to': u"orm['ddsc_core.Location']"}),
'measuring_device': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringDevice']", 'null': 'True', 'blank': 'True'}),
'measuring_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringMethod']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Parameter']", 'null': 'True', 'blank': 'True'}),
'processing_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ProcessingMethod']", 'null': 'True', 'blank': 'True'}),
'reference_frame': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ReferenceFrame']", 'null': 'True', 'blank': 'True'}),
'supplying_system': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'timeseries'", 'null': 'True', 'to': "orm['auth.User']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Unit']", 'null': 'True', 'blank': 'True'}),
'value_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
u'ddsc_core.unit': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Unit'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'conversion_factor': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['name']", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
}
}
complete_apps = ['ddsc_core'] |
Maspear/odoo | refs/heads/8.0 | openerp/sql_db.py | 168 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
The PostgreSQL connector is a connectivity layer between the OpenERP code and
the database, *not* a database abstraction toolkit. Database abstraction is what
the ORM does, in fact.
"""
from contextlib import contextmanager
from functools import wraps
import logging
import urlparse
import uuid
import psycopg2.extras
import psycopg2.extensions
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.pool import PoolError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
_logger = logging.getLogger(__name__)
types_mapping = {
'date': (1082,),
'time': (1083,),
'datetime': (1114,),
}
def unbuffer(symb, cr):
if symb is None:
return None
return str(symb)
def undecimalize(symb, cr):
if symb is None:
return None
return float(symb)
for name, typeoid in types_mapping.items():
psycopg2.extensions.register_type(psycopg2.extensions.new_type(typeoid, name, lambda x, cr: x))
psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize))
import tools
from tools.func import frame_codeinfo
from datetime import datetime as mdt
from datetime import timedelta
import threading
from inspect import currentframe
import re
re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$')
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$')
sql_counter = 0
class Cursor(object):
"""Represents an open transaction to the PostgreSQL DB backend,
acting as a lightweight wrapper around psycopg2's
``cursor`` objects.
``Cursor`` is the object behind the ``cr`` variable used all
over the OpenERP code.
.. rubric:: Transaction Isolation
One very important property of database transactions is the
level of isolation between concurrent transactions.
The SQL standard defines four levels of transaction isolation,
ranging from the most strict *Serializable* level, to the least
strict *Read Uncommitted* level. These levels are defined in
terms of the phenomena that must not occur between concurrent
transactions, such as *dirty read*, etc.
    In the context of generic business data management software
    such as OpenERP, we need the best guarantees that no data
    corruption can ever be caused by simply running multiple
transactions in parallel. Therefore, the preferred level would
be the *serializable* level, which ensures that a set of
transactions is guaranteed to produce the same effect as
running them one at a time in some order.
However, most database management systems implement a limited
serializable isolation in the form of
`snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
providing most of the same advantages as True Serializability,
with a fraction of the performance cost.
With PostgreSQL up to version 9.0, this snapshot isolation was
the implementation of both the ``REPEATABLE READ`` and
``SERIALIZABLE`` levels of the SQL standard.
As of PostgreSQL 9.1, the previous snapshot isolation implementation
was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
level was introduced, providing some additional heuristics to
detect a concurrent update by parallel transactions, and forcing
one of them to rollback.
OpenERP implements its own level of locking protection
for transactions that are highly likely to provoke concurrent
updates, such as stock reservations or document sequences updates.
Therefore we mostly care about the properties of snapshot isolation,
but we don't really need additional heuristics to trigger transaction
rollbacks, as we are taking care of triggering instant rollbacks
ourselves when it matters (and we can save the additional performance
hit of these heuristics).
As a result of the above, we have selected ``REPEATABLE READ`` as
the default transaction isolation level for OpenERP cursors, as
it will be mapped to the desired ``snapshot isolation`` level for
all supported PostgreSQL version (8.3 - 9.x).
Note: up to psycopg2 v.2.4.2, psycopg2 itself remapped the repeatable
read level to serializable before sending it to the database, so it would
actually select the new serializable mode on PostgreSQL 9.1. Make
sure you use psycopg2 v2.4.2 or newer if you use PostgreSQL 9.1 and
the performance hit is a concern for you.
.. attribute:: cache
Cache dictionary with a "request" (-ish) lifecycle, only lives as
long as the cursor itself does and proactively cleared when the
cursor is closed.
This cache should *only* be used to store repeatable reads as it
ignores rollbacks and savepoints, it should not be used to store
*any* data which may be modified during the life of the cursor.
"""
IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit
def check(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if self._closed:
msg = 'Unable to use a closed cursor.'
if self.__closer:
msg += ' It was closed at %s, line %s' % self.__closer
raise psycopg2.OperationalError(msg)
return f(self, *args, **kwargs)
return wrapper
def __init__(self, pool, dbname, dsn, serialized=True):
self.sql_from_log = {}
self.sql_into_log = {}
# default log level determined at cursor creation, could be
# overridden later for debugging purposes
self.sql_log = _logger.isEnabledFor(logging.DEBUG)
self.sql_log_count = 0
# avoid the call of close() (by __del__) if an exception
# is raised by any of the following initialisations
self._closed = True
self.__pool = pool
self.dbname = dbname
# Whether to enable snapshot isolation level for this cursor.
# see also the docstring of Cursor.
self._serialized = serialized
self._cnx = pool.borrow(dsn)
self._obj = self._cnx.cursor()
if self.sql_log:
self.__caller = frame_codeinfo(currentframe(), 2)
else:
self.__caller = False
self._closed = False # real initialisation value
self.autocommit(False)
self.__closer = False
self._default_log_exceptions = True
self.cache = {}
def __build_dict(self, row):
return {d.name: row[i] for i, d in enumerate(self._obj.description)}
def dictfetchone(self):
row = self._obj.fetchone()
return row and self.__build_dict(row)
def dictfetchmany(self, size):
return map(self.__build_dict, self._obj.fetchmany(size))
def dictfetchall(self):
return map(self.__build_dict, self._obj.fetchall())
def __del__(self):
if not self._closed and not self._cnx.closed:
# Oops. 'self' has not been closed explicitly.
# The cursor will be deleted by the garbage collector,
# but the database connection is not put back into the connection
# pool, preventing some operation on the database like dropping it.
# This can also lead to a server overload.
msg = "Cursor not closed explicitly\n"
if self.__caller:
msg += "Cursor was created at %s:%s" % self.__caller
else:
msg += "Please enable sql debugging to trace the caller."
_logger.warning(msg)
self._close(True)
@check
def execute(self, query, params=None, log_exceptions=None):
if '%d' in query or '%f' in query:
_logger.warning(query)
_logger.warning("SQL queries cannot contain %d or %f anymore. Use only %s")
if params and not isinstance(params, (tuple, list, dict)):
_logger.error("SQL query parameters should be a tuple, list or dict; got %r", params)
raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))
if self.sql_log:
now = mdt.now()
try:
params = params or None
res = self._obj.execute(query, params)
except psycopg2.ProgrammingError, pe:
if self._default_log_exceptions if log_exceptions is None else log_exceptions:
_logger.error("Programming error: %s, in query %s", pe, query)
raise
except Exception:
if self._default_log_exceptions if log_exceptions is None else log_exceptions:
_logger.exception("bad query: %s", self._obj.query or query)
raise
# simple query count is always computed
self.sql_log_count += 1
# advanced stats only if sql_log is enabled
if self.sql_log:
delay = mdt.now() - now
delay = delay.seconds * 1E6 + delay.microseconds
_logger.debug("query: %s", self._obj.query)
res_from = re_from.match(query.lower())
if res_from:
self.sql_from_log.setdefault(res_from.group(1), [0, 0])
self.sql_from_log[res_from.group(1)][0] += 1
self.sql_from_log[res_from.group(1)][1] += delay
res_into = re_into.match(query.lower())
if res_into:
self.sql_into_log.setdefault(res_into.group(1), [0, 0])
self.sql_into_log[res_into.group(1)][0] += 1
self.sql_into_log[res_into.group(1)][1] += delay
return res
def split_for_in_conditions(self, ids):
"""Split a list of identifiers into one or more smaller tuples
safe for IN conditions, after uniquifying them."""
return tools.misc.split_every(self.IN_MAX, ids)
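    # Illustrative sketch (added; the table and ids are hypothetical):
    # split_for_in_conditions() keeps each IN (...) clause under IN_MAX
    # entries, so a large id list is consumed chunk by chunk:
    #
    #   for chunk in cr.split_for_in_conditions(ids):
    #       cr.execute("SELECT id FROM some_table WHERE id IN %s", (chunk,))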
def print_log(self):
global sql_counter
if not self.sql_log:
return
def process(type):
sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
sum = 0
if sqllogs[type]:
sqllogitems = sqllogs[type].items()
sqllogitems.sort(key=lambda k: k[1][1])
_logger.debug("SQL LOG %s:", type)
sqllogitems.sort(lambda x, y: cmp(x[1][0], y[1][0]))
for r in sqllogitems:
delay = timedelta(microseconds=r[1][1])
_logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
sum += r[1][1]
sqllogs[type].clear()
sum = timedelta(microseconds=sum)
_logger.debug("SUM %s:%s/%d [%d]", type, sum, self.sql_log_count, sql_counter)
sqllogs[type].clear()
process('from')
process('into')
self.sql_log_count = 0
self.sql_log = False
@check
def close(self):
return self._close(False)
def _close(self, leak=False):
global sql_counter
if not self._obj:
return
del self.cache
if self.sql_log:
self.__closer = frame_codeinfo(currentframe(), 3)
# simple query count is always computed
sql_counter += self.sql_log_count
# advanced stats only if sql_log is enabled
self.print_log()
self._obj.close()
        # This forces the cursor to be freed, and thus, available again. It is
        # important because otherwise we can overload the server very easily
        # because of a cursor shortage (because cursors are not garbage
        # collected as fast as they should). The problem is probably due in
        # part to browse records keeping a reference to the cursor.
del self._obj
self._closed = True
# Clean the underlying connection.
self._cnx.rollback()
if leak:
self._cnx.leaked = True
else:
chosen_template = tools.config['db_template']
templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
keep_in_pool = self.dbname not in templates_list
self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)
@check
def autocommit(self, on):
if on:
isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
else:
# If a serializable cursor was requested, we
            # use the appropriate PostgreSQL isolation level
            # that maps to snapshot isolation.
# For all supported PostgreSQL versions (8.3-9.x),
# this is currently the ISOLATION_REPEATABLE_READ.
# See also the docstring of this class.
# NOTE: up to psycopg 2.4.2, repeatable read
# is remapped to serializable before being
# sent to the database, so it is in fact
# unavailable for use with pg 9.1.
isolation_level = \
ISOLATION_LEVEL_REPEATABLE_READ \
if self._serialized \
else ISOLATION_LEVEL_READ_COMMITTED
self._cnx.set_isolation_level(isolation_level)
@check
def commit(self):
""" Perform an SQL `COMMIT`
"""
return self._cnx.commit()
@check
def rollback(self):
""" Perform an SQL `ROLLBACK`
"""
return self._cnx.rollback()
def __enter__(self):
""" Using the cursor as a contextmanager automatically commits and
closes it::
with cr:
cr.execute(...)
# cr is committed if no failure occurred
# cr is closed in any case
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
self.commit()
self.close()
@contextmanager
@check
def savepoint(self):
"""context manager entering in a new savepoint"""
name = uuid.uuid1().hex
self.execute('SAVEPOINT "%s"' % name)
try:
yield
self.execute('RELEASE SAVEPOINT "%s"' % name)
except:
self.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
raise
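    # Usage sketch (added; the statements are hypothetical): savepoint()
    # rolls back only the enclosed statements on failure, leaving the rest
    # of the surrounding transaction intact:
    #
    #   with cr.savepoint():
    #       cr.execute(...)  # undone on exception, kept on success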
@check
def __getattr__(self, name):
return getattr(self._obj, name)
@property
def closed(self):
return self._closed
class TestCursor(Cursor):
""" A cursor to be used for tests. It keeps the transaction open across
several requests, and simulates committing, rolling back, and closing.
"""
def __init__(self, *args, **kwargs):
super(TestCursor, self).__init__(*args, **kwargs)
# in order to simulate commit and rollback, the cursor maintains a
# savepoint at its last commit
self.execute("SAVEPOINT test_cursor")
# we use a lock to serialize concurrent requests
self._lock = threading.RLock()
def acquire(self):
self._lock.acquire()
def release(self):
self._lock.release()
def force_close(self):
super(TestCursor, self).close()
def close(self):
if not self._closed:
self.rollback() # for stuff that has not been committed
self.release()
def autocommit(self, on):
_logger.debug("TestCursor.autocommit(%r) does nothing", on)
def commit(self):
self.execute("RELEASE SAVEPOINT test_cursor")
self.execute("SAVEPOINT test_cursor")
def rollback(self):
self.execute("ROLLBACK TO SAVEPOINT test_cursor")
self.execute("SAVEPOINT test_cursor")
class PsycoConnection(psycopg2.extensions.connection):
pass
class ConnectionPool(object):
""" The pool of connections to database(s)
Keep a set of connections to pg databases open, and reuse them
to open cursors for all transactions.
The connections are *not* automatically closed. Only a close_db()
can trigger that.
"""
def locked(fun):
@wraps(fun)
def _locked(self, *args, **kwargs):
self._lock.acquire()
try:
return fun(self, *args, **kwargs)
finally:
self._lock.release()
return _locked
def __init__(self, maxconn=64):
self._connections = []
self._maxconn = max(maxconn, 1)
self._lock = threading.Lock()
def __repr__(self):
used = len([1 for c, u in self._connections[:] if u])
count = len(self._connections)
return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)
def _debug(self, msg, *args):
_logger.debug(('%r ' + msg), self, *args)
@locked
def borrow(self, dsn):
# free dead and leaked connections
for i, (cnx, _) in tools.reverse_enumerate(self._connections):
if cnx.closed:
self._connections.pop(i)
self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
continue
if getattr(cnx, 'leaked', False):
delattr(cnx, 'leaked')
self._connections.pop(i)
self._connections.append((cnx, False))
_logger.warning('%r: Free leaked connection to %r', self, cnx.dsn)
for i, (cnx, used) in enumerate(self._connections):
if not used and cnx._original_dsn == dsn:
try:
cnx.reset()
except psycopg2.OperationalError:
self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
# psycopg2 2.4.4 and earlier do not allow closing a closed connection
if not cnx.closed:
cnx.close()
continue
self._connections.pop(i)
self._connections.append((cnx, True))
self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)
return cnx
if len(self._connections) >= self._maxconn:
# try to remove the oldest connection not used
for i, (cnx, used) in enumerate(self._connections):
if not used:
self._connections.pop(i)
if not cnx.closed:
cnx.close()
self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
break
else:
# note: this code is called only if the for loop has completed (no break)
raise PoolError('The Connection Pool Is Full')
try:
result = psycopg2.connect(dsn=dsn, connection_factory=PsycoConnection)
except psycopg2.Error:
_logger.exception('Connection to the database failed')
raise
result._original_dsn = dsn
self._connections.append((result, True))
self._debug('Create new connection')
return result
@locked
def give_back(self, connection, keep_in_pool=True):
self._debug('Give back connection to %r', connection.dsn)
for i, (cnx, used) in enumerate(self._connections):
if cnx is connection:
self._connections.pop(i)
if keep_in_pool:
self._connections.append((cnx, False))
self._debug('Put connection to %r in pool', cnx.dsn)
else:
self._debug('Forgot connection to %r', cnx.dsn)
cnx.close()
break
else:
            raise PoolError('This connection does not belong to the pool')
@locked
def close_all(self, dsn=None):
count = 0
last = None
for i, (cnx, used) in tools.reverse_enumerate(self._connections):
if dsn is None or cnx._original_dsn == dsn:
cnx.close()
last = self._connections.pop(i)[0]
count += 1
_logger.info('%r: Closed %d connections %s', self, count,
(dsn and last and 'to %r' % last.dsn) or '')
class Connection(object):
""" A lightweight instance of a connection to postgres
"""
def __init__(self, pool, dbname, dsn):
self.dbname = dbname
self.dsn = dsn
self.__pool = pool
def cursor(self, serialized=True):
cursor_type = serialized and 'serialized ' or ''
_logger.debug('create %scursor to %r', cursor_type, self.dsn)
return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized)
def test_cursor(self, serialized=True):
cursor_type = serialized and 'serialized ' or ''
_logger.debug('create test %scursor to %r', cursor_type, self.dsn)
return TestCursor(self.__pool, self.dbname, self.dsn, serialized=serialized)
# serialized_cursor is deprecated - cursors are serialized by default
serialized_cursor = cursor
def __nonzero__(self):
"""Check if connection is possible"""
try:
_logger.warning("__nonzero__() is deprecated. (It is too expensive to test a connection.)")
cr = self.cursor()
cr.close()
return True
except Exception:
return False
def dsn(db_or_uri):
"""parse the given `db_or_uri` and return a 2-tuple (dbname, uri)"""
if db_or_uri.startswith(('postgresql://', 'postgres://')):
# extract db from uri
us = urlparse.urlsplit(db_or_uri)
if len(us.path) > 1:
db_name = us.path[1:]
elif us.username:
db_name = us.username
else:
db_name = us.hostname
return db_name, db_or_uri
_dsn = ''
for p in ('host', 'port', 'user', 'password'):
cfg = tools.config['db_' + p]
if cfg:
_dsn += '%s=%s ' % (p, cfg)
return db_or_uri, '%sdbname=%s' % (_dsn, db_or_uri)
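# Illustrative sketch (added; the example values are assumptions):
#   dsn('mydb') returns ('mydb', 'host=... dbname=mydb') built from tools.config
#   dsn('postgresql://user@host/mydb') returns ('mydb', the URI unchanged)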
_Pool = None
def db_connect(to, allow_uri=False):
global _Pool
if _Pool is None:
_Pool = ConnectionPool(int(tools.config['db_maxconn']))
db, uri = dsn(to)
if not allow_uri and db != to:
raise ValueError('URI connections not allowed')
return Connection(_Pool, db, uri)
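# Minimal usage sketch (added; the database name is a placeholder). Cursor
# implements the context-manager protocol defined above, committing and
# closing on success:
#
#   connection = db_connect('mydb')
#   with connection.cursor() as cr:
#       cr.execute("SELECT 1")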
def close_db(db_name):
""" You might want to call openerp.modules.registry.RegistryManager.delete(db_name) along this function."""
global _Pool
if _Pool:
_Pool.close_all(dsn(db_name)[1])
def close_all():
global _Pool
if _Pool:
_Pool.close_all()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rue89-tech/edx-platform | refs/heads/master | lms/djangoapps/course_structure_api/v0/__init__.py | 175 | """ Version 0 """
|
keith-gray-powereng/dnp-modbus-decoder | refs/heads/master | power_decoder/tests.py | 1 | import unittest
from unittest import TestCase
from Report import Report
import bitstring
import BitSlice
import DataLinkTranslator
import TypeLookup
import parseInput
class SimpleTest(TestCase):
def setUp(self):
#do stuff here to setup tests
#POC does not require a standard setup
pass
    # Heads up: these hex literals are signed, so a leading zero is needed
    # to keep a value positive.
def test_sliceTestFail(self):
testWord = bitstring.BitArray("0x00000000000F0000")
testWord.reverse()
result = BitSlice.slice(testWord, 0, 2)
assert result.uint == bitstring.BitArray("0x0").uint , "slice somehow returned the correct result ({})".format(result)
def test_sliceStartbits(self):
testWord = bitstring.BitArray(hex = "05640000000000000000000000000000")
result = testWord[0:16]
assert result.uint == bitstring.BitArray("0x0564").uint , "slice is pulling the wrong bits ({}, {})".format(result.uint, bitstring.BitArray("0x564").uint)
def test_AHsliceSequenceNum(self):
testWord = bitstring.BitArray("0x0F0000000000000")
result = BitSlice.getSequence(testWord)
assert result.uint == bitstring.BitArray("0xF").uint , "slice is pulling the wrong bits ({}, {})".format(result.uint, bitstring.BitArray("0xF").uint)
def test_AHsliceConfirmationlag(self):
testWord = bitstring.BitArray("0x20000000000000000")
result = BitSlice.getConfirmationFlag(testWord)
accepted = bitstring.BitArray("0x1")
assert result.uint == accepted.uint, "slice is pulling the wrong bits ({}, {})".format(result.uint, accepted.uint)
def test_AHsliceUnsolicitedFlag(self):
testWord = bitstring.BitArray("0x1000000000000000")
result = BitSlice.getUnsolicitedFlag(testWord)
assert result.uint == bitstring.BitArray("0x1").uint, "slice is pulling the wrong bits ({}, {})".format(result.uint, bitstring.BitArray("0x1").uint)
def test_AHsliceFirstFlag(self):
testWord = bitstring.BitArray("0x8000000000000040")
result = BitSlice.getFirstFlag(testWord)
assert result.uint == bitstring.BitArray("0x1").uint , "slice is pulling the wrong bits ({}, {})".format(result.uint, bitstring.BitArray("0x1").uint)
def test_AHsliceFinalFlag(self):
testWord = bitstring.BitArray("0x4000000000000080")
result = BitSlice.getFinalFlag(testWord)
assert result.uint == bitstring.BitArray("0x1").uint, "slice is pulling the wrong bits ({}, {})".format(result.uint, bitstring.BitArray("0x1").uint)
def test_AHgetFunctionCode(self):
testWord = bitstring.BitArray("0x00FF000000000000")
result = BitSlice.getFuncCode(testWord)
assert result.uint == bitstring.BitArray("0xFF").uint , "slice is pulling the wrong bits ({}, {})".format(result.uint, bitstring.BitArray("0xFA").uint)
def test_getLSBCodeSet(self):
testWord = bitstring.BitArray("0x0000FF00000000000")
result = BitSlice.getLSBInternalIndications(testWord)
assert result.uint == bitstring.BitArray("0xFF").uint , "slice is pulling the wrong bits ({}, {})".format(result.uint, bitstring.BitArray("0xFF").uint)
def test_getMSBCodeSet(self):
testWord = bitstring.BitArray("0x000000FF0000000000")
result = BitSlice.getMSBInternalIndications(testWord)
assert result.uint == bitstring.BitArray("0xFF").uint , "slice is pulling the wrong bits ({}, {})".format(result.uint, bitstring.Bits("0xFF").uint)
def test_DataStartIsCorrect(self):
testWord = bitstring.Bits("0x0564B34483000100DF89")
result = DataLinkTranslator.DataLayerCorrect(testWord)
assert result , "The hex number is not interpreted right"
def test_DataLayerLengthSliceGrabsRightBits(self):
testWord = bitstring.Bits("0x0564B34483000100")
result = DataLinkTranslator.DataLayerLength(testWord)
assert result.uint == bitstring.Bits("0xB3").uint , "Did not grab correct length, grabbed {},\n should be {}".format(result.bin, bitstring.Bits("0xB3").bin)
def test_DataLayerControlSliceGrabsRightBits(self):
testWord = bitstring.Bits("0x0564B34483000100")
result = DataLinkTranslator.DataLayerControl(testWord)
assert result.uint == bitstring.Bits("0x44").uint , "Did not grab right control Octet, grabbed {},\n should be {}".format(result.bin, bitstring.Bits("0x44").bin)
def test_DataLayerSourceSliceGrabsRightBits(self):
testWord = bitstring.Bits("0x0564B34483000100")
result = DataLinkTranslator.DataLayerSource(testWord)
assert result.uint == bitstring.Bits("0x0100").uint , "Did not Grab the right bits, grabbed {},\n should be {}".format(result.bin, bitstring.Bits("0x0100").bin)
def test_DataLayerDestinationSliceGrabsRightBits(self):
testWord = bitstring.Bits("0x0564B34483000100")
result = DataLinkTranslator.DataLayerDestination(testWord)
assert result.uint == bitstring.Bits("0x8300").uint , "Did not Grab the right bits, grabbed {}".format(result.hex)
def test_StripCRCRemovesCRCBits(self):
testWord = bitstring.Bits("0x0564B34483000100DF89")
result = DataLinkTranslator.StripCRCBits(testWord)
assert result.uint == bitstring.Bits("0x0564B34483000100").uint , "Did not Grab the right bits, grabbed {}".format(result.hex)
def test_PresentationObjectLayersCorrectly(self):
baseTestObject = Report("Test1", "This should be the description", "DATADATADATADATA")
baseTestObject.AddNext(Report("Test2", "This is another description", "more data"))
baseTestObject.Next[0].AddNext(Report("Test3", "", ""))
assert baseTestObject.title == "Test1"
assert baseTestObject.Next[0].title == "Test2"
assert baseTestObject.Next[0].Next[0].title == "Test3"
def test_DictionaryFirstIndex(self):
primaryRef = "0"
secondaryRef = "209"
originalValue = 'This attribute provides the secure authentication version supported by the outstation. '
dictionary = TypeLookup.buildDict()
testValue = dictionary[primaryRef][secondaryRef]
#print('\n\n')
#print(originalValue)
#print(testValue)
assert testValue["description"] == originalValue
def test_DictionaryMidIndex(self):
primaryRef = "0"
secondaryRef = "242"
originalValue = [('Attribute data type code ', 'UNIT8'), ('Length ', 'UNIT8'), ("Manufacturer's software version string. ", 'VSTRn')]
dictionary = TypeLookup.buildDict()
testValue = dictionary[primaryRef][secondaryRef]
#print('\n\n')
#print(originalValue)
#print(testValue)
assert testValue["attributes"] == originalValue
def test_DictionaryLastIndex(self):
primaryRef = "0"
secondaryRef = "255"
originalValue = 'This is a special attribute that is used to retrieve a list of all of the device attribute variation numbers supported by the outstation at a specified index- and the properties of those attributes. This object has a variable length that depends on the count of attribute variations supported by the outstation. '
dictionary = TypeLookup.buildDict()
testValue = dictionary[primaryRef][secondaryRef]
#print('\n\n')
#print(originalValue)
#print('\n\n')
#print(testValue["description"])
assert testValue["description"] == originalValue
def test_DictionaryBadIndex(self):
primaryRef = "122"
secondaryRef = "1"
originalValue = 'This object is used to report the current value of a security statistic. See 11.9.10 for a description of a Security Statistic Point Type. See 7.5.2.2 for details of the point indexes permitted for this object and when the statistics are incremented. Variation 1 objects contain a 32-bit- unsigned integer count value. '
dictionary = TypeLookup.buildDict()
testValue = dictionary[primaryRef][secondaryRef]
assert testValue["description"] != originalValue
def test_ParseLogFile(self):
logFile = "----------- ** Capture Session Started 12//09//2011 14:10:21 ** ------------\n" \
+ "(Port 23): No Response for Control\n" \
+ "(Port 23): Response Timeout for TB#1 Reg B CL-6, waited: 1000\n" \
+ "(Port 23): \n" \
+ "(Port 23): DNP TX Analog Command - VOLTREDUCTION PERCENT @TB#1 Reg A CL-6 Point #16: Value -32768\n" \
+ "(Port 23)TX[25]: 05 64 12 C4 02 00 64 00 (FF B7)-CRC\n" \
+ "F8 C8 05 29 02 28 01 00 10 00 00 80 00 (E8 57)-CRC\n"
parsedData = parseInput.parseData(logFile, "")
result = [("056412C402006400","FFB7","True"),("F8C80529022801001000008000","E857","True")]
assert result == parsedData
def test_ParseSimpleInput(self):
inputData = "05 64 05 C0 01 00 0A 00 (E0 8C), 05 64 12 C4 02 00 64 00 (FF B7)"
parsedData = parseInput.parseData("" , inputData)
result = [("056405C001000A00","E08C","True"), ("056412C402006400","FFB7","True")]
assert result == parsedData
def test_ParseTwoInputs(self): #with both inputs present, it should use the first one (if not empty)
parsedData = parseInput.parseData("1", "2")
result = "1"
assert result == parsedData[0][0]
|
conan747/fallen-heaven | refs/heads/master | scripts/tactic_world.py | 1 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2013 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from fife import fife
from world import *
from gui.huds import TacticalHUD
from combat import *
from combatRecorder import CombatRecorder, CombatPlayer
class TacticListener(WorldListener):
"""
Main game listener. Listens for Mouse and Keyboard events.
This class also has the ability to attach and detach itself from
the event manager in cases where you do not want input processed (i.e. when
the main menu is visible). It is NOT attached by default.
"""
def __init__(self, world):
super(TacticListener, self).__init__(world)
def clickAttack(self, clickpoint):
'''
Handles the click action when attack is selected.
:return:
'''
if not self._world.activeUnit:
print "No active unit selected!"
return
activeAgent = self._world.getActiveAgent()
clickLocation = self._world.getLocationAt(clickpoint)
trajectory = Trajectory(activeAgent , self._world, self._world.attackType)
# print "Is is reachable?"
if trajectory.canShoot(clickLocation, display=True):
activeAgent.attack(clickLocation, self._world.attackType)
self._world.HUD.updateUI()
def clickDefault(self, clickpoint):
# self.hide_instancemenu()
instances = self._world.getInstancesAt(clickpoint)
if instances:
self.cycleThroughInstances(instances)
print "selected instances on agent layer: ", [i.getObject().getId() for i in instances]
print "Found " , instances.__len__(), "instances"
elif self._world.activeUnit:
# there was a unit selected and an empty cell has been clicked
agent = self._world.getActiveAgent()
if agent.agentType == "Unit" and not self._world.busy:
# move the unit if possible
location = self._world.getLocationAt(clickpoint)
if agent.canTeleportTo(location):
agent.run(location, self._cellSelectionRenderer.reset)
else:
agent.playError()
else:
# we assume it's a building -> deselect it.
self._world.selectUnit(None)
def clickDeploy(self, clickpoint):
'''
Specific deploying behavior for tactic situations: Units can only be deployed right next to the building.
'''
unit = self._world.deploying
building = self._world.getActiveAgent()
## Get center point of the building.
buildingLocation = building.instance.getLocation()
clickLocation = self._world.getLocationAt(clickpoint)
layer = buildingLocation.getLayer()
cellCache = layer.getCellCache()
clickCell = cellCache.getCell(clickLocation.getLayerCoordinates())
properties = building.properties
canTeleport = False
for x in range(properties["SizeX"]):
for y in range(properties["SizeY"]):
cellPos = buildingLocation.getLayerCoordinates()
cellPos.x -= x
cellPos.y -= y
cell = cellCache.getCell(cellPos)
if clickCell.isNeighbor(cell):
canTeleport = True
break
if canTeleport:
break
## TODO: Give feedback!
if not canTeleport:
self.cancelDeploy()
return
if not unit.teleport(clickLocation):
# This is supposed to be an illegal teleport position -> cancel
self.cancelDeploy()
return
# Generate an instance for the unit.
self._world.unitManager.addAgent(unit, clickLocation)
## Record the deploying on combatRecorder.
if self._world.combatRecorder:
storage = self._world.storage
self._world.combatRecorder.onGetOut(unit, storage, clickLocation, storage.deployingID)
instanceID = unit.instance.getFifeId()
faction = unit.properties["faction"]
self._world.factionUnits[faction].append(instanceID)
self._world.view.addPathVisual(unit.instance)
self._world.storage.unitDeployed()
self.cancelDeploy()
def mouseMoved(self, evt):
'''
Display feedback of the movement range of the unit
:return:
'''
if not self.unitManager:
self.unitManager = self._world.unitManager
unit = self._world.getActiveAgent()
if not unit:
super(TacticListener, self).mouseMoved(evt)
return
if unit.agentType != "Unit": # It's a building
super(TacticListener, self).mouseMoved(evt)
return
self._world.mousePos = (evt.getX(), evt.getY())
## If we reached this point we should show the maximum range of the movement.
## TODO: Make this a path object for convenience.
## TODO: Make a separate thread for this perhaps?
if self._world.mode == self._world.MODE_DEFAULT:
mousePoint = fife.ScreenPoint(evt.getX(), evt.getY())
location = self._world.getLocationAt(mousePoint)
# See if the unit could possibly move to this location due to the cell type.
# If it can't move, then we don't need to calculate or draw the path.
if not unit.canTeleportTo(location):
self._cellSelectionRenderer.reset()
return
iPather = fife.RoutePather()
route = iPather.createRoute(unit.instance.getLocation(), location, False)
route.setObject(unit.instance.getObject())
iPather.solveRoute(route, fife.HIGH_PRIORITY,True)
movesLeft = unit.AP / 10
route.cutPath(movesLeft) ## Cut the path short if too long
self._cellSelectionRenderer.reset()
while not route.reachedEnd():
node = route.getNextNode()
if not route.walkToNextNode():
break
self._cellSelectionRenderer.selectLocation(node)
#elif self._world.mode == self._world.MODE_ATTACK:
def clickGetIn(self, clickpoint):
# self.hide_instancemenu()
if not self._world.activeUnit:
return
if not self.unitManager:
self.unitManager = self._world.unitManager
activeUnit = self._world.getActiveAgent()
if activeUnit.agentType == "Building":
return
instances = self._world.getInstancesAt(clickpoint)
print "selected instances on agent layer: ", [i.getObject().getId() for i in instances]
print "Found " , instances.__len__(), "instances"
for instance in instances:
clickedAgent = self.unitManager.getAgent(instance)
if not clickedAgent or clickedAgent.properties["faction"] != self._world.currentTurn:
return
if clickedAgent.agentType == "Building":
storage = clickedAgent.storage
if storage:
## HACK: only accept units on dropships
if clickedAgent.properties["StructureCategory"] == "Dropship":
if self.canGetToPerimeter(activeUnit, clickedAgent):
iconId = storage.addUnit(activeUnit)
if iconId:
## storage added correctly -> remove unit from the map.
if self._world.combatRecorder:
self._world.combatRecorder.onGetIn(activeUnit, storage, iconId)
activeUnit.die() #TODO: This shouldn't be die.
self._world.selectUnit(None)
def canGetToPerimeter(self, activeUnit, building):
'''
Checks if the active unit is able to move itself to the perimeter of the building in order to get inside.
:param activeUnit: A unit that wants to get inside a building
:param building: Building that can accept the activeUnit.
:return:
'''
buildingLocation = building.instance.getLocation()
startingPos = buildingLocation.getMapCoordinates()
iPather = fife.RoutePather()
movesLeft = activeUnit.AP / 10
for x in range(-1 , building.properties["SizeX"] +1):
for y in range(-1 , building.properties["SizeY"]+1):
cellPos = fife.DoublePoint3D(startingPos)
cellPos.x -= x
cellPos.y -= y
loc = fife.Location(buildingLocation)
loc.setMapCoordinates(cellPos)
route = iPather.createRoute(activeUnit.instance.getLocation(), loc, False)
route.setObject(activeUnit.instance.getObject())
iPather.solveRoute(route, fife.HIGH_PRIORITY,True)
routeLength = route.getPathLength()
if routeLength < 1:
print "Route length: " , routeLength
continue
if movesLeft >= (routeLength-1):
return True
## TODO: give feedback.
return False
class TacticWorld(World):
"""
The world!
This class handles:
setup of map view (cameras ...)
loading the map
That's obviously too much, and should get factored out.
"""
def __init__(self, universe, planet):
super(TacticWorld, self).__init__(universe, planet)
self.listener = TacticListener(self)
self.listener.attach()
playerFactionName = self.universe.progress.playerFactionName
self.currentTurn = playerFactionName
self.factionNames = [playerFactionName, "Tauran"]
#GUI
self._nextTurnWindow = None
self.HUD = TacticalHUD(self)
self.HUD.show()
self.combatManager = CombatManager(self)
self.projectileGraveyard = None
def pump(self):
super(TacticWorld, self).pump()
self.combatManager.pump()
if self.combatPlayer:
self.combatPlayer.pump()
def load(self, filename):
super(TacticWorld, self).load(filename)
## Start cellRenderer to show instance paths:
self.view.setVisual(self.unitManager.getAgents())
# Setup factionUnits
for factionName in self.factionNames:
self.factionUnits[factionName] = []
for agent in self.unitManager.getAgents():
if agent.properties["faction"] == factionName:
self.factionUnits[factionName].append(agent.getFifeId())
self.selectUnit(None)
self.projectileGraveyard = ProjectileGraveyard(self.view.layers["TrajectoryLayer"], self.combatManager)
self.combatRecorder = CombatRecorder(self.universe)
self.combatPlayer = CombatPlayer(self.universe)
def resetAPs(self):
'''
Resets the AP points of all the units to their maximum.
'''
for unitID in self.factionUnits[self.currentTurn]:
print "Reseting: ", unitID
unit = self.unitManager.getAgent(unitID)
unit.resetAP()
def nextTurn(self):
'''
Skips to the next turn
'''
if self.factionNames[0] == self.currentTurn:
self.currentTurn = self.factionNames[1]
else:
self.currentTurn = self.factionNames[0]
self.selectUnit(None)
self.resetAPs()
self.setMode(self.MODE_DEFAULT)
## TO-DO: add a message stating whose turn it is.
# print self.instance_to_agent
# for key in self.instance_to_agent.keys():
# instance = self.instance_to_agent[key]
# instance.runTurn()
def applyDamage(self, location, damage):
'''
Deals damage to a specific location (and all the units within).
:param location: Place where the damage is applied in the map.
:param damage: Amount of damage dealt
:return:
'''
targetIDs = self.getInstancesAt(location)
for unitID in targetIDs:
agent = self.unitManager.getAgent(unitID)
print "Dealt %s damage to %s" % (damage, agent.instance.getId())
agent.getDamage(damage)
if damage > self._dmgThreshold:
self.view.addBurnedGround(location)
def unitDied(self, unitID, explode=False):
'''
Process the destruction of a unit
:param unitID: ID of the destroyed unit
:return:
'''
if self.activeUnit == unitID:
self.selectUnit(None)
self.setMode(self.MODE_DEFAULT)
agent = self.unitManager.getAgent(unitID)
if explode:
self.unitGraveyard.add(agent.instance, explode)
self.view.removeVisual(agent)
self.unitManager.removeInstance(unitID, explode)
def reset(self):
### TODO This should be fixed!!!
pass
def onAttackButtonPressed(self, attackType):
if self.activeUnit:
self.attackType = attackType
self.setMode(self.MODE_ATTACK)
self.HUD.updateUI()
else:
pass
# TODO: Reproduce error sound.
def startDeploy(self, storage):
self.storage = storage
self.setMode(self.MODE_DEPLOY)
if not self.unitManager:
self.unitManager = self._world.unitManager
building = self.getActiveAgent()
properties = building.properties
## Show the available cells:
buildingLocation = building.instance.getLocation()
self.listener._cellSelectionRenderer.reset()
startingPos = buildingLocation.getMapCoordinates()
for x in range(-1 , properties["SizeX"] +1):
for y in range(-1 , properties["SizeY"]+1):
cellPos = fife.DoublePoint3D(startingPos)
cellPos.x -= x
cellPos.y -= y
loc = fife.Location(buildingLocation)
loc.setMapCoordinates(cellPos)
self.listener._cellSelectionRenderer.selectLocation(loc)
|
voxmedia/thumbor | refs/heads/master | thumbor/filters/strip_icc.py | 7 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.filters import BaseFilter, filter_method
class Filter(BaseFilter):
@filter_method()
def strip_icc(self):
self.engine.strip_icc()
|
harvard-dce/dce_lti_py | refs/heads/master | dce_lti_py/launch_params.py | 2 | import sys
from collections import defaultdict, MutableMapping
from dce_lti_py import DEFAULT_LTI_VERSION
py = sys.version_info
if py < (2, 6, 0): bytes=str
def touni(s, enc='utf8', err='strict'):
return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
LAUNCH_PARAMS_REQUIRED = [
'lti_message_type',
'lti_version',
'resource_link_id'
]
LAUNCH_PARAMS_RECOMMENDED = [
'resource_link_description',
'resource_link_title',
'user_id',
'user_image',
'roles',
'lis_person_name_given',
'lis_person_name_family',
'lis_person_name_full',
'lis_person_contact_email_primary',
'role_scope_mentor',
'context_id',
'context_label',
'context_title',
'context_type',
'launch_presentation_locale',
'launch_presentation_document_target',
'launch_presentation_css_url',
'launch_presentation_width',
'launch_presentation_height',
'launch_presentation_return_url',
'tool_consumer_info_product_family_code',
'tool_consumer_info_version',
'tool_consumer_instance_guid',
'tool_consumer_instance_name',
'tool_consumer_instance_description',
'tool_consumer_instance_url',
'tool_consumer_instance_contact_email',
]
LAUNCH_PARAMS_LIS = [
'lis_course_section_sourcedid',
'lis_course_offering_sourcedid',
'lis_outcome_service_url',
'lis_person_sourcedid',
'lis_result_sourcedid',
]
LAUNCH_PARAMS_RETURN_URL = [
'lti_errormsg',
'lti_errorlog',
'lti_msg',
'lti_log'
]
LAUNCH_PARAMS_OAUTH = [
'oauth_consumer_key',
'oauth_signature_method',
'oauth_timestamp',
'oauth_nonce',
'oauth_version',
'oauth_signature',
'oauth_callback'
]
LAUNCH_PARAMS_IS_LIST = [
'roles',
'role_scope_mentor',
'context_type'
]
LAUNCH_PARAMS_CANVAS = [
'selection_directive',
'text'
]
LAUNCH_PARAMS = LAUNCH_PARAMS_REQUIRED + \
LAUNCH_PARAMS_RECOMMENDED + \
LAUNCH_PARAMS_RETURN_URL + \
LAUNCH_PARAMS_OAUTH + \
LAUNCH_PARAMS_LIS + \
LAUNCH_PARAMS_CANVAS
def valid_param(param):
if param.startswith('custom_') or param.startswith('ext_'):
return True
elif param in LAUNCH_PARAMS:
return True
return False
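# For example: valid_param('roles') and valid_param('custom_foo') are True
# (custom_/ext_ prefixes are always accepted); valid_param('bogus') is False.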
class InvalidLaunchParamError(ValueError):
def __init__(self, param):
message = "{} is not a valid launch param".format(param)
super(Exception, self).__init__(message)
class LaunchParams(MutableMapping):
"""
Represents the params for an LTI launch request. Provides dict-like
behavior through the use of the MutableMapping ABC mixin. Strictly
enforces that params are valid LTI params.
"""
def __init__(self, *args, **kwargs):
self._params = dict()
self.update(*args, **kwargs)
# now verify we only got valid launch params
for k in self.keys():
if not valid_param(k):
raise InvalidLaunchParamError(k)
# enforce some defaults
if 'lti_version' not in self:
self['lti_version'] = DEFAULT_LTI_VERSION
if 'lti_message_type' not in self:
self['lti_message_type'] = 'basic-lti-launch-request'
def set_non_spec_param(self, param, val):
self._params[param] = val
def get_non_spec_param(self, param):
return self._params.get(param)
def _param_value(self, param):
if param in LAUNCH_PARAMS_IS_LIST:
return [x.strip() for x in self._params[param].split(',')]
else:
return self._params[param]
def __len__(self):
return len(self._params)
def __getitem__(self, item):
if not valid_param(item):
raise KeyError("{} is not a valid launch param".format(item))
try:
return self._param_value(item)
except KeyError:
# catch and raise new KeyError in the proper context
raise KeyError(item)
def __setitem__(self, key, value):
if not valid_param(key):
raise InvalidLaunchParamError(key)
if key in LAUNCH_PARAMS_IS_LIST:
if isinstance(value, list):
value = ','.join([x.strip() for x in value])
self._params[key] = value
def __delitem__(self, key):
if key in self._params:
del self._params[key]
def __iter__(self):
return iter(self._params)
|
ruediger/gcc-python-plugin | refs/heads/cpp | tests/cpychecker/refcounts/attributes/returns-borrowed-ref/correct-usage/script.py | 206 | # -*- coding: utf-8 -*-
# Copyright 2011 David Malcolm <dmalcolm@redhat.com>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
from libcpychecker import main
main(verify_refcounting=True,
dump_traces=True)
|
rholy/dnf | refs/heads/master | tests/cli/commands/test_clean.py | 10 | # Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from tests.support import mock
import dnf.const
import dnf.cli.commands.clean as clean
import hawkey
import tests.support
class CleanTest(tests.support.TestCase):
def test_clean_binary_cache(self):
base = tests.support.MockBase('main')
with mock.patch('os.access', return_value=True) as access,\
mock.patch('dnf.cli.commands.clean._clean_filelist'):
clean._clean_binary_cache(base.repos, base.conf.cachedir)
self.assertEqual(len(access.call_args_list), 5)
fname = access.call_args_list[0][0][0]
assert fname.startswith(dnf.const.TMPDIR)
assert fname.endswith(hawkey.SYSTEM_REPO_NAME + '.solv')
fname = access.call_args_list[1][0][0]
assert fname.endswith('main.solv')
fname = access.call_args_list[2][0][0]
assert fname.endswith('main-filenames.solvx')
def test_clean_files_local(self):
"""Do not delete files from a local repo."""
base = tests.support.MockBase("main")
repo = base.repos['main']
repo.baseurl = ['file:///dnf-bad-test']
repo.basecachedir = '/tmp/dnf-bad-test'
with mock.patch('dnf.cli.commands.clean._clean_filelist'),\
mock.patch('os.path.exists', return_value=True) as exists_mock:
dnf.cli.commands.clean._clean_files(base.repos, ['rpm'], 'pkgdir',
'package')
# local repo is not even checked for directory existence:
self.assertIsNone(exists_mock.call_args)
|
abhishekgahlot/or-tools | refs/heads/master | examples/python/sicherman_dice.py | 34 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sicherman Dice in Google CP Solver.
From http://en.wikipedia.org/wiki/Sicherman_dice
""
Sicherman dice are the only pair of 6-sided dice which are not normal dice,
bear only positive integers, and have the same probability distribution for
the sum as normal dice.
The faces on the dice are numbered 1, 2, 2, 3, 3, 4 and 1, 3, 4, 5, 6, 8.
""
I read about this problem in a book/column by Martin Gardner a long
time ago, and got inspired to model it now by the WolframBlog post
"Sicherman Dice": http://blog.wolfram.com/2010/07/13/sicherman-dice/
This model gets the two different ways, first the standard way and
then the Sicherman dice:
x1 = [1, 2, 3, 4, 5, 6]
x2 = [1, 2, 3, 4, 5, 6]
----------
x1 = [1, 2, 2, 3, 3, 4]
x2 = [1, 3, 4, 5, 6, 8]
Extra: If we also allow 0 (zero) as a valid value then the
following two solutions are also valid:
x1 = [0, 1, 1, 2, 2, 3]
x2 = [2, 4, 5, 6, 7, 9]
----------
x1 = [0, 1, 2, 3, 4, 5]
x2 = [2, 3, 4, 5, 6, 7]
These two extra cases are mentioned here:
http://mathworld.wolfram.com/SichermanDice.html
Compare with these models:
* MiniZinc: http://hakank.org/minizinc/sicherman_dice.mzn
* Gecode: http://hakank.org/gecode/sicherman_dice.cpp
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main():
# Create the solver.
solver = pywrapcp.Solver("Sicherman dice")
#
# data
#
n = 6
m = 10
# standard distribution
standard_dist = [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]
#
# declare variables
#
# the two dice
x1 = [solver.IntVar(0, m, "x1(%i)" % i) for i in range(n)]
x2 = [solver.IntVar(0, m, "x2(%i)" % i) for i in range(n)]
#
# constraints
#
# [solver.Add(standard_dist[k] == solver.Sum([x1[i] + x2[j] == k+2 for i in range(n) for j in range(n)]))
# for k in range(len(standard_dist))]
for k in range(len(standard_dist)):
tmp = [solver.BoolVar() for i in range(n) for j in range(n)]
for i in range(n):
for j in range(n):
solver.Add(tmp[i * n + j] == solver.IsEqualCstVar(x1[i] + x2[j], k + 2))
solver.Add(standard_dist[k] == solver.Sum(tmp))
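# For example, k = 0 corresponds to the sum 2: tmp[i*n + j] is true exactly
# when x1[i] + x2[j] == 2, and the Sum must equal standard_dist[0] == 1,
# since only 1+1 rolls a 2 with standard dice.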
# symmetry breaking
[solver.Add(x1[i] <= x1[i + 1]) for i in range(n - 1)]
[solver.Add(x2[i] <= x2[i + 1]) for i in range(n - 1)]
[solver.Add(x1[i] <= x2[i]) for i in range(n - 1)]
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x1)
solution.Add(x2)
# db: DecisionBuilder
db = solver.Phase(x1 + x2,
solver.INT_VAR_SIMPLE,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
print "x1:", [x1[i].Value() for i in range(n)]
print "x2:", [x2[i].Value() for i in range(n)]
print
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions, "solver.solutions:", solver.Solutions()
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
print "MemoryUsage:", solver.MemoryUsage()
print "SearchDepth:", solver.SearchDepth()
print "SolveDepth:", solver.SolveDepth()
print "stamp:", solver.Stamp()
print "solver", solver
if __name__ == "__main__":
main()
|
gugarosa/app_h | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 1407 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
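# e.g. EncodeRspFileList(['cl.exe', '/nologo', r'out dir\x.obj']) roughly
# yields: cl.exe "/nologo" "out dir\x.obj" -- argv[0] is left unquoted.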
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
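# e.g. _GenericRetrieve({'a': {'b': 1}}, None, ['a', 'b']) -> 1
#      _GenericRetrieve({'a': {}}, 'dflt', ['a', 'b'])     -> 'dflt'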
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
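# e.g. _AddPrefix(['foo', 'bar'], '/I') -> ['/Ifoo', '/Ibar'];
#      _AddPrefix('foo', '/I')          -> '/Ifoo'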
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
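# e.g. _DoRemapping(['a', 'b'], {'a': 'x'}) -> ['x'] -- 'b' has no mapping and
# is dropped; a callable map is applied directly instead of dict lookup.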
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
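# e.g. acc = []; _AppendOrReturn(acc, ['a', 'b']) extends acc and returns None,
# while _AppendOrReturn(None, 'x') simply returns 'x'.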
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
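# e.g. AdjustLibraries(['-lfoo', 'bar.lib', 'baz']) -> ['foo.lib', 'bar.lib', 'baz.lib']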
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
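# e.g. a config whose msvs_configuration_platform is 'x64' (with no
# msvs_target_platform override) yields 'x64'; unknown names fall back to 'x86'.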
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
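    # Illustrative result (not part of the original source): with UAC enabled
    # and the defaults above (execution level '0', UACUIAccess 'false'), the
    # generated manifest is the <assembly> wrapper around a trustInfo block
    # containing:
    #   <requestedExecutionLevel level='asInvoker' uiAccess='false' />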
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
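  # Illustrative sketch of the produced command line (cygwin_dir and the
  # arguments are hypothetical): for path_to_base 'out/Release' and
  # args ['echo', 'hi'] this returns roughly
  #   call "c:\cygwin\setup_env.bat" && set CYGWIN=nontsec &&
  #     bash -c "cd out/Release ; 'echo' 'hi'"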
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
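# Illustrative defaults (assuming the generator later expands ${root} to the
# idl source basename): for foo.idl with no explicit MIDL settings, the
# implicit outputs are foo.h, dlldata.c, foo_i.c and foo_p.c; foo.tlb is
# exposed via the 'tlb' variable but deliberately not listed as an output.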
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
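# Illustrative checks (not part of the original source):
#   _LanguageMatchesForPch('.c', '.c')    -> True   (both C)
#   _LanguageMatchesForPch('.cpp', '.cc') -> True   (both C++)
#   _LanguageMatchesForPch('.c', '.cpp')  -> False  (mixed languages)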
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
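# Illustrative usage (macro name and paths are hypothetical):
#   ExpandMacros('$(OutDir)\\foo.dll', {'$(OutDir)': 'out\\Release'})
#   -> 'out\\Release\\foo.dll'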
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
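# Illustrative sketch (a single assumed variable, so dict ordering is moot):
#   _FormatAsEnvironmentBlock({'TMP': 'C:\\tmp'}) -> 'TMP=C:\\tmp\x00\x00'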
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
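# Illustrative sketch (path is hypothetical): for output containing a line
#   LOC:C:\VS\VC\bin\cl.exe
# _ExtractCLPath returns 'C:\\VS\\VC\\bin\\cl.exe'; the first LOC: line wins.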
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
  If this procedure for generating environment files does not meet your
  needs (e.g. for custom toolchains), you can pass
  "-G ninja_use_custom_environment_files" to gyp to suppress file
  generation and supply environment files that you prepare yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
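# Illustrative outcome (paths hypothetical): for toplevel_build_dir
# 'out/Release' this writes out/Release/environment.x86 and
# out/Release/environment.x64 for gyp-win-tool to consume, and returns e.g.
#   {'x86': 'C:\\VS\\VC\\bin\\cl.exe', 'x64': 'C:\\VS\\VC\\bin\\amd64\\cl.exe'}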
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
|
ralic/wireshark | refs/heads/master | tools/dftestlib/ipv4.py | 40 | # Copyright (c) 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from dftestlib import dftest
class testIPv4(dftest.DFTest):
trace_file = "nfs.pcap"
def test_uint64_1(self):
dfilter = "nfs.fattr3.size == 264032"
self.assertDFilterCount(dfilter, 1)
def test_eq_1(self):
dfilter = "ip.src == 172.25.100.14"
self.assertDFilterCount(dfilter, 1)
def test_eq_2(self):
dfilter = "ip.src == 255.255.255.255"
self.assertDFilterCount(dfilter, 0)
def test_ne_1(self):
dfilter = "ip.src != 172.25.100.14"
self.assertDFilterCount(dfilter, 1)
def test_ne_2(self):
dfilter = "ip.src != 255.255.255.255"
self.assertDFilterCount(dfilter, 2)
def test_gt_1(self):
dfilter = "ip.dst > 198.95.230.200"
self.assertDFilterCount(dfilter, 0)
def test_gt_2(self):
dfilter = "ip.dst > 198.95.230.20"
self.assertDFilterCount(dfilter, 0)
def test_gt_3(self):
dfilter = "ip.dst > 198.95.230.10"
self.assertDFilterCount(dfilter, 1)
def test_ge_1(self):
dfilter = "ip.dst >= 198.95.230.200"
self.assertDFilterCount(dfilter, 0)
def test_ge_2(self):
dfilter = "ip.dst >= 198.95.230.20"
self.assertDFilterCount(dfilter, 1)
def test_ge_3(self):
dfilter = "ip.dst >= 198.95.230.10"
self.assertDFilterCount(dfilter, 1)
def test_lt_1(self):
dfilter = "ip.src < 172.25.100.140"
self.assertDFilterCount(dfilter, 1)
def test_lt_2(self):
dfilter = "ip.src < 172.25.100.14"
self.assertDFilterCount(dfilter, 0)
def test_lt_3(self):
dfilter = "ip.src < 172.25.100.10"
self.assertDFilterCount(dfilter, 0)
def test_le_1(self):
dfilter = "ip.src <= 172.25.100.140"
self.assertDFilterCount(dfilter, 1)
def test_le_2(self):
dfilter = "ip.src <= 172.25.100.14"
self.assertDFilterCount(dfilter, 1)
def test_le_3(self):
dfilter = "ip.src <= 172.25.100.10"
self.assertDFilterCount(dfilter, 0)
def test_cidr_eq_1(self):
dfilter = "ip.src == 172.25.100.14/32"
self.assertDFilterCount(dfilter, 1)
def test_cidr_eq_2(self):
dfilter = "ip.src == 172.25.100.0/24"
self.assertDFilterCount(dfilter, 1)
def test_cidr_eq_3(self):
dfilter = "ip.src == 172.25.0.0/16"
self.assertDFilterCount(dfilter, 1)
def test_cidr_eq_4(self):
dfilter = "ip.src == 172.0.0.0/8"
self.assertDFilterCount(dfilter, 1)
def test_cidr_ne_1(self):
dfilter = "ip.src != 172.25.100.14/32"
self.assertDFilterCount(dfilter, 1)
def test_cidr_ne_2(self):
dfilter = "ip.src != 172.25.100.0/24"
self.assertDFilterCount(dfilter, 1)
def test_cidr_ne_3(self):
dfilter = "ip.src != 172.25.0.0/16"
self.assertDFilterCount(dfilter, 1)
def test_cidr_ne_4(self):
dfilter = "ip.src != 200.0.0.0/8"
self.assertDFilterCount(dfilter, 2)
|
bhanvadia/lge-mako-kernel | refs/heads/kitkat | tools/perf/scripts/python/netdev-times.py | 11271 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
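# One plausible invocation (the exact form depends on how perf installed the
# bundled scripts; the device name is illustrative):
#   perf script record netdev-times
#   perf script report netdev-times dev=eth0 debug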
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
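# Illustrative check (values invented): diff_msec(1000000, 4500000) == 3.5,
# i.e. 3.5 msec elapsed between the two nanosecond timestamps.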
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
|
ajduncan/granolacms | refs/heads/master | fckeditor/editor/filemanager/connectors/py/fckcommands.py | 93 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
def getFolders(self, resourceType, currentFolder):
"""
		Purpose: command to receive a list of folders
"""
# Map the virtual path to our local server
serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
s = """<Folders>""" # Open the folders node
for someObject in os.listdir(serverPath):
someObjectPath = mapServerFolder(serverPath, someObject)
if os.path.isdir(someObjectPath):
s += """<Folder name="%s" />""" % (
convertToXmlAttribute(someObject)
)
s += """</Folders>""" # Close the folders node
return s
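# Illustrative output (folder names hypothetical): for a server directory
# containing subfolders "images" and "docs", getFolders returns (listing
# order follows os.listdir):
#   <Folders><Folder name="images" /><Folder name="docs" /></Folders>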
class GetFoldersAndFilesCommandMixin (object):
def getFoldersAndFiles(self, resourceType, currentFolder):
"""
		Purpose: command to receive a list of folders and files
"""
# Map the virtual path to our local server
serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
# Open the folders / files node
folders = """<Folders>"""
files = """<Files>"""
for someObject in os.listdir(serverPath):
someObjectPath = mapServerFolder(serverPath, someObject)
if os.path.isdir(someObjectPath):
folders += """<Folder name="%s" />""" % (
convertToXmlAttribute(someObject)
)
elif os.path.isfile(someObjectPath):
				size = os.path.getsize(someObjectPath)
				files += """<File name="%s" size="%s" />""" % (
						convertToXmlAttribute(someObject),
						size
					)
# Close the folders / files node
folders += """</Folders>"""
files += """</Files>"""
return folders + files
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
		errorNo = 0; errorMsg = ''
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
				errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodings!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
		# No need to check if the parent exists, just create the whole hierarchy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
			os.makedirs(folderPath, mode=permissions)
os.umask( oldumask )
class UploadFileCommandMixin (object):
def uploadFile(self, resourceType, currentFolder):
"""
Purpose: command to upload files to server (same as FileUpload)
"""
errorNo = 0
if self.request.has_key("NewFile"):
# newFile has all the contents we need
newFile = self.request.get("NewFile", "")
# Get the file name
newFileName = newFile.filename
newFileName = sanitizeFileName( newFileName )
newFileNameOnly = removeExtension(newFileName)
newFileExtension = getExtension(newFileName).lower()
allowedExtensions = Config.AllowedExtensions[resourceType]
deniedExtensions = Config.DeniedExtensions[resourceType]
if (allowedExtensions):
# Check for allowed
isAllowed = False
if (newFileExtension in allowedExtensions):
isAllowed = True
elif (deniedExtensions):
# Check for denied
isAllowed = True
if (newFileExtension in deniedExtensions):
isAllowed = False
else:
# No extension limitations
isAllowed = True
if (isAllowed):
# Upload to operating system
# Map the virtual path to the local server path
currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
i = 0
while (True):
newFilePath = os.path.join (currentFolderPath,newFileName)
if os.path.exists(newFilePath):
i += 1
newFileName = "%s(%04d).%s" % (
newFileNameOnly, i, newFileExtension
)
errorNo= 201 # file renamed
else:
# Read file contents and write to the desired path (similar to php's move_uploaded_file)
fout = file(newFilePath, 'wb')
while (True):
chunk = newFile.file.read(100000)
if not chunk: break
fout.write (chunk)
fout.close()
if os.path.exists ( newFilePath ):
doChmod = False
try:
doChmod = Config.ChmodOnUpload
permissions = Config.ChmodOnUpload
except AttributeError: #ChmodOnUpload undefined
doChmod = True
permissions = 0755
if ( doChmod ):
oldumask = os.umask(0)
os.chmod( newFilePath, permissions )
os.umask( oldumask )
newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
return self.sendUploadResults( errorNo , newFileUrl, newFileName )
else:
return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
else:
return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
|
amyvmiwei/kbengine | refs/heads/master | kbe/src/lib/python/Modules/_ctypes/libffi/generate-ios-source-and-headers.py | 183 | #!/usr/bin/env python
import subprocess
import re
import os
import errno
import collections
import sys
class Platform(object):
pass
sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')
def sdkinfo(sdkname):
ret = {}
for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'], stdout=subprocess.PIPE).stdout:
kv = line.strip().split(': ', 1)
if len(kv) == 2:
k,v = kv
ret[k] = v
return ret
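# Illustrative result (values hypothetical): sdkinfo('iphoneos') parses lines
# such as "Path: /Applications/Xcode.app/.../iPhoneOS.sdk" from the output of
# "xcodebuild -sdk iphoneos -version" into
#   {'Path': '/Applications/Xcode.app/.../iPhoneOS.sdk', ...}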
sim_sdk_info = sdkinfo('iphonesimulator')
device_sdk_info = sdkinfo('iphoneos')
def latest_sdks():
latest_sim = None
latest_device = None
for line in subprocess.Popen(['xcodebuild', '-showsdks'], stdout=subprocess.PIPE).stdout:
match = sdk_re.match(line)
if match:
if 'Simulator' in line:
latest_sim = match.group(1)
elif 'iOS' in line:
latest_device = match.group(1)
return latest_sim, latest_device
sim_sdk, device_sdk = latest_sdks()
class simulator_platform(Platform):
sdk='iphonesimulator'
arch = 'i386'
name = 'simulator'
triple = 'i386-apple-darwin10'
sdkroot = sim_sdk_info['Path']
prefix = "#if !defined(__arm__) && defined(__i386__)\n\n"
suffix = "\n\n#endif"
class device_platform(Platform):
sdk='iphoneos'
name = 'ios'
arch = 'armv7'
triple = 'arm-apple-darwin10'
sdkroot = device_sdk_info['Path']
prefix = "#ifdef __arm__\n\n"
suffix = "\n\n#endif"
def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
out_filename = filename
if file_suffix:
split_name = os.path.splitext(filename)
out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
with open(os.path.join(src_dir, filename)) as in_file:
with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
if prefix:
out_file.write(prefix)
out_file.write(in_file.read())
if suffix:
out_file.write(suffix)
headers_seen = collections.defaultdict(set)
def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None, prefix=None, suffix=None):
for root, dirs, files in os.walk(src_dir, followlinks=True):
relroot = os.path.relpath(root,src_dir)
def move_dir(arch, prefix='', suffix='', files=[]):
for file in files:
file_suffix = None
if file.endswith('.h'):
if dest_include_dir:
file_suffix = arch
if arch:
headers_seen[file].add(arch)
move_file(root, dest_include_dir, file, arch, prefix=prefix, suffix=suffix)
elif dest_dir:
outroot = os.path.join(dest_dir, relroot)
move_file(root, outroot, file, prefix=prefix, suffix=suffix)
if relroot == '.':
move_dir(arch=arch,
files=files,
prefix=prefix,
suffix=suffix)
elif relroot == 'arm':
move_dir(arch='arm',
prefix="#ifdef __arm__\n\n",
suffix="\n\n#endif",
files=files)
elif relroot == 'x86':
move_dir(arch='i386',
prefix="#if !defined(__arm__) && defined(__i386__)\n\n",
suffix="\n\n#endif",
files=files)
def build_target(platform):
def xcrun_cmd(cmd):
return subprocess.check_output(['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()
build_dir = 'build_' + platform.name
if not os.path.exists(build_dir):
os.makedirs(build_dir)
env = dict(CC=xcrun_cmd('clang'),
LD=xcrun_cmd('ld'),
CFLAGS='-arch %s -isysroot %s -miphoneos-version-min=4.0' % (platform.arch, platform.sdkroot))
working_dir=os.getcwd()
try:
os.chdir(build_dir)
subprocess.check_call(['../configure', '-host', platform.triple], env=env)
move_source_tree('.', None, '../ios/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
move_source_tree('./include', None, '../ios/include',
arch=platform.arch,
prefix=platform.prefix,
suffix=platform.suffix)
finally:
os.chdir(working_dir)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
def main():
move_source_tree('src', 'ios/src', 'ios/include')
move_source_tree('include', None, 'ios/include')
build_target(simulator_platform)
build_target(device_platform)
for header_name, archs in headers_seen.iteritems():
basename, suffix = os.path.splitext(header_name)
with open(os.path.join('ios/include', header_name), 'w') as header:
for arch in archs:
header.write('#include <%s_%s%s>\n' % (basename, arch, suffix))
if __name__ == '__main__':
main()
|
40223226/2015cd_midterm2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/importlib/__init__.py | 610 | """A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery #fix me brython
try:
import _frozen_importlib as _bootstrap
except ImportError:
from . import _bootstrap
_bootstrap._setup(sys, _imp)
else:
# importlib._bootstrap is the built-in import, ensure we don't create
# a second copy of the module.
_bootstrap.__name__ = 'importlib._bootstrap'
_bootstrap.__package__ = 'importlib'
_bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
sys.modules['importlib._bootstrap'] = _bootstrap
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
"""Call the invalidate_caches() method on all meta path finders stored in
sys.meta_path (where implemented)."""
for finder in sys.meta_path:
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
def find_loader(name, path=None):
"""Find the loader for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__loader__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable loader with the
value of 'path' given to the finders. None is returned if no loader could
be found.
Dotted names do not have their parent packages implicitly imported. You will
most likely need to explicitly import all parent packages in the proper
order for a submodule to get the correct loader.
"""
try:
loader = sys.modules[name].__loader__
if loader is None:
raise ValueError('{}.__loader__ is None'.format(name))
else:
return loader
except KeyError:
pass
return _bootstrap._find_module(name, path)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
for character in name:
if character != '.':
break
level += 1
return _bootstrap._gcd_import(name[level:], package, level)
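# Illustrative usage (package layout hypothetical):
#   import_module('pkg.mod')              # absolute import of pkg.mod
#   import_module('.mod', package='pkg')  # relative import anchored at pkg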
#need at least one import hook for importlib stuff to work.
import basehook
sys.meta_path.append(basehook.BaseHook())
|
JaySon-Huang/misc | refs/heads/master | leetoj/171.py | 1 | # Excel Sheet Column Number
# https://leetcode.com/problemset/algorithms/
class Solution(object):
def titleToNumber(self, s):
"""
:type s: str
:rtype: int
"""
        front_char_ord = ord('A') - 1
num = 0
for ch in s:
num *= 26
num += ord(ch) - front_char_ord
return num
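# Illustrative checks (base-26 with 'A' == 1, not part of the submission):
#   'A' -> 1, 'Z' -> 26, 'AA' -> 27, 'AB' -> 1*26 + 2 == 28
#   assert Solution().titleToNumber('AB') == 28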
|
BlackSynder/SelfBot | refs/heads/master | Cogs/google.py | 2 | from urllib.parse import quote as uriquote
import discord
from discord.ext import commands
from lxml import etree
class Google:
def __init__(self, bot):
self.bot = bot
def parse_google_card(self, node):
e = discord.Embed(colour=discord.Colour.blurple())
# check if it's a calculator card:
calculator = node.find(".//span[@class='cwclet']")
if calculator is not None:
e.title = 'Calculator'
result = node.find(".//span[@class='cwcot']")
if result is not None:
result = ' '.join((calculator.text, result.text.strip()))
else:
result = calculator.text + ' ???'
e.description = result
return e
# check for unit conversion card
unit_conversions = node.xpath(".//input[contains(@class, '_eif') and @value]")
if len(unit_conversions) == 2:
e.title = 'Unit Conversion'
# the <input> contains our values, first value = second value essentially.
# these <input> also have siblings with <select> and <option selected=1>
# that denote what units we're using
# We will get 2 <option selected="1"> nodes by traversing the parent
# The first unit being converted (e.g. Miles)
# The second unit being converted (e.g. Feet)
xpath = etree.XPath("parent::div/select/option[@selected='1']/text()")
try:
first_node = unit_conversions[0]
first_unit = xpath(first_node)[0]
first_value = float(first_node.get('value'))
second_node = unit_conversions[1]
second_unit = xpath(second_node)[0]
second_value = float(second_node.get('value'))
e.description = ' '.join((str(first_value), first_unit, '=', str(second_value), second_unit))
except Exception:
return None
else:
return e
# check for currency conversion card
if 'currency' in node.get('class', ''):
currency_selectors = node.xpath(".//div[@class='ccw_unit_selector_cnt']")
if len(currency_selectors) == 2:
e.title = 'Currency Conversion'
# Inside this <div> is a <select> with <option selected="1"> nodes
# just like the unit conversion card.
first_node = currency_selectors[0]
first_currency = first_node.find("./select/option[@selected='1']")
second_node = currency_selectors[1]
second_currency = second_node.find("./select/option[@selected='1']")
# The parent of the nodes have a <input class='vk_gy vk_sh ccw_data' value=...>
xpath = etree.XPath("parent::td/parent::tr/td/input[@class='vk_gy vk_sh ccw_data']")
try:
first_value = float(xpath(first_node)[0].get('value'))
second_value = float(xpath(second_node)[0].get('value'))
values = (
str(first_value),
first_currency.text,
f'({first_currency.get("value")})',
'=',
str(second_value),
second_currency.text,
f'({second_currency.get("value")})'
)
e.description = ' '.join(values)
except Exception:
return None
else:
return e
# check for generic information card
info = node.find(".//div[@class='_f2g']")
if info is not None:
try:
e.title = ''.join(info.itertext()).strip()
actual_information = info.xpath("parent::div/parent::div//div[@class='_XWk' \
or contains(@class, 'kpd-ans')]")[0]
e.description = ''.join(actual_information.itertext()).strip()
except Exception:
return None
else:
return e
# check for translation card
translation = node.find(".//div[@id='tw-ob']")
if translation is not None:
src_text = translation.find(".//pre[@id='tw-source-text']/span")
src_lang = translation.find(".//select[@id='tw-sl']/option[@selected='1']")
dest_text = translation.find(".//pre[@id='tw-target-text']/span")
dest_lang = translation.find(".//select[@id='tw-tl']/option[@selected='1']")
# TODO: bilingual dictionary nonsense?
e.title = 'Translation'
try:
e.add_field(name=src_lang.text, value=src_text.text, inline=True)
e.add_field(name=dest_lang.text, value=dest_text.text, inline=True)
except Exception:
return None
else:
return e
# check for "time in" card
time = node.find("./div[@class='vk_bk vk_ans']")
if time is not None:
date = node.find("./div[@class='vk_gy vk_sh']")
try:
e.title = node.find('span').text
e.description = f'{time.text}\n{"".join(date.itertext()).strip()}'
except Exception:
return None
else:
return e
# time in has an alternative form without spans
time = node.find("./div/div[@class='vk_bk vk_ans _nEd']")
if time is not None:
converted = "".join(time.itertext()).strip()
try:
# remove the in-between text
parent = time.getparent()
parent.remove(time)
original = "".join(parent.itertext()).strip()
e.title = 'Time Conversion'
e.description = f'{original}...\n{converted}'
except Exception:
return None
else:
return e
# check for definition card
words = node.xpath(".//span[@data-dobid='hdw']")
if words:
lex = etree.XPath(".//div[@class='lr_dct_sf_h']/i/span")
# this one is derived if we were based on the position from lex
xpath = etree.XPath("../../../ol[@class='lr_dct_sf_sens']//"
"div[not(@class and @class='lr_dct_sf_subsen')]/"
"div[@class='_Jig']/div[@data-dobid='dfn']/span")
for word in words:
# we must go two parents up to get the root node
root = word.getparent().getparent()
pronunciation = root.find(".//span[@class='lr_dct_ph']/span")
if pronunciation is None:
continue
lexical_category = lex(root)
definitions = xpath(root)
for category in lexical_category:
definitions = xpath(category)
try:
descrip = [f'*{category.text}*']
for index, value in enumerate(definitions, 1):
descrip.append(f'{index}. {value.text}')
e.add_field(name=f'{word.text} /{pronunciation.text}/', value='\n'.join(descrip))
except: # noqa
continue
return e
# check for weather card
location = node.find("./div[@id='wob_loc']")
if location is None:
return None
# these units should be metric
date = node.find("./div[@id='wob_dts']")
# <img alt="category here" src="cool image">
category = node.find(".//img[@id='wob_tci']")
xpath = etree.XPath(".//div[@id='wob_d']//div[contains(@class, 'vk_bk')]//span[@class='wob_t']")
temperatures = xpath(node)
misc_info_node = node.find(".//div[@class='vk_gy vk_sh wob-dtl']")
if misc_info_node is None:
return None
precipitation = misc_info_node.find("./div/span[@id='wob_pp']")
humidity = misc_info_node.find("./div/span[@id='wob_hm']")
wind = misc_info_node.find("./div/span/span[@id='wob_tws']")
try:
e.title = 'Weather for ' + location.text.strip()
e.description = f'*{category.get("alt")}*'
e.set_thumbnail(url='https:' + category.get('src'))
if len(temperatures) == 4:
first_unit = temperatures[0].text + temperatures[2].text
second_unit = temperatures[1].text + temperatures[3].text
units = f'{first_unit} | {second_unit}'
else:
units = 'Unknown'
e.add_field(name='Temperature', value=units, inline=False)
if precipitation is not None:
e.add_field(name='Precipitation', value=precipitation.text)
if humidity is not None:
e.add_field(name='Humidity', value=humidity.text)
if wind is not None:
e.add_field(name='Wind', value=wind.text)
except: # noqa
return None
return e
async def get_google_entries(self, query):
url = f'https://www.google.com/search?q={uriquote(query)}'
params = {
'safe': 'on',
'lr': 'lang_en',
'hl': 'en'
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) Gecko/20100101 Firefox/53.0'
}
# list of URLs and title tuples
entries = []
# the result of a google card, an embed
card = None
async with self.bot.session.get(url, params=params, headers=headers) as resp:
if resp.status != 200:
raise RuntimeError('Google has failed to respond.')
root = etree.fromstring(await resp.text(), etree.HTMLParser())
# for bad in root.xpath('//style'):
# bad.getparent().remove(bad)
# for bad in root.xpath('//script'):
# bad.getparent().remove(bad)
# with open('google.html', 'w', encoding='utf-8') as f:
# f.write(etree.tostring(root, pretty_print=True).decode('utf-8'))
"""
Tree looks like this.. sort of..
<div class="rc">
<h3 class="r">
<a href="url here">title here</a>
</h3>
</div>
"""
card_node = root.xpath(".//div[@id='rso']/div[@class='_NId']//"
"div[contains(@class, 'vk_c') or @class='g mnr-c g-blk' or @class='kp-blk']")
if card_node is None or len(card_node) == 0:
card = None
else:
card = self.parse_google_card(card_node[0])
search_results = root.findall(".//div[@class='rc']")
# print(len(search_results))
for node in search_results:
link = node.find("./h3[@class='r']/a")
if link is not None:
# print(etree.tostring(link, pretty_print=True).decode())
entries.append((link.get('href'), link.text))
return card, entries
@commands.command(aliases=['google', 'search'])
async def g(self, ctx, *, query):
"""Searches google and gives you top result."""
await ctx.trigger_typing()
try:
card, entries = await self.get_google_entries(query)
except RuntimeError as e:
await ctx.send(str(e))
else:
if card:
value = '\n'.join(f'[{title}]({url.replace(")", "%29")})' for url, title in entries[:3])
if value:
card.add_field(name='Search Results', value=value, inline=False)
return await ctx.send(embed=card)
if len(entries) == 0:
return await ctx.send('No results found... sorry.')
icon = "https://cdn.discordapp.com/attachments/246291440106340352/293036111373139969/google_logo1600.png"
emb = discord.Embed(colour=0x77EE00, timestamp=ctx.message.created_at)
emb.set_author(name="Google Search",
url="https://www.google.com/search?q=" + query.replace(" ", "+"),
icon_url=icon)
        url, title = entries[0]
        # escape ')' so it does not terminate the markdown link early
        url = url.replace(')', '%29')
        emb.add_field(name="Search Result", value=f'[{title}]({url})')
        next_two = entries[1:3]
        if next_two:
            formatted = '\n'.join(f'[{t}]({u.replace(")", "%29")})' for u, t in next_two)
            emb.add_field(name="More", value=formatted, inline=False)
await ctx.send(embed=emb)
def setup(bot):
bot.add_cog(Google(bot))
|
cuspaceflight/firefish | refs/heads/master | test/test_kinematics.py | 1 | import firefish.kinematics as kine
import pytest
@pytest.fixture
def kilogram_point_mass():
return kine.KinematicBody(1, [1, 1, 1])
def test_kinematics_vert(kilogram_point_mass):
dur = 10
dt = 0.1
g = 10
F = [0,0,20]
    sim = kine.KinematicSimulation(kilogram_point_mass, g, dur, dt)
while sim.tIndex*dt <= dur:
sim.time_step(F, [0, 0, 0], 0)
zPos = sim.posits[100, 2]
calcZPos = 0.5*10*dur**2
distTol = 1
assert(abs(zPos-calcZPos)<distTol)
def test_kinematics_mass(kilogram_point_mass):
dur = 10
dt = 0.1
g = 10
    sim = kine.KinematicSimulation(kilogram_point_mass, g, dur, dt)
sim.time_step([0,0,0],[0,0,0],1)
massTol = 0.01
assert(abs(sim.body.mass-(1.0-1.0*dt))<massTol)
|
sam-tsai/django | refs/heads/master | tests/template_tests/filter_tests/test_make_list.py | 345 | from django.template.defaultfilters import make_list
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils.safestring import mark_safe
from ..utils import setup
class MakeListTests(SimpleTestCase):
"""
The make_list filter can destroy existing escaping, so the results are
escaped.
"""
@setup({'make_list01': '{% autoescape off %}{{ a|make_list }}{% endautoescape %}'})
def test_make_list01(self):
output = self.engine.render_to_string('make_list01', {"a": mark_safe("&")})
self.assertEqual(output, str_prefix("[%(_)s'&']"))
@setup({'make_list02': '{{ a|make_list }}'})
def test_make_list02(self):
output = self.engine.render_to_string('make_list02', {"a": mark_safe("&")})
self.assertEqual(output, str_prefix("[%(_)s'&']"))
@setup({'make_list03':
'{% autoescape off %}{{ a|make_list|stringformat:"s"|safe }}{% endautoescape %}'})
def test_make_list03(self):
output = self.engine.render_to_string('make_list03', {"a": mark_safe("&")})
self.assertEqual(output, str_prefix("[%(_)s'&']"))
@setup({'make_list04': '{{ a|make_list|stringformat:"s"|safe }}'})
def test_make_list04(self):
output = self.engine.render_to_string('make_list04', {"a": mark_safe("&")})
self.assertEqual(output, str_prefix("[%(_)s'&']"))
class FunctionTests(SimpleTestCase):
def test_string(self):
self.assertEqual(make_list('abc'), ['a', 'b', 'c'])
def test_integer(self):
self.assertEqual(make_list(1234), ['1', '2', '3', '4'])
|
samuelcolvin/aiohttp-devtools | refs/heads/master | tests/test_runserver_logs.py | 1 | import json
import logging
import re
import sys
from unittest.mock import MagicMock
import pytest
from aiohttp_devtools.logs import AccessFormatter, DefaultFormatter
from aiohttp_devtools.runserver.log_handlers import AccessLogger, AuxAccessLogger, parse_body
def test_aiohttp_std():
info = MagicMock()
logger = type('Logger', (), {'info': info})
logger = AccessLogger(logger, None)
request = MagicMock()
request.method = 'GET'
request.path_qs = '/foobar?v=1'
response = MagicMock()
response.status = 200
response.body_length = 100
logger.log(request, response, 0.15)
assert info.call_count == 1
log = json.loads(info.call_args[0][0])
time = log.pop('time')
assert re.fullmatch(r'\[\d\d:\d\d:\d\d\]', time)
assert log == {
'prefix': '●',
'msg': 'GET /foobar?v=1 200 100B 150ms',
'dim': False,
}
def test_aiohttp_debugtoolbar():
info = MagicMock()
logger = type('Logger', (), {'info': info})
logger = AccessLogger(logger, None)
request = MagicMock()
request.method = 'GET'
request.path_qs = '/_debugtoolbar/whatever'
response = MagicMock()
response.status = 200
response.body_length = 100
logger.log(request, response, 0.15)
assert info.call_count == 1
log = json.loads(info.call_args[0][0])
time = log.pop('time')
assert re.fullmatch(r'\[\d\d:\d\d:\d\d\]', time)
assert log == {
'prefix': '●',
'msg': 'GET /_debugtoolbar/whatever 200 100B 150ms',
'dim': True,
}
def test_aux_logger():
info = MagicMock()
logger = type('Logger', (), {'info': info})
logger = AuxAccessLogger(logger, None)
request = MagicMock()
request.method = 'GET'
request.path = '/'
request.path_qs = '/'
response = MagicMock()
response.status = 200
response.body_length = 100
logger.log(request, response, 0.15)
assert info.call_count == 1
log = json.loads(info.call_args[0][0])
time = log.pop('time')
assert re.fullmatch(r'\[\d\d:\d\d:\d\d\]', time)
assert log == {
'prefix': '◆',
'msg': 'GET / 200 100B',
'dim': False,
}
def test_aux_logger_livereload():
info = MagicMock()
logger = type('Logger', (), {'info': info})
logger = AuxAccessLogger(logger, None)
request = MagicMock()
request.method = 'GET'
request.path = '/livereload.js'
request.path_qs = '/livereload.js'
response = MagicMock()
response.status = 200
response.body_length = 100
logger.log(request, response, 0.15)
assert info.call_count == 0
def test_extra():
info = MagicMock()
logger = type('Logger', (), {'info': info})
logger = AccessLogger(logger, None)
request = MagicMock()
request.method = 'GET'
request.headers = {'Foo': 'Bar'}
request.path_qs = '/foobar?v=1'
request._read_bytes = b'testing'
response = MagicMock()
response.status = 500
response.body_length = 100
response.headers = {'Foo': 'Spam'}
response.text = 'testing'
logger.log(request, response, 0.15)
assert info.call_count == 1
assert info.call_args[1]['extra'] == {
'details': {
'request_duration_ms': 150.0,
'request_headers': {
'Foo': 'Bar',
},
'request_body': b'testing',
'request_size': '7B',
'response_headers': {
'Foo': 'Spam',
},
'response_body': 'testing',
}
}
@pytest.mark.parametrize('value,result', [
(None, None),
('foobar', 'foobar'),
(b'foobar', b'foobar'),
('{"foo": "bar"}', {'foo': 'bar'}),
])
def test_parse_body(value, result):
assert parse_body(value, 'testing') == result
@pytest.mark.skipif(sys.version_info < (3, 6), reason='3.5 behaves differently')
def test_parse_body_unicode_decode():
with pytest.warns(UserWarning):
assert parse_body(b'will fail: \x80', 'testing') == 'will fail: '
def _mk_record(msg, level=logging.INFO, **extra):
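    # Minimal stand-in for logging.LogRecord: it carries only the attributes
    # the formatters under test actually read (levelno, exc_info/exc_text/
    # stack_info, any extras, and getMessage()).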
class Record:
levelno = level
exc_info = None
exc_text = None
stack_info = None
def __init__(self):
if extra:
for k, v in extra.items():
setattr(self, k, v)
def getMessage(self):
return msg
return Record()
def test_dft_formatter():
f = DefaultFormatter()
assert f.format(_mk_record('testing')) == 'testing'
def test_dft_formatter_colour():
f = DefaultFormatter()
f.stream_is_tty = True
assert f.format(_mk_record('testing')) == '\x1b[32mtesting\x1b[0m'
def test_dft_formatter_colour_time():
f = DefaultFormatter()
f.stream_is_tty = True
assert f.format(_mk_record('[time] testing')) == '\x1b[35m[time]\x1b[0m\x1b[32m testing\x1b[0m'
def test_access_formatter():
f = AccessFormatter()
msg = json.dumps({'time': '_time_', 'prefix': '_p_', 'msg': '_msg_', 'dim': False})
assert f.format(_mk_record(msg)) == '_time_ _p_ _msg_'
def test_access_formatter_no_json():
f = AccessFormatter()
assert f.format(_mk_record('foobar')) == 'foobar'
def test_access_formatter_colour():
f = AccessFormatter()
f.stream_is_tty = True
msg = json.dumps({'time': '_time_', 'prefix': '_p_', 'msg': '_msg_', 'dim': False})
assert f.format(_mk_record(msg)) == '\x1b[35m_time_\x1b[0m \x1b[34m_p_\x1b[0m \x1b[0m_msg_\x1b[0m'
def test_access_formatter_extra():
f = AccessFormatter()
msg = json.dumps({'time': '_time_', 'prefix': '_p_', 'msg': '_msg_', 'dim': False})
assert f.format(_mk_record(msg, details={'foo': 'bar'})) == (
'details: {\n'
" 'foo': 'bar',\n"
'}\n'
'_time_ _p_ _msg_'
)
def test_access_formatter_exc():
f = AccessFormatter()
try:
raise RuntimeError('testing')
except RuntimeError:
stack = f.formatException(sys.exc_info())
assert stack.startswith('Traceback (most recent call last):\n')
assert stack.endswith('RuntimeError: testing\n')
def test_access_formatter_exc_colour():
f = AccessFormatter()
f.stream_is_tty = True
try:
raise RuntimeError('testing')
except RuntimeError:
stack = f.formatException(sys.exc_info())
assert stack.startswith('\x1b[38;5;26mTraceback')
|
dresden-weekly/ansible | refs/heads/release1.9.1-clean | plugins/inventory/consul_io.py | 11 | #!/usr/bin/env python
#
# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
'''
Consul.io inventory script (http://consul.io)
======================================
Generates Ansible inventory from nodes in a Consul cluster. This script will
group nodes by:
- datacenter,
- registered service
- service tags
- service status
- values from the k/v store
This script can be run with the switches
--list as expected groups all the nodes in all datacenters
--datacenter, to restrict the nodes to a single datacenter
--host to restrict the inventory to a single named node. (requires datacenter config)
The configuration for this plugin is read from a consul.ini file located in the
same directory as this inventory script. All config options in the config file
are optional except the host and port, which must point to a valid agent or
server running the http api. For more information on enabling the endpoint see.
http://www.consul.io/docs/agent/options.html
Other options include:
'datacenter':
which restricts the included nodes to those from the given datacenter
'domain':
if specified then the inventory will generate domain names that will resolve
via Consul's inbuilt DNS. The name is derived from the node name, datacenter
and domain <node_name>.node.<datacenter>.<domain>. Note that you will need to
have consul hooked into your DNS server for these to resolve. See the consul
DNS docs for more info.
'servers_suffix':
  defines a suffix to add to the service name when creating the service
  group. e.g. a service name of 'redis' and a suffix of '_servers' will add
  each node's address to the group name 'redis_servers'. No suffix is added
if this is not set
'tags':
boolean flag defining if service tags should be used to create Inventory
groups e.g. an nginx service with the tags ['master', 'v1'] will create
groups nginx_master and nginx_v1 to which the node running the service
will be added. No tag groups are created if this is missing.
'token':
ACL token to use to authorize access to the key value store. May be required
to retrieve the kv_groups and kv_metadata based on your consul configuration.
'kv_groups':
This is used to lookup groups for a node in the key value store. It specifies a
path to which each discovered node's name will be added to create a key to query
the key/value store. There it expects to find a comma separated list of group
names to which the node should be added e.g. if the inventory contains
'nyc-web-1' and kv_groups = 'ansible/groups' then the key
'v1/kv/ansible/groups/nyc-web-1' will be queried for a group list. If this query
  returned 'test,honeypot' then the node address would be added to both groups.
'kv_metadata':
kv_metadata is used to lookup metadata for each discovered node. Like kv_groups
above it is used to build a path to lookup in the kv store where it expects to
find a json dictionary of metadata entries. If found, each key/value pair in the
dictionary is added to the metadata for the node.
'availability':
if true then availability groups will be created for each service. The node will
be added to one of the groups based on the health status of the service. The
group name is derived from the service name and the configurable availability
suffixes
'available_suffix':
suffix that should be appended to the service availability groups for available
services e.g. if the suffix is '_up' and the service is nginx, then nodes with
healthy nginx services will be added to the nginix_up group. Defaults to
'_available'
'unavailable_suffix':
as above but for unhealthy services, defaults to '_unavailable'
Note that if the inventory discovers an 'ssh' service running on a node it will
register the port as ansible_ssh_port in the node's metadata and this port will
be used to access the machine.
'''
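# A minimal consul.ini illustrating the options documented above. The values
# below are hypothetical; only host/port (or url) must point at a real agent:
#
#   [consul]
#   url = http://localhost:8500
#   datacenter = nyc1
#   servers_suffix = _servers
#   tags = true
#   kv_groups = ansible/groups
#   kv_metadata = ansible/metadata
#   availability = true
#   available_suffix = _up
#   unavailable_suffix = _down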
import os
import re
import sys
import argparse
from time import time
import ConfigParser
import urllib, urllib2, base64
try:
import json
except ImportError:
import simplejson as json
try:
import consul
except ImportError, e:
print """failed=True msg='python-consul required for this module. see
http://python-consul.readthedocs.org/en/latest/#installation'"""
sys.exit(1)
class ConsulInventory(object):
def __init__(self):
''' Create an inventory based on the catalog of nodes and services
registered in a consul cluster'''
self.node_metadata = {}
self.nodes = {}
self.nodes_by_service = {}
self.nodes_by_tag = {}
self.nodes_by_datacenter = {}
self.nodes_by_kv = {}
self.nodes_by_availability = {}
self.current_dc = None
config = ConsulConfig()
self.config = config
self.consul_api = config.get_consul_api()
if config.has_config('datacenter'):
if config.has_config('host'):
self.load_data_for_node(config.host, config.datacenter)
else:
self.load_data_for_datacenter(config.datacenter)
else:
self.load_all_data_consul()
self.combine_all_results()
print json.dumps(self.inventory, sort_keys=True, indent=2)
def load_all_data_consul(self):
''' cycle through each of the datacenters in the consul catalog and process
the nodes in each '''
self.datacenters = self.consul_api.catalog.datacenters()
for datacenter in self.datacenters:
self.current_dc = datacenter
self.load_data_for_datacenter(datacenter)
def load_availability_groups(self, node, datacenter):
        '''check the health of each service on a node and add the node to either
an 'available' or 'unavailable' grouping. The suffix for each group can be
controlled from the config'''
if self.config.has_config('availability'):
for service_name, service in node['Services'].iteritems():
for node in self.consul_api.health.service(service_name)[1]:
for check in node['Checks']:
if check['ServiceName'] == service_name:
ok = 'passing' == check['Status']
if ok:
suffix = self.config.get_availability_suffix(
'available_suffix', '_available')
else:
suffix = self.config.get_availability_suffix(
'unavailable_suffix', '_unavailable')
self.add_node_to_map(self.nodes_by_availability,
service_name + suffix, node['Node'])
def load_data_for_datacenter(self, datacenter):
'''processes all the nodes in a particular datacenter'''
index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
for node in nodes:
self.add_node_to_map(self.nodes_by_datacenter, datacenter, node)
self.load_data_for_node(node['Node'], datacenter)
def load_data_for_node(self, node, datacenter):
        '''loads the data for a single node, adding it to various groups based on
        metadata retrieved from the kv store and service availability'''
index, node_data = self.consul_api.catalog.node(node, dc=datacenter)
node = node_data['Node']
self.add_node_to_map(self.nodes, 'all', node)
self.add_metadata(node_data, "consul_datacenter", datacenter)
self.add_metadata(node_data, "consul_nodename", node['Node'])
self.load_groups_from_kv(node_data)
self.load_node_metadata_from_kv(node_data)
self.load_availability_groups(node_data, datacenter)
for name, service in node_data['Services'].items():
self.load_data_from_service(name, service, node_data)
def load_node_metadata_from_kv(self, node_data):
        ''' load the json dict at the metadata path defined by the kv_metadata value
        and the node name; add each entry in the dictionary to the node's
        metadata '''
node = node_data['Node']
if self.config.has_config('kv_metadata'):
key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
index, metadata = self.consul_api.kv.get(key)
if metadata and metadata['Value']:
try:
metadata = json.loads(metadata['Value'])
for k,v in metadata.items():
self.add_metadata(node_data, k, v)
except:
pass
def load_groups_from_kv(self, node_data):
        ''' load the comma separated list of groups at the path defined by the
        kv_groups config value and the node name; add the node address to each
        group found '''
node = node_data['Node']
if self.config.has_config('kv_groups'):
key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
index, groups = self.consul_api.kv.get(key)
if groups and groups['Value']:
for group in groups['Value'].split(','):
self.add_node_to_map(self.nodes_by_kv, group.strip(), node)
def load_data_from_service(self, service_name, service, node_data):
'''process a service registered on a node, adding the node to a group with
the service name. Each service tag is extracted and the node is added to a
tag grouping also'''
self.add_metadata(node_data, "consul_services", service_name, True)
if self.is_service("ssh", service_name):
self.add_metadata(node_data, "ansible_ssh_port", service['Port'])
if self.config.has_config('servers_suffix'):
service_name = service_name + self.config.servers_suffix
self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node'])
self.extract_groups_from_tags(service_name, service, node_data)
def is_service(self, target, name):
return name and (name.lower() == target.lower())
def extract_groups_from_tags(self, service_name, service, node_data):
'''iterates each service tag and adds the node to groups derived from the
service and tag names e.g. nginx_master'''
if self.config.has_config('tags') and service['Tags']:
tags = service['Tags']
self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
for tag in service['Tags']:
tagname = service_name +'_'+tag
self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
def combine_all_results(self):
'''prunes and sorts all groupings for combination into the final map'''
self.inventory = {"_meta": { "hostvars" : self.node_metadata}}
groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
for grouping in groupings:
for name, addresses in grouping.items():
self.inventory[name] = sorted(list(set(addresses)))
def add_metadata(self, node_data, key, value, is_list = False):
''' Pushed an element onto a metadata dict for the node, creating
the dict if it doesn't exist '''
key = self.to_safe(key)
node = self.get_inventory_name(node_data['Node'])
if node in self.node_metadata:
metadata = self.node_metadata[node]
else:
metadata = {}
self.node_metadata[node] = metadata
if is_list:
self.push(metadata, key, value)
else:
metadata[key] = value
def get_inventory_name(self, node_data):
'''return the ip or a node name that can be looked up in consul's dns'''
domain = self.config.domain
if domain:
node_name = node_data['Node']
if self.current_dc:
return '%s.node.%s.%s' % ( node_name, self.current_dc, domain)
else:
return '%s.node.%s' % ( node_name, domain)
else:
return node_data['Address']
def add_node_to_map(self, map, name, node):
self.push(map, name, self.get_inventory_name(node))
def push(self, my_dict, key, element):
''' Pushed an element onto an array that may not have been defined in the
dict '''
key = self.to_safe(key)
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used
as Ansible groups '''
return re.sub('[^A-Za-z0-9\-\.]', '_', word)
def sanitize_dict(self, d):
new_dict = {}
for k, v in d.items():
if v != None:
new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
return new_dict
def sanitize_list(self, seq):
new_seq = []
for d in seq:
new_seq.append(self.sanitize_dict(d))
return new_seq
class ConsulConfig(dict):
def __init__(self):
self.read_settings()
self.read_cli_args()
def has_config(self, name):
if hasattr(self, name):
return getattr(self, name)
else:
return False
def read_settings(self):
''' Reads the settings from the consul.ini file '''
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini')
config_options = ['host', 'token', 'datacenter', 'servers_suffix',
'tags', 'kv_metadata', 'kv_groups', 'availability',
'unavailable_suffix', 'available_suffix', 'url',
'domain']
for option in config_options:
value = None
if config.has_option('consul', option):
value = config.get('consul', option)
setattr(self, option, value)
def read_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description=
            'Produce an Ansible Inventory file based on nodes in a Consul cluster')
parser.add_argument('--list', action='store_true',
help='Get all inventory variables from all nodes in the consul cluster')
parser.add_argument('--host', action='store',
help='Get all inventory variables about a specific consul node, \
requires datacenter set in consul.ini.')
parser.add_argument('--datacenter', action='store',
help='Get all inventory about a specific consul datacenter')
args = parser.parse_args()
arg_names = ['host', 'datacenter']
for arg in arg_names:
if getattr(args, arg):
setattr(self, arg, getattr(args, arg))
def get_availability_suffix(self, suffix, default):
if self.has_config(suffix):
return self.has_config(suffix)
return default
def get_consul_api(self):
'''get an instance of the api based on the supplied configuration'''
host = 'localhost'
port = 8500
token = None
if hasattr(self, 'url'):
from urlparse import urlparse
o = urlparse(self.url)
if o.hostname:
host = o.hostname
if o.port:
port = o.port
if hasattr(self, 'token'):
token = self.token
if not token:
token = 'anonymous'
return consul.Consul(host=host, port=port, token=token)
ConsulInventory()
|
o5k/openerp-oemedical-v0.1 | refs/heads/master | openerp/addons/hr_expense/report/hr_expense_report.py | 52 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_expense_report(osv.osv):
_name = "hr.expense.report"
_description = "Expenses Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Date ', readonly=True),
'year': fields.char('Year', size=4, readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')], 'Month',readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'journal_id': fields.many2one('account.journal', 'Force Journal', readonly=True),
'product_qty':fields.float('Qty', readonly=True),
'employee_id': fields.many2one('hr.employee', "Employee's Name", readonly=True),
'date_confirm': fields.date('Confirmation Date', readonly=True),
'date_valid': fields.date('Validation Date', readonly=True),
'voucher_id': fields.many2one('account.voucher', 'Receipt', readonly=True),
'department_id':fields.many2one('hr.department','Department', readonly=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Validation User', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'price_total':fields.float('Total Price', readonly=True, digits_compute=dp.get_precision('Account')),
'delay_valid':fields.float('Delay to Valid', readonly=True),
'delay_confirm':fields.float('Delay to Confirm', readonly=True),
'analytic_account': fields.many2one('account.analytic.account','Analytic account',readonly=True),
'price_average':fields.float('Average Price', readonly=True, digits_compute=dp.get_precision('Account')),
'nbr':fields.integer('# of Lines', readonly=True),
'no_of_products':fields.integer('# of Products', readonly=True),
'no_of_account':fields.integer('# of Accounts', readonly=True),
'state': fields.selection([
('draft', 'Draft'),
('confirm', 'Waiting confirmation'),
('accepted', 'Accepted'),
('done', 'Done'),
('cancelled', 'Cancelled')],
'Status', readonly=True),
}
_order = 'date desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'hr_expense_report')
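        # Recreate the reporting view: one row per expense line, grouped by
        # day/product/analytic account, with confirmation/validation delays
        # expressed in days and an average price guarded against division by
        # zero.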
cr.execute("""
create or replace view hr_expense_report as (
select
min(l.id) as id,
date_trunc('day',s.date) as date,
s.employee_id,
s.journal_id,
s.currency_id,
to_date(to_char(s.date_confirm, 'dd-MM-YYYY'),'dd-MM-YYYY') as date_confirm,
to_date(to_char(s.date_valid, 'dd-MM-YYYY'),'dd-MM-YYYY') as date_valid,
s.voucher_id,
s.user_valid as user_id,
s.department_id,
to_char(date_trunc('day',s.create_date), 'YYYY') as year,
to_char(date_trunc('day',s.create_date), 'MM') as month,
to_char(date_trunc('day',s.create_date), 'YYYY-MM-DD') as day,
avg(extract('epoch' from age(s.date_valid,s.date)))/(3600*24) as delay_valid,
avg(extract('epoch' from age(s.date_valid,s.date_confirm)))/(3600*24) as delay_confirm,
l.product_id as product_id,
l.analytic_account as analytic_account,
sum(l.unit_quantity * u.factor) as product_qty,
s.company_id as company_id,
sum(l.unit_quantity*l.unit_amount) as price_total,
(sum(l.unit_quantity*l.unit_amount)/sum(case when l.unit_quantity=0 or u.factor=0 then 1 else l.unit_quantity * u.factor end))::decimal(16,2) as price_average,
count(*) as nbr,
(select unit_quantity from hr_expense_line where id=l.id and product_id is not null) as no_of_products,
(select analytic_account from hr_expense_line where id=l.id and analytic_account is not null) as no_of_account,
s.state
from hr_expense_line l
left join hr_expense_expense s on (s.id=l.expense_id)
left join product_uom u on (u.id=l.uom_id)
group by
date_trunc('day',s.date),
to_char(date_trunc('day',s.create_date), 'YYYY'),
to_char(date_trunc('day',s.create_date), 'MM'),
to_char(date_trunc('day',s.create_date), 'YYYY-MM-DD'),
to_date(to_char(s.date_confirm, 'dd-MM-YYYY'),'dd-MM-YYYY'),
to_date(to_char(s.date_valid, 'dd-MM-YYYY'),'dd-MM-YYYY'),
l.product_id,
l.analytic_account,
s.voucher_id,
s.currency_id,
s.user_valid,
s.department_id,
l.uom_id,
l.id,
s.state,
s.journal_id,
s.company_id,
s.employee_id
)
""")
hr_expense_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
staute/shinken-mod-livestatus | refs/heads/master | module/livestatus_query_error.py | 3 |
class LiveStatusQueryError(Exception):
messages = {
200: 'OK',
404: 'Invalid GET request, no such table \'%s\'',
450: 'Invalid GET request, no such column \'%s\'',
452: 'Completely invalid GET request \'%s\'',
500: 'Internal server error: %r',
}
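# Usage sketch (an assumption, not shown in this module): callers raise the
# exception with a status code and a detail value, e.g.
#   raise LiveStatusQueryError(450, column_name)
# and the handler formats LiveStatusQueryError.messages[code] % detail.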
|
v0i0/lammps | refs/heads/master | tools/i-pi/ipi/tests/test_io.py | 41 | """Deals with testing the io system.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http.//www.gnu.org/licenses/>.
Note that this will only run if you have Python version 2.5 or later.
Otherwise, replace all the with statements with f = filestream.
"""
import sys
sys.path.append("../")
sys.path.append("../../")
import filecmp
import os, sys
import numpy as np
from numpy.testing import assert_equal
from common import local
from ipi.engine.cell import Cell
from ipi.utils.io import io_xyz
from ipi.utils.io import io_pdb
pos = np.array([i for i in range(3*3)])
def test_read_xyz():
"""Tests that xyz files are read correctly."""
with open(local("test.pos_0.xyz"), "r") as f:
atoms = io_xyz.read_xyz(f)
assert(len(atoms) == 3)
assert_equal(pos, atoms.q)
def test_iter_xyz():
"""Tests that xyz files with multiple frames are read correctly."""
with open(local("test.pos_0.xyz"), "r") as f:
for num, atoms in enumerate(io_xyz.iter_xyz(f)):
assert(len(atoms) == 3)
assert_equal(pos*(num+1), atoms.q)
def test_read_pdb():
"""Tests that pdb files are read correctly."""
with open(local("test.pos_0.pdb"), "r") as f:
atoms, cell = io_pdb.read_pdb(f)
assert(len(atoms) == 3)
assert_equal(pos, atoms.q)
# TODO: test cell
def test_iter_pdb():
"""Tests that pdb files with multiple frames are read correctly."""
with open(local("test.pos_0.pdb"), "r") as f:
for num, (atoms, cell) in enumerate(io_pdb.iter_pdb(f)):
assert(len(atoms) == 3)
assert_equal(pos*(num+1), atoms.q)
def test_print_xyz():
    """Tests that xyz files are printed correctly."""
with open(local("test.pos_0.pdb"), "r") as f:
with open(local("test.pos_1.xyz"), "w") as out:
for num, (atoms, cell) in enumerate(io_pdb.iter_pdb(f)):
assert(len(atoms) == 3)
assert_equal(pos*(num+1), atoms.q)
io_xyz.print_xyz(atoms, Cell(h=np.identity(3, float)), filedesc=out)
assert(filecmp.cmp(local("test.pos_0.xyz"), local("test.pos_1.xyz")))
os.unlink(local("test.pos_1.xyz"))
def test_print_pdb():
    """Tests that pdb files are printed correctly."""
with open(local("test.pos_0.pdb"), "r") as f:
with open(local("test.pos_1.pdb"), "w") as out:
for num, (atoms, cell) in enumerate(io_pdb.iter_pdb(f)):
assert(len(atoms) == 3)
assert_equal(pos*(num+1), atoms.q)
io_pdb.print_pdb(atoms, Cell(h=np.identity(3, float)), filedesc=out)
assert(filecmp.cmp(local("test.pos_0.pdb"), local("test.pos_1.pdb")))
os.unlink(local("test.pos_1.pdb"))
|
Distrotech/scons | refs/heads/distrotech-scons | test/CC/SHCCCOMSTR.py | 5 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the $SHCCCOMSTR construction variable allows you to customize
the shared object C compilation output.
"""
import os
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.write('mycc.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
infile = open(sys.argv[2], 'rb')
for l in [l for l in infile.readlines() if l[:6] != '/*cc*/']:
outfile.write(l)
sys.exit(0)
""")
if os.path.normcase('.c') == os.path.normcase('.C'):
alt_c_suffix = '.C'
else:
alt_c_suffix = '.c'
test.write('SConstruct', """
env = Environment(SHCCCOM = r'%(_python_)s mycc.py $TARGET $SOURCE',
SHCCCOMSTR = 'Building $TARGET from $SOURCE',
SHOBJPREFIX='',
SHOBJSUFFIX='.obj')
env.SharedObject(target = 'test1', source = 'test1.c')
env.SharedObject(target = 'test2', source = 'test2%(alt_c_suffix)s')
""" % locals())
test.write('test1.c', """\
test1.c
/*cc*/
""")
test.write('test2'+alt_c_suffix, """\
test2.C
/*cc*/
""")
test.run(stdout = test.wrap_stdout("""\
Building test1.obj from test1.c
Building test2.obj from test2%(alt_c_suffix)s
""" % locals()))
test.must_match('test1.obj', "test1.c\n")
test.must_match('test2.obj', "test2.C\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Juniper/python-neutronclient | refs/heads/master | neutronclient/tests/unit/test_cli20_rbac.py | 4 | # Copyright 2015 Huawei Technologies India Pvt Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import testscenarios
from neutronclient.neutron.v2_0 import rbac
from neutronclient.tests.unit import test_cli20
load_tests = testscenarios.load_tests_apply_scenarios
class CLITestV20RBACBaseJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['rbac_policy']
scenarios = [
('network rbac objects',
{'object_type_name': 'network', 'object_type_val': 'network'}),
('qos policy rbac objects',
{'object_type_name': 'qos-policy', 'object_type_val': 'qos_policy'}),
]
def test_create_rbac_policy_with_mandatory_params(self):
# Create rbac: rbac_object --type <object_type_name> --action
# access_as_shared
resource = 'rbac_policy'
cmd = rbac.CreateRBACPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'rbac_object'
myid = 'myid'
args = [name, '--type', self.object_type_name,
'--action', 'access_as_shared']
position_names = ['object_id', 'object_type',
'target_tenant', 'action']
position_values = [name, self.object_type_val, '*',
'access_as_shared']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_rbac_policy_with_all_params(self):
# Create rbac: rbac_object --type <object_type_name>
# --target-tenant tenant_id --action access_as_external
resource = 'rbac_policy'
cmd = rbac.CreateRBACPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'rbac_object'
myid = 'myid'
args = [name, '--type', self.object_type_name,
'--target-tenant', 'tenant_id',
'--action', 'access_as_external']
position_names = ['object_id', 'object_type',
'target_tenant', 'action']
position_values = [name, self.object_type_val, 'tenant_id',
'access_as_external']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_rbac_policy_with_unicode(self):
# Create rbac policy u'\u7f51\u7edc'.
resource = 'rbac_policy'
cmd = rbac.CreateRBACPolicy(test_cli20.MyApp(sys.stdout), None)
name = u'\u7f51\u7edc'
myid = 'myid'
args = [name, '--type', self.object_type_name,
'--target-tenant', 'tenant_id',
'--action', 'access_as_external']
position_names = ['object_id', 'object_type',
'target_tenant', 'action']
position_values = [name, self.object_type_val, 'tenant_id',
'access_as_external']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_update_rbac_policy(self):
# rbac-update <rbac-uuid> --target-tenant <other-tenant-uuid>.
resource = 'rbac_policy'
cmd = rbac.UpdateRBACPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--target-tenant', 'tenant_id'],
{'target_tenant': 'tenant_id', })
def test_delete_rbac_policy(self):
# rbac-delete my-id.
resource = 'rbac_policy'
cmd = rbac.DeleteRBACPolicy(test_cli20.MyApp(sys.stdout), None)
my_id = 'myid1'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
def test_list_rbac_policies(self):
# rbac-list.
resources = "rbac_policies"
cmd = rbac.ListRBACPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_rbac_policies_pagination(self):
# rbac-list with pagination.
resources = "rbac_policies"
cmd = rbac.ListRBACPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_rbac_policies_sort(self):
# sorted list:
# rbac-list --sort-key name --sort-key id --sort-key asc
# --sort-key desc
resources = "rbac_policies"
cmd = rbac.ListRBACPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_rbac_policies_limit(self):
# size (1000) limited list: rbac-list -P.
resources = "rbac_policies"
cmd = rbac.ListRBACPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_rbac_policy(self):
# rbac-show test_id.
resource = 'rbac_policy'
cmd = rbac.ShowRBACPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
|
robcarver17/pysystemtrade | refs/heads/master | sysdata/mongodb/mongo_generic.py | 1 | from copy import copy
from datetime import date, time
from syscore.objects import arg_not_supplied, missingData, existingData, missing_data
from sysdata.mongodb.mongo_connection import (
mongoConnection,
MONGO_ID_KEY,
mongo_clean_ints,
clean_mongo_host
)
class mongoDataWithSingleKey(object):
"""
Read and write data class to get data from a mongo database
"""
def __init__(self, collection_name, key_name, mongo_db=arg_not_supplied):
self.init_mongo(collection_name, key_name, mongo_db=mongo_db)
    def init_mongo(self, collection_name: str, key_name: str, mongo_db=arg_not_supplied):
mongo_object = mongoConnection(collection_name, mongo_db=mongo_db)
self._mongo = mongo_object
self._key_name = key_name
# this won't create the index if it already exists
# if a different index exists (FIX ME WILL HAPPEN UNTIL NEW DATA READY)...
try:
self._mongo.create_index(self.key_name)
except:
pass
## no big deal
def __repr__(self):
return self.name
@property
def key_name(self) -> str:
return self._key_name
@property
def name(self) -> str:
col = self._mongo.collection_name
db = self._mongo.database_name
host = clean_mongo_host(self._mongo.host)
return f"mongoData connection for {col}/{db}, {host}"
@property
def collection(self):
return self._mongo.collection
def get_list_of_keys(self)->list:
cursor = self.collection.find()
key_name = self.key_name
key_list = [db_entry[key_name] for db_entry in cursor]
return key_list
def get_list_of_values_for_dict_key(self, dict_key):
key_list = self.get_list_of_keys()
list_of_results = [self.get_result_dict_for_key(key) for key in key_list]
list_of_values = [item_dict.get(dict_key, None) for item_dict in list_of_results]
return list_of_values
def get_result_dict_for_key(self, key) ->dict:
key_name = self.key_name
result_dict = self.collection.find_one(
{key_name:key}
)
if result_dict is None:
return missing_data
result_dict.pop(MONGO_ID_KEY)
return result_dict
def get_result_dict_for_key_without_key_value(self, key) ->dict:
key_name = self.key_name
result_dict = self.get_result_dict_for_key(key)
if result_dict is missing_data:
return missing_data
result_dict.pop(key_name)
return result_dict
def get_list_of_result_dict_for_custom_dict(self, custom_dict: dict) -> list:
cursor = self._mongo.collection.find(custom_dict)
dict_list = [db_entry for db_entry in cursor]
_ = [dict_item.pop(MONGO_ID_KEY) for dict_item in dict_list]
return dict_list
def key_is_in_data(self, key):
result = self.get_result_dict_for_key(key)
if result is missing_data:
return False
else:
return True
def delete_data_without_any_warning(
self, key):
key_name = self.key_name
if not self.key_is_in_data(key):
raise missingData("%s:%s not in data %s" % (key_name, key, self.name))
self.collection.remove({key_name: key})
def delete_data_with_any_warning_for_custom_dict(self,
custom_dict: dict):
self.collection.remove(custom_dict)
def add_data(self, key, data_dict: dict, allow_overwrite = False, clean_ints = True):
if clean_ints:
cleaned_data_dict = mongo_clean_ints(data_dict)
else:
cleaned_data_dict = copy(data_dict)
if self.key_is_in_data(key):
if allow_overwrite:
self._update_existing_data_with_cleaned_dict(key, cleaned_data_dict)
else:
raise existingData("Can't overwite existing data %s/%s for %s" % (self.key_name, key, self.name))
else:
try:
self._add_new_cleaned_dict(key, cleaned_data_dict)
except:
## this could happen if the key has just been added most likely for logs
raise existingData("Can't overwite existing data %s/%s for %s" % (self.key_name, key, self.name))
def _update_existing_data_with_cleaned_dict(self, key, cleaned_data_dict):
key_name = self.key_name
self.collection.update_one({key_name:key}, {"$set":cleaned_data_dict})
def _add_new_cleaned_dict(self, key, cleaned_data_dict):
key_name = self.key_name
cleaned_data_dict[key_name] = key
self.collection.insert_one(cleaned_data_dict)
class mongoDataWithMultipleKeys(object):
"""
Read and write data class to get data from a mongo database
Use this if you aren't using a specific key as the index
"""
def __init__(self, collection_name: str, mongo_db=arg_not_supplied):
self.init_mongo(collection_name, mongo_db=mongo_db)
    def init_mongo(self, collection_name: str, mongo_db=arg_not_supplied):
mongo_object = mongoConnection(collection_name, mongo_db=mongo_db)
self._mongo = mongo_object
def __repr__(self):
return self.name
@property
def name(self) -> str:
mongo_object = self._mongo
name = (
"mongoData connection for mongodb %s/%s @ %s -p %s " %
(
mongo_object.database_name,
mongo_object.collection_name,
mongo_object.host,
mongo_object.port,
))
return name
def get_list_of_all_dicts(self)->list:
cursor = self._mongo.collection.find()
dict_list = [db_entry for db_entry in cursor]
_ = [dict_item.pop(MONGO_ID_KEY) for dict_item in dict_list]
return dict_list
def get_result_dict_for_dict_keys(self, dict_of_keys: dict) ->dict:
result_dict = self._mongo.collection.find_one(
dict_of_keys
)
if result_dict is None:
return missing_data
result_dict.pop(MONGO_ID_KEY)
return result_dict
def get_list_of_result_dicts_for_dict_keys(self, dict_of_keys: dict) -> list:
cursor_of_result_dicts = self._mongo.collection.find(
dict_of_keys
)
if cursor_of_result_dicts is None:
return []
list_of_result_dicts = list(cursor_of_result_dicts
)
_ = [result_dict.pop(MONGO_ID_KEY) for result_dict in list_of_result_dicts]
return list_of_result_dicts
def key_dict_is_in_data(self, dict_of_keys: dict) -> bool:
result = self.get_result_dict_for_dict_keys(dict_of_keys)
if result is missing_data:
return False
else:
return True
def add_data(self, dict_of_keys: dict, data_dict: dict, allow_overwrite = False, clean_ints = True):
if clean_ints:
cleaned_data_dict = mongo_clean_ints(data_dict)
else:
cleaned_data_dict = copy(data_dict)
if self.key_dict_is_in_data(dict_of_keys):
if allow_overwrite:
self._update_existing_data_with_cleaned_dict(dict_of_keys, cleaned_data_dict)
else:
raise existingData("Can't overwite existing data %s for %s" % (str(dict_of_keys), self.name))
else:
self._add_new_cleaned_dict(dict_of_keys, cleaned_data_dict)
def _update_existing_data_with_cleaned_dict(self, dict_of_keys: dict, cleaned_data_dict: dict):
self._mongo.collection.update_one(dict_of_keys, {"$set":cleaned_data_dict})
def _add_new_cleaned_dict(self, dict_of_keys: dict, cleaned_data_dict: dict):
dict_with_both_keys_and_data= {}
dict_with_both_keys_and_data.update(cleaned_data_dict)
dict_with_both_keys_and_data.update(dict_of_keys)
self._mongo.collection.insert_one(dict_with_both_keys_and_data)
def delete_data_without_any_warning(
self, dict_of_keys):
self._mongo.collection.remove(dict_of_keys)
_date = date
_time = time |
osh/gnuradio | refs/heads/master | volk/gen/volk_kernel_defs.py | 32 | #
# Copyright 2011-2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import os
import re
import sys
import glob
########################################################################
# Strip comments from a c/cpp file.
# Input is code string, output is code string without comments.
# http://stackoverflow.com/questions/241327/python-snippet-to-remove-c-and-c-comments
########################################################################
def comment_remover(text):
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return ""
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
return re.sub(pattern, replacer, text)
########################################################################
# Split code into nested sections according to ifdef preprocessor macros
########################################################################
def split_into_nested_ifdef_sections(code):
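    """Parse code into a list of (header, body) tuples. Plain code is stored
    under the header 'text'; each #if/#ifdef/#else header owns a nested list
    of subsections, built by recursing into its body."""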
sections = list()
section = ''
header = 'text'
in_section_depth = 0
for i, line in enumerate(code.splitlines()):
m = re.match('^(\s*)#(\s*)(\w+)(.*)$', line)
line_is = 'normal'
if m:
p0, p1, fcn, stuff = m.groups()
if fcn in ('if', 'ifndef', 'ifdef'): line_is = 'if'
if fcn in ('else', 'elif'): line_is = 'else'
if fcn in ('endif',): line_is = 'end'
if line_is == 'if': in_section_depth += 1
if line_is == 'end': in_section_depth -= 1
if in_section_depth == 1 and line_is == 'if':
sections.append((header, section))
section = ''
header = line
continue
if in_section_depth == 1 and line_is == 'else':
sections.append((header, section))
section = ''
header = line
continue
if in_section_depth == 0 and line_is == 'end':
sections.append((header, section))
section = ''
header = 'text'
continue
section += line + '\n'
sections.append((header, section)) #and pack remainder into sections
sections = [sec for sec in sections if sec[1].strip()] #filter empty sections
#recurse into non-text sections to fill subsections
for i, (header, section) in enumerate(sections):
if header == 'text': continue
sections[i] = (header, split_into_nested_ifdef_sections(section))
return sections
########################################################################
# Recursive print of sections to test code above
########################################################################
def print_sections(sections, indent = ' '):
for header, body in sections:
if header == 'text':
print indent, ('\n'+indent).join(body.splitlines())
continue
print indent.replace(' ', '-') + '>', header
print_sections(body, indent + ' ')
########################################################################
# Flatten a section to just body text
########################################################################
def flatten_section_text(sections):
output = ''
for hdr, bdy in sections:
if hdr != 'text': output += flatten_section_text(bdy)
else: output += bdy
return output
########################################################################
# Extract kernel info from section, represent as an implementation
########################################################################
class impl_class:
def __init__(self, kern_name, header, body):
#extract LV_HAVE_*
self.deps = set(map(str.lower, re.findall('LV_HAVE_(\w+)', header)))
#extract function suffix and args
body = flatten_section_text(body)
try:
fcn_matcher = re.compile('^.*(%s\\w*)\\s*\\((.*)$'%kern_name, re.DOTALL | re.MULTILINE)
body = body.split('{')[0].rsplit(')', 1)[0] #get the part before the open ){ bracket
m = fcn_matcher.match(body)
impl_name, the_rest = m.groups()
self.name = impl_name.replace(kern_name+'_', '')
self.args = list()
fcn_args = the_rest.split(',')
for fcn_arg in fcn_args:
arg_matcher = re.compile('^\s*(.*\\W)\s*(\w+)\s*$', re.DOTALL | re.MULTILINE)
m = arg_matcher.match(fcn_arg)
arg_type, arg_name = m.groups()
self.args.append((arg_type, arg_name))
except Exception as ex:
            raise Exception, 'I cannot parse the function prototype from: %s in %s\n%s'%(kern_name, body, ex)
assert self.name
self.is_aligned = self.name.startswith('a_')
def __repr__(self):
return self.name
########################################################################
# Get sets of LV_HAVE_* from the code
########################################################################
def extract_lv_haves(code):
haves = list()
for line in code.splitlines():
if not line.strip().startswith('#'): continue
have_set = set(map(str.lower, re.findall('LV_HAVE_(\w+)', line)))
if have_set: haves.append(have_set)
return haves
########################################################################
# Represent a processing kernel, parse from file
########################################################################
class kernel_class:
def __init__(self, kernel_file):
self.name = os.path.splitext(os.path.basename(kernel_file))[0]
self.pname = self.name.replace('volk_', 'p_')
code = open(kernel_file, 'r').read()
code = comment_remover(code)
sections = split_into_nested_ifdef_sections(code)
self._impls = list()
for header, section in sections:
if 'ifndef' not in header.lower(): continue
for sub_hdr, body in section:
if 'if' not in sub_hdr.lower(): continue
if 'LV_HAVE_' not in sub_hdr: continue
self._impls.append(impl_class(
kern_name=self.name, header=sub_hdr, body=body,
))
assert(self._impls)
self.has_dispatcher = False
for impl in self._impls:
if impl.name == 'dispatcher':
self._impls.remove(impl)
self.has_dispatcher = True
break
self.args = self._impls[0].args
self.arglist_types = ', '.join([a[0] for a in self.args])
self.arglist_full = ', '.join(['%s %s'%a for a in self.args])
self.arglist_names = ', '.join([a[1] for a in self.args])
def get_impls(self, archs):
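        """Return only the implementations whose LV_HAVE_* dependencies are
        all contained in the supplied set of available architectures."""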
archs = set(archs)
impls = list()
for impl in self._impls:
if impl.deps.intersection(archs) == impl.deps:
impls.append(impl)
return impls
def __repr__(self):
return self.name
########################################################################
# Extract information from the VOLK kernels
########################################################################
__file__ = os.path.abspath(__file__)
srcdir = os.path.dirname(os.path.dirname(__file__))
kernel_files = glob.glob(os.path.join(srcdir, "kernels", "volk", "*.h"))
kernels = map(kernel_class, kernel_files)
if __name__ == '__main__':
print kernels
|
havatv/QGIS | refs/heads/master | tests/src/python/test_provider_db2.py | 45 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the DB2 provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'David Adler'
__date__ = '2016-03-01'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
import os
from qgis.core import QgsSettings, QgsVectorLayer
from utilities import unitTestDataPath
from qgis.testing import start_app, unittest
from providertestbase import ProviderTestCase
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsDb2Provider(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
cls.dbconn = "dbname='ostest' driver='IBM DB2 ODBC DRIVER' host=dadler.dynalias.org port=50000 user='osuser' password='osuserpw'"
        if 'QGIS_DB2TEST_DB' in os.environ:
            cls.dbconn = os.environ['QGIS_DB2TEST_DB']
# Create test layer
cls.vl = QgsVectorLayer(cls.dbconn + ' srid=4326 type=Point table="QGIS_TEST"."SOMEDATA" (GEOM) sql=', 'test', 'DB2')
assert(cls.vl.isValid())
cls.source = cls.vl.dataProvider()
cls.poly_vl = QgsVectorLayer(
cls.dbconn + ' srid=4326 type=POLYGON table="QGIS_TEST"."SOME_POLY_DATA" (geom) sql=', 'test', 'DB2')
assert(cls.poly_vl.isValid())
cls.poly_provider = cls.poly_vl.dataProvider()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def setUp(self):
print(("starting " + self._testMethodName))
def getSubsetString(self):
"""Individual providers may need to override this depending on their subset string formats"""
return 'cnt > 100 and cnt < 410'
def enableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', True)
return True
def disableCompiler(self):
QgsSettings().setValue('/qgis/compileExpressions', False)
if __name__ == '__main__':
unittest.main()
|
ztemt/V5s_N918St_KitKat_kernel | refs/heads/master | tools/perf/scripts/python/check-perf-trace.py | 11214 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
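# counts of events seen that have no dedicated handler; reported at trace_end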
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
wathen/PhD | refs/heads/master | MHD/FEniCS/DGLaplacian/DGLaplacian.py | 1 | from dolfin import *
import ipdb
import numpy as np
import matplotlib.pylab as plt
m = 9
err = np.zeros((m-1,1))
N = np.zeros((m-1,1))
errh1 = np.zeros((m-1,1))
nn = 2
for xx in xrange(1,m):
# Create mesh and define function space
n = 2**xx
N[xx-1] = n
mesh = UnitSquareMesh(n,n)
tic()
    V = FunctionSpace(mesh, "DG", 2)
    print 'time to create function spaces', toc(), '\n\n'
# Define test and trial functions
v = TestFunction(V)
u = TrialFunction(V)
def boundary(x, on_boundary):
return on_boundary
u0 = Expression('x[0]*x[1]')
# # p0 = ('0')
# bcs = DirichletBC(V,u0, boundary)
# Define normal component, mesh size and right-hand side
n = FacetNormal(mesh)
h = CellSize(mesh)
h_avg = (h('+') + h('-'))/2
f = Expression('-2*(x[0]*x[0]-x[0]) - 2*(x[1]*x[1]-x[1])')
# Define parameters
alpha = 10.0
gamma = 10.0
# Define variational problem
tic()
a = dot(grad(v), grad(u))*dx \
- dot(avg(grad(v)), jump(u, n))*dS \
- dot(jump(v, n), avg(grad(u)))*dS \
+ alpha/h_avg*dot(jump(v, n), jump(u, n))*dS \
- dot(v*n, grad(u))*ds \
- dot(grad(v), u*n)*ds \
+ gamma/h*v*u*ds
L = v*f*dx + gamma/h*u0*v*ds - inner(grad(v),n)*u0*ds
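    # Annotation (not in the original script): the forms above are the
    # symmetric interior penalty (SIPG) discretization of -div(grad u) = f;
    # the jump/average terms enforce weak continuity across interior facets,
    # while the alpha/h_avg and gamma/h terms penalize interior jumps and the
    # Dirichlet mismatch u - u0 on the boundary, respectively.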
AA,bb = assemble_system(a,L)
    print 'time to create linear system', toc(), '\n\n'
# Compute solution
u = Function(V)
tic()
set_log_level(PROGRESS)
solver = KrylovSolver("cg","hypre_amg")
solver.parameters["relative_tolerance"] = 1e-6
solver.parameters["absolute_tolerance"] = 1e-6
solver.solve(AA,u.vector(),bb)
set_log_level(PROGRESS)
print 'time to solve linear system', toc(),'\n\n'
# solve(a == L,u,bcs)
ue = Expression('x[0]*x[1]*(x[1]-1)*(x[0]-1) + x[0]*x[1]')
# ue = Expression('x[0]*x[1]*x[2]*(x[1]-1)*(x[2]-1)*(x[0]-1)')
    erru = ue - Function(V, u)
    err[xx-1] = errornorm(ue, Function(V, u), norm_type="L2", degree_rise=3, mesh=mesh)
    errh1[xx-1] = errornorm(ue, Function(V, u), norm_type="H1", degree_rise=3, mesh=mesh)
print 'L2',err[xx-1]
print 'H1',errh1[xx-1]
# print sqrt(assemble(dolfin.dot(grad(erru),grad(erru))*dx))
# Plot solution
# plot(u, interactive=True)
plt.loglog(N,err)
plt.title('Error plot for DG2 elements - L2 convergence = %f' % np.log2(np.average((err[0:m-2]/err[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
plt.figure()
plt.loglog(N, errh1)
plt.title('Error plot for DG2 elements - H1 convergence = %f' % np.log2(np.average((errh1[0:m-2]/errh1[1:m-1]))))
plt.xlabel('N')
plt.ylabel('H1 error')
plt.show() |
reinbach/django-machina | refs/heads/master | machina/apps/forum_search/app.py | 2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from haystack.views import search_view_factory
from machina.core.app import Application
from machina.core.loading import get_class
class SearchApp(Application):
name = 'forum_search'
search_view = get_class('forum_search.views', 'FacetedSearchView')
search_form = get_class('forum_search.forms', 'SearchForm')
def get_urls(self):
return [
url(r'^$', search_view_factory(
view_class=self.search_view,
form_class=self.search_form),
name='search'),
]
application = SearchApp()
|
raphaelrieuhelft/KPHP | refs/heads/master | src/scripts/remove-trace.py | 4 | #!/usr/bin/python
import re
import sys
import os
import shutil
# arg 1: folder
folder = sys.argv[1]
# arg 2: extension for backup
backup_extension = sys.argv[2]
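# Example invocation (hypothetical paths): `remove-trace.py semantics bak`
# backs up every semantics/*.k file as *.k.bak and then strips all lines
# containing the literal string "<trace>" from the originals.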
# list of files to be processed.
files = os.listdir(folder)
# iterate throught all the files in the directory
for file in files:
# we ignore directories for now
if os.path.isdir(file) or (not file.endswith(".k")):
continue
# make full path names for both original and backup files
filename = folder + "/" + file
backup_filename = folder + "/" + file + '.' + backup_extension
# make a backup copy of the present file
shutil.copy(filename, backup_filename)
# open the file
f = open(filename)
# initialise result
result = ""
# take all lines from original file not containing <trace>
for line in f:
        if "<trace>" not in line:
result = result + line
    # close the read handle, then rewrite the original file
    f.close()
    f = open(filename, 'w')
f.write(result)
# and finally close the file
f.close() |
antar2801/namebench | refs/heads/master | nb_third_party/dns/rdtypes/ANY/HINFO.py | 248 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
class HINFO(dns.rdata.Rdata):
"""HINFO record
@ivar cpu: the CPU type
@type cpu: string
@ivar os: the OS type
@type os: string
@see: RFC 1035"""
__slots__ = ['cpu', 'os']
def __init__(self, rdclass, rdtype, cpu, os):
super(HINFO, self).__init__(rdclass, rdtype)
self.cpu = cpu
self.os = os
def to_text(self, origin=None, relativize=True, **kw):
return '"%s" "%s"' % (dns.rdata._escapify(self.cpu),
dns.rdata._escapify(self.os))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
cpu = tok.get_string()
os = tok.get_string()
tok.get_eol()
return cls(rdclass, rdtype, cpu, os)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.cpu)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.cpu)
l = len(self.os)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.os)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
cpu = wire[current : current + l]
current += l
rdlen -= l
l = ord(wire[current])
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
os = wire[current : current + l]
return cls(rdclass, rdtype, cpu, os)
from_wire = classmethod(from_wire)
def _cmp(self, other):
v = cmp(self.cpu, other.cpu)
if v == 0:
v = cmp(self.os, other.os)
return v
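# Illustration (annotation, not part of dnspython): an HINFO rdata whose text
# form is '"PDP-11" "UNIX"' is emitted by to_wire() above as two
# length-prefixed character-strings, i.e. '\x06PDP-11\x04UNIX'.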
|
mrquim/mrquimrepo | refs/heads/master | script.module.youtube.dl/lib/youtube_dl/extractor/audimedia.py | 67 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
sanitized_Request,
)
class AudiMediaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?audi-mediacenter\.com/(?:en|de)/audimediatv/(?P<id>[^/?#]+)'
_TEST = {
'url': 'https://www.audi-mediacenter.com/en/audimediatv/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test-1467',
'md5': '79a8b71c46d49042609795ab59779b66',
'info_dict': {
'id': '1565',
'ext': 'mp4',
'title': '60 Seconds of Audi Sport 104/2015 - WEC Bahrain, Rookie Test',
'description': 'md5:60e5d30a78ced725f7b8d34370762941',
'upload_date': '20151124',
'timestamp': 1448354940,
'duration': 74022,
'view_count': int,
}
}
# extracted from https://audimedia.tv/assets/embed/embedded-player.js (dataSourceAuthToken)
_AUTH_TOKEN = 'e25b42847dba18c6c8816d5d8ce94c326e06823ebf0859ed164b3ba169be97f2'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
raw_payload = self._search_regex([
r'class="amtv-embed"[^>]+id="([^"]+)"',
r'class=\\"amtv-embed\\"[^>]+id=\\"([^"]+)\\"',
], webpage, 'raw payload')
_, stage_mode, video_id, lang = raw_payload.split('-')
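        # Annotation: raw_payload is assumed to look like
        # '<prefix>-<stage_mode>-<video_id>-<lang>' (for example
        # 'video-p-1565-en'; the concrete values are illustrative only).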
# TODO: handle s and e stage_mode (live streams and ended live streams)
if stage_mode not in ('s', 'e'):
request = sanitized_Request(
'https://audimedia.tv/api/video/v1/videos/%s?embed[]=video_versions&embed[]=thumbnail_image&where[content_language_iso]=%s' % (video_id, lang),
headers={'X-Auth-Token': self._AUTH_TOKEN})
json_data = self._download_json(request, video_id)['results']
formats = []
stream_url_hls = json_data.get('stream_url_hls')
if stream_url_hls:
formats.extend(self._extract_m3u8_formats(
stream_url_hls, video_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
stream_url_hds = json_data.get('stream_url_hds')
if stream_url_hds:
formats.extend(self._extract_f4m_formats(
stream_url_hds + '?hdcore=3.4.0',
video_id, f4m_id='hds', fatal=False))
for video_version in json_data.get('video_versions'):
video_version_url = video_version.get('download_url') or video_version.get('stream_url')
if not video_version_url:
continue
f = {
'url': video_version_url,
'width': int_or_none(video_version.get('width')),
'height': int_or_none(video_version.get('height')),
'abr': int_or_none(video_version.get('audio_bitrate')),
'vbr': int_or_none(video_version.get('video_bitrate')),
}
bitrate = self._search_regex(r'(\d+)k', video_version_url, 'bitrate', default=None)
if bitrate:
f.update({
'format_id': 'http-%s' % bitrate,
})
formats.append(f)
self._sort_formats(formats)
return {
'id': video_id,
'title': json_data['title'],
'description': json_data.get('subtitle'),
'thumbnail': json_data.get('thumbnail_image', {}).get('file'),
'timestamp': parse_iso8601(json_data.get('publication_date')),
'duration': int_or_none(json_data.get('duration')),
'view_count': int_or_none(json_data.get('view_count')),
'formats': formats,
}
|
mosbasik/buzhug | refs/heads/master | javasrc/lib/Jython/Lib/encodings/cp1026.py | 593 | """ Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1026',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
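# Usage sketch (illustrative only, assuming the codec has been registered
# under the name 'cp1026'): u'\u011e'.encode('cp1026') yields '\x5a', and
# '\x5a'.decode('cp1026') maps back to U+011E via the tables below.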
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'{' # 0x48 -> LEFT CURLY BRACKET
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'[' # 0x68 -> LEFT SQUARE BRACKET
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
u':' # 0x7A -> COLON
u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'}' # 0x8C -> RIGHT CURLY BRACKET
u'`' # 0x8D -> GRAVE ACCENT
u'\xa6' # 0x8E -> BROKEN BAR
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u']' # 0xAC -> RIGHT SQUARE BRACKET
u'$' # 0xAD -> DOLLAR SIGN
u'@' # 0xAE -> COMMERCIAL AT
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'~' # 0xCC -> TILDE
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\\' # 0xDC -> REVERSE SOLIDUS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'#' # 0xEC -> NUMBER SIGN
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'"' # 0xFC -> QUOTATION MARK
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
proxysh/Safejumper-for-Mac | refs/heads/master | buildmac/Resources/env/lib/python2.7/site-packages/wheel/test/test_keys.py | 565 | import tempfile
import os.path
import unittest
import json
from wheel.signatures import keys
wheel_json = """
{
"verifiers": [
{
"scope": "+",
"vk": "bp-bjK2fFgtA-8DhKKAAPm9-eAZcX_u03oBv2RlKOBc"
},
{
"scope": "+",
"vk": "KAHZBfyqFW3OcFDbLSG4nPCjXxUPy72phP9I4Rn9MAo"
},
{
"scope": "+",
"vk": "tmAYCrSfj8gtJ10v3VkvW7jOndKmQIYE12hgnFu3cvk"
}
],
"signers": [
{
"scope": "+",
"vk": "tmAYCrSfj8gtJ10v3VkvW7jOndKmQIYE12hgnFu3cvk"
},
{
"scope": "+",
"vk": "KAHZBfyqFW3OcFDbLSG4nPCjXxUPy72phP9I4Rn9MAo"
}
],
"schema": 1
}
"""
class TestWheelKeys(unittest.TestCase):
def setUp(self):
self.config = tempfile.NamedTemporaryFile(suffix='.json')
self.config.close()
self.config_path, self.config_filename = os.path.split(self.config.name)
def load(*args):
return [self.config_path]
def save(*args):
return self.config_path
keys.load_config_paths = load
keys.save_config_path = save
self.wk = keys.WheelKeys()
self.wk.CONFIG_NAME = self.config_filename
def tearDown(self):
os.unlink(self.config.name)
def test_load_save(self):
self.wk.data = json.loads(wheel_json)
self.wk.add_signer('+', '67890')
self.wk.add_signer('scope', 'abcdefg')
self.wk.trust('epocs', 'gfedcba')
self.wk.trust('+', '12345')
self.wk.save()
del self.wk.data
self.wk.load()
signers = self.wk.signers('scope')
self.assertTrue(signers[0] == ('scope', 'abcdefg'), self.wk.data['signers'])
self.assertTrue(signers[1][0] == '+', self.wk.data['signers'])
trusted = self.wk.trusted('epocs')
self.assertTrue(trusted[0] == ('epocs', 'gfedcba'))
self.assertTrue(trusted[1][0] == '+')
self.wk.untrust('epocs', 'gfedcba')
trusted = self.wk.trusted('epocs')
self.assertTrue(('epocs', 'gfedcba') not in trusted)
def test_load_save_incomplete(self):
self.wk.data = json.loads(wheel_json)
del self.wk.data['signers']
self.wk.data['schema'] = self.wk.SCHEMA+1
self.wk.save()
try:
self.wk.load()
except ValueError:
pass
else:
raise Exception("Expected ValueError")
del self.wk.data['schema']
self.wk.save()
self.wk.load()
|
agry/NGECore2 | refs/heads/master | scripts/object/tangible/quest/gravestone_shmi.py | 85615 | import sys
def setup(core, object):
return |
IKholopov/HackUPC2017 | refs/heads/master | hackupc/env/lib/python3.5/site-packages/wheel/wininst2wheel.py | 93 | #!/usr/bin/env python
import os.path
import re
import sys
import tempfile
import zipfile
import wheel.bdist_wheel
import distutils.dist
from distutils.archive_util import make_archive
from shutil import rmtree
from wheel.archive import archive_wheelfile
from argparse import ArgumentParser
from glob import iglob
egg_info_re = re.compile(r'''(^|/)(?P<name>[^/]+?)-(?P<ver>.+?)
(-(?P<pyver>.+?))?(-(?P<arch>.+?))?.egg-info(/|$)''', re.VERBOSE)
def parse_info(wininfo_name, egginfo_name):
"""Extract metadata from filenames.
Extracts the 4 metadataitems needed (name, version, pyversion, arch) from
the installer filename and the name of the egg-info directory embedded in
the zipfile (if any).
The egginfo filename has the format::
name-ver(-pyver)(-arch).egg-info
The installer filename has the format::
name-ver.arch(-pyver).exe
Some things to note:
1. The installer filename is not definitive. An installer can be renamed
and work perfectly well as an installer. So more reliable data should
be used whenever possible.
2. The egg-info data should be preferred for the name and version, because
these come straight from the distutils metadata, and are mandatory.
3. The pyver from the egg-info data should be ignored, as it is
constructed from the version of Python used to build the installer,
which is irrelevant - the installer filename is correct here (even to
the point that when it's not there, any version is implied).
4. The architecture must be taken from the installer filename, as it is
not included in the egg-info data.
5. Architecture-neutral installers still have an architecture because the
installer format itself (being executable) is architecture-specific. We
should therefore ignore the architecture if the content is pure-python.
"""
egginfo = None
if egginfo_name:
egginfo = egg_info_re.search(egginfo_name)
if not egginfo:
raise ValueError("Egg info filename %s is not valid" %
(egginfo_name,))
# Parse the wininst filename
# 1. Distribution name (up to the first '-')
w_name, sep, rest = wininfo_name.partition('-')
if not sep:
raise ValueError("Installer filename %s is not valid" %
(wininfo_name,))
# Strip '.exe'
rest = rest[:-4]
# 2. Python version (from the last '-', must start with 'py')
rest2, sep, w_pyver = rest.rpartition('-')
if sep and w_pyver.startswith('py'):
rest = rest2
w_pyver = w_pyver.replace('.', '')
else:
# Not version specific - use py2.py3. While it is possible that
# pure-Python code is not compatible with both Python 2 and 3, there
# is no way of knowing from the wininst format, so we assume the best
# here (the user can always manually rename the wheel to be more
# restrictive if needed).
w_pyver = 'py2.py3'
# 3. Version and architecture
w_ver, sep, w_arch = rest.rpartition('.')
if not sep:
raise ValueError("Installer filename %s is not valid" %
(wininfo_name,))
if egginfo:
w_name = egginfo.group('name')
w_ver = egginfo.group('ver')
return dict(name=w_name, ver=w_ver, arch=w_arch, pyver=w_pyver)
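# Worked example (hypothetical filenames, purely to illustrate the rules in
# the docstring above):
#   parse_info('foo-1.0.win32-py2.7.exe', 'foo-1.0.egg-info')
# returns {'name': 'foo', 'ver': '1.0', 'arch': 'win32', 'pyver': 'py27'},
# taking name/version from the egg-info and arch/pyver from the installer.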
def bdist_wininst2wheel(path, dest_dir=os.path.curdir):
bdw = zipfile.ZipFile(path)
# Search for egg-info in the archive
egginfo_name = None
for filename in bdw.namelist():
if '.egg-info' in filename:
egginfo_name = filename
break
info = parse_info(os.path.basename(path), egginfo_name)
root_is_purelib = True
for zipinfo in bdw.infolist():
if zipinfo.filename.startswith('PLATLIB'):
root_is_purelib = False
break
if root_is_purelib:
paths = {'purelib': ''}
else:
paths = {'platlib': ''}
dist_info = "%(name)s-%(ver)s" % info
datadir = "%s.data/" % dist_info
# rewrite paths to trick ZipFile into extracting an egg
# XXX grab wininst .ini - between .exe, padding, and first zip file.
members = []
egginfo_name = ''
for zipinfo in bdw.infolist():
key, basename = zipinfo.filename.split('/', 1)
key = key.lower()
basepath = paths.get(key, None)
if basepath is None:
basepath = datadir + key.lower() + '/'
oldname = zipinfo.filename
newname = basepath + basename
zipinfo.filename = newname
del bdw.NameToInfo[oldname]
bdw.NameToInfo[newname] = zipinfo
        # Collect member names, but omit '' (from an entry like "PLATLIB/")
if newname:
members.append(newname)
# Remember egg-info name for the egg2dist call below
if not egginfo_name:
if newname.endswith('.egg-info'):
egginfo_name = newname
elif '.egg-info/' in newname:
egginfo_name, sep, _ = newname.rpartition('/')
dir = tempfile.mkdtemp(suffix="_b2w")
bdw.extractall(dir, members)
# egg2wheel
abi = 'none'
pyver = info['pyver']
arch = (info['arch'] or 'any').replace('.', '_').replace('-', '_')
# Wininst installers always have arch even if they are not
# architecture-specific (because the format itself is).
# So, assume the content is architecture-neutral if root is purelib.
if root_is_purelib:
arch = 'any'
# If the installer is architecture-specific, it's almost certainly also
# CPython-specific.
if arch != 'any':
pyver = pyver.replace('py', 'cp')
wheel_name = '-'.join((
dist_info,
pyver,
abi,
arch
))
if root_is_purelib:
bw = wheel.bdist_wheel.bdist_wheel(distutils.dist.Distribution())
else:
bw = _bdist_wheel_tag(distutils.dist.Distribution())
bw.root_is_pure = root_is_purelib
bw.python_tag = pyver
bw.plat_name_supplied = True
bw.plat_name = info['arch'] or 'any'
if not root_is_purelib:
bw.full_tag_supplied = True
bw.full_tag = (pyver, abi, arch)
dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info)
bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir)
bw.write_wheelfile(dist_info_dir, generator='wininst2wheel')
bw.write_record(dir, dist_info_dir)
archive_wheelfile(os.path.join(dest_dir, wheel_name), dir)
rmtree(dir)
class _bdist_wheel_tag(wheel.bdist_wheel.bdist_wheel):
# allow the client to override the default generated wheel tag
# The default bdist_wheel implementation uses python and abi tags
# of the running python process. This is not suitable for
# generating/repackaging prebuild binaries.
full_tag_supplied = False
full_tag = None # None or a (pytag, soabitag, plattag) triple
def get_tag(self):
if self.full_tag_supplied and self.full_tag is not None:
return self.full_tag
else:
return super(_bdist_wheel_tag, self).get_tag()
def main():
parser = ArgumentParser()
parser.add_argument('installers', nargs='*', help="Installers to convert")
parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
help="Directory to store wheels (default %(default)s)")
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
for pat in args.installers:
for installer in iglob(pat):
if args.verbose:
sys.stdout.write("{0}... ".format(installer))
bdist_wininst2wheel(installer, args.dest_dir)
if args.verbose:
sys.stdout.write("OK\n")
if __name__ == "__main__":
main()
|
tobikausk/nest-simulator | refs/heads/master | doc/copyright_header.py | 52 | # -*- coding: utf-8 -*-
#
# {{file_name}}
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
|
brianwoo/django-tutorial | refs/heads/master | ENV/lib/python2.7/site-packages/django/core/management/base.py | 19 | # -*- coding: utf-8 -*-
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
from __future__ import unicode_literals
import os
import sys
import warnings
from argparse import ArgumentParser
from optparse import OptionParser
import django
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style, no_style
from django.db import connections
from django.utils.deprecation import (
RemovedInDjango19Warning, RemovedInDjango110Warning,
)
from django.utils.encoding import force_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
Customized ArgumentParser class to improve some error messages and prevent
SystemExit in several occasions, as SystemExit is unacceptable when a
command is called programmatically.
"""
def __init__(self, cmd, **kwargs):
self.cmd = cmd
super(CommandParser, self).__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (hasattr(self.cmd, 'missing_args_message') and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.cmd.missing_args_message)
return super(CommandParser, self).parse_args(args, namespace)
def error(self, message):
if self.cmd._called_from_command_line:
super(CommandParser, self).error(message)
else:
raise CommandError("Error: %s" % message)
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and hasattr(self._out, 'isatty') and self._out.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(force_str(style_func(msg)))
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<app_label
app_label ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
Deprecated and will be removed in Django 1.10.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_system_checks``
A boolean; if ``True``, entire Django project will be checked for errors
prior to executing the command. Default value is ``True``.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``requires_model_validation``
DEPRECATED - This value will only be used if requires_system_checks
has not been provided. Defining both ``requires_system_checks`` and
``requires_model_validation`` will result in an error.
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app_config)`` from ``handle()``, where ``app_config``
is the application's configuration provided by the app registry.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of translations
being deactivated.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command if it creates database content
that is locale-sensitive and such content shouldn't contain any
translations (like it happens e.g. with django.contrib.auth
permissions) as activating any locale might cause unintended effects.
This option can't be False when the can_import_settings option is set
to False too because attempting to deactivate translations needs access
to settings. This condition will generate a CommandError.
"""
# Metadata about this command.
option_list = ()
help = ''
args = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
can_import_settings = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
# Uncomment the following line of code after deprecation plan for
# requires_model_validation comes to completion:
#
# requires_system_checks = True
def __init__(self, stdout=None, stderr=None, no_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color:
self.style = no_style()
else:
self.style = color_style()
self.stderr.style_func = self.style.ERROR
# `requires_model_validation` is deprecated in favor of
# `requires_system_checks`. If both options are present, an error is
# raised. Otherwise the present option is used. If none of them is
# defined, the default value (True) is used.
has_old_option = hasattr(self, 'requires_model_validation')
has_new_option = hasattr(self, 'requires_system_checks')
if has_old_option:
warnings.warn(
'"requires_model_validation" is deprecated '
'in favor of "requires_system_checks".',
RemovedInDjango19Warning)
if has_old_option and has_new_option:
raise ImproperlyConfigured(
'Command %s defines both "requires_model_validation" '
'and "requires_system_checks", which is illegal. Use only '
'"requires_system_checks".' % self.__class__.__name__)
self.requires_system_checks = (
self.requires_system_checks if has_new_option else
self.requires_model_validation if has_old_option else
True)
@property
def use_argparse(self):
return not bool(self.option_list)
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
if not self.use_argparse:
def store_as_int(option, opt_str, value, parser):
setattr(parser.values, option.dest, int(value))
# Backwards compatibility: use deprecated optparse module
warnings.warn("OptionParser usage for Django management commands "
"is deprecated, use ArgumentParser instead",
RemovedInDjango110Warning)
parser = OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version())
parser.add_option('-v', '--verbosity', action='callback', dest='verbosity', default=1,
type='choice', choices=['0', '1', '2', '3'], callback=store_as_int,
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_option('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
            parser.add_option('--pythonpath',
                help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_option('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_option('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
for opt in self.option_list:
parser.add_option(opt)
else:
parser = CommandParser(self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument('-v', '--verbosity', action='store', dest='verbosity', default='1',
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_argument('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_argument('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_argument('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
if self.args:
# Keep compatibility and always accept positional arguments, like optparse when args is set
parser.add_argument('args', nargs='*')
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
if self.use_argparse:
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
else:
options, args = parser.parse_args(argv[2:])
cmd_options = vars(options)
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
finally:
connections.close_all()
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by attributes ``self.requires_system_checks`` and
``self.requires_model_validation``, except if force-skipped).
"""
if options.get('no_color'):
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options.get('stderr'), self.stderr.style_func)
saved_locale = None
if not self.leave_locale_alone:
# Only mess with locales if we can assume we have a working
# settings file, because django.utils.translation requires settings
# (The final saying about whether the i18n machinery is active will be
# found in the value of the USE_I18N setting)
if not self.can_import_settings:
raise CommandError("Incompatible values of 'leave_locale_alone' "
"(%s) and 'can_import_settings' (%s) command "
"options." % (self.leave_locale_alone,
self.can_import_settings))
# Deactivate translations, because django-admin creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
if (self.requires_system_checks and
not options.get('skip_validation') and # Remove at the end of deprecation for `skip_validation`.
not options.get('skip_checks')):
self.check()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()))
finally:
if saved_locale is not None:
translation.activate(saved_locale)
def validate(self, app_config=None, display_num_errors=False):
""" Deprecated. Delegates to ``check``."""
if app_config is None:
app_configs = None
else:
app_configs = [app_config]
return self.check(app_configs=app_configs, display_num_errors=display_num_errors)
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False):
"""
Uses the system check framework to validate entire Django project.
Raises CommandError for any serious message (error or critical errors).
If there are only light messages (like warnings), they are printed to
stderr and no exception is raised.
"""
all_issues = checks.run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(force_str(e))
if e.is_serious()
else self.style.WARNING(force_str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious() and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
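# A minimal subclassing sketch (not part of Django; the command and its
# behaviour are hypothetical) showing the flow described in the BaseCommand
# docstring:
#
#     class GreetCommand(BaseCommand):
#         help = "Greets the given usernames"
#
#         def add_arguments(self, parser):
#             parser.add_argument('args', metavar='username', nargs='+')
#
#         def handle(self, *usernames, **options):
#             for username in usernames:
#                 self.stdout.write("Hello, %s" % username)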
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+',
help='One or more application label.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
try:
# During the deprecation path, keep delegating to handle_app if
# handle_app_config isn't implemented in a subclass.
handle_app = self.handle_app
except AttributeError:
# Keep only this exception when the deprecation completes.
raise NotImplementedError(
"Subclasses of AppCommand must provide"
"a handle_app_config() method.")
else:
warnings.warn(
"AppCommand.handle_app() is superseded by "
"AppCommand.handle_app_config().",
RemovedInDjango19Warning, stacklevel=2)
if app_config.models_module is None:
raise CommandError(
"AppCommand cannot handle app '%s' in legacy mode "
"because it doesn't have a models module."
% app_config.label)
return handle_app(app_config.models_module, **options)
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def __init__(self):
warnings.warn(
"NoArgsCommand class is deprecated and will be removed in Django 1.10. "
"Use BaseCommand instead, which takes no arguments by default.",
RemovedInDjango110Warning
)
super(NoArgsCommand, self).__init__()
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError('subclasses of NoArgsCommand must provide a handle_noargs() method')
|
neerja28/Tempest | refs/heads/master | tempest/api/orchestration/stacks/test_volumes.py | 11 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.orchestration import base
from tempest import config
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class CinderResourcesTest(base.BaseOrchestrationTest):
@classmethod
def skip_checks(cls):
super(CinderResourcesTest, cls).skip_checks()
if not CONF.service_available.cinder:
raise cls.skipException('Cinder support is required')
def _cinder_verify(self, volume_id, template):
self.assertIsNotNone(volume_id)
volume = self.volumes_client.show_volume(volume_id)
self.assertEqual('available', volume.get('status'))
self.assertEqual(template['resources']['volume']['properties'][
'size'], volume.get('size'))
self.assertEqual(template['resources']['volume']['properties'][
'description'], volume.get('display_description'))
self.assertEqual(template['resources']['volume']['properties'][
'name'], volume.get('display_name'))
def _outputs_verify(self, stack_identifier, template):
self.assertEqual('available',
self.get_stack_output(stack_identifier, 'status'))
self.assertEqual(str(template['resources']['volume']['properties'][
'size']), self.get_stack_output(stack_identifier, 'size'))
self.assertEqual(template['resources']['volume']['properties'][
'description'], self.get_stack_output(stack_identifier,
'display_description'))
self.assertEqual(template['resources']['volume']['properties'][
'name'], self.get_stack_output(stack_identifier, 'display_name'))
@test.idempotent_id('c3243329-7bdd-4730-b402-4d19d50c41d8')
@test.services('volume')
def test_cinder_volume_create_delete(self):
"""Create and delete a volume via OS::Cinder::Volume."""
stack_name = data_utils.rand_name('heat')
template = self.read_template('cinder_basic')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
cinder_basic_template = self.load_template('cinder_basic')
self._cinder_verify(volume_id, cinder_basic_template)
# Verify the stack outputs are as expected
self._outputs_verify(stack_identifier, cinder_basic_template)
# Delete the stack and ensure the volume is gone
self.client.delete_stack(stack_identifier)
self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
self.assertRaises(lib_exc.NotFound,
self.volumes_client.show_volume,
volume_id)
def _cleanup_volume(self, volume_id):
"""Cleanup the volume direct with cinder."""
self.volumes_client.delete_volume(volume_id)
self.volumes_client.wait_for_resource_deletion(volume_id)
@test.idempotent_id('ea8b3a46-b932-4c18-907a-fe23f00b33f8')
@test.services('volume')
def test_cinder_volume_create_delete_retain(self):
"""Ensure the 'Retain' deletion policy is respected."""
stack_name = data_utils.rand_name('heat')
template = self.read_template('cinder_basic_delete_retain')
stack_identifier = self.create_stack(stack_name, template)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# Verify with cinder that the volume exists, with matching details
volume_id = self.get_stack_output(stack_identifier, 'volume_id')
self.addCleanup(self._cleanup_volume, volume_id)
retain_template = self.load_template('cinder_basic_delete_retain')
self._cinder_verify(volume_id, retain_template)
# Verify the stack outputs are as expected
self._outputs_verify(stack_identifier, retain_template)
# Delete the stack and ensure the volume is *not* gone
self.client.delete_stack(stack_identifier)
self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
self._cinder_verify(volume_id, retain_template)
# Volume cleanup happens via addCleanup calling _cleanup_volume
|
JY-Zhou/FreePSI | refs/heads/master | scripts/computePsiFromFreePSI.py | 1 | import json
import math
import numpy as np
import scipy.spatial.distance as dis
import scipy.cluster.hierarchy as hac
import scipy.stats as stats
import sys
inFile = open(sys.argv[1], 'r')
outFile = open(sys.argv[2], 'w')
psi = json.load(inFile)
newPsi = [[0.0 for e in range(len(psi[g]))] for g in range(len(psi))]
klist = []
ptplist = []
sdlist = []
skewlist = []
dlist = []
# Hierarchical clustering
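# Overview (annotation): for each gene, exon PSI values are clustered
# bottom-up (average linkage on pairwise absolute differences); the tree is
# cut at the smallest k whose clusters are tight enough (max std < 0.06 or
# mean std < 0.05), each exon's PSI is replaced by its cluster mean, and the
# result is rescaled by the gene-wise maximum (or damped by a factor of 5
# when that maximum is at most 0.05).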
for g in range(len(psi)):
b = psi[g]
newPsi[g] = b
if len(b) > 1:
maxb = max(b)
a = np.array([b]).T
b_dist = dis.pdist(a)
b_link = hac.linkage(b_dist, method='average')
for k in range(1, len(a)+1):
clust = hac.cut_tree(b_link, k)
Mu = []
SD = []
Skew = []
PTP = []
Dist = []
for i in range(k):
data = a[clust == i, ]
Mu.append(np.mean(data))
SD.append(np.std(data))
Skew.append(stats.skew(data))
PTP.append(np.ptp(data))
dist = 0
if len(data) > 1:
for p in range(len(data)):
for q in range(p+1, len(data)):
dist += abs(data[p] - data[q])
dist /= (len(data) * (len(data) - 1) / 2)
Dist.append(dist)
if np.max(SD) < 0.06 or np.mean(SD) < 0.05:
klist.append(k)
sdlist.append(np.max(SD))
skewlist.append(np.max(Skew))
ptplist.append(np.max(PTP))
dlist.append(np.max(Dist))
for e in range(len(a)):
newPsi[g][e] = Mu[clust[e, 0]]
maxv = max(newPsi[g])
if maxv > 0.05:
for e in range(len(a)):
newPsi[g][e] /= maxv
else:
for e in range(len(a)):
newPsi[g][e] /= 5.0
break
json.dump(newPsi, outFile, indent = 4)
print("AVG cluster number = " + str(np.mean(klist)))
print("max max sd = " + str(np.max(sdlist)))
print("max max skew = " + str(np.max(skewlist)))
print("max max range = " + str(np.max(ptplist)))
print("max max dist = " + str(np.max(dlist)))
|
mixman/djangodev | refs/heads/master | tests/modeltests/proxy_models/tests.py | 1 | from __future__ import absolute_import
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.core.exceptions import FieldError
from django.db import models, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.test import TestCase
from .models import (MyPerson, Person, StatusPerson, LowerStatusPerson,
MyPersonProxy, Abstract, OtherPerson, User, UserProxy, UserProxyProxy,
Country, State, StateProxy, TrackerUser, BaseUser, Bug, ProxyTrackerUser,
Improvement, ProxyProxyBug, ProxyBug, ProxyImprovement)
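# For reference, the proxies exercised below are declared by subclassing a
# concrete model with Meta.proxy = True. An illustrative sketch of what
# .models presumably contains (the real definitions may differ):
#
#     class MyPerson(Person):
#         class Meta:
#             proxy = True
#
#         def has_special_name(self):
#             return self.name.lower() == "special"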
class ProxyModelTests(TestCase):
def test_same_manager_queries(self):
"""
The MyPerson model should be generating the same database queries as
the Person model (when the same manager is used in each case).
"""
my_person_sql = MyPerson.other.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
person_sql = Person.objects.order_by("name").query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertEqual(my_person_sql, person_sql)
def test_inheritance_new_table(self):
"""
The StatusPerson model should have its own table (it's using ORM-level
inheritance).
"""
sp_sql = StatusPerson.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
p_sql = Person.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertNotEqual(sp_sql, p_sql)
def test_basic_proxy(self):
"""
Creating a Person makes them accessible through the MyPerson proxy.
"""
person = Person.objects.create(name="Foo McBar")
self.assertEqual(len(Person.objects.all()), 1)
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
def test_no_proxy(self):
"""
Person is not proxied by StatusPerson subclass.
"""
Person.objects.create(name="Foo McBar")
self.assertEqual(list(StatusPerson.objects.all()), [])
def test_basic_proxy_reverse(self):
"""
A new MyPerson also shows up as a standard Person.
"""
MyPerson.objects.create(name="Bazza del Frob")
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(len(Person.objects.all()), 1)
LowerStatusPerson.objects.create(status="low", name="homer")
lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
self.assertEqual(lsps, ["homer"])
def test_correct_type_proxy_of_proxy(self):
"""
Correct type when querying a proxy of proxy
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
pp = sorted([mpp.name for mpp in MyPersonProxy.objects.all()])
self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
def test_proxy_included_in_ancestors(self):
"""
Proxy models are included in the ancestors for a model's DoesNotExist
and MultipleObjectsReturned
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(Person.DoesNotExist,
MyPersonProxy.objects.get,
name='Zathras'
)
self.assertRaises(Person.MultipleObjectsReturned,
MyPersonProxy.objects.get,
id__lt=max_id+1
)
self.assertRaises(Person.DoesNotExist,
StatusPerson.objects.get,
name='Zathras'
)
sp1 = StatusPerson.objects.create(name='Bazza Jr.')
sp2 = StatusPerson.objects.create(name='Foo Jr.')
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
self.assertRaises(Person.MultipleObjectsReturned,
StatusPerson.objects.get,
id__lt=max_id+1
)
def test_abc(self):
"""
All base classes must be non-abstract
"""
def build_abc():
class NoAbstract(Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_abc)
def test_no_cbc(self):
"""
The proxy must actually have one concrete base class
"""
def build_no_cbc():
class TooManyBases(Person, Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_cbc)
def test_no_base_classes(self):
def build_no_base_classes():
class NoBaseClasses(models.Model):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_base_classes)
def test_new_fields(self):
def build_new_fields():
class NoNewFields(Person):
newfield = models.BooleanField()
class Meta:
proxy = True
self.assertRaises(FieldError, build_new_fields)
def test_myperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in MyPerson.objects.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in MyPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'fred'])
def test_otherperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in OtherPerson.objects.all()]
self.assertEqual(resp, ['barney', 'wilma'])
resp = [p.name for p in OtherPerson.excluder.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in OtherPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'wilma'])
def test_proxy_model_signals(self):
"""
Test save signals for proxy models
"""
output = []
def make_handler(model, event):
def _handler(*args, **kwargs):
output.append('%s %s save' % (model, event))
return _handler
h1 = make_handler('MyPerson', 'pre')
h2 = make_handler('MyPerson', 'post')
h3 = make_handler('Person', 'pre')
h4 = make_handler('Person', 'post')
signals.pre_save.connect(h1, sender=MyPerson)
signals.post_save.connect(h2, sender=MyPerson)
signals.pre_save.connect(h3, sender=Person)
signals.post_save.connect(h4, sender=Person)
dino = MyPerson.objects.create(name=u"dino")
self.assertEqual(output, [
'MyPerson pre save',
'MyPerson post save'
])
output = []
h5 = make_handler('MyPersonProxy', 'pre')
h6 = make_handler('MyPersonProxy', 'post')
signals.pre_save.connect(h5, sender=MyPersonProxy)
signals.post_save.connect(h6, sender=MyPersonProxy)
dino = MyPersonProxy.objects.create(name=u"pebbles")
self.assertEqual(output, [
'MyPersonProxy pre save',
'MyPersonProxy post save'
])
signals.pre_save.disconnect(h1, sender=MyPerson)
signals.post_save.disconnect(h2, sender=MyPerson)
signals.pre_save.disconnect(h3, sender=Person)
signals.post_save.disconnect(h4, sender=Person)
signals.pre_save.disconnect(h5, sender=MyPersonProxy)
signals.post_save.disconnect(h6, sender=MyPersonProxy)
def test_content_type(self):
ctype = ContentType.objects.get_for_model
self.assertTrue(ctype(Person) is ctype(OtherPerson))
def test_user_userproxy_userproxyproxy(self):
User.objects.create(name='Bruce')
resp = [u.name for u in User.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxyProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_proxy_delete(self):
"""
Proxy objects can be deleted
"""
User.objects.create(name='Bruce')
u2 = UserProxy.objects.create(name='George')
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce', 'George'])
u2.delete()
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_select_related(self):
"""
We can still use `select_related()` to include related models in our
querysets.
"""
country = Country.objects.create(name='Australia')
state = State.objects.create(name='New South Wales', country=country)
resp = [s.name for s in State.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
resp = [s.name for s in StateProxy.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
self.assertEqual(StateProxy.objects.get(name='New South Wales').name,
'New South Wales')
resp = StateProxy.objects.select_related().get(name='New South Wales')
self.assertEqual(resp.name, 'New South Wales')
def test_proxy_bug(self):
contributor = TrackerUser.objects.create(name='Contributor',
status='contrib')
someone = BaseUser.objects.create(name='Someone')
Bug.objects.create(summary='fix this', version='1.1beta',
assignee=contributor, reporter=someone)
pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor',
status='proxy')
Improvement.objects.create(summary='improve that', version='1.1beta',
assignee=contributor, reporter=pcontributor,
associated_bug=ProxyProxyBug.objects.all()[0])
# Related field filter on proxy
resp = ProxyBug.objects.get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Select related + filter on proxy
resp = ProxyBug.objects.select_related().get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Proxy of proxy, select_related + filter
resp = ProxyProxyBug.objects.select_related().get(
version__icontains='beta'
)
self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
# Select related + filter on a related proxy field
resp = ProxyImprovement.objects.select_related().get(
reporter__name__icontains='butor'
)
self.assertEqual(repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
# Select related + filter on a related proxy of proxy field
resp = ProxyImprovement.objects.select_related().get(
associated_bug__summary__icontains='fix'
)
self.assertEqual(repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
def test_proxy_load_from_fixture(self):
management.call_command('loaddata', 'mypeople.json', verbosity=0, commit=False)
p = MyPerson.objects.get(pk=100)
self.assertEqual(p.name, 'Elvis Presley')
|
metabrainz/picard | refs/heads/master | picard/util/xml.py | 3 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2007 Lukáš Lalinský
# Copyright (C) 2009 Carlin Mangar
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018, 2020 Laurent Monin
# Copyright (C) 2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from PyQt5.QtCore import QXmlStreamReader
_node_name_re = re.compile('[^a-zA-Z0-9]')
class XmlNode(object):
def __init__(self):
self.text = ''
self.children = {}
self.attribs = {}
def __repr__(self):
return repr(self.__dict__)
def append_child(self, name, node=None):
if node is None:
node = XmlNode()
self.children.setdefault(name, []).append(node)
return node
def __getattr__(self, name):
try:
return self.children[name]
except KeyError:
try:
return self.attribs[name]
except KeyError:
raise AttributeError(name)
def _node_name(n):
return _node_name_re.sub('_', n)
def parse_xml(response):
stream = QXmlStreamReader(response)
document = XmlNode()
current_node = document
path = []
while not stream.atEnd():
stream.readNext()
if stream.isStartElement():
node = XmlNode()
attrs = stream.attributes()
for i in range(attrs.count()):
attr = attrs.at(i)
node.attribs[_node_name(attr.name())] = attr.value()
current_node.append_child(_node_name(stream.name()), node)
path.append(current_node)
current_node = node
elif stream.isEndElement():
current_node = path.pop()
elif stream.isCharacters():
current_node.text += stream.text()
return document
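# Illustrative usage sketch (not part of the original module): children are
# exposed as lists keyed by sanitized tag name, and attribute lookups fall
# back to the attribs dict via XmlNode.__getattr__.
if __name__ == '__main__':
    doc = parse_xml(b'<metadata><artist id="1">Tool</artist></metadata>')
    artist = doc.metadata[0].artist[0]
    print(artist.text)  # -> Tool
    print(artist.id)    # -> 1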
|
atdt/gerrit | refs/heads/master | contrib/trivial_rebase.py | 7 | #!/usr/bin/env python2.6
# Copyright (c) 2010, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# # Neither the name of Code Aurora Forum, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This script is designed to detect when a patchset uploaded to Gerrit is
# 'identical' (determined via git-patch-id) and reapply reviews onto the new
# patchset from the previous patchset.
# Get usage and help info by running: ./trivial_rebase.py --help
# Documentation is available here: https://www.codeaurora.org/xwiki/bin/QAEP/Gerrit
import json
from optparse import OptionParser
import subprocess
from sys import exit
class CheckCallError(OSError):
"""CheckCall() returned non-0."""
def __init__(self, command, cwd, retcode, stdout, stderr=None):
OSError.__init__(self, command, cwd, retcode, stdout, stderr)
self.command = command
self.cwd = cwd
self.retcode = retcode
self.stdout = stdout
self.stderr = stderr
def CheckCall(command, cwd=None):
"""Like subprocess.check_call() but returns stdout.
Works on python 2.4
"""
try:
process = subprocess.Popen(command, cwd=cwd, stdout=subprocess.PIPE)
std_out, std_err = process.communicate()
except OSError, e:
raise CheckCallError(command, cwd, e.errno, None)
if process.returncode:
raise CheckCallError(command, cwd, process.returncode, std_out, std_err)
return std_out, std_err
def GsqlQuery(sql_query, server, port):
"""Runs a gerrit gsql query and returns the result"""
gsql_cmd = ['ssh', '-p', port, server, 'gerrit', 'gsql', '--format',
'JSON', '-c', sql_query]
try:
(gsql_out, gsql_stderr) = CheckCall(gsql_cmd)
except CheckCallError, e:
print "return code is %s" % e.retcode
print "stdout and stderr is\n%s%s" % (e.stdout, e.stderr)
raise
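# 'gerrit gsql --format JSON' emits a stream of JSON objects; the row objects
# end in '}}', so mark those boundaries and split the output into separately
# parseable JSON documents.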
new_out = gsql_out.replace('}}\n', '}}\nsplit here\n')
return new_out.split('split here\n')
def FindPrevRev(changeId, patchset, server, port):
"""Finds the revision of the previous patch set on the change"""
sql_query = ("\"SELECT revision FROM patch_sets,changes WHERE "
"patch_sets.change_id = changes.change_id AND "
"patch_sets.patch_set_id = %s AND "
"changes.change_key = \'%s\'\"" % ((patchset - 1), changeId))
revisions = GsqlQuery(sql_query, server, port)
json_dict = json.loads(revisions[0], strict=False)
return json_dict["columns"]["revision"]
def GetApprovals(changeId, patchset, server, port):
"""Get all the approvals on a specific patch set
Returns a list of approval dicts"""
sql_query = ("\"SELECT value,account_id,category_id FROM patch_set_approvals "
"WHERE patch_set_id = %s AND change_id = (SELECT change_id FROM "
"changes WHERE change_key = \'%s\') AND value <> 0\""
% ((patchset - 1), changeId))
gsql_out = GsqlQuery(sql_query, server, port)
approvals = []
for json_str in gsql_out:
row = json.loads(json_str, strict=False)
if row["type"] == "row":
approvals.append(row["columns"])
return approvals
def GetEmailFromAcctId(account_id, server, port):
"""Returns the preferred email address associated with the account_id"""
sql_query = ("\"SELECT preferred_email FROM accounts WHERE account_id = %s\""
% account_id)
email_addr = GsqlQuery(sql_query, server, port)
json_dict = json.loads(email_addr[0], strict=False)
return json_dict["columns"]["preferred_email"]
def GetPatchId(revision):
git_show_cmd = ['git', 'show', revision]
patch_id_cmd = ['git', 'patch-id']
patch_id_process = subprocess.Popen(patch_id_cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
git_show_process = subprocess.Popen(git_show_cmd, stdout=subprocess.PIPE)
return patch_id_process.communicate(git_show_process.communicate()[0])[0]
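# Note: 'git patch-id' prints a single line of the form
# "<patch-id> <commit-id>", so callers compare only the first
# whitespace-separated token of this return value.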
def SuExec(server, port, private_key, as_user, cmd):
suexec_cmd = ['ssh', '-l', "Gerrit Code Review", '-p', port, server, '-i',
private_key, 'suexec', '--as', as_user, '--', cmd]
CheckCall(suexec_cmd)
def DiffCommitMessages(commit1, commit2):
log_cmd1 = ['git', 'log', '--pretty=format:"%an %ae%n%s%n%b"',
commit1 + '^!']
commit1_log = CheckCall(log_cmd1)
log_cmd2 = ['git', 'log', '--pretty=format:"%an %ae%n%s%n%b"',
commit2 + '^!']
commit2_log = CheckCall(log_cmd2)
if commit1_log != commit2_log:
return True
return False
def Main():
server = 'localhost'
usage = "usage: %prog <required options> [--server-port=PORT]"
parser = OptionParser(usage=usage)
parser.add_option("--change", dest="changeId", help="Change identifier")
parser.add_option("--project", help="Project path in Gerrit")
parser.add_option("--commit", help="Git commit-ish for this patchset")
parser.add_option("--patchset", type="int", help="The patchset number")
parser.add_option("--private-key-path", dest="private_key_path",
help="Full path to Gerrit SSH daemon's private host key")
parser.add_option("--server-port", dest="port", default='29418',
help="Port to connect to Gerrit's SSH daemon "
"[default: %default]")
(options, args) = parser.parse_args()
if not options.changeId:
parser.print_help()
exit(0)
if options.patchset == 1:
# Nothing to detect on first patchset
exit(0)
prev_revision = FindPrevRev(options.changeId, options.patchset, server,
options.port)
if not prev_revision:
# Couldn't find a previous revision
exit(0)
prev_patch_id = GetPatchId(prev_revision)
cur_patch_id = GetPatchId(options.commit)
if cur_patch_id.split()[0] != prev_patch_id.split()[0]:
# patch-ids don't match
exit(0)
# Patch ids match. This is a trivial rebase.
# In addition to patch-id we should check if the commit message changed. Most
# approvers would want to re-review changes when the commit message changes.
changed = DiffCommitMessages(prev_revision, options.commit)
if changed:
# Insert a comment into the change letting the approvers know only the
# commit message changed
comment_msg = ("\'--message=New patchset patch-id matches previous patchset"
", but commit message has changed.'")
comment_cmd = ['ssh', '-p', options.port, server, 'gerrit', 'approve',
'--project', options.project, comment_msg, options.commit]
CheckCall(comment_cmd)
exit(0)
# Need to get all approvals on prior patch set, then suexec them onto
# this patchset.
approvals = GetApprovals(options.changeId, options.patchset, server,
options.port)
gerrit_approve_msg = ("\'Automatically re-added by Gerrit trivial rebase "
"detection script.\'")
for approval in approvals:
# Note: Sites with different 'copy_min_score' values in the
# approval_categories DB table might want different behavior here.
# Additional categories should also be added if desired.
if approval["category_id"] == "CRVW":
approve_category = '--code-review'
elif approval["category_id"] == "VRIF":
# Don't re-add verifies
#approve_category = '--verified'
continue
elif approval["category_id"] == "SUBM":
# We don't care about previous submit attempts
continue
else:
print "Unsupported category: %s" % approval
exit(0)
score = approval["value"]
gerrit_approve_cmd = ['gerrit', 'approve', '--project', options.project,
'--message', gerrit_approve_msg, approve_category,
score, options.commit]
email_addr = GetEmailFromAcctId(approval["account_id"], server,
options.port)
SuExec(server, options.port, options.private_key_path, email_addr,
' '.join(gerrit_approve_cmd))
exit(0)
if __name__ == "__main__":
Main()
|
amounir86/amounir86-2011-elections | refs/heads/master | voter-info/shapes/dbfUtils.py | 18 | #!/usr/bin/env python
# dbfUtils.py
# By Raymond Hettinger
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362715
import struct, datetime, decimal, itertools
def dbfreader(f):
"""Returns an iterator over records in a Xbase DBF file.
The first row returned contains the field names.
The second row contains field specs: (type, size, decimal places).
Subsequent rows contain the data records.
If a record is marked as deleted, it is skipped.
File should be opened for binary reads.
"""
# See DBF format spec at:
# http://www.pgts.com.au/download/public/xbase.htm#DBF_STRUCT
numrec, lenheader = struct.unpack('<xxxxLH22x', f.read(32))
numfields = (lenheader - 33) // 32
fields = []
for fieldno in xrange(numfields):
name, typ, size, deci = struct.unpack('<11sc4xBB14x', f.read(32))
name = name.replace('\0', '') # eliminate NULs from string
fields.append((name, typ, size, deci))
yield [field[0] for field in fields]
yield [tuple(field[1:]) for field in fields]
terminator = f.read(1)
assert terminator == '\r'
fields.insert(0, ('DeletionFlag', 'C', 1, 0))
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in fields])
fmtsiz = struct.calcsize(fmt)
for i in xrange(numrec):
record = struct.unpack(fmt, f.read(fmtsiz))
if record[0] != ' ':
continue # deleted record
result = []
for (name, typ, size, deci), value in itertools.izip(fields, record):
if name == 'DeletionFlag':
continue
if typ == "N":
value = value.replace('\0', '').lstrip()
if value == '':
value = 0
elif deci:
value = decimal.Decimal(value)
else:
value = int(value)
elif typ == 'D':
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = datetime.date(y, m, d)
elif typ == 'L':
value = (value in 'YyTt' and 'T') or (value in 'NnFf' and 'F') or '?'
result.append(value)
yield result
def dbfwriter(f, fieldnames, fieldspecs, records):
""" Return a string suitable for writing directly to a binary dbf file.
File f should be open for writing in a binary mode.
Fieldnames should be no longer than ten characters and not include \x00.
Fieldspecs are in the form (type, size, deci) where
type is one of:
C for ascii character data
M for ascii character memo data (real memo fields not supported)
D for datetime objects
N for ints or decimal objects
L for logical values 'T', 'F', or '?'
size is the field width
deci is the number of decimal places in the provided decimal object
Records can be an iterable over the records (sequences of field values).
"""
# header info
ver = 3
now = datetime.datetime.now()
yr, mon, day = now.year-1900, now.month, now.day
numrec = len(records)
numfields = len(fieldspecs)
lenheader = numfields * 32 + 33
lenrecord = sum(field[1] for field in fieldspecs) + 1
hdr = struct.pack('<BBBBLHH20x', ver, yr, mon, day, numrec, lenheader, lenrecord)
f.write(hdr)
# field specs
for name, (typ, size, deci) in itertools.izip(fieldnames, fieldspecs):
name = name.ljust(11, '\x00')
fld = struct.pack('<11sc4xBB14x', name, typ, size, deci)
f.write(fld)
# terminator
f.write('\r')
# records
for record in records:
f.write(' ') # deletion flag
for (typ, size, deci), value in itertools.izip(fieldspecs, record):
if typ == "N":
value = str(value).rjust(size, ' ')
elif typ == 'D':
value = value.strftime('%Y%m%d')
elif typ == 'L':
value = str(value)[0].upper()
else:
value = str(value)[:size].ljust(size, ' ')
assert len(value) == size
f.write(value)
# End of file
f.write('\x1A')
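# Illustrative usage sketch (assumes an existing 'sample.dbf'; not part of
# the original recipe):
if __name__ == '__main__':
    f = open('sample.dbf', 'rb')
    try:
        rows = list(dbfreader(f))
    finally:
        f.close()
    fieldnames, fieldspecs, records = rows[0], rows[1], rows[2:]
    print fieldnames
    for record in records:
        print record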
|
seem-sky/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/shelve.py | 83 | """Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Dependent on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amount
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
from pickle import Pickler, Unpickler
from io import BytesIO
import collections
__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
class _ClosedDict(collections.MutableMapping):
'Marker for a closed dict. Access attempts raise a ValueError.'
def closed(self, *args):
raise ValueError('invalid operation on closed shelf')
__iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
def __repr__(self):
return '<Closed Dictionary>'
class Shelf(collections.MutableMapping):
"""Base class for shelf implementations.
This is initialized with a dictionary-like object.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
self.dict = dict
if protocol is None:
protocol = 3
self._protocol = protocol
self.writeback = writeback
self.cache = {}
self.keyencoding = keyencoding
def __iter__(self):
for k in self.dict.keys():
yield k.decode(self.keyencoding)
def __len__(self):
return len(self.dict)
def __contains__(self, key):
return key.encode(self.keyencoding) in self.dict
def get(self, key, default=None):
if key.encode(self.keyencoding) in self.dict:
return self[key]
return default
def __getitem__(self, key):
try:
value = self.cache[key]
except KeyError:
f = BytesIO(self.dict[key.encode(self.keyencoding)])
value = Unpickler(f).load()
if self.writeback:
self.cache[key] = value
return value
def __setitem__(self, key, value):
if self.writeback:
self.cache[key] = value
f = BytesIO()
p = Pickler(f, self._protocol)
p.dump(value)
self.dict[key.encode(self.keyencoding)] = f.getvalue()
def __delitem__(self, key):
del self.dict[key.encode(self.keyencoding)]
try:
del self.cache[key]
except KeyError:
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.sync()
try:
self.dict.close()
except AttributeError:
pass
# Catch errors that may happen when close is called from __del__
# because CPython is in interpreter shutdown.
try:
self.dict = _ClosedDict()
except (NameError, TypeError):
self.dict = None
def __del__(self):
if not hasattr(self, 'writeback'):
# __init__ didn't succeed, so don't bother closing
# see http://bugs.python.org/issue1339007 for details
return
self.close()
def sync(self):
if self.writeback and self.cache:
self.writeback = False
for key, entry in self.cache.items():
self[key] = entry
self.writeback = True
self.cache = {}
if hasattr(self.dict, 'sync'):
self.dict.sync()
class BsdDbShelf(Shelf):
"""Shelf implementation using the "BSD" db interface.
This adds methods first(), next(), previous(), last() and
set_location() that have no counterpart in [g]dbm databases.
The actual database must be opened using one of the "bsddb"
modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
bsddb.rnopen) and passed to the constructor.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
Shelf.__init__(self, dict, protocol, writeback, keyencoding)
def set_location(self, key):
(key, value) = self.dict.set_location(key)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def next(self):
(key, value) = next(self.dict)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def previous(self):
(key, value) = self.dict.previous()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def first(self):
(key, value) = self.dict.first()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def last(self):
(key, value) = self.dict.last()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "dbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c', protocol=None, writeback=False):
import dbm
Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
"""Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. As a side-effect, an extension may be added to the
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
dbm.open(). The optional protocol parameter specifies the
version of the pickle protocol (0, 1, 2, or 3); it defaults to 3.
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag, protocol, writeback)
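# Illustrative sketch (not part of the module): demonstrates the writeback
# pitfall described in the module docstring above.
if __name__ == '__main__':
    import os
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'demo_shelf')
    with open(path) as db:            # this module's open(); flag defaults to 'c'
        db['items'] = []
        db['items'].append('lost')    # mutates a throwaway copy
        print(db['items'])            # -> []; open(..., writeback=True) avoids this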
|
mitocw/edx-platform | refs/heads/master | common/djangoapps/third_party_auth/tests/specs/test_testshib.py | 1 | """
Third_party_auth integration tests using a mock version of the TestShib provider
"""
import datetime
import json
import logging
import os
import unittest
from unittest import skip
import ddt
import httpretty
from django.conf import settings
from django.contrib import auth
from freezegun import freeze_time
from mock import MagicMock, patch
from social_core import actions
from social_django import views as social_views
from social_django.models import UserSocialAuth
from testfixtures import LogCapture
from enterprise.models import EnterpriseCustomerIdentityProvider, EnterpriseCustomerUser
from openedx.core.djangoapps.user_authn.views.login import login_user
from openedx.core.djangoapps.user_api.accounts.settings_views import account_settings_context
from openedx.features.enterprise_support.tests.factories import EnterpriseCustomerFactory
from third_party_auth import pipeline
from third_party_auth.saml import SapSuccessFactorsIdentityProvider, log as saml_log
from third_party_auth.tasks import fetch_saml_metadata
from third_party_auth.tests import testutil, utils
from .base import IntegrationTestMixin
TESTSHIB_ENTITY_ID = 'https://idp.testshib.org/idp/shibboleth'
TESTSHIB_METADATA_URL = 'https://mock.testshib.org/metadata/testshib-providers.xml'
TESTSHIB_METADATA_URL_WITH_CACHE_DURATION = 'https://mock.testshib.org/metadata/testshib-providers-cache.xml'
TESTSHIB_SSO_URL = 'https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO'
class SamlIntegrationTestUtilities(object):
"""
Class contains methods particular to SAML integration testing so that they
can be separated out from the actual test methods.
"""
PROVIDER_ID = "saml-testshib"
PROVIDER_NAME = "TestShib"
PROVIDER_BACKEND = "tpa-saml"
PROVIDER_IDP_SLUG = "testshib"
USER_EMAIL = "myself@testshib.org"
USER_NAME = "Me Myself And I"
USER_USERNAME = "myself"
def setUp(self):
super(SamlIntegrationTestUtilities, self).setUp()
self.enable_saml(
private_key=self._get_private_key(),
public_key=self._get_public_key(),
entity_id="https://saml.example.none",
)
# Mock out HTTP requests that may be made to TestShib:
httpretty.enable()
httpretty.reset()
self.addCleanup(httpretty.reset)
self.addCleanup(httpretty.disable)
def metadata_callback(_request, _uri, headers):
""" Return a cached copy of TestShib's metadata by reading it from disk """
return (200, headers, self.read_data_file('testshib_metadata.xml'))
httpretty.register_uri(httpretty.GET, TESTSHIB_METADATA_URL, content_type='text/xml', body=metadata_callback)
def cache_duration_metadata_callback(_request, _uri, headers):
"""Return a cached copy of TestShib's metadata with a cacheDuration attribute"""
return (200, headers, self.read_data_file('testshib_metadata_with_cache_duration.xml'))
httpretty.register_uri(
httpretty.GET,
TESTSHIB_METADATA_URL_WITH_CACHE_DURATION,
content_type='text/xml',
body=cache_duration_metadata_callback
)
# Configure the SAML library to use the same request ID for every request.
# Doing this and freezing the time allows us to play back recorded request/response pairs
uid_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.generate_unique_id', return_value='TESTID')
uid_patch.start()
self.addCleanup(uid_patch.stop)
self._freeze_time(timestamp=1434326820) # This is the time when the saved request/response was recorded.
def _freeze_time(self, timestamp):
""" Mock the current time for SAML, so we can replay canned requests/responses """
now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)
now_patch.start()
self.addCleanup(now_patch.stop)
def _configure_testshib_provider(self, **kwargs):
""" Enable and configure the TestShib SAML IdP as a third_party_auth provider """
fetch_metadata = kwargs.pop('fetch_metadata', True)
assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)
kwargs.setdefault('name', self.PROVIDER_NAME)
kwargs.setdefault('enabled', True)
kwargs.setdefault('visible', True)
kwargs.setdefault("backend_name", "tpa-saml")
kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)
kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)
kwargs.setdefault('icon_class', 'fa-university')
kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName
kwargs.setdefault('max_session_length', None)
saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member
if fetch_metadata:
self.assertTrue(httpretty.is_enabled())
num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()
if assert_metadata_updates:
self.assertEqual(num_total, 1)
self.assertEqual(num_skipped, 0)
self.assertEqual(num_attempted, 1)
self.assertEqual(num_updated, 1)
self.assertEqual(num_failed, 0)
self.assertEqual(len(failure_messages), 0)
return saml_provider
def do_provider_login(self, provider_redirect_url):
""" Mocked: the user logs in to TestShib and then gets redirected back """
# The SAML provider (TestShib) will authenticate the user, then get the browser to POST a response:
self.assertTrue(provider_redirect_url.startswith(TESTSHIB_SSO_URL))
saml_response_xml = utils.read_and_pre_process_xml(
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'testshib_saml_response.xml')
)
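# The POSTed body carries the standard SAML POST-binding fields -- a
# base64-encoded 'SAMLResponse' plus 'RelayState' -- which is what
# utils.prepare_saml_response_from_xml() is assumed to build from the canned
# XML above.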
return self.client.post(
self.complete_url,
content_type='application/x-www-form-urlencoded',
data=utils.prepare_saml_response_from_xml(saml_response_xml),
)
@ddt.ddt
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, testutil.AUTH_FEATURES_KEY + ' not enabled')
class TestShibIntegrationTest(SamlIntegrationTestUtilities, IntegrationTestMixin, testutil.SAMLTestCase):
"""
TestShib provider Integration Test, to test SAML functionality
"""
TOKEN_RESPONSE_DATA = {
'access_token': 'access_token_value',
'expires_in': 'expires_in_value',
}
USER_RESPONSE_DATA = {
'lastName': 'lastName_value',
'id': 'id_value',
'firstName': 'firstName_value',
'idp_name': 'testshib',
'attributes': {u'urn:oid:0.9.2342.19200300.100.1.1': [u'myself']}
}
@patch('openedx.features.enterprise_support.api.enterprise_customer_for_request')
@patch('openedx.core.djangoapps.user_api.accounts.settings_views.enterprise_customer_for_request')
def test_full_pipeline_succeeds_for_unlinking_testshib_account(
self,
mock_enterprise_customer_for_request_settings_view,
mock_enterprise_customer_for_request,
):
# First, create, the request and strategy that store pipeline state,
# configure the backend, and mock out wire traffic.
self.provider = self._configure_testshib_provider()
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
request.backend.auth_complete = MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(
strategy, 'user@example.com', 'password', self.get_username())
self.assert_social_auth_exists_for_user(user, strategy)
request.user = user
# We're already logged in, so simulate that the cookie is set correctly
self.set_logged_in_cookies(request)
# linking a learner with enterprise customer.
enterprise_customer = EnterpriseCustomerFactory()
assert EnterpriseCustomerUser.objects.count() == 0, "Precondition check: no link records should exist"
EnterpriseCustomerUser.objects.link_user(enterprise_customer, user.email)
self.assertTrue(
EnterpriseCustomerUser.objects.filter(enterprise_customer=enterprise_customer, user_id=user.id).count() == 1
)
EnterpriseCustomerIdentityProvider.objects.get_or_create(enterprise_customer=enterprise_customer,
provider_id=self.provider.provider_id)
enterprise_customer_data = {
'uuid': enterprise_customer.uuid,
'name': enterprise_customer.name,
'identity_provider': 'saml-default',
}
mock_enterprise_customer_for_request.return_value = enterprise_customer_data
mock_enterprise_customer_for_request_settings_view.return_value = enterprise_customer_data
# Instrument the pipeline to get to the dashboard with the full expected state.
self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN))
actions.do_complete(request.backend, social_views._do_login, # pylint: disable=protected-access
request=request)
with self._patch_edxmako_current_request(strategy.request):
login_user(strategy.request)
actions.do_complete(request.backend, social_views._do_login, user=user, # pylint: disable=protected-access
request=request)
# First we expect that we're in the linked state, with a backend entry.
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=True)
self.assert_social_auth_exists_for_user(request.user, strategy)
FEATURES_WITH_ENTERPRISE_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_ENTERPRISE_ENABLED['ENABLE_ENTERPRISE_INTEGRATION'] = True
with patch.dict("django.conf.settings.FEATURES", FEATURES_WITH_ENTERPRISE_ENABLED):
# Fire off the disconnect pipeline without the user information.
actions.do_disconnect(
request.backend,
None,
None,
redirect_field_name=auth.REDIRECT_FIELD_NAME,
request=request
)
self.assertNotEqual(
EnterpriseCustomerUser.objects.filter(enterprise_customer=enterprise_customer, user_id=user.id).count(),
0
)
# Fire off the disconnect pipeline to unlink.
self.assert_redirect_after_pipeline_completes(
actions.do_disconnect(
request.backend,
user,
None,
redirect_field_name=auth.REDIRECT_FIELD_NAME,
request=request
)
)
# Now we expect to be in the unlinked state, with no backend entry.
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=False)
self.assert_social_auth_does_not_exist_for_user(user, strategy)
self.assertEqual(
EnterpriseCustomerUser.objects.filter(enterprise_customer=enterprise_customer, user_id=user.id).count(),
0
)
def get_response_data(self):
"""Gets dict (string -> object) of merged data about the user."""
response_data = dict(self.TOKEN_RESPONSE_DATA)
response_data.update(self.USER_RESPONSE_DATA)
return response_data
def get_username(self):
response_data = self.get_response_data()
return response_data.get('idp_name')
def test_login_before_metadata_fetched(self):
self._configure_testshib_provider(fetch_metadata=False)
# The user goes to the login page, and sees a button to login with TestShib:
testshib_login_url = self._check_login_page()
# The user clicks on the TestShib button:
try_login_response = self.client.get(testshib_login_url)
# The user should be redirected to back to the login page:
self.assertEqual(try_login_response.status_code, 302)
self.assertEqual(try_login_response['Location'], self.login_page_url)
# When loading the login page, the user will see an error message:
response = self.client.get(self.login_page_url)
self.assertContains(response, 'Authentication with TestShib is currently unavailable.')
def test_login(self):
""" Configure TestShib before running the login test """
self._configure_testshib_provider()
self._test_login()
def test_register(self):
""" Configure TestShib before running the register test """
self._configure_testshib_provider()
self._test_register()
def test_login_records_attributes(self):
"""
Test that attributes sent by a SAML provider are stored in the UserSocialAuth table.
"""
self.test_login()
record = UserSocialAuth.objects.get(
user=self.user, provider=self.PROVIDER_BACKEND, uid__startswith=self.PROVIDER_IDP_SLUG
)
attributes = record.extra_data
self.assertEqual(
attributes.get("urn:oid:1.3.6.1.4.1.5923.1.1.1.9"), ["Member@testshib.org", "Staff@testshib.org"]
)
self.assertEqual(attributes.get("urn:oid:2.5.4.3"), ["Me Myself And I"])
self.assertEqual(attributes.get("urn:oid:0.9.2342.19200300.100.1.1"), ["myself"])
self.assertEqual(attributes.get("urn:oid:2.5.4.20"), ["555-5555"]) # Phone number
@ddt.data(True, False)
def test_debug_mode_login(self, debug_mode_enabled):
""" Test SAML login logs with debug mode enabled or not """
self._configure_testshib_provider(debug_mode=debug_mode_enabled)
with patch.object(saml_log, 'info') as mock_log:
self._test_login()
if debug_mode_enabled:
# We expect that test_login() does two full logins, and each attempt generates two
# logs - one for the request and one for the response
self.assertEqual(mock_log.call_count, 4)
expected_next_url = "/dashboard"
(msg, action_type, idp_name, request_data, next_url, xml), _kwargs = mock_log.call_args_list[0]
self.assertTrue(msg.startswith(u"SAML login %s"))
self.assertEqual(action_type, "request")
self.assertEqual(idp_name, self.PROVIDER_IDP_SLUG)
self.assertDictContainsSubset(
{"idp": idp_name, "auth_entry": "login", "next": expected_next_url},
request_data
)
self.assertEqual(next_url, expected_next_url)
self.assertIn('<samlp:AuthnRequest', xml)
(msg, action_type, idp_name, response_data, next_url, xml), _kwargs = mock_log.call_args_list[1]
self.assertTrue(msg.startswith(u"SAML login %s"))
self.assertEqual(action_type, "response")
self.assertEqual(idp_name, self.PROVIDER_IDP_SLUG)
self.assertDictContainsSubset({"RelayState": idp_name}, response_data)
self.assertIn('SAMLResponse', response_data)
self.assertEqual(next_url, expected_next_url)
self.assertIn('<saml2p:Response', xml)
else:
self.assertFalse(mock_log.called)
def test_configure_testshib_provider_with_cache_duration(self):
""" Enable and configure the TestShib SAML IdP as a third_party_auth provider """
kwargs = {}
kwargs.setdefault('name', self.PROVIDER_NAME)
kwargs.setdefault('enabled', True)
kwargs.setdefault('visible', True)
kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)
kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)
kwargs.setdefault('icon_class', 'fa-university')
kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName
self.configure_saml_provider(**kwargs)
self.assertTrue(httpretty.is_enabled())
num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()
self.assertEqual(num_total, 1)
self.assertEqual(num_skipped, 0)
self.assertEqual(num_attempted, 1)
self.assertEqual(num_updated, 1)
self.assertEqual(num_failed, 0)
self.assertEqual(len(failure_messages), 0)
def test_login_with_testshib_provider_short_session_length(self):
"""
Test that when we have a TPA provider which as an explicit maximum
session length set, waiting for longer than that between requests
results in us being logged out.
"""
# Configure the provider with a 10-second timeout
self._configure_testshib_provider(max_session_length=10)
now = datetime.datetime.utcnow()
with freeze_time(now):
# Test the login flow, adding the user in the process
self._test_login()
# Wait 30 seconds; longer than the manually-set 10-second timeout
later = now + datetime.timedelta(seconds=30)
with freeze_time(later):
# Test returning as a logged in user; this method verifies that we're logged out first.
self._test_return_login(previous_session_timed_out=True)
@unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, testutil.AUTH_FEATURES_KEY + ' not enabled')
class SuccessFactorsIntegrationTest(SamlIntegrationTestUtilities, IntegrationTestMixin, testutil.SAMLTestCase):
"""
Test basic SAML capability using the TestShib details, and then check that we're able
to make the proper calls using the SAP SuccessFactors API.
"""
# Note that these details are different than those that will be provided by the SAML
# assertion metadata. Rather, they will be fetched from the mocked SAPSuccessFactors API.
USER_EMAIL = "john@smith.com"
USER_NAME = "John Smith"
USER_USERNAME = "John"
def setUp(self):
"""
Mock out HTTP calls to various endpoints using httpretty.
"""
super(SuccessFactorsIntegrationTest, self).setUp()
# Mock the call to the SAP SuccessFactors assertion endpoint
SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp'
def assertion_callback(_request, _uri, headers):
"""
Return a fake assertion after checking that the input is what we expect.
"""
self.assertIn(b'private_key=fake_private_key_here', _request.body)
self.assertIn(b'user_id=myself', _request.body)
self.assertIn(b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken', _request.body)
self.assertIn(b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB', _request.body)
return (200, headers, 'fake_saml_assertion')
httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)
SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp'
def bad_callback(_request, _uri, headers):
"""
Return a 404 error when someone tries to call the URL.
"""
return (404, headers, 'NOT AN ASSERTION')
httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback)
# Mock the call to the SAP SuccessFactors token endpoint
SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'
def token_callback(_request, _uri, headers):
"""
Return a fake assertion after checking that the input is what we expect.
"""
self.assertIn(b'assertion=fake_saml_assertion', _request.body)
self.assertIn(b'company_id=NCC1701D', _request.body)
self.assertIn(b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer', _request.body)
self.assertIn(b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB', _request.body)
return (200, headers, '{"access_token": "faketoken"}')
httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)
# Mock the call to the SAP SuccessFactors OData user endpoint
ODATA_USER_URL = (
'http://api.successfactors.com/odata/v2/User(userId=\'myself\')'
'?$select=firstName,lastName,defaultFullName,email'
)
def user_callback(request, _uri, headers):
auth_header = request.headers.get('Authorization')
self.assertEqual(auth_header, 'Bearer faketoken')
return (
200,
headers,
json.dumps({
'd': {
'username': 'jsmith',
'firstName': 'John',
'lastName': 'Smith',
'defaultFullName': 'John Smith',
'email': 'john@smith.com',
'country': 'Australia',
}
})
)
httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)
def _mock_odata_api_for_error(self, odata_api_root_url, username):
"""
Mock an error response when calling the OData API for user details.
"""
def callback(request, uri, headers):
"""
Return a 500 error when someone tries to call the URL.
"""
headers['CorrelationId'] = 'aefd38b7-c92c-445a-8c7a-487a3f0c7a9d'
headers['RequestNo'] = '[787177]' # This is the format SAPSF returns for the transaction request number
return 500, headers, 'Failure!'
fields = ','.join(SapSuccessFactorsIdentityProvider.default_field_mapping.copy())
url = '{root_url}User(userId=\'{user_id}\')?$select={fields}'.format(
root_url=odata_api_root_url,
user_id=username,
fields=fields,
)
httpretty.register_uri(httpretty.GET, url, body=callback, content_type='application/json')
return url
def test_register_insufficient_sapsf_metadata(self):
"""
Configure the provider such that it doesn't have enough details to contact the SAP
SuccessFactors API, and test that it falls back to the data it receives from the SAML assertion.
"""
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings='{"key_i_dont_need":"value_i_also_dont_need"}',
)
# Because we're getting details from the assertion, fall back to the initial set of details.
self.USER_EMAIL = "myself@testshib.org"
self.USER_NAME = "Me Myself And I"
self.USER_USERNAME = "myself"
self._test_register()
@patch.dict('django.conf.settings.REGISTRATION_EXTRA_FIELDS', country='optional')
def test_register_sapsf_metadata_present(self):
"""
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors
API, and ensure that the data it gets that way gets passed to the registration form.
This test configures no value mapping overrides; the 'Australia' country returned by
the mocked API is expected to reach the registration form as 'AU'.
"""
expected_country = 'AU'
provider_settings = {
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
}
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps(provider_settings)
)
self._test_register(country=expected_country)
def test_register_sapsf_with_value_default(self):
"""
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors
API, and ensure that the data it gets that way gets passed to the registration form.
Check that when the mocked user record is missing a field (the email, in this case),
the default value supplied in the provider configuration (default_email) is used instead.
"""
# Mock the call to the SAP SuccessFactors OData user endpoint
ODATA_USER_URL = (
'http://api.successfactors.com/odata/v2/User(userId=\'myself\')'
'?$select=firstName,country,lastName,defaultFullName,email'
)
def user_callback(request, _uri, headers):
auth_header = request.headers.get('Authorization')
self.assertEqual(auth_header, 'Bearer faketoken')
return (
200,
headers,
json.dumps({
'd': {
'username': 'jsmith',
'firstName': 'John',
'lastName': 'Smith',
'defaultFullName': 'John Smith',
'country': 'Australia'
}
})
)
httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)
provider_settings = {
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
}
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps(provider_settings),
default_email='default@testshib.org'
)
self.USER_EMAIL = 'default@testshib.org'
self._test_register()
@patch.dict('django.conf.settings.REGISTRATION_EXTRA_FIELDS', country='optional')
def test_register_sapsf_metadata_present_override_relevant_value(self):
"""
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors
API, and ensure that the data it gets that way gets passed to the registration form.
Check that a value mapping override for the value we actually receive takes effect:
'Australia' is remapped to 'NZ' via sapsf_value_mappings, so the registration form
should see the overridden country.
"""
value_map = {'country': {'Australia': 'NZ'}}
expected_country = 'NZ'
provider_settings = {
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
}
if value_map:
provider_settings['sapsf_value_mappings'] = value_map
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps(provider_settings)
)
self._test_register(country=expected_country)
@patch.dict('django.conf.settings.REGISTRATION_EXTRA_FIELDS', country='optional')
def test_register_sapsf_metadata_present_override_other_value(self):
"""
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors
API, and ensure that the data it gets that way gets passed to the registration form.
Check that a value mapping override for a value other than the one we receive is
ignored: only 'United States' is remapped, so the 'Australia' value still resolves
to the default 'AU' country code.
"""
value_map = {'country': {'United States': 'blahfake'}}
expected_country = 'AU'
provider_settings = {
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
}
if value_map:
provider_settings['sapsf_value_mappings'] = value_map
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps(provider_settings)
)
self._test_register(country=expected_country)
@patch.dict('django.conf.settings.REGISTRATION_EXTRA_FIELDS', country='optional')
def test_register_sapsf_metadata_present_empty_value_override(self):
"""
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors
API, and ensure that the data it gets that way gets passed to the registration form.
Check that an empty value mapping override is handled gracefully: the existing
value maps are left alone and the default mapping is still applied.
"""
value_map = {'country': {}}
expected_country = 'AU'
provider_settings = {
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
}
if value_map:
provider_settings['sapsf_value_mappings'] = value_map
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps(provider_settings)
)
self._test_register(country=expected_country)
def test_register_http_failure(self):
"""
Ensure that if there's an HTTP failure while fetching metadata, we continue, using the
metadata from the SAML assertion.
"""
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps({
'sapsf_oauth_root_url': 'http://successfactors.com/oauth-fake/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
})
)
# Because we're getting details from the assertion, fall back to the initial set of details.
self.USER_EMAIL = "myself@testshib.org"
self.USER_NAME = "Me Myself And I"
self.USER_USERNAME = "myself"
self._test_register()
def test_register_http_failure_in_odata(self):
"""
Ensure that if there's an HTTP failure while fetching user details from the
SAP SuccessFactors OData API, the failure is logged and registration still succeeds.
"""
# Because we're getting details from the assertion, fall back to the initial set of details.
self.USER_EMAIL = "myself@testshib.org"
self.USER_NAME = "Me Myself And I"
self.USER_USERNAME = "myself"
odata_company_id = 'NCC1701D'
odata_api_root_url = 'http://api.successfactors.com/odata/v2/'
mocked_odata_api_url = self._mock_odata_api_for_error(odata_api_root_url, self.USER_USERNAME)
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps({
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': odata_api_root_url,
'odata_company_id': odata_company_id,
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
})
)
with LogCapture(level=logging.WARNING) as log_capture:
self._test_register()
logging_messages = str([log_msg.getMessage() for log_msg in log_capture.records]).replace('\\', '')
self.assertIn(odata_company_id, logging_messages)
self.assertIn(mocked_odata_api_url, logging_messages)
self.assertIn(self.USER_USERNAME, logging_messages)
self.assertIn("SAPSuccessFactors", logging_messages)
self.assertIn("Error message", logging_messages)
self.assertIn("System message", logging_messages)
self.assertIn("Headers", logging_messages)
@skip('Test not necessary for this subclass')
def test_get_saml_idp_class_with_fake_identifier(self):
pass
@skip('Test not necessary for this subclass')
def test_login(self):
pass
@skip('Test not necessary for this subclass')
def test_register(self):
pass
|
bobobox/ansible | refs/heads/devel | lib/ansible/modules/packaging/language/npm.py | 16 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chris Hoffman <christopher.hoffman@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: npm
short_description: Manage node.js packages with npm
description:
- Manage node.js packages with Node Package Manager (npm)
version_added: 1.2
author: "Chris Hoffman (@chrishoffman)"
options:
name:
description:
- The name of a node.js library to install
required: false
path:
description:
- The base path where the node.js libraries will be installed
required: false
version:
description:
- The version to be installed
required: false
global:
description:
- Install the node.js library globally
required: false
default: no
choices: [ "yes", "no" ]
executable:
description:
- The executable location for npm.
- This is useful if you are using a version manager, such as nvm
required: false
ignore_scripts:
description:
- Use the --ignore-scripts flag when installing.
required: false
choices: [ "yes", "no" ]
default: no
version_added: "1.8"
production:
description:
- Install dependencies in production mode, excluding devDependencies
required: false
choices: [ "yes", "no" ]
default: no
registry:
description:
- The registry to install modules from.
required: false
version_added: "1.6"
state:
description:
- The state of the node.js library
required: false
default: present
choices: [ "present", "absent", "latest" ]
'''
EXAMPLES = '''
- name: Install "coffee-script" node.js package.
npm:
name: coffee-script
path: /app/location
- name: Install "coffee-script" node.js package on version 1.6.1.
npm:
name: coffee-script
version: '1.6.1'
path: /app/location
- name: Install "coffee-script" node.js package globally.
npm:
name: coffee-script
global: yes
- name: Remove the globally installed package "coffee-script".
npm:
name: coffee-script
global: yes
state: absent
- name: Install "coffee-script" node.js package from custom registry.
npm:
name: coffee-script
registry: 'http://registry.mysite.com'
- name: Install packages based on package.json.
npm:
path: /app/location
- name: Update packages based on package.json to their latest version.
npm:
path: /app/location
state: latest
- name: Install packages based on package.json using the npm installed with nvm v0.10.1.
npm:
path: /app/location
executable: /opt/nvm/v0.10.1/bin/npm
state: present
'''
import os
import re
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
class Npm(object):
def __init__(self, module, **kwargs):
self.module = module
self.glbl = kwargs['glbl']
self.name = kwargs['name']
self.version = kwargs['version']
self.path = kwargs['path']
self.registry = kwargs['registry']
self.production = kwargs['production']
self.ignore_scripts = kwargs['ignore_scripts']
if kwargs['executable']:
self.executable = kwargs['executable'].split(' ')
else:
self.executable = [module.get_bin_path('npm', True)]
if kwargs['version']:
self.name_version = self.name + '@' + str(self.version)
else:
self.name_version = self.name
def _exec(self, args, run_in_check_mode=False, check_rc=True):
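# Only run the command for real when not in check mode, unless the caller
# explicitly allows it (read-only commands such as 'npm list').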
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = self.executable + args
if self.glbl:
cmd.append('--global')
if self.production:
cmd.append('--production')
if self.ignore_scripts:
cmd.append('--ignore-scripts')
if self.name:
cmd.append(self.name_version)
if self.registry:
cmd.append('--registry')
cmd.append(self.registry)
# If path is specified, cd into that path and run the command.
cwd = None
if self.path:
if not os.path.exists(self.path):
os.makedirs(self.path)
if not os.path.isdir(self.path):
self.module.fail_json(msg="path %s is not a directory" % self.path)
cwd = self.path
rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
return out
return ''
def list(self):
cmd = ['list', '--json']
installed = list()
missing = list()
data = json.loads(self._exec(cmd, True, False))
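# npm marks a dependency "missing" when it is absent and "invalid" when the
# installed version does not satisfy package.json; treat both as missing.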
if 'dependencies' in data:
for dep in data['dependencies']:
if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
missing.append(dep)
elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
missing.append(dep)
else:
installed.append(dep)
if self.name and self.name not in installed:
missing.append(self.name)
# Named dependency not installed (no 'dependencies' key in the npm output)
else:
missing.append(self.name)
return installed, missing
def install(self):
return self._exec(['install'])
def update(self):
return self._exec(['update'])
def uninstall(self):
return self._exec(['uninstall'])
def list_outdated(self):
outdated = list()
data = self._exec(['outdated'], True, False)
for dep in data.splitlines():
if dep:
# node.js v0.10.22 changed the `npm outdated` module separator
# from "@" to " ". Split on both for backwards compatibility.
pkg, other = re.split(r'\s|@', dep, 1)
outdated.append(pkg)
return outdated
def main():
arg_spec = dict(
name=dict(default=None),
path=dict(default=None, type='path'),
version=dict(default=None),
production=dict(default='no', type='bool'),
executable=dict(default=None, type='path'),
registry=dict(default=None),
state=dict(default='present', choices=['present', 'absent', 'latest']),
ignore_scripts=dict(default=False, type='bool'),
)
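# 'global' is a Python keyword, so it cannot be passed as a keyword argument
# to dict() above and is added to the spec separately.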
arg_spec['global'] = dict(default='no', type='bool')
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
path = module.params['path']
version = module.params['version']
glbl = module.params['global']
production = module.params['production']
executable = module.params['executable']
registry = module.params['registry']
state = module.params['state']
ignore_scripts = module.params['ignore_scripts']
if not path and not glbl:
module.fail_json(msg='path must be specified when not using global')
if state == 'absent' and not name:
module.fail_json(msg='uninstalling a package is only available for named packages')
npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
executable=executable, registry=registry, ignore_scripts=ignore_scripts)
changed = False
if state == 'present':
installed, missing = npm.list()
if len(missing):
changed = True
npm.install()
elif state == 'latest':
installed, missing = npm.list()
outdated = npm.list_outdated()
if len(missing):
changed = True
npm.install()
if len(outdated):
changed = True
npm.update()
else:  # absent
installed, missing = npm.list()
if name in installed:
changed = True
npm.uninstall()
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
beatrizChagas/scrm-solutions | refs/heads/master | extracao/processamentoEmoji/emoji-master/emoji/core.py | 1 | # -*- coding: UTF-8 -*-
"""
emoji.core
~~~~~~~~~~
Core components for emoji.
"""
import re
import sys
from emoji import unicode_codes
__all__ = ['emojize', 'demojize', 'get_emoji_regexp']
PY2 = sys.version_info[0] == 2
_EMOJI_REGEXP = None
def emojize(string, use_aliases=False):
"""Replace emoji names in a string with unicode codes.
:param string: String contains emoji names.
:param use_aliases: (optional) Enable emoji aliases. See ``emoji.UNICODE_EMOJI_ALIAS``.
>>> import emoji
>>> print(emoji.emojize("Python is fun :thumbsup:", use_aliases=True))
Python is fun 👍
>>> print(emoji.emojize("Python is fun :thumbs_up_sign:"))
Python is fun 👍
"""
pattern = re.compile(u'(:[a-zA-Z0-9\\+\\-_&.ô’Åéãíç]+:)')
def replace(match):
if use_aliases:
return unicode_codes.EMOJI_ALIAS_UNICODE.get(match.group(1), match.group(1))
else:
return unicode_codes.EMOJI_UNICODE.get(match.group(1), match.group(1))
return pattern.sub(replace, string)
def demojize(string):
"""Replace unicode emoji in a string with emoji shortcodes. Useful for storage.
:param string: String contains unicode characters. MUST BE UNICODE.
>>> import emoji
>>> print(emoji.emojize("Python is fun :thumbs_up_sign:"))
Python is fun 👍
>>> print(emoji.demojize(u"Python is fun 👍"))
Python is fun :thumbs_up_sign:
>>> print(emoji.demojize("Unicode is tricky 😯".decode('utf-8')))
Unicode is tricky :hushed_face:
"""
def replace(match):
val = unicode_codes.UNICODE_EMOJI.get(match.group(0), match.group(0))
return val
return get_emoji_regexp().sub(replace, string)
def get_emoji_regexp():
"""Returns compiled regular expression that matches emojis defined in
``emoji.UNICODE_EMOJI_ALIAS``. The regular expression is only compiled once.
"""
global _EMOJI_REGEXP
# Build emoji regexp once
if _EMOJI_REGEXP is None:
# Sort emojis by length to make sure multi-character emojis are
# matched first
emojis = sorted(unicode_codes.EMOJI_UNICODE.values(), key=len,
reverse=True)
pattern = u'(' + u'|'.join(re.escape(u) for u in emojis) + u')'
_EMOJI_REGEXP = re.compile(pattern)
return _EMOJI_REGEXP
|
dannybrowne86/django-bootstrap-calendar | refs/heads/master | django_bootstrap_calendar/__init__.py | 143 | __version__ = '0.1.0' |
grimoirelab/GrimoireELK | refs/heads/master | utils/gh2arthur.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import argparse
import json
import logging
import sys
from datetime import datetime
from os import path
from time import sleep
import MySQLdb
import requests
from dateutil import parser
from grimoire_elk.elastic import ElasticSearch, ElasticConnectException
from grimoire_elk.utils import config_logging
GITHUB_URL = "https://github.com/"
GITHUB_API_URL = "https://api.github.com"
NREPOS = 0 # Default number of repos to be analyzed: all
CAULDRON_DASH_URL = "https://cauldron.io/dashboards"
GIT_CLONE_DIR = "/tmp"
OCEAN_INDEX = "ocean"
PERCEVAL_BACKEND = "git"
PROJECTS_DS = "scm"
def get_params_parser():
"""Parse command line arguments"""
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--elastic_url", default="http://127.0.0.1:9200",
help="Host with elastic search (default: http://127.0.0.1:9200)")
parser.add_argument('-g', '--debug', dest='debug', action='store_true')
parser.add_argument('-t', '--token', dest='token', help="GitHub token")
parser.add_argument('-o', '--org', dest='org', nargs='*', help='GitHub Organization/s to be analyzed')
parser.add_argument('-l', '--list', dest='list', action='store_true', help='Just list the repositories')
parser.add_argument('-n', '--nrepos', dest='nrepos', type=int, default=NREPOS,
help='Number of GitHub repositories from the Organization to be analyzed (default:0, no limit)')
parser.add_argument('--db-projects-map', help="Database to include the projects Mapping DB")
return parser
def get_params():
parser = get_params_parser()
args = parser.parse_args()
if not args.org or not args.token:
parser.error("token and org params must be provided.")
sys.exit(1)
return args
def get_payload():
# 100 max in repos
payload = {'per_page': 100,
'fork': False,
'sort': 'updated', # does not work in repos listing
'direction': 'desc'}
return payload
def get_headers(token):
headers = {'Authorization': 'token ' + token}
return headers
def get_owner_repos_url(owner, token):
""" The owner could be a org or a user.
It waits if need to have rate limit.
Also it fixes a djando issue changing - with _
"""
url_org = GITHUB_API_URL + "/orgs/" + owner + "/repos"
url_user = GITHUB_API_URL + "/users/" + owner + "/repos"
url_owner = url_org # Use org by default
try:
r = requests.get(url_org,
params=get_payload(),
headers=get_headers(token))
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if r.status_code == 403:
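# X-RateLimit-Reset is a Unix epoch timestamp; sleep until then, plus a second of slack.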
rate_limit_reset_ts = datetime.utcfromtimestamp(int(r.headers['X-RateLimit-Reset']))
seconds_to_reset = (rate_limit_reset_ts - datetime.utcnow()).seconds + 1
logging.info("GitHub rate limit exhausted. Waiting %i secs for rate limit reset." % (seconds_to_reset))
sleep(seconds_to_reset)
else:
# owner is not an org, try with a user
url_owner = url_user
return url_owner
def get_repositores(owner_url, token, nrepos):
""" owner could be an org or and user """
all_repos = []
url = owner_url
while True:
logging.debug("Getting repos from: %s" % (url))
try:
r = requests.get(url,
params=get_payload(),
headers=get_headers(token))
r.raise_for_status()
all_repos += r.json()
logging.debug("Rate limit: %s" % (r.headers['X-RateLimit-Remaining']))
if 'next' not in r.links:
break
url = r.links['next']['url'] # Loving requests :)
except requests.exceptions.ConnectionError:
logging.error("Can not connect to GitHub")
break
# Remove forks
nrepos_recent = [repo for repo in all_repos if not repo['fork']]
# Sort by updated_at and limit to nrepos
nrepos_sorted = sorted(nrepos_recent, key=lambda repo: parser.parse(repo['updated_at']), reverse=True)
if nrepos > 0:
nrepos_sorted = nrepos_sorted[0:nrepos]
# Process the small repositories first so the user gets feedback quickly
nrepos_sorted = sorted(nrepos_sorted, key=lambda repo: repo['size'])
for repo in nrepos_sorted:
logging.debug("%s %i %s" % (repo['updated_at'], repo['size'], repo['name']))
return nrepos_sorted
def create_projects_schema(cursor):
project_table = """
CREATE TABLE projects (
project_id int(11) NOT NULL AUTO_INCREMENT,
id varchar(255) NOT NULL,
title varchar(255) NOT NULL,
PRIMARY KEY (project_id)
) ENGINE=MyISAM DEFAULT CHARSET=utf8
"""
project_repositories_table = """
CREATE TABLE project_repositories (
project_id int(11) NOT NULL,
data_source varchar(32) NOT NULL,
repository_name varchar(255) NOT NULL,
UNIQUE (project_id, data_source, repository_name)
) ENGINE=MyISAM DEFAULT CHARSET=utf8
"""
project_children_table = """
CREATE TABLE project_children (
project_id int(11) NOT NULL,
subproject_id int(11) NOT NULL,
UNIQUE (project_id, subproject_id)
) ENGINE=MyISAM DEFAULT CHARSET=utf8
"""
# The data in tables is created automatically.
# No worries about dropping tables.
cursor.execute("DROP TABLE IF EXISTS projects")
cursor.execute("DROP TABLE IF EXISTS project_repositories")
cursor.execute("DROP TABLE IF EXISTS project_children")
cursor.execute(project_table)
cursor.execute(project_repositories_table)
cursor.execute(project_children_table)
def insert_projects_mapping(db_projects_map, project, repositories):
try:
db = MySQLdb.connect(user="root", passwd="", host="mariadb",
db=db_projects_map)
except Exception:
# Try to create the database and the tables
db = MySQLdb.connect(user="root", passwd="", host="mariadb")
cursor = db.cursor()
cursor.execute("CREATE DATABASE %s CHARACTER SET utf8" % (db_projects_map))
db = MySQLdb.connect(user="root", passwd="", host="mariadb",
db=db_projects_map)
cursor = db.cursor()
create_projects_schema(cursor)
cursor = db.cursor()
# Insert the project in projects
query = "INSERT INTO projects (id, title) VALUES (%s, %s)"
q = "INSERT INTO projects (title, id) values (%s, %s)"
cursor.execute(q, (project, project))
project_id = db.insert_id()
# Insert its repositories in project_repositories
for repo in repositories:
repo_url = repo['clone_url']
q = "INSERT INTO project_repositories (project_id, data_source, repository_name) VALUES (%s, %s, %s)"
cursor.execute(q, (project_id, PROJECTS_DS, repo_url))
db.close()
if __name__ == '__main__':
"""GitHub to Kibana"""
task_init = datetime.now()
arthur_repos = {"repositories": []}
args = get_params()
config_logging(args.debug)
total_repos = 0
# enrich ocean
index_enrich = OCEAN_INDEX + "_" + PERCEVAL_BACKEND + "_enrich"
es_enrich = None
try:
es_enrich = ElasticSearch(args.elastic_url, index_enrich)
except ElasticConnectException:
logging.error("Can't connect to Elastic Search. Is it running?")
# The owner could be an org or a user.
for org in args.org:
owner_url = get_owner_repos_url(org, args.token)
try:
repos = get_repositores(owner_url, args.token, args.nrepos)
except requests.exceptions.HTTPError:
logging.error("Can't get repos for %s" % (owner_url))
continue
if args.db_projects_map:
insert_projects_mapping(args.db_projects_map, org, repos)
for repo in repos:
repo_url = repo['clone_url']
origin = repo_url
clone_dir = path.join(GIT_CLONE_DIR, repo_url.replace("/", "_"))
filter_ = {"name": "origin", "value": origin}
last_update = None
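# Use the newest enriched item as the incremental starting point (from_date) for this repo.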
if es_enrich:
last_update = es_enrich.get_last_date("metadata__updated_on", filter_)
if last_update:
last_update = last_update.isoformat()
repo_args = {
"gitpath": clone_dir,
"uri": repo_url,
"cache": False
}
if last_update:
repo_args["from_date"] = last_update
arthur_repos["repositories"].append({
"args": repo_args,
"backend": PERCEVAL_BACKEND,
"origin": repo_url,
"elastic_index": PERCEVAL_BACKEND
})
total_repos += len(repos)
logging.debug("Total repos listed: %i" % (total_repos))
print(json.dumps(arthur_repos, indent=4, sort_keys=True))
|
obi-two/Rebelion | refs/heads/master | data/scripts/templates/object/draft_schematic/space/weapon/missile/shared_countermeasure_confuser_launcher.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/weapon/missile/shared_countermeasure_confuser_launcher.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
willrogerpereira/willbuyer | refs/heads/master | willstores/willstores/errors/nocontent_error.py | 1 | from willstores.errors import Error
class NoContentError(Error):
def __init__(self, message):
self.__message = message
def __str__(self):
return self.__message |
cshallue/models | refs/heads/master | research/slim/nets/inception_v1.py | 3 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v1_base(inputs,
final_endpoint='Mixed_5c',
scope='InceptionV1'):
"""Defines the Inception V1 base architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
scope: Optional variable_scope.
Returns:
A dictionary from components of the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
stride=1, padding='SAME'):
end_point = 'Conv2d_1a_7x7'
net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2b_1x1'
net = slim.conv2d(net, 64, [1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2c_3x3'
net = slim.conv2d(net, 192, [3, 3], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_4a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_5a_2x2'
net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
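# NB: this branch keeps the scope name 'Conv2d_0a_3x3' (not '0b'), matching
# the variable names used in the released Inception V1 checkpoints.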
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v1(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1',
global_pool=False):
"""Defines the Inception V1 architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
is_training: whether is training or not.
dropout_keep_prob: the fraction of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is of
shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with tf.variable_scope(scope, 'InceptionV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
net = slim.avg_pool2d(net, [7, 7], stride=1, scope='AvgPool_0a_7x7')
end_points['AvgPool_0a_7x7'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v1.default_image_size = 224
inception_v1_arg_scope = inception_utils.inception_arg_scope
|
ApuliaSoftware/multi_carrier | refs/heads/master | account/account.py | 1 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Andre@ (<a.gallina@cgsoftware.it>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
class account_invoice(osv.osv):
_inherit = "account.invoice"
_columns = {
'carrier_ids': fields.many2many(
'delivery.carrier',
'invoice_carrier_rel',
'invoice_id',
'carrier_id',
'Related Carrier'),
}
|
quinot/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_wait.py | 23 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_wait
short_description: Wait for a BIG-IP condition before continuing
description:
- You can wait for BIG-IP to be "ready". By "ready", we mean that BIG-IP is ready
to accept configuration.
- This module can take into account situations where the device is in the middle
of rebooting due to a configuration change.
version_added: "2.5"
options:
timeout:
description:
- Maximum number of seconds to wait for.
- When used without other conditions it is equivalent of just sleeping.
- The default timeout is deliberately set to 2 hours because no individual
REST API call is expected to take longer than that.
default: 7200
delay:
description:
- Number of seconds to wait before starting to poll.
default: 0
sleep:
default: 1
description:
- Number of seconds to sleep between checks, before 2.3 this was hardcoded to 1 second.
msg:
description:
- This overrides the normal error message from a failure to meet the required conditions.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Wait for BIG-IP to be ready to take configuration
bigip_wait:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Wait a maximum of 300 seconds for BIG-IP to be ready to take configuration
bigip_wait:
timeout: 300
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Wait for BIG-IP to be ready, don't start checking for 10 seconds
bigip_wait:
delay: 10
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import datetime
import signal
import time
from ansible.module_utils.basic import AnsibleModule
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
def hard_timeout(module, want, start):
elapsed = datetime.datetime.utcnow() - start
module.fail_json(
want.msg or "Timeout when waiting for BIG-IP", elapsed=elapsed.seconds
)
class Parameters(AnsibleF5Parameters):
returnables = [
'elapsed'
]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
@property
def delay(self):
if self._values['delay'] is None:
return None
return int(self._values['delay'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def sleep(self):
if self._values['sleep'] is None:
return None
return int(self._values['sleep'])
class Changes(Parameters):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
self.changes = Parameters()
def exec_module(self):
result = dict()
try:
changed = self.execute()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def execute(self):
signal.signal(
signal.SIGALRM,
lambda sig, frame: hard_timeout(self.module, self.want, start)
)
# setup handler before scheduling signal, to eliminate a race
signal.alarm(int(self.want.timeout))
start = datetime.datetime.utcnow()
if self.want.delay:
time.sleep(float(self.want.delay))
end = start + datetime.timedelta(seconds=int(self.want.timeout))
while datetime.datetime.utcnow() < end:
time.sleep(int(self.want.sleep))
try:
# The first test verifies that the REST API is available; this is done
# by repeatedly trying to login to it.
self.client = F5Client(**self.module.params)
if not self.client:
continue
if self._device_is_rebooting():
# Wait for the reboot to happen and then start from the beginning
# of the waiting.
continue
if self._is_mprov_running_on_device():
self._wait_for_module_provisioning()
break
except Exception:
# The types of exception's we're handling here are "REST API is not
# ready" exceptions.
#
# For example,
#
# Typically caused by device starting up:
#
# icontrol.exceptions.iControlUnexpectedHTTPError: 404 Unexpected Error:
# Not Found for uri: https://localhost:10443/mgmt/tm/sys/
# icontrol.exceptions.iControlUnexpectedHTTPError: 503 Unexpected Error:
# Service Temporarily Unavailable for uri: https://localhost:10443/mgmt/tm/sys/
#
#
# Typically caused by a device being down
#
# requests.exceptions.SSLError: HTTPSConnectionPool(host='localhost', port=10443):
# Max retries exceeded with url: /mgmt/tm/sys/ (Caused by SSLError(
# SSLError("bad handshake: SysCallError(-1, 'Unexpected EOF')",),))
#
#
# Typically caused by device still booting
#
# raise SSLError(e, request=request)\nrequests.exceptions.SSLError:
# HTTPSConnectionPool(host='localhost', port=10443): Max retries
# exceeded with url: /mgmt/shared/authn/login (Caused by
# SSLError(SSLError(\"bad handshake: SysCallError(-1, 'Unexpected EOF')\",),)),
continue
else:
elapsed = datetime.datetime.utcnow() - start
self.module.fail_json(
msg=self.want.msg or "Timeout when waiting for BIG-IP", elapsed=elapsed.seconds
)
elapsed = datetime.datetime.utcnow() - start
self.changes.update({'elapsed': elapsed.seconds})
return False
def _device_is_rebooting(self):
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "runlevel"'
)
try:
# runlevel 6 means the system is shutting down to reboot
if '6' in output.commandResult:
return True
except AttributeError:
pass
return False
def _wait_for_module_provisioning(self):
# To prevent things from running forever, the hack is to check
# for mprov's status twice. If mprov is finished, then in most
# cases (not ASM) the provisioning is probably ready.
nops = 0
# Sleep a little to let provisioning settle and begin properly
time.sleep(5)
while nops < 4:
try:
if not self._is_mprov_running_on_device():
nops += 1
else:
nops = 0
except Exception:
# This can be caused by restjavad restarting.
pass
time.sleep(10)
def _is_mprov_running_on_device(self):
output = self.client.api.tm.util.bash.exec_cmd(
'run',
utilCmdArgs='-c "ps aux | grep \'[m]prov\'"'
)
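# The [m]prov pattern keeps grep from matching its own process entry, so the
# response only carries a commandResult while mprov itself is running.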
if hasattr(output, 'commandResult'):
return True
return False
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
timeout=dict(default=7200, type='int'),
delay=dict(default=0, type='int'),
sleep=dict(default=1, type='int'),
msg=dict()
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
GitAngel/django | refs/heads/master | tests/migrations/test_migrations_run_before/0003_third.py | 427 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
"""
This is a wee bit crazy, but it's just to show that run_before works.
"""
dependencies = [
("migrations", "0001_initial"),
]
run_before = [
("migrations", "0002_second"),
]
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
],
)
]
|
djrobstep/schemainspect | refs/heads/master | schemainspect/pg/obj.py | 1 | from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import sys
from collections import OrderedDict as od
from itertools import groupby
from sqlalchemy import text
from ..inspected import ColumnInfo, Inspected
from ..inspected import InspectedSelectable as BaseInspectedSelectable
from ..inspected import TableRelated
from ..inspector import DBInspector
from ..misc import quoted_identifier, resource_text
CREATE_TABLE = """create {}table {} ({}
){}{};
"""
CREATE_TABLE_SUBCLASS = """create {}table {} partition of {} {};
"""
CREATE_FUNCTION_FORMAT = """create or replace function {signature}
returns {result_string} as
$${definition}$$
language {language} {volatility} {strictness} {security_type};"""
ALL_RELATIONS_QUERY = resource_text("sql/relations.sql")
ALL_RELATIONS_QUERY_9 = resource_text("sql/relations9.sql")
SCHEMAS_QUERY = resource_text("sql/schemas.sql")
INDEXES_QUERY = resource_text("sql/indexes.sql")
SEQUENCES_QUERY = resource_text("sql/sequences.sql")
CONSTRAINTS_QUERY = resource_text("sql/constraints.sql")
FUNCTIONS_QUERY = resource_text("sql/functions.sql")
TYPES_QUERY = resource_text("sql/types.sql")
DOMAINS_QUERY = resource_text("sql/domains.sql")
EXTENSIONS_QUERY = resource_text("sql/extensions.sql")
ENUMS_QUERY = resource_text("sql/enums.sql")
DEPS_QUERY = resource_text("sql/deps.sql")
PRIVILEGES_QUERY = resource_text("sql/privileges.sql")
TRIGGERS_QUERY = resource_text("sql/triggers.sql")
COLLATIONS_QUERY = resource_text("sql/collations.sql")
COLLATIONS_QUERY_9 = resource_text("sql/collations9.sql")
RLSPOLICIES_QUERY = resource_text("sql/rlspolicies.sql")
class InspectedSelectable(BaseInspectedSelectable):
def has_compatible_columns(self, other):
def names_and_types(cols):
return [(k, c.dbtype) for k, c in cols.items()]
items = names_and_types(self.columns)
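# Tables and views may gain appended columns, so compare only the prefix that
# the old relation already has; functions must match exactly.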
if self.relationtype != "f":
old_arg_count = len(other.columns)
items = items[:old_arg_count]
return items == names_and_types(other.columns)
def can_replace(self, other):
if not (self.relationtype in ("v", "f") or self.is_table):
return False
if self.signature != other.signature:
return False
if self.relationtype != other.relationtype:
return False
return self.has_compatible_columns(other)
@property
def persistence_modifier(self):
if self.persistence == "t":
return "temporary "
elif self.persistence == "u":
return "unlogged "
else:
return ""
@property
def is_unlogged(self):
return self.persistence == "u"
@property
def create_statement(self):
n = self.quoted_full_name
if self.relationtype in ("r", "p"):
if not self.is_partitioning_child_table:
colspec = ",\n".join(
" " + c.creation_clause for c in self.columns.values()
)
if colspec:
colspec = "\n" + colspec
if self.partition_def:
partition_key = " partition by " + self.partition_def
inherits_clause = ""
elif self.parent_table:
inherits_clause = " inherits ({})".format(self.parent_table)
partition_key = ""
else:
partition_key = ""
inherits_clause = ""
create_statement = CREATE_TABLE.format(
self.persistence_modifier,
n,
colspec,
partition_key,
inherits_clause,
)
else:
create_statement = CREATE_TABLE_SUBCLASS.format(
self.persistence_modifier, n, self.parent_table, self.partition_def
)
elif self.relationtype == "v":
create_statement = "create or replace view {} as {}\n".format(
n, self.definition
)
elif self.relationtype == "m":
create_statement = "create materialized view {} as {}\n".format(
n, self.definition
)
elif self.relationtype == "c":
colspec = ", ".join(c.creation_clause for c in self.columns.values())
create_statement = "create type {} as ({});".format(n, colspec)
else:
raise NotImplementedError # pragma: no cover
return create_statement
@property
def drop_statement(self):
n = self.quoted_full_name
if self.relationtype in ("r", "p"):
drop_statement = "drop table {};".format(n)
elif self.relationtype == "v":
drop_statement = "drop view if exists {};".format(n)
elif self.relationtype == "m":
drop_statement = "drop materialized view if exists {};".format(n)
elif self.relationtype == "c":
drop_statement = "drop type {};".format(n)
else:
raise NotImplementedError # pragma: no cover
return drop_statement
def alter_table_statement(self, clause):
if self.is_alterable:
alter = "alter table {} {};".format(self.quoted_full_name, clause)
else:
raise NotImplementedError # pragma: no cover
return alter
@property
def is_partitioned(self):
return self.relationtype == "p"
@property
def is_inheritance_child_table(self):
return bool(self.parent_table) and not self.partition_def
@property
def is_table(self):
return self.relationtype in ("p", "r")
@property
def is_alterable(self):
return self.is_table and (
not self.parent_table or self.is_inheritance_child_table
)
@property
def contains_data(self):
return bool(
self.relationtype == "r" and (self.parent_table or not self.partition_def)
)
# for back-compat only
@property
def is_child_table(self):
return self.is_partitioning_child_table
@property
def is_partitioning_child_table(self):
return bool(
self.relationtype == "r" and self.parent_table and self.partition_def
)
@property
def uses_partitioning(self):
return self.is_partitioning_child_table or self.is_partitioned
@property
def attach_statement(self):
if self.parent_table:
if self.partition_def:
return "alter table {} attach partition {} {};".format(
self.quoted_full_name, self.parent_table, self.partition_spec
)
else:
return "alter table {} inherit {}".format(
self.quoted_full_name, self.parent_table
)
@property
def detach_statement(self):
if self.parent_table:
if self.partition_def:
return "alter table {} detach partition {};".format(
self.parent_table, self.quoted_full_name
)
else:
return "alter table {} no inherit {}".format(
self.quoted_full_name, self.parent_table
)
def attach_detach_statements(self, before):
slist = []
if self.parent_table != before.parent_table:
if before.parent_table:
slist.append(before.detach_statement)
if self.parent_table:
slist.append(self.attach_statement)
return slist
@property
def alter_rls_clause(self):
keyword = "enable" if self.rowsecurity else "disable"
return "{} row level security".format(keyword)
@property
def alter_rls_statement(self):
return self.alter_table_statement(self.alter_rls_clause)
@property
def alter_unlogged_statement(self):
keyword = "unlogged" if self.is_unlogged else "logged"
return self.alter_table_statement("set {}".format(keyword))
class InspectedFunction(InspectedSelectable):
def __init__(
self,
name,
schema,
columns,
inputs,
definition,
volatility,
strictness,
security_type,
identity_arguments,
result_string,
language,
full_definition,
comment,
returntype,
kind,
):
self.identity_arguments = identity_arguments
self.result_string = result_string
self.language = language
self.volatility = volatility
self.strictness = strictness
self.security_type = security_type
self.full_definition = full_definition
self.returntype = returntype
self.kind = kind
super(InspectedFunction, self).__init__(
name=name,
schema=schema,
columns=columns,
inputs=inputs,
definition=definition,
relationtype="f",
comment=comment,
)
@property
def returntype_is_table(self):
return "." in self.returntype
@property
def signature(self):
return "{}({})".format(self.quoted_full_name, self.identity_arguments)
@property
def create_statement(self):
return self.full_definition + ";"
"""
return CREATE_FUNCTION_FORMAT.format(
signature=self.signature,
result_string=self.result_string,
definition=self.definition,
language=self.language,
volatility=self.volatility,
strictness=self.strictness,
security_type=self.security_type,
)
"""
@property
def thing(self):
kinds = dict(f="function", p="procedure", a="aggregate", w="window function")
return kinds[self.kind]
@property
def drop_statement(self):
return "drop {} if exists {};".format(self.thing, self.signature)
def __eq__(self, other):
return (
self.signature == other.signature
and self.result_string == other.result_string
and self.definition == other.definition
and self.language == other.language
and self.volatility == other.volatility
and self.strictness == other.strictness
and self.security_type == other.security_type
and self.kind == other.kind
)
class InspectedTrigger(Inspected):
def __init__(
self, name, schema, table_name, proc_schema, proc_name, enabled, full_definition
):
(
self.name,
self.schema,
self.table_name,
self.proc_schema,
self.proc_name,
self.enabled,
self.full_definition,
) = (name, schema, table_name, proc_schema, proc_name, enabled, full_definition)
self.dependent_on = [self.quoted_full_selectable_name]
self.dependents = []
@property
def signature(self):
return self.quoted_full_name
@property
def quoted_full_name(self):
return "{}.{}.{}".format(
quoted_identifier(self.schema),
quoted_identifier(self.table_name),
quoted_identifier(self.name),
)
@property
def quoted_full_selectable_name(self):
return "{}.{}".format(
quoted_identifier(self.schema), quoted_identifier(self.table_name)
)
@property
def drop_statement(self):
return 'drop trigger if exists "{}" on "{}"."{}";'.format(
self.name, self.schema, self.table_name
)
@property
def create_statement(self):
return self.full_definition + ";"
def __eq__(self, other):
"""
:type other: InspectedTrigger
:rtype: bool
"""
return (
self.name == other.name
and self.schema == other.schema
and self.table_name == other.table_name
and self.proc_schema == other.proc_schema
and self.proc_name == other.proc_name
and self.enabled == other.enabled
and self.full_definition == other.full_definition
)
class InspectedIndex(Inspected, TableRelated):
def __init__(
self,
name,
schema,
table_name,
key_columns,
key_options,
num_att,
is_unique,
is_pk,
is_exclusion,
is_immediate,
is_clustered,
key_collations,
key_expressions,
partial_predicate,
algorithm,
definition=None,
constraint=None,
index_columns=None,
included_columns=None,
):
self.name = name
self.schema = schema
self.definition = definition
self.table_name = table_name
self.key_columns = key_columns
self.key_options = key_options
self.num_att = num_att
self.is_unique = is_unique
self.is_pk = is_pk
self.is_exclusion = is_exclusion
self.is_immediate = is_immediate
self.is_clustered = is_clustered
self.key_collations = key_collations
self.key_expressions = key_expressions
self.partial_predicate = partial_predicate
self.algorithm = algorithm
self.constraint = constraint
self.index_columns = index_columns
self.included_columns = included_columns
@property
def drop_statement(self):
return "drop index if exists {};".format(self.quoted_full_name)
@property
def create_statement(self):
return "{};".format(self.definition)
def __eq__(self, other):
"""
:type other: InspectedIndex
:rtype: bool
"""
equalities = (
self.name == other.name,
self.schema == other.schema,
self.table_name == other.table_name,
self.key_columns == other.key_columns,
self.included_columns == other.included_columns,
self.key_options == other.key_options,
self.num_att == other.num_att,
self.is_unique == other.is_unique,
self.is_pk == other.is_pk,
self.is_exclusion == other.is_exclusion,
self.is_immediate == other.is_immediate,
self.is_clustered == other.is_clustered,
self.key_collations == other.key_collations,
self.key_expressions == other.key_expressions,
self.partial_predicate == other.partial_predicate,
self.algorithm == other.algorithm
# self.constraint == other.constraint
)
return all(equalities)
class InspectedSequence(Inspected):
def __init__(self, name, schema, table_name=None, column_name=None):
self.name = name
self.schema = schema
self.table_name = table_name
self.column_name = column_name
@property
def drop_statement(self):
return "drop sequence if exists {};".format(self.quoted_full_name)
@property
def create_statement(self):
return "create sequence {};".format(self.quoted_full_name)
@property
def create_statement_with_ownership(self):
t_col_name = self.quoted_table_and_column_name
if self.table_name and self.column_name:
return "create sequence {} owned by {};".format(
self.quoted_full_name, t_col_name
)
else:
return "create sequence {};".format(self.quoted_full_name)
@property
def alter_ownership_statement(self):
t_col_name = self.quoted_table_and_column_name
if t_col_name is not None:
return "alter sequence {} owned by {};".format(
self.quoted_full_name, t_col_name
)
else:
return "alter sequence {} owned by none;".format(self.quoted_full_name)
@property
def quoted_full_table_name(self):
if self.table_name is not None:
return quoted_identifier(self.table_name, self.schema)
@property
def quoted_table_and_column_name(self):
if self.column_name is not None and self.table_name is not None:
return (
self.quoted_full_table_name + "." + quoted_identifier(self.column_name)
)
def __eq__(self, other):
equalities = (
self.name == other.name,
self.schema == other.schema,
self.quoted_table_and_column_name == other.quoted_table_and_column_name,
)
return all(equalities)
class InspectedCollation(Inspected):
def __init__(self, name, schema, provider, encoding, lc_collate, lc_ctype, version):
self.name = name
self.schema = schema
self.provider = provider
self.lc_collate = lc_collate
self.lc_ctype = lc_ctype
self.encoding = encoding
self.version = version
@property
def locale(self):
return self.lc_collate
@property
def drop_statement(self):
return "drop collation if exists {};".format(self.quoted_full_name)
@property
def create_statement(self):
return "create collation if not exists {} (provider = '{}', locale = '{}');".format(
self.quoted_full_name, self.provider, self.locale
)
def __eq__(self, other):
equalities = (
self.name == other.name,
self.schema == other.schema,
self.provider == other.provider,
self.locale == other.locale,
)
return all(equalities)
class InspectedEnum(Inspected):
def __init__(self, name, schema, elements, pg_version=None):
self.name = name
self.schema = schema
self.elements = elements
self.pg_version = pg_version
self.dependents = []
self.dependent_on = []
@property
def drop_statement(self):
return "drop type {};".format(self.quoted_full_name)
@property
def create_statement(self):
return "create type {} as enum ({});".format(
self.quoted_full_name, self.quoted_elements
)
@property
def quoted_elements(self):
quoted = ["'{}'".format(e) for e in self.elements]
return ", ".join(quoted)
def alter_rename_statement(self, new_name):
name = new_name
return "alter type {} rename to {};".format(
self.quoted_full_name, quoted_identifier(name)
)
def drop_statement_with_rename(self, new_name):
name = new_name
new_name = quoted_identifier(name, self.schema)
return "drop type {};".format(new_name)
def change_statements(self, new):
if not self.can_be_changed_to(new):
raise ValueError
new = new.elements
old = self.elements
statements = []
previous = None
for c in new:
if c not in old:
if not previous:
s = "alter type {} add value '{}' before '{}';".format(
self.quoted_full_name, c, old[0]
)
else:
s = "alter type {} add value '{}' after '{}';".format(
self.quoted_full_name, c, previous
)
statements.append(s)
previous = c
return statements
def can_be_changed_to(self, new, when_within_transaction=False):
old = self.elements
if when_within_transaction and self.pg_version and self.pg_version < 12:
return False
# new must already have the existing items from old, in the same order
return [e for e in new.elements if e in old] == old
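    # Illustrative sketch (hypothetical element values): with old elements
    # ['a', 'c'] and new elements ['a', 'b', 'c', 'd'], change_statements
    # would emit, in order:
    #   alter type <quoted_full_name> add value 'b' after 'a';
    #   alter type <quoted_full_name> add value 'd' after 'c';
    # can_be_changed_to is False when an existing element was removed or
    # reordered, since PostgreSQL only supports adding new enum values.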
def __eq__(self, other):
equalities = (
self.name == other.name,
self.schema == other.schema,
self.elements == other.elements,
)
return all(equalities)
class InspectedSchema(Inspected):
def __init__(self, schema):
self.schema = schema
self.name = None
@property
def create_statement(self):
return "create schema if not exists {};".format(self.quoted_schema)
@property
def drop_statement(self):
return "drop schema if exists {};".format(self.quoted_schema)
@property
def quoted_full_name(self):
return self.quoted_name
@property
def quoted_name(self):
return quoted_identifier(self.schema)
def __eq__(self, other):
return self.schema == other.schema
class InspectedType(Inspected):
def __init__(self, name, schema, columns):
self.name = name
self.schema = schema
self.columns = columns
@property
def drop_statement(self):
return "drop type {};".format(self.signature)
@property
def create_statement(self):
sql = "create type {} as (\n".format(self.signature)
indent = " " * 4
typespec = [
"{}{} {}".format(indent, quoted_identifier(name), _type)
for name, _type in self.columns.items()
]
sql += ",\n".join(typespec)
sql += "\n);"
return sql
def __eq__(self, other):
return (
self.schema == other.schema
and self.name == other.name
and self.columns == other.columns
)
class InspectedDomain(Inspected):
def __init__(
self,
name,
schema,
data_type,
collation,
constraint_name,
not_null,
default,
check,
):
self.name = name
self.schema = schema
self.data_type = data_type
self.collation = collation
self.constraint_name = constraint_name
self.not_null = not_null
self.default = default
self.check = check
@property
def drop_statement(self):
return "drop domain {};".format(self.signature)
@property
def create_statement(self):
T = """\
create domain {name}
as {_type}
{collation}{default}{nullable}{check}
"""
sql = T.format(
name=self.signature,
_type=self.data_type,
collation=self.collation_clause,
default=self.default_clause,
check=self.check_clause,
nullable=self.nullable_clause,
)
return sql
@property
def check_clause(self):
if self.check:
return "{}\n".format(self.check)
return ""
@property
def collation_clause(self):
if self.collation:
return "collation {}\n".format(self.collation)
return ""
@property
def default_clause(self):
if self.default:
return "default {}\n".format(self.default)
return ""
@property
def nullable_clause(self):
if self.not_null:
return "not null\n"
else:
return "null\n"
equality_attributes = (
"schema name data_type collation default constraint_name not_null check".split()
)
def __eq__(self, other):
try:
return all(
[
getattr(self, a) == getattr(other, a)
for a in self.equality_attributes
]
)
except AttributeError:
return False
class InspectedExtension(Inspected):
def __init__(self, name, schema, version):
self.name = name
self.schema = schema
self.version = version
@property
def drop_statement(self):
return "drop extension if exists {};".format(self.quoted_name)
@property
def create_statement(self):
return "create extension if not exists {} with schema {} version '{}';".format(
self.quoted_name, self.quoted_schema, self.version
)
@property
def update_statement(self):
return "alter extension {} update to '{}';".format(
self.quoted_name, self.version
)
def alter_statements(self, other=None):
return [self.update_statement]
def __eq__(self, other):
equalities = (
self.name == other.name,
self.schema == other.schema,
self.version == other.version,
)
return all(equalities)
class InspectedConstraint(Inspected, TableRelated):
def __init__(
self,
name,
schema,
constraint_type,
table_name,
definition,
index,
is_fk=False,
is_deferrable=False,
initially_deferred=False,
):
self.name = name
self.schema = schema
self.constraint_type = constraint_type
self.table_name = table_name
self.definition = definition
self.index = index
self.is_fk = is_fk
self.quoted_full_foreign_table_name = None
self.fk_columns_local = None
self.fk_columns_foreign = None
self.is_deferrable = is_deferrable
self.initially_deferred = initially_deferred
@property
def drop_statement(self):
return "alter table {} drop constraint {};".format(
self.quoted_full_table_name, self.quoted_name
)
@property
def deferrable_subclause(self):
# [ DEFERRABLE | NOT DEFERRABLE ] [ INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
if not self.is_deferrable:
return ""
else:
clause = " DEFERRABLE"
if self.initially_deferred:
clause += " INITIALLY DEFERRED"
return clause
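    # Example: is_deferrable=True with initially_deferred=True yields
    # " DEFERRABLE INITIALLY DEFERRED"; is_deferrable=False yields the
    # empty string, i.e. the constraint defaults to NOT DEFERRABLE,
    # matching the PostgreSQL grammar quoted above.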
@property
def create_statement(self):
if self.index:
using_clause = "{} using index {}{}".format(
self.constraint_type, self.quoted_name, self.deferrable_subclause
)
else:
using_clause = self.definition
USING = "alter table {} add constraint {} {};"
return USING.format(self.quoted_full_table_name, self.quoted_name, using_clause)
@property
def quoted_full_name(self):
return "{}.{}.{}".format(
quoted_identifier(self.schema),
quoted_identifier(self.table_name),
quoted_identifier(self.name),
)
@property
def quoted_full_table_name(self):
return "{}.{}".format(
quoted_identifier(self.schema), quoted_identifier(self.table_name)
)
def __eq__(self, other):
equalities = (
self.name == other.name,
self.schema == other.schema,
self.table_name == other.table_name,
self.definition == other.definition,
self.index == other.index,
self.is_deferrable == other.is_deferrable,
self.initially_deferred == other.initially_deferred,
)
return all(equalities)
class InspectedPrivilege(Inspected):
def __init__(self, object_type, schema, name, privilege, target_user):
self.schema = schema
self.object_type = object_type
self.name = name
self.privilege = privilege.lower()
self.target_user = target_user
@property
def quoted_target_user(self):
return quoted_identifier(self.target_user)
@property
def drop_statement(self):
return "revoke {} on {} {} from {};".format(
self.privilege,
self.object_type,
self.quoted_full_name,
self.quoted_target_user,
)
@property
def create_statement(self):
return "grant {} on {} {} to {};".format(
self.privilege,
self.object_type,
self.quoted_full_name,
self.quoted_target_user,
)
def __eq__(self, other):
equalities = (
self.schema == other.schema,
self.object_type == other.object_type,
self.name == other.name,
self.privilege == other.privilege,
self.target_user == other.target_user,
)
return all(equalities)
@property
def key(self):
return self.object_type, self.quoted_full_name, self.target_user, self.privilege
RLS_POLICY_CREATE = """create policy {name}
on {table_name}
as {permissiveness}
for {commandtype_keyword}
to {roleslist}{qual_clause}{withcheck_clause};
"""
COMMANDTYPES = {"*": "all", "r": "select", "a": "insert", "w": "update", "d": "delete"}
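# Illustrative rendering (hypothetical names): a permissive select policy
# "p1" on "s"."t" granted to role r1, with qual expression QUAL and no
# with-check expression, fills RLS_POLICY_CREATE roughly as:
#
#   create policy "p1"
#       on "s"."t"
#       as permissive
#       for select
#       to r1
#   using (QUAL);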
class InspectedRowPolicy(Inspected, TableRelated):
def __init__(
self, name, schema, table_name, commandtype, permissive, roles, qual, withcheck
):
self.name = name
self.schema = schema
self.table_name = table_name
self.commandtype = commandtype
self.permissive = permissive
self.roles = roles
self.qual = qual
self.withcheck = withcheck
@property
def permissiveness(self):
return "permissive" if self.permissive else "restrictive"
@property
def commandtype_keyword(self):
return COMMANDTYPES[self.commandtype]
@property
def key(self):
return "{}.{}".format(self.quoted_full_table_name, self.quoted_name)
@property
def create_statement(self):
if self.qual:
qual_clause = "\nusing ({})".format(self.qual)
else:
qual_clause = ""
if self.withcheck:
withcheck_clause = "\nwith check {}".format(self.withcheck)
else:
withcheck_clause = ""
roleslist = ", ".join(self.roles)
return RLS_POLICY_CREATE.format(
name=self.quoted_name,
table_name=self.quoted_full_table_name,
permissiveness=self.permissiveness,
commandtype_keyword=self.commandtype_keyword,
roleslist=roleslist,
qual_clause=qual_clause,
withcheck_clause=withcheck_clause,
)
@property
def drop_statement(self):
return "drop policy {} on {};".format(
self.quoted_name, self.quoted_full_table_name
)
def __eq__(self, other):
equalities = (
            self.name == other.name,
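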
self.schema == other.schema,
self.permissiveness == other.permissiveness,
self.commandtype == other.commandtype,
self.permissive == other.permissive,
self.roles == other.roles,
self.qual == other.qual,
self.withcheck == other.withcheck,
)
return all(equalities)
PROPS = "schemas relations tables views functions selectables sequences constraints indexes enums extensions privileges collations triggers"
class PostgreSQL(DBInspector):
def __init__(self, c, include_internal=False):
pg_version = c.dialect.server_version_info[0]
self.pg_version = pg_version
def processed(q):
if not include_internal:
q = q.replace("-- SKIP_INTERNAL", "")
if self.pg_version >= 11:
q = q.replace("-- 11_AND_LATER", "")
else:
q = q.replace("-- 10_AND_EARLIER", "")
q = text(q)
return q
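        # The replacements above work by uncommenting version-gated SQL:
        # a line written as, say,
        #     -- 11_AND_LATER some_pg11_only_expression,
        # stays a comment on PG 10 and earlier, but becomes live SQL on
        # PG 11+ once the "-- 11_AND_LATER" marker text is stripped.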
if pg_version <= 9:
self.ALL_RELATIONS_QUERY = processed(ALL_RELATIONS_QUERY_9)
self.COLLATIONS_QUERY = processed(COLLATIONS_QUERY_9)
self.RLSPOLICIES_QUERY = None
else:
all_relations_query = ALL_RELATIONS_QUERY
if pg_version >= 12:
replace = "-- 12_ONLY"
else:
replace = "-- PRE_12"
all_relations_query = all_relations_query.replace(replace, "")
self.ALL_RELATIONS_QUERY = processed(all_relations_query)
self.COLLATIONS_QUERY = processed(COLLATIONS_QUERY)
self.RLSPOLICIES_QUERY = processed(RLSPOLICIES_QUERY)
self.INDEXES_QUERY = processed(INDEXES_QUERY)
self.SEQUENCES_QUERY = processed(SEQUENCES_QUERY)
self.CONSTRAINTS_QUERY = processed(CONSTRAINTS_QUERY)
self.FUNCTIONS_QUERY = processed(FUNCTIONS_QUERY)
self.TYPES_QUERY = processed(TYPES_QUERY)
self.DOMAINS_QUERY = processed(DOMAINS_QUERY)
self.EXTENSIONS_QUERY = processed(EXTENSIONS_QUERY)
self.ENUMS_QUERY = processed(ENUMS_QUERY)
self.DEPS_QUERY = processed(DEPS_QUERY)
self.SCHEMAS_QUERY = processed(SCHEMAS_QUERY)
self.PRIVILEGES_QUERY = processed(PRIVILEGES_QUERY)
self.TRIGGERS_QUERY = processed(TRIGGERS_QUERY)
super(PostgreSQL, self).__init__(c, include_internal)
def load_all(self):
self.load_schemas()
self.load_all_relations()
self.load_functions()
self.selectables = od()
self.selectables.update(self.relations)
self.selectables.update(self.functions)
self.load_privileges()
self.load_triggers()
self.load_collations()
self.load_rlspolicies()
self.load_types()
self.load_domains()
self.load_deps()
self.load_deps_all()
def load_schemas(self):
q = self.c.execute(self.SCHEMAS_QUERY)
schemas = [InspectedSchema(schema=each.schema) for each in q]
self.schemas = od((schema.schema, schema) for schema in schemas)
def load_rlspolicies(self):
if self.pg_version <= 9:
self.rlspolicies = od()
return
q = self.c.execute(self.RLSPOLICIES_QUERY)
rlspolicies = [
InspectedRowPolicy(
name=p.name,
schema=p.schema,
table_name=p.table_name,
commandtype=p.commandtype,
permissive=p.permissive,
roles=p.roles,
qual=p.qual,
withcheck=p.withcheck,
)
for p in q
]
self.rlspolicies = od((p.key, p) for p in rlspolicies)
def load_collations(self):
q = self.c.execute(self.COLLATIONS_QUERY)
collations = [
InspectedCollation(
schema=i.schema,
name=i.name,
provider=i.provider,
encoding=i.encoding,
lc_collate=i.lc_collate,
lc_ctype=i.lc_ctype,
version=i.version,
)
for i in q
]
self.collations = od((i.quoted_full_name, i) for i in collations)
def load_privileges(self):
q = self.c.execute(self.PRIVILEGES_QUERY)
privileges = [
InspectedPrivilege(
object_type=i.object_type,
schema=i.schema,
name=i.name,
privilege=i.privilege,
target_user=i.user,
)
for i in q
]
self.privileges = od((i.key, i) for i in privileges)
def load_deps(self):
q = self.c.execute(self.DEPS_QUERY)
self.deps = list(q)
for dep in self.deps:
x = quoted_identifier(dep.name, dep.schema, dep.identity_arguments)
x_dependent_on = quoted_identifier(
dep.name_dependent_on,
dep.schema_dependent_on,
dep.identity_arguments_dependent_on,
)
self.selectables[x].dependent_on.append(x_dependent_on)
self.selectables[x].dependent_on.sort()
self.selectables[x_dependent_on].dependents.append(x)
self.selectables[x_dependent_on].dependents.sort()
for k, t in self.triggers.items():
for dep_name in t.dependent_on:
try:
dependency = self.selectables[dep_name]
except KeyError:
continue
dependency.dependents.append(k)
for k, r in self.relations.items():
for kc, c in r.columns.items():
if c.is_enum:
e_sig = c.enum.signature
if e_sig in self.enums:
r.dependent_on.append(e_sig)
c.enum.dependents.append(k)
if r.parent_table:
pt = self.relations[r.parent_table]
r.dependent_on.append(r.parent_table)
pt.dependents.append(r.signature)
def get_dependency_by_signature(self, signature):
things = [self.selectables, self.enums, self.triggers]
for thing in things:
try:
return thing[signature]
except KeyError:
continue
def load_deps_all(self):
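        # Computes the transitive closure of each selectable's dependency
        # links: get_related_for_item recursively follows the named attribute
        # ("dependent_on" or "dependents") and returns the item's own
        # signature followed by everything reachable from it, so the [1:]
        # slices below drop the item itself from its own closure.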
def get_related_for_item(item, att):
related = [self.get_dependency_by_signature(_) for _ in getattr(item, att)]
return [item.signature] + [
_ for d in related for _ in get_related_for_item(d, att)
]
for k, x in self.selectables.items():
d_all = get_related_for_item(x, "dependent_on")[1:]
d_all.sort()
x.dependent_on_all = d_all
d_all = get_related_for_item(x, "dependents")[1:]
d_all.sort()
x.dependents_all = d_all
def dependency_order(
self,
drop_order=False,
selectables=True,
triggers=True,
enums=True,
include_fk_deps=False,
):
if sys.version_info < (3, 0):
raise NotImplementedError
from schemainspect import TopologicalSorter
graph, things = {}, {}
if enums:
things.update(self.enums)
if selectables:
things.update(self.selectables)
if triggers:
things.update(self.triggers)
        for k, x in things.items():
            dependent_on = list(x.dependent_on)
            if k in self.tables and x.parent_table:
                dependent_on.append(x.parent_table)
            # use the augmented list; assigning list(x.dependent_on) here
            # would silently discard the parent-table edge added just above
            graph[k] = dependent_on
        if include_fk_deps:
            fk_deps = {}
            for k, x in self.constraints.items():
                if x.is_fk:
                    t, other_t = (
                        x.quoted_full_table_name,
                        x.quoted_full_foreign_table_name,
                    )
                    # accumulate every FK target; a plain assignment would
                    # drop all but the last foreign key on a table
                    fk_deps.setdefault(t, []).append(other_t)
            # merge into the graph rather than overwriting a table's
            # existing dependency edges
            for t, deps in fk_deps.items():
                graph.setdefault(t, []).extend(deps)
ts = TopologicalSorter(graph)
ordering = []
ts.prepare()
while ts.is_active():
items = ts.get_ready()
itemslist = list(items)
# itemslist.sort()
ordering += itemslist
ts.done(*items)
if drop_order:
ordering.reverse()
return ordering
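    # Minimal usage sketch (assuming `pg` is an inspector built from a live
    # connection):
    #
    #   creation_order = pg.dependency_order()
    #   drop_order = pg.dependency_order(drop_order=True)
    #
    # Each entry is a quoted signature key into self.selectables,
    # self.enums, or self.triggers.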
@property
def partitioned_tables(self):
return od((k, v) for k, v in self.tables.items() if v.is_partitioned)
@property
def alterable_tables(self): # ordinary tables and parent tables
return od((k, v) for k, v in self.tables.items() if v.is_alterable)
@property
def data_tables(self): # ordinary tables and child tables
return od((k, v) for k, v in self.tables.items() if v.contains_data)
@property
def partitioning_child_tables(self):
return od(
(k, v) for k, v in self.tables.items() if v.is_partitioning_child_table
)
@property
def tables_using_partitioning(self):
return od((k, v) for k, v in self.tables.items() if v.uses_partitioning)
@property
def tables_not_using_partitioning(self):
return od((k, v) for k, v in self.tables.items() if not v.uses_partitioning)
def load_all_relations(self):
self.tables = od()
self.views = od()
self.materialized_views = od()
self.composite_types = od()
q = self.c.execute(self.ENUMS_QUERY)
enumlist = [
InspectedEnum(
name=i.name,
schema=i.schema,
elements=i.elements,
pg_version=self.pg_version,
)
for i in q
]
self.enums = od((i.quoted_full_name, i) for i in enumlist)
q = self.c.execute(self.ALL_RELATIONS_QUERY)
for _, g in groupby(q, lambda x: (x.relationtype, x.schema, x.name)):
clist = list(g)
f = clist[0]
def get_enum(name, schema):
if not name and not schema:
return None
quoted_full_name = "{}.{}".format(
quoted_identifier(schema), quoted_identifier(name)
)
return self.enums[quoted_full_name]
columns = [
ColumnInfo(
name=c.attname,
dbtype=c.datatype,
dbtypestr=c.datatypestring,
pytype=self.to_pytype(c.datatype),
default=c.defaultdef,
not_null=c.not_null,
is_enum=c.is_enum,
enum=get_enum(c.enum_name, c.enum_schema),
collation=c.collation,
is_identity=c.is_identity,
is_identity_always=c.is_identity_always,
is_generated=c.is_generated,
)
for c in clist
if c.position_number
]
s = InspectedSelectable(
name=f.name,
schema=f.schema,
columns=od((c.name, c) for c in columns),
relationtype=f.relationtype,
definition=f.definition,
comment=f.comment,
parent_table=f.parent_table,
partition_def=f.partition_def,
rowsecurity=f.rowsecurity,
forcerowsecurity=f.forcerowsecurity,
persistence=f.persistence,
)
RELATIONTYPES = {
"r": "tables",
"v": "views",
"m": "materialized_views",
"c": "composite_types",
"p": "tables",
}
att = getattr(self, RELATIONTYPES[f.relationtype])
att[s.quoted_full_name] = s
for k, t in self.tables.items():
if t.is_inheritance_child_table:
parent_table = self.tables[t.parent_table]
for cname, c in t.columns.items():
if cname in parent_table.columns:
c.is_inherited = True
self.relations = od()
for x in (self.tables, self.views, self.materialized_views):
self.relations.update(x)
q = self.c.execute(self.INDEXES_QUERY)
indexlist = [
InspectedIndex(
name=i.name,
schema=i.schema,
definition=i.definition,
table_name=i.table_name,
key_columns=i.key_columns,
index_columns=i.index_columns,
included_columns=i.included_columns,
key_options=i.key_options,
num_att=i.num_att,
is_unique=i.is_unique,
is_pk=i.is_pk,
is_exclusion=i.is_exclusion,
is_immediate=i.is_immediate,
is_clustered=i.is_clustered,
key_collations=i.key_collations,
key_expressions=i.key_expressions,
partial_predicate=i.partial_predicate,
algorithm=i.algorithm,
)
for i in q
]
self.indexes = od((i.quoted_full_name, i) for i in indexlist)
q = self.c.execute(self.SEQUENCES_QUERY)
sequencelist = [
InspectedSequence(
name=i.name,
schema=i.schema,
table_name=i.table_name,
column_name=i.column_name,
)
for i in q
]
self.sequences = od((i.quoted_full_name, i) for i in sequencelist)
q = self.c.execute(self.CONSTRAINTS_QUERY)
constraintlist = []
for i in q:
constraint = InspectedConstraint(
name=i.name,
schema=i.schema,
constraint_type=i.constraint_type,
table_name=i.table_name,
definition=i.definition,
index=i['index'],
is_fk=i.is_fk,
is_deferrable=i.is_deferrable,
initially_deferred=i.initially_deferred,
)
if constraint.index:
index_name = quoted_identifier(constraint.index, schema=i.schema)
index = self.indexes[index_name]
index.constraint = constraint
constraint.index = index
if constraint.is_fk:
constraint.quoted_full_foreign_table_name = quoted_identifier(
i.foreign_table_name, schema=i.foreign_table_schema
)
constraint.fk_columns_foreign = i.fk_columns_foreign
constraint.fk_columns_local = i.fk_columns_local
constraintlist.append(constraint)
self.constraints = od((i.quoted_full_name, i) for i in constraintlist)
q = self.c.execute(self.EXTENSIONS_QUERY)
extensionlist = [
InspectedExtension(name=i.name, schema=i.schema, version=i.version)
for i in q
]
# extension names are unique per-database rather than per-schema like other things (even though extensions are assigned to a particular schema)
self.extensions = od((i.name, i) for i in extensionlist)
# add indexes and constraints to each table
for each in self.indexes.values():
t = each.quoted_full_table_name
n = each.quoted_full_name
self.relations[t].indexes[n] = each
for each in self.constraints.values():
t = each.quoted_full_table_name
n = each.quoted_full_name
self.relations[t].constraints[n] = each
def load_functions(self):
self.functions = od()
q = self.c.execute(self.FUNCTIONS_QUERY)
for _, g in groupby(q, lambda x: (x.schema, x.name, x.identity_arguments)):
clist = list(g)
f = clist[0]
outs = [c for c in clist if c.parameter_mode == "OUT"]
if outs:
columns = [
ColumnInfo(
name=c.parameter_name,
dbtype=c.data_type,
pytype=self.to_pytype(c.data_type),
)
for c in outs
]
else:
columns = [
ColumnInfo(
name=f.name,
dbtype=f.data_type,
pytype=self.to_pytype(f.returntype),
default=f.parameter_default,
)
]
plist = [
ColumnInfo(
name=c.parameter_name,
dbtype=c.data_type,
pytype=self.to_pytype(c.data_type),
default=c.parameter_default,
)
for c in clist
if c.parameter_mode == "IN"
]
s = InspectedFunction(
schema=f.schema,
name=f.name,
columns=od((c.name, c) for c in columns),
inputs=plist,
identity_arguments=f.identity_arguments,
result_string=f.result_string,
language=f.language,
definition=f.definition,
strictness=f.strictness,
security_type=f.security_type,
volatility=f.volatility,
full_definition=f.full_definition,
comment=f.comment,
returntype=f.returntype,
kind=f.kind,
)
identity_arguments = "({})".format(s.identity_arguments)
self.functions[s.quoted_full_name + identity_arguments] = s
def load_triggers(self):
q = self.c.execute(self.TRIGGERS_QUERY)
triggers = [
InspectedTrigger(
i.name,
i.schema,
i.table_name,
i.proc_schema,
i.proc_name,
i.enabled,
i.full_definition,
)
for i in q
] # type: list[InspectedTrigger]
self.triggers = od((t.signature, t) for t in triggers)
def load_types(self):
q = self.c.execute(self.TYPES_QUERY)
def col(defn):
return defn["attribute"], defn["type"]
types = [
InspectedType(i.name, i.schema, dict(col(_) for _ in i.columns)) for i in q
] # type: list[InspectedType]
self.types = od((t.signature, t) for t in types)
def load_domains(self):
q = self.c.execute(self.DOMAINS_QUERY)
def col(defn):
return defn["attribute"], defn["type"]
domains = [
InspectedDomain(
i.name,
i.schema,
i.data_type,
i.collation,
i.constraint_name,
i.not_null,
i.default,
i.check,
)
for i in q
        ] # type: list[InspectedDomain]
self.domains = od((t.signature, t) for t in domains)
def filter_schema(self, schema=None, exclude_schema=None):
if schema and exclude_schema:
raise ValueError("Can only have schema or exclude schema, not both")
def equal_to_schema(x):
return x.schema == schema
def not_equal_to_exclude_schema(x):
return x.schema != exclude_schema
if schema:
comparator = equal_to_schema
elif exclude_schema:
comparator = not_equal_to_exclude_schema
else:
            raise ValueError("schema or exclude_schema must not be None")
for prop in PROPS.split():
att = getattr(self, prop)
filtered = {k: v for k, v in att.items() if comparator(v)}
setattr(self, prop, filtered)
def _as_dicts(self):
def obj_to_d(x):
if isinstance(x, dict):
return {k: obj_to_d(v) for k, v in x.items()}
elif isinstance(x, (ColumnInfo, Inspected)):
return {
k: obj_to_d(getattr(x, k))
for k in dir(x)
if not k.startswith("_") and not inspect.ismethod(getattr(x, k))
}
else:
return str(x)
d = {}
for prop in PROPS.split():
att = getattr(self, prop)
_d = {k: obj_to_d(v) for k, v in att.items()}
d[prop] = _d
return d
def one_schema(self, schema):
self.filter_schema(schema=schema)
def exclude_schema(self, schema):
self.filter_schema(exclude_schema=schema)
def __eq__(self, other):
"""
:type other: PostgreSQL
:rtype: bool
"""
return (
type(self) == type(other)
and self.schemas == other.schemas
and self.relations == other.relations
and self.sequences == other.sequences
and self.enums == other.enums
and self.constraints == other.constraints
and self.extensions == other.extensions
and self.functions == other.functions
and self.triggers == other.triggers
and self.collations == other.collations
and self.rlspolicies == other.rlspolicies
)
|
dwadler/QGIS | refs/heads/master | python/plugins/processing/algs/grass7/Grass7AlgorithmProvider.py | 1 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Grass7AlgorithmProvider.py
---------------------
Date : April 2014
Copyright : (C) 2014 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2014'
__copyright__ = '(C) 2014, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (Qgis,
QgsApplication,
QgsProcessingProvider,
QgsVectorFileWriter,
QgsMessageLog,
QgsProcessingUtils)
from processing.core.ProcessingConfig import (ProcessingConfig, Setting)
from .Grass7Utils import Grass7Utils
from .Grass7Algorithm import Grass7Algorithm
from processing.tools.system import isWindows, isMac
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class Grass7AlgorithmProvider(QgsProcessingProvider):
def __init__(self):
super().__init__()
self.algs = []
def load(self):
ProcessingConfig.settingIcons[self.name()] = self.icon()
ProcessingConfig.addSetting(Setting(self.name(), 'ACTIVATE_GRASS7',
self.tr('Activate'), True))
if isMac():
ProcessingConfig.addSetting(Setting(
self.name(),
Grass7Utils.GRASS_FOLDER, self.tr('GRASS7 folder'),
Grass7Utils.grassPath(), valuetype=Setting.FOLDER))
ProcessingConfig.addSetting(Setting(
self.name(),
Grass7Utils.GRASS_LOG_COMMANDS,
self.tr('Log execution commands'), False))
ProcessingConfig.addSetting(Setting(
self.name(),
Grass7Utils.GRASS_LOG_CONSOLE,
self.tr('Log console output'), False))
ProcessingConfig.addSetting(Setting(
self.name(),
Grass7Utils.GRASS_HELP_PATH,
self.tr('Location of GRASS docs'),
Grass7Utils.grassHelpPath()))
# Add a setting for using v.external instead of v.in.ogr
# But set it to False by default because some algorithms
# can't be used with external data (need a solid v.in.ogr).
ProcessingConfig.addSetting(Setting(
self.name(),
Grass7Utils.GRASS_USE_VEXTERNAL,
self.tr('For vector layers, use v.external (faster) instead of v.in.ogr'),
False))
ProcessingConfig.readSettings()
self.refreshAlgorithms()
return True
def unload(self):
ProcessingConfig.removeSetting('ACTIVATE_GRASS7')
if isMac():
ProcessingConfig.removeSetting(Grass7Utils.GRASS_FOLDER)
ProcessingConfig.removeSetting(Grass7Utils.GRASS_LOG_COMMANDS)
ProcessingConfig.removeSetting(Grass7Utils.GRASS_LOG_CONSOLE)
ProcessingConfig.removeSetting(Grass7Utils.GRASS_HELP_PATH)
ProcessingConfig.removeSetting(Grass7Utils.GRASS_USE_VEXTERNAL)
def isActive(self):
return ProcessingConfig.getSetting('ACTIVATE_GRASS7')
def setActive(self, active):
ProcessingConfig.setSettingValue('ACTIVATE_GRASS7', active)
def createAlgsList(self):
algs = []
folder = Grass7Utils.grassDescriptionPath()
for descriptionFile in os.listdir(folder):
if descriptionFile.endswith('txt'):
try:
alg = Grass7Algorithm(os.path.join(folder, descriptionFile))
if alg.name().strip() != '':
algs.append(alg)
else:
QgsMessageLog.logMessage(self.tr('Could not open GRASS GIS 7 algorithm: {0}').format(descriptionFile), self.tr('Processing'), Qgis.Critical)
except Exception as e:
QgsMessageLog.logMessage(
self.tr('Could not open GRASS GIS 7 algorithm: {0}\n{1}').format(descriptionFile, str(e)), self.tr('Processing'), Qgis.Critical)
return algs
def loadAlgorithms(self):
self.algs = self.createAlgsList()
for a in self.algs:
self.addAlgorithm(a)
def name(self):
return 'GRASS'
def longName(self):
version = Grass7Utils.installedVersion()
return 'GRASS GIS ({})'.format(version) if version is not None else "GRASS GIS"
def id(self):
return 'grass7'
def helpId(self):
return 'grass7'
def icon(self):
return QgsApplication.getThemeIcon("/providerGrass.svg")
def svgIconPath(self):
return QgsApplication.iconPath("/providerGrass.svg")
def defaultVectorFileExtension(self, hasGeometry=True):
return 'gpkg'
def supportsNonFileBasedOutput(self):
"""
        The GRASS7 provider doesn't support non-file-based outputs
"""
return False
def supportedOutputVectorLayerExtensions(self):
        # We use the same extensions as QGIS because:
        # - QGIS uses OGR, just like GRASS
        # - the OGR version used by GRASS may well differ from the one
        #   used by QGIS
return QgsVectorFileWriter.supportedFormatExtensions()
def supportedOutputRasterLayerExtensions(self):
return Grass7Utils.getSupportedOutputRasterExtensions()
def canBeActivated(self):
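        # Activate only when the install check reports no problem
        # (checkGrass7IsInstalled() is assumed to return a truthy error
        # indicator when GRASS is missing).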
return not bool(Grass7Utils.checkGrass7IsInstalled())
def tr(self, string, context=''):
if context == '':
context = 'Grass7AlgorithmProvider'
return QCoreApplication.translate(context, string)
|
BaladiDogGames/baladidoggames.github.io | refs/heads/master | mingw/bin/lib/lib2to3/fixes/fix_operator.py | 326 | """Fixer for operator functions.
operator.isCallable(obj) -> hasattr(obj, '__call__')
operator.sequenceIncludes(obj) -> operator.contains(obj)
operator.isSequenceType(obj) -> isinstance(obj, collections.Sequence)
operator.isMappingType(obj) -> isinstance(obj, collections.Mapping)
operator.isNumberType(obj) -> isinstance(obj, numbers.Number)
operator.repeat(obj, n) -> operator.mul(obj, n)
operator.irepeat(obj, n) -> operator.imul(obj, n)
"""
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Call, Name, String, touch_import
def invocation(s):
def dec(f):
f.invocation = s
return f
return dec
class FixOperator(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
methods = """
method=('isCallable'|'sequenceIncludes'
|'isSequenceType'|'isMappingType'|'isNumberType'
|'repeat'|'irepeat')
"""
obj = "'(' obj=any ')'"
PATTERN = """
power< module='operator'
trailer< '.' %(methods)s > trailer< %(obj)s > >
|
power< %(methods)s trailer< %(obj)s > >
""" % dict(methods=methods, obj=obj)
def transform(self, node, results):
method = self._check_method(node, results)
if method is not None:
return method(node, results)
@invocation("operator.contains(%s)")
def _sequenceIncludes(self, node, results):
return self._handle_rename(node, results, u"contains")
@invocation("hasattr(%s, '__call__')")
def _isCallable(self, node, results):
obj = results["obj"]
args = [obj.clone(), String(u", "), String(u"'__call__'")]
return Call(Name(u"hasattr"), args, prefix=node.prefix)
@invocation("operator.mul(%s)")
def _repeat(self, node, results):
return self._handle_rename(node, results, u"mul")
@invocation("operator.imul(%s)")
def _irepeat(self, node, results):
return self._handle_rename(node, results, u"imul")
@invocation("isinstance(%s, collections.Sequence)")
def _isSequenceType(self, node, results):
return self._handle_type2abc(node, results, u"collections", u"Sequence")
@invocation("isinstance(%s, collections.Mapping)")
def _isMappingType(self, node, results):
return self._handle_type2abc(node, results, u"collections", u"Mapping")
@invocation("isinstance(%s, numbers.Number)")
def _isNumberType(self, node, results):
return self._handle_type2abc(node, results, u"numbers", u"Number")
def _handle_rename(self, node, results, name):
method = results["method"][0]
method.value = name
method.changed()
def _handle_type2abc(self, node, results, module, abc):
touch_import(None, module, node)
obj = results["obj"]
args = [obj.clone(), String(u", " + u".".join([module, abc]))]
return Call(Name(u"isinstance"), args, prefix=node.prefix)
def _check_method(self, node, results):
method = getattr(self, "_" + results["method"][0].value.encode("ascii"))
if callable(method):
if "module" in results:
return method
else:
sub = (unicode(results["obj"]),)
invocation_str = unicode(method.invocation) % sub
self.warning(node, u"You should use '%s' here." % invocation_str)
return None
|
ubiar/odoo | refs/heads/8.0 | addons/website_forum_doc/controllers/__init__.py | 4497 | # -*- coding: utf-8 -*-
import main
|